You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

861 lines
29KB

  1. /*
  2. * Copyright (c) 2011 Michael Niedermayer
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. *
  20. * Parts of this file have been stolen from mplayer
  21. */
  22. /**
  23. * @file
  24. */
  25. #include "avfilter.h"
  26. #include "video.h"
  27. #include "formats.h"
  28. #include "internal.h"
  29. #include "libavutil/avassert.h"
  30. #include "libavutil/pixdesc.h"
  31. #include "libavutil/intreadwrite.h"
  32. #include "libavutil/imgutils.h"
  33. #include "libavutil/opt.h"
  34. #include "libmpcodecs/vf.h"
  35. #include "libmpcodecs/img_format.h"
  36. #include "libmpcodecs/cpudetect.h"
  37. #include "libmpcodecs/av_helpers.h"
  38. #include "libmpcodecs/vf_scale.h"
  39. #include "libmpcodecs/libvo/fastmemcpy.h"
  40. #include "libswscale/swscale.h"
  41. //FIXME maybe link the orig in
  42. //XXX: identical pix_fmt must be following with each others
/**
 * Translation table between MPlayer IMGFMT_* codes and FFmpeg pixel formats.
 * Lookups scan linearly and stop at the {0, AV_PIX_FMT_NONE} sentinel.
 * Entries that map to the same pix_fmt are kept adjacent so duplicate
 * formats can be skipped by comparing against the previous entry.
 */
static const struct {
    int fmt;                    // MPlayer IMGFMT_* code
    enum AVPixelFormat pix_fmt; // corresponding FFmpeg pixel format
} conversion_map[] = {
    {IMGFMT_ARGB, AV_PIX_FMT_ARGB},
    {IMGFMT_BGRA, AV_PIX_FMT_BGRA},
    {IMGFMT_BGR24, AV_PIX_FMT_BGR24},
    {IMGFMT_BGR16BE, AV_PIX_FMT_RGB565BE},
    {IMGFMT_BGR16LE, AV_PIX_FMT_RGB565LE},
    {IMGFMT_BGR15BE, AV_PIX_FMT_RGB555BE},
    {IMGFMT_BGR15LE, AV_PIX_FMT_RGB555LE},
    {IMGFMT_BGR12BE, AV_PIX_FMT_RGB444BE},
    {IMGFMT_BGR12LE, AV_PIX_FMT_RGB444LE},
    {IMGFMT_BGR8, AV_PIX_FMT_RGB8},
    {IMGFMT_BGR4, AV_PIX_FMT_RGB4},
    {IMGFMT_BGR1, AV_PIX_FMT_MONOBLACK},
    {IMGFMT_RGB1, AV_PIX_FMT_MONOBLACK},
    {IMGFMT_RG4B, AV_PIX_FMT_BGR4_BYTE},
    {IMGFMT_BG4B, AV_PIX_FMT_RGB4_BYTE},
    {IMGFMT_RGB48LE, AV_PIX_FMT_RGB48LE},
    {IMGFMT_RGB48BE, AV_PIX_FMT_RGB48BE},
    {IMGFMT_ABGR, AV_PIX_FMT_ABGR},
    {IMGFMT_RGBA, AV_PIX_FMT_RGBA},
    {IMGFMT_RGB24, AV_PIX_FMT_RGB24},
    {IMGFMT_RGB16BE, AV_PIX_FMT_BGR565BE},
    {IMGFMT_RGB16LE, AV_PIX_FMT_BGR565LE},
    {IMGFMT_RGB15BE, AV_PIX_FMT_BGR555BE},
    {IMGFMT_RGB15LE, AV_PIX_FMT_BGR555LE},
    {IMGFMT_RGB12BE, AV_PIX_FMT_BGR444BE},
    {IMGFMT_RGB12LE, AV_PIX_FMT_BGR444LE},
    {IMGFMT_RGB8, AV_PIX_FMT_BGR8},
    {IMGFMT_RGB4, AV_PIX_FMT_BGR4},
    {IMGFMT_BGR8, AV_PIX_FMT_PAL8},
    {IMGFMT_YUY2, AV_PIX_FMT_YUYV422},
    {IMGFMT_UYVY, AV_PIX_FMT_UYVY422},
    {IMGFMT_NV12, AV_PIX_FMT_NV12},
    {IMGFMT_NV21, AV_PIX_FMT_NV21},
    {IMGFMT_Y800, AV_PIX_FMT_GRAY8},
    {IMGFMT_Y8, AV_PIX_FMT_GRAY8},
    {IMGFMT_YVU9, AV_PIX_FMT_YUV410P},
    {IMGFMT_IF09, AV_PIX_FMT_YUV410P},
    {IMGFMT_YV12, AV_PIX_FMT_YUV420P},
    {IMGFMT_I420, AV_PIX_FMT_YUV420P},
    {IMGFMT_IYUV, AV_PIX_FMT_YUV420P},
    {IMGFMT_411P, AV_PIX_FMT_YUV411P},
    {IMGFMT_422P, AV_PIX_FMT_YUV422P},
    {IMGFMT_444P, AV_PIX_FMT_YUV444P},
    {IMGFMT_440P, AV_PIX_FMT_YUV440P},
    {IMGFMT_420A, AV_PIX_FMT_YUVA420P},
    {IMGFMT_420P16_LE, AV_PIX_FMT_YUV420P16LE},
    {IMGFMT_420P16_BE, AV_PIX_FMT_YUV420P16BE},
    {IMGFMT_422P16_LE, AV_PIX_FMT_YUV422P16LE},
    {IMGFMT_422P16_BE, AV_PIX_FMT_YUV422P16BE},
    {IMGFMT_444P16_LE, AV_PIX_FMT_YUV444P16LE},
    {IMGFMT_444P16_BE, AV_PIX_FMT_YUV444P16BE},

    // YUVJ are YUV formats that use the full Y range and not just
    // 16 - 235 (see colorspaces.txt).
    // Currently they are all treated the same way.
    {IMGFMT_YV12, AV_PIX_FMT_YUVJ420P},
    {IMGFMT_422P, AV_PIX_FMT_YUVJ422P},
    {IMGFMT_444P, AV_PIX_FMT_YUVJ444P},
    {IMGFMT_440P, AV_PIX_FMT_YUVJ440P},

    {IMGFMT_XVMC_MOCO_MPEG2, AV_PIX_FMT_XVMC_MPEG2_MC},
    {IMGFMT_XVMC_IDCT_MPEG2, AV_PIX_FMT_XVMC_MPEG2_IDCT},
    {IMGFMT_VDPAU_MPEG1, AV_PIX_FMT_VDPAU_MPEG1},
    {IMGFMT_VDPAU_MPEG2, AV_PIX_FMT_VDPAU_MPEG2},
    {IMGFMT_VDPAU_H264, AV_PIX_FMT_VDPAU_H264},
    {IMGFMT_VDPAU_WMV3, AV_PIX_FMT_VDPAU_WMV3},
    {IMGFMT_VDPAU_VC1, AV_PIX_FMT_VDPAU_VC1},
    {IMGFMT_VDPAU_MPEG4, AV_PIX_FMT_VDPAU_MPEG4},
    {0, AV_PIX_FMT_NONE} // sentinel; lookup loops stop here
};
// Descriptors of the wrapped libmpcodecs filters; defined in libmpcodecs/vf_*.c.
extern const vf_info_t ff_vf_info_dint;
extern const vf_info_t ff_vf_info_eq2;
extern const vf_info_t ff_vf_info_eq;
extern const vf_info_t ff_vf_info_fil;
extern const vf_info_t ff_vf_info_fspp;
extern const vf_info_t ff_vf_info_ilpack;
extern const vf_info_t ff_vf_info_mcdeint;
extern const vf_info_t ff_vf_info_ow;
extern const vf_info_t ff_vf_info_perspective;
extern const vf_info_t ff_vf_info_phase;
extern const vf_info_t ff_vf_info_pp7;
extern const vf_info_t ff_vf_info_pullup;
extern const vf_info_t ff_vf_info_qp;
extern const vf_info_t ff_vf_info_sab;
extern const vf_info_t ff_vf_info_softpulldown;
extern const vf_info_t ff_vf_info_spp;
extern const vf_info_t ff_vf_info_tinterlace;
extern const vf_info_t ff_vf_info_uspp;

// NULL-terminated registry searched by name in init().
static const vf_info_t* const filters[]={
    &ff_vf_info_dint,
    &ff_vf_info_eq2,
    &ff_vf_info_eq,
    &ff_vf_info_fil,
    &ff_vf_info_fspp,
    &ff_vf_info_ilpack,
    &ff_vf_info_mcdeint,
    &ff_vf_info_ow,
    &ff_vf_info_perspective,
    &ff_vf_info_phase,
    &ff_vf_info_pp7,
    &ff_vf_info_pullup,
    &ff_vf_info_qp,
    &ff_vf_info_sab,
    &ff_vf_info_softpulldown,
    &ff_vf_info_spp,
    &ff_vf_info_tinterlace,
    &ff_vf_info_uspp,
    NULL
};
  154. /*
  155. Unsupported filters
  156. 1bpp
  157. ass
  158. bmovl
  159. crop
  160. dvbscale
  161. flip
  162. expand
  163. format
  164. halfpack
  165. lavc
  166. lavcdeint
  167. noformat
  168. pp
  169. scale
  170. tfields
  171. vo
  172. yadif
  173. zrmjpeg
  174. */
// Global CPU capability flags consumed by the wrapped libmpcodecs filters;
// populated from av_get_cpu_flags() in init().
CpuCaps ff_gCpuCaps; //FIXME initialize this so optims work
  176. enum AVPixelFormat ff_mp2ff_pix_fmt(int mp){
  177. int i;
  178. for(i=0; conversion_map[i].fmt && mp != conversion_map[i].fmt; i++)
  179. ;
  180. return mp == conversion_map[i].fmt ? conversion_map[i].pix_fmt : AV_PIX_FMT_NONE;
  181. }
  182. static void ff_sws_getFlagsAndFilterFromCmdLine(int *flags, SwsFilter **srcFilterParam, SwsFilter **dstFilterParam)
  183. {
  184. static int firstTime=1;
  185. *flags=0;
  186. #if ARCH_X86
  187. if(ff_gCpuCaps.hasMMX)
  188. __asm__ volatile("emms\n\t"::: "memory"); //FIXME this should not be required but it IS (even for non-MMX versions)
  189. #endif
  190. if(firstTime)
  191. {
  192. firstTime=0;
  193. *flags= SWS_PRINT_INFO;
  194. }
  195. else if( ff_mp_msg_test(MSGT_VFILTER,MSGL_DBG2) ) *flags= SWS_PRINT_INFO;
  196. switch(SWS_BILINEAR)
  197. {
  198. case 0: *flags|= SWS_FAST_BILINEAR; break;
  199. case 1: *flags|= SWS_BILINEAR; break;
  200. case 2: *flags|= SWS_BICUBIC; break;
  201. case 3: *flags|= SWS_X; break;
  202. case 4: *flags|= SWS_POINT; break;
  203. case 5: *flags|= SWS_AREA; break;
  204. case 6: *flags|= SWS_BICUBLIN; break;
  205. case 7: *flags|= SWS_GAUSS; break;
  206. case 8: *flags|= SWS_SINC; break;
  207. case 9: *flags|= SWS_LANCZOS; break;
  208. case 10:*flags|= SWS_SPLINE; break;
  209. default:*flags|= SWS_BILINEAR; break;
  210. }
  211. *srcFilterParam= NULL;
  212. *dstFilterParam= NULL;
  213. }
  214. //exact copy from vf_scale.c
  215. // will use sws_flags & src_filter (from cmd line)
  216. struct SwsContext *ff_sws_getContextFromCmdLine(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat)
  217. {
  218. int flags, i;
  219. SwsFilter *dstFilterParam, *srcFilterParam;
  220. enum AVPixelFormat dfmt, sfmt;
  221. for(i=0; conversion_map[i].fmt && dstFormat != conversion_map[i].fmt; i++);
  222. dfmt= conversion_map[i].pix_fmt;
  223. for(i=0; conversion_map[i].fmt && srcFormat != conversion_map[i].fmt; i++);
  224. sfmt= conversion_map[i].pix_fmt;
  225. if (srcFormat == IMGFMT_RGB8 || srcFormat == IMGFMT_BGR8) sfmt = AV_PIX_FMT_PAL8;
  226. ff_sws_getFlagsAndFilterFromCmdLine(&flags, &srcFilterParam, &dstFilterParam);
  227. return sws_getContext(srcW, srcH, sfmt, dstW, dstH, dfmt, flags , srcFilterParam, dstFilterParam, NULL);
  228. }
// Per-instance state of the "mp" wrapper filter.
typedef struct {
    const AVClass *class;    // must be first for AVOptions
    vf_instance_t vf;        // the wrapped libmpcodecs filter instance
    vf_instance_t next_vf;   // dummy downstream vf; holds the output size
    AVFilterContext *avfctx; // back pointer to the owning filter context
    int frame_returned;      // incremented when put_image forwards a frame
    char *filter;            // "name[=args]" option string
} MPContext;

#define OFFSET(x) offsetof(MPContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption mp_options[] = {
    { "filter", "set MPlayer filter name and parameters", OFFSET(filter), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(mp);
  244. void ff_mp_msg(int mod, int lev, const char *format, ... ){
  245. va_list va;
  246. va_start(va, format);
  247. //FIXME convert lev/mod
  248. av_vlog(NULL, AV_LOG_DEBUG, format, va);
  249. va_end(va);
  250. }
  251. int ff_mp_msg_test(int mod, int lev){
  252. return 123;
  253. }
// Stub for the libmpcodecs hook that initialized libavcodec in MPlayer;
// intentionally a no-op inside FFmpeg itself.
void ff_init_avcodec(void)
{
    //we maybe should init but its kinda 1. unneeded 2. a bit inpolite from here
}
//Exact copy of vf.c
// Copy per-frame attributes (picture type, field flags, qscale type) from
// src to dst; the qscale table/stride are shared only when both images have
// identical dimensions, since the table layout depends on them.
void ff_vf_clone_mpi_attributes(mp_image_t* dst, mp_image_t* src){
    dst->pict_type= src->pict_type;
    dst->fields = src->fields;
    dst->qscale_type= src->qscale_type;
    if(dst->width == src->width && dst->height == src->height){
        dst->qstride= src->qstride;
        dst->qscale= src->qscale;
    }
}
//Exact copy of vf.c
// Default draw_slice: delegate to the next filter if it implements
// draw_slice, otherwise copy the slice into the destination image the filter
// stored in vf->dmpi (packed: one plane; planar: luma plus two chroma planes
// scaled by the chroma shifts).
void ff_vf_next_draw_slice(struct vf_instance *vf,unsigned char** src, int * stride,int w, int h, int x, int y){
    if (vf->next->draw_slice) {
        vf->next->draw_slice(vf->next,src,stride,w,h,x,y);
        return;
    }
    if (!vf->dmpi) {
        // the filter forgot to store its destination image; nothing to copy into
        ff_mp_msg(MSGT_VFILTER,MSGL_ERR,"draw_slice: dmpi not stored by vf_%s\n", vf->info->name);
        return;
    }
    if (!(vf->dmpi->flags & MP_IMGFLAG_PLANAR)) {
        // packed format: single plane, x offset scaled by bytes per pixel
        memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+vf->dmpi->bpp/8*x,
            src[0], vf->dmpi->bpp/8*w, h, vf->dmpi->stride[0], stride[0]);
        return;
    }
    // planar format: luma at full resolution...
    memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+x, src[0],
        w, h, vf->dmpi->stride[0], stride[0]);
    // ...and both chroma planes with x/y/w/h scaled down by the chroma shifts
    memcpy_pic(vf->dmpi->planes[1]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[1]+(x>>vf->dmpi->chroma_x_shift),
        src[1], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[1], stride[1]);
    memcpy_pic(vf->dmpi->planes[2]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[2]+(x>>vf->dmpi->chroma_x_shift),
        src[2], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[2], stride[2]);
}
//Exact copy of vf.c
// Clear the rectangle (x0,y0,w,h) of an image to "black": planar YUV gets
// luma 0 / chroma 128, packed YUV gets an interleaved 0x80/0x00 pattern,
// everything else is zeroed.
void ff_vf_mpi_clear(mp_image_t* mpi,int x0,int y0,int w,int h){
    int y;
    if(mpi->flags&MP_IMGFLAG_PLANAR){
        // round the region to even rows so chroma rows stay aligned
        y0&=~1;h+=h&1;
        if(x0==0 && w==mpi->width){
            // full width clear:
            memset(mpi->planes[0]+mpi->stride[0]*y0,0,mpi->stride[0]*h);
            memset(mpi->planes[1]+mpi->stride[1]*(y0>>mpi->chroma_y_shift),128,mpi->stride[1]*(h>>mpi->chroma_y_shift));
            memset(mpi->planes[2]+mpi->stride[2]*(y0>>mpi->chroma_y_shift),128,mpi->stride[2]*(h>>mpi->chroma_y_shift));
        } else
        // partial width: clear two luma rows + one (subsampled) chroma row per step
        for(y=y0;y<y0+h;y+=2){
            memset(mpi->planes[0]+x0+mpi->stride[0]*y,0,w);
            memset(mpi->planes[0]+x0+mpi->stride[0]*(y+1),0,w);
            memset(mpi->planes[1]+(x0>>mpi->chroma_x_shift)+mpi->stride[1]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift));
            memset(mpi->planes[2]+(x0>>mpi->chroma_x_shift)+mpi->stride[2]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift));
        }
        return;
    }
    // packed:
    for(y=y0;y<y0+h;y++){
        unsigned char* dst=mpi->planes[0]+mpi->stride[0]*y+(mpi->bpp>>3)*x0;
        if(mpi->flags&MP_IMGFLAG_YUV){
            // packed YUV: fill with the neutral Y=0 / U=V=128 word pattern,
            // byte order depending on endianness and the SWAPPED flag
            unsigned int* p=(unsigned int*) dst;
            int size=(mpi->bpp>>3)*w/4;
            int i;
#if HAVE_BIGENDIAN
#define CLEAR_PACKEDYUV_PATTERN 0x00800080
#define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x80008000
#else
#define CLEAR_PACKEDYUV_PATTERN 0x80008000
#define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x00800080
#endif
            if(mpi->flags&MP_IMGFLAG_SWAPPED){
                // unrolled x4, then finish the remainder
                for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN_SWAPPED;
                for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN_SWAPPED;
            } else {
                for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN;
                for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN;
            }
        } else
            memset(dst,0,(mpi->bpp>>3)*w);
    }
}
// Default query_format for the dummy downstream vf: accept every format.
int ff_vf_next_query_format(struct vf_instance *vf, unsigned int fmt){
    return 1;
}
//used by delogo
// Colorspace negotiation stub: no matching against the candidate list is
// done, the caller's preferred format is simply returned.
unsigned int ff_vf_match_csp(vf_instance_t** vfp,const unsigned int* list,unsigned int preferred){
    return preferred;
}
/**
 * Allocate or reuse an mp_image_t buffer for the wrapped filter.
 *
 * Mirrors MPlayer's vf_get_image(): selects a cached image slot based on
 * mp_imgtype (EXPORT/STATIC/TEMP/IP/IPB/NUMBERED), (re)allocates plane
 * memory if needed and applies the stride/alignment requirements encoded in
 * mp_imgflag.  w/h may be -1 to mean "use the configured filter size".
 * Returns NULL if no suitable buffer can be produced.
 */
mp_image_t* ff_vf_get_image(vf_instance_t* vf, unsigned int outfmt, int mp_imgtype, int mp_imgflag, int w, int h){
    // recover the wrapper context: vf is always MPContext.next_vf here
    MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, next_vf));
    mp_image_t* mpi=NULL;
    int w2;
    int number = mp_imgtype >> 16; // high 16 bits select a NUMBERED slot

    av_assert0(vf->next == NULL); // all existing filters call this just on next

    //vf_dint needs these as it calls ff_vf_get_image() before configuring the output
    if(vf->w==0 && w>0) vf->w=w;
    if(vf->h==0 && h>0) vf->h=h;

    av_assert0(w == -1 || w >= vf->w);
    av_assert0(h == -1 || h >= vf->h);
    av_assert0(vf->w > 0);
    av_assert0(vf->h > 0);

    av_log(m->avfctx, AV_LOG_DEBUG, "get_image: %d:%d, vf: %d:%d\n", w,h,vf->w,vf->h);

    if (w == -1) w = vf->w;
    if (h == -1) h = vf->h;

    // round the width up to a multiple of 16 if the caller accepts it
    w2=(mp_imgflag&MP_IMGFLAG_ACCEPT_ALIGNED_STRIDE)?((w+15)&(~15)):w;

    // Note: we should call libvo first to check if it supports direct rendering
    // and if not, then fallback to software buffers:
    switch(mp_imgtype & 0xff){
    case MP_IMGTYPE_EXPORT:
        if(!vf->imgctx.export_images[0]) vf->imgctx.export_images[0]=ff_new_mp_image(w2,h);
        mpi=vf->imgctx.export_images[0];
        break;
    case MP_IMGTYPE_STATIC:
        if(!vf->imgctx.static_images[0]) vf->imgctx.static_images[0]=ff_new_mp_image(w2,h);
        mpi=vf->imgctx.static_images[0];
        break;
    case MP_IMGTYPE_TEMP:
        if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=ff_new_mp_image(w2,h);
        mpi=vf->imgctx.temp_images[0];
        break;
    case MP_IMGTYPE_IPB:
        if(!(mp_imgflag&MP_IMGFLAG_READABLE)){ // B frame:
            if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=ff_new_mp_image(w2,h);
            mpi=vf->imgctx.temp_images[0];
            break;
        }
        // fall through: readable IPB frames use the IP double-buffer below
    case MP_IMGTYPE_IP:
        // two static slots alternated via static_idx (I/P double buffering)
        if(!vf->imgctx.static_images[vf->imgctx.static_idx]) vf->imgctx.static_images[vf->imgctx.static_idx]=ff_new_mp_image(w2,h);
        mpi=vf->imgctx.static_images[vf->imgctx.static_idx];
        vf->imgctx.static_idx^=1;
        break;
    case MP_IMGTYPE_NUMBERED:
        if (number == -1) {
            // pick the first free (or unused) numbered slot
            int i;
            for (i = 0; i < NUM_NUMBERED_MPI; i++)
                if (!vf->imgctx.numbered_images[i] || !vf->imgctx.numbered_images[i]->usage_count)
                    break;
            number = i;
        }
        if (number < 0 || number >= NUM_NUMBERED_MPI) return NULL;
        if (!vf->imgctx.numbered_images[number]) vf->imgctx.numbered_images[number] = ff_new_mp_image(w2,h);
        mpi = vf->imgctx.numbered_images[number];
        mpi->number = number;
        break;
    }
    if(mpi){
        mpi->type=mp_imgtype;
        mpi->w=vf->w; mpi->h=vf->h;
        // keep buffer allocation status & color flags only:
        //    mpi->flags&=~(MP_IMGFLAG_PRESERVE|MP_IMGFLAG_READABLE|MP_IMGFLAG_DIRECT);
        mpi->flags&=MP_IMGFLAG_ALLOCATED|MP_IMGFLAG_TYPE_DISPLAYED|MP_IMGFLAGMASK_COLORS;
        // accept restrictions, draw_slice and palette flags only:
        mpi->flags|=mp_imgflag&(MP_IMGFLAGMASK_RESTRICTIONS|MP_IMGFLAG_DRAW_CALLBACK|MP_IMGFLAG_RGB_PALETTE);
        if(!vf->draw_slice) mpi->flags&=~MP_IMGFLAG_DRAW_CALLBACK;
        if(mpi->width!=w2 || mpi->height!=h){
            // printf("vf.c: MPI parameters changed! %dx%d -> %dx%d \n", mpi->width,mpi->height,w2,h);
            if(mpi->flags&MP_IMGFLAG_ALLOCATED){
                if(mpi->width<w2 || mpi->height<h){
                    // need to re-allocate buffer memory:
                    av_free(mpi->planes[0]);
                    mpi->flags&=~MP_IMGFLAG_ALLOCATED;
                    ff_mp_msg(MSGT_VFILTER,MSGL_V,"vf.c: have to REALLOCATE buffer memory :(\n");
                }
            // NOTE(review): the "} {" below is intentional — the original
            // "else" was disabled so the dimensions are updated unconditionally.
            // } else {
            } {
                mpi->width=w2; mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
                mpi->height=h; mpi->chroma_height=(h + (1<<mpi->chroma_y_shift) - 1)>>mpi->chroma_y_shift;
            }
        }
        if(!mpi->bpp) ff_mp_image_setfmt(mpi,outfmt);
        if(!(mpi->flags&MP_IMGFLAG_ALLOCATED) && mpi->type>MP_IMGTYPE_EXPORT){
            // direct rendering is not supported in this wrapper, hence the
            // assert; the call below is kept from the original but is dead
            av_assert0(!vf->get_image);
            // check libvo first!
            if(vf->get_image) vf->get_image(vf,mpi);
            if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
                // non-direct and not yet allocated image. allocate it!
                if (!mpi->bpp) { // no way we can allocate this
                    ff_mp_msg(MSGT_DECVIDEO, MSGL_FATAL,
                        "ff_vf_get_image: Tried to allocate a format that can not be allocated!\n");
                    return NULL;
                }
                // check if codec prefer aligned stride:
                if(mp_imgflag&MP_IMGFLAG_PREFER_ALIGNED_STRIDE){
                    int align=(mpi->flags&MP_IMGFLAG_PLANAR &&
                        mpi->flags&MP_IMGFLAG_YUV) ?
                        (8<<mpi->chroma_x_shift)-1 : 15; // -- maybe FIXME
                    w2=((w+align)&(~align));
                    if(mpi->width!=w2){
#if 0
                        // we have to change width... check if we CAN co it:
                        int flags=vf->query_format(vf,outfmt); // should not fail
                        if(!(flags&3)) ff_mp_msg(MSGT_DECVIDEO,MSGL_WARN,"??? ff_vf_get_image{vf->query_format(outfmt)} failed!\n");
                        // printf("query -> 0x%X \n",flags);
                        if(flags&VFCAP_ACCEPT_STRIDE){
#endif
                        mpi->width=w2;
                        mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
                        // }
                    }
                }
                ff_mp_image_alloc_planes(mpi);
                // printf("clearing img!\n");
                ff_vf_mpi_clear(mpi,0,0,mpi->width,mpi->height);
            }
        }
        av_assert0(!vf->start_slice);
        if(mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)
            if(vf->start_slice) vf->start_slice(vf,mpi); // dead: see assert above
        if(!(mpi->flags&MP_IMGFLAG_TYPE_DISPLAYED)){
            // one-time debug dump of the buffer parameters for this slot
            ff_mp_msg(MSGT_DECVIDEO,MSGL_V,"*** [%s] %s%s mp_image_t, %dx%dx%dbpp %s %s, %d bytes\n",
                "NULL"/*vf->info->name*/,
                (mpi->type==MP_IMGTYPE_EXPORT)?"Exporting":
                ((mpi->flags&MP_IMGFLAG_DIRECT)?"Direct Rendering":"Allocating"),
                (mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)?" (slices)":"",
                mpi->width,mpi->height,mpi->bpp,
                (mpi->flags&MP_IMGFLAG_YUV)?"YUV":((mpi->flags&MP_IMGFLAG_SWAPPED)?"BGR":"RGB"),
                (mpi->flags&MP_IMGFLAG_PLANAR)?"planar":"packed",
                mpi->bpp*mpi->width*mpi->height/8);
            ff_mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"(imgfmt: %x, planes: %p,%p,%p strides: %d,%d,%d, chroma: %dx%d, shift: h:%d,v:%d)\n",
                mpi->imgfmt, mpi->planes[0], mpi->planes[1], mpi->planes[2],
                mpi->stride[0], mpi->stride[1], mpi->stride[2],
                mpi->chroma_width, mpi->chroma_height, mpi->chroma_x_shift, mpi->chroma_y_shift);
            mpi->flags|=MP_IMGFLAG_TYPE_DISPLAYED;
        }
        mpi->qscale = NULL;
        mpi->usage_count++;
    }
    // printf("\rVF_MPI: %p %p %p %d %d %d \n",
    // mpi->planes[0],mpi->planes[1],mpi->planes[2],
    // mpi->stride[0],mpi->stride[1],mpi->stride[2]);
    return mpi;
}
// No-op free callback; not referenced anywhere in this file (presumably kept
// from an earlier buffer-wrapping approach — TODO confirm before removing).
static void dummy_free(void *opaque, uint8_t *data){}
/**
 * Receive an output image from the wrapped filter and push it into the
 * libavfilter graph as an AVFrame.
 *
 * The mpi planes are wrapped (not copied) into a temporary AVFrame which is
 * then cloned, since mp-owned buffers cannot be handed to libavfilter.
 * Returns 1 when a frame was forwarded, 0 on allocation failure.
 */
int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts){
    // here vf is MPContext.vf, so subtract that member's offset
    MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
    AVFilterLink *outlink = m->avfctx->outputs[0];
    AVFrame *picref = av_frame_alloc();
    int i;

    av_assert0(vf->next);

    av_log(m->avfctx, AV_LOG_DEBUG, "ff_vf_next_put_image\n");

    if (!picref)
        goto fail;

    picref->width = mpi->w;
    picref->height = mpi->h;

    picref->type = AVMEDIA_TYPE_VIDEO;

    // translate the MPlayer imgfmt back to an AVPixelFormat
    for(i=0; conversion_map[i].fmt && mpi->imgfmt != conversion_map[i].fmt; i++);
    picref->format = conversion_map[i].pix_fmt;

    memcpy(picref->linesize, mpi->stride, FFMIN(sizeof(picref->linesize), sizeof(mpi->stride)));
    for(i=0; i<4 && mpi->stride[i]; i++){
        picref->data[i] = mpi->planes[i];
    }

    if(pts != MP_NOPTS_VALUE)
        // inverse of the conversion done in filter_frame(); pts is stored in
        // units consistent with that round-trip
        picref->pts= pts * av_q2d(outlink->time_base);

    if(1) { // mp buffers are currently unsupported in libavfilter, we thus must copy
        AVFrame *tofree = picref;
        picref = av_frame_clone(picref);
        av_frame_free(&tofree);
    }

    ff_filter_frame(outlink, picref);
    m->frame_returned++; // tells request_frame() that output was produced

    return 1;
fail:
    av_frame_free(&picref);
    return 0;
}
/**
 * Default config for the dummy downstream vf: record the negotiated output
 * size on vf->next.  config_outprops() later copies it to the output link.
 * Returns 1 on success (the MPlayer convention).
 */
int ff_vf_next_config(struct vf_instance *vf,
    int width, int height, int d_width, int d_height,
    unsigned int voflags, unsigned int outfmt){

    av_assert0(width>0 && height>0);
    vf->next->w = width; vf->next->h = height;

    return 1;
// The original MPlayer colorspace/stride negotiation is intentionally
// disabled; it relied on vf_open_filter()/"expand" which are not wrapped.
#if 0
    int flags=vf->next->query_format(vf->next,outfmt);
    if(!flags){
        // hmm. colorspace mismatch!!!
        //this is fatal for us ATM
        return 0;
    }
    ff_mp_msg(MSGT_VFILTER,MSGL_V,"REQ: flags=0x%X req=0x%X \n",flags,vf->default_reqs);
    miss=vf->default_reqs - (flags&vf->default_reqs);
    if(miss&VFCAP_ACCEPT_STRIDE){
        // vf requires stride support but vf->next doesn't support it!
        // let's insert the 'expand' filter, it does the job for us:
        vf_instance_t* vf2=vf_open_filter(vf->next,"expand",NULL);
        if(!vf2) return 0; // shouldn't happen!
        vf->next=vf2;
    }
    vf->next->w = width; vf->next->h = height;
    return 1;
#endif
}
// Default control handler for the dummy downstream vf: log the request and
// report it as unhandled (0).
int ff_vf_next_control(struct vf_instance *vf, int request, void* data){
    MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
    av_log(m->avfctx, AV_LOG_DEBUG, "Received control %d\n", request);
    return 0;
}
  549. static int vf_default_query_format(struct vf_instance *vf, unsigned int fmt){
  550. MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
  551. int i;
  552. av_log(m->avfctx, AV_LOG_DEBUG, "query %X\n", fmt);
  553. for(i=0; conversion_map[i].fmt; i++){
  554. if(fmt==conversion_map[i].fmt)
  555. return 1; //we suport all
  556. }
  557. return 0;
  558. }
/**
 * Filter init: fill the global CPU caps, parse the "filter" option string
 * ("name[=args]"), look the name up in the wrapped-filter registry and call
 * its vf_open().  Returns 0 on success, a negative error code on failure.
 */
static av_cold int init(AVFilterContext *ctx)
{
    MPContext *m = ctx->priv;
    int cpu_flags = av_get_cpu_flags();
    char name[256];
    const char *args;
    int i;

    // export the host CPU features to the libmpcodecs code
    ff_gCpuCaps.hasMMX = cpu_flags & AV_CPU_FLAG_MMX;
    ff_gCpuCaps.hasMMX2 = cpu_flags & AV_CPU_FLAG_MMX2;
    ff_gCpuCaps.hasSSE = cpu_flags & AV_CPU_FLAG_SSE;
    ff_gCpuCaps.hasSSE2 = cpu_flags & AV_CPU_FLAG_SSE2;
    ff_gCpuCaps.hasSSE3 = cpu_flags & AV_CPU_FLAG_SSE3;
    ff_gCpuCaps.hasSSSE3 = cpu_flags & AV_CPU_FLAG_SSSE3;
    ff_gCpuCaps.hasSSE4 = cpu_flags & AV_CPU_FLAG_SSE4;
    ff_gCpuCaps.hasSSE42 = cpu_flags & AV_CPU_FLAG_SSE42;
    ff_gCpuCaps.hasAVX = cpu_flags & AV_CPU_FLAG_AVX;
    ff_gCpuCaps.has3DNow = cpu_flags & AV_CPU_FLAG_3DNOW;
    ff_gCpuCaps.has3DNowExt = cpu_flags & AV_CPU_FLAG_3DNOWEXT;

    m->avfctx= ctx;

    // split "name[=args]": name is everything before ':' or '='
    args = m->filter;
    if(!args || 1!=sscanf(args, "%255[^:=]", name)){
        av_log(ctx, AV_LOG_ERROR, "Invalid parameter.\n");
        return AVERROR(EINVAL);
    }
    args += strlen(name);
    if (args[0] == '=')
        args++;

    // find the descriptor with a matching name in the NULL-terminated registry
    for(i=0; ;i++){
        if(!filters[i] || !strcmp(name, filters[i]->name))
            break;
    }

    if(!filters[i]){
        av_log(ctx, AV_LOG_ERROR, "Unknown filter %s\n", name);
        return AVERROR(EINVAL);
    }

    av_log(ctx, AV_LOG_WARNING,
           "'%s' is a wrapped MPlayer filter (libmpcodecs). This filter may be removed\n"
           "once it has been ported to a native libavfilter.\n", name);

    // wire the wrapped instance to this file's default callbacks
    memset(&m->vf,0,sizeof(m->vf));
    m->vf.info= filters[i];

    m->vf.next = &m->next_vf;
    m->vf.put_image = ff_vf_next_put_image;
    m->vf.config = ff_vf_next_config;
    m->vf.query_format= vf_default_query_format;
    m->vf.control = ff_vf_next_control;
    m->vf.default_caps=VFCAP_ACCEPT_STRIDE;
    m->vf.default_reqs=0;
    if(m->vf.info->opts)
        av_log(ctx, AV_LOG_ERROR, "opts / m_struct_set is unsupported\n");
// MPlayer's m_struct-based option handling is not wrapped; filters taking
// structured options fall through to the plain string arg below.
#if 0
    if(vf->info->opts) { // vf_vo get some special argument
        const m_struct_t* st = vf->info->opts;
        void* vf_priv = m_struct_alloc(st);
        int n;
        for(n = 0 ; args && args[2*n] ; n++)
            m_struct_set(st,vf_priv,args[2*n],args[2*n+1]);
        vf->priv = vf_priv;
        args = NULL;
    } else // Otherwise we should have the '_oldargs_'
    if(args && !strcmp(args[0],"_oldargs_"))
        args = (char**)args[1];
    else
        args = NULL;
#endif

    // vf_open() returns <=0 on failure (MPlayer convention)
    if(m->vf.info->vf_open(&m->vf, (char*)args)<=0){
        av_log(ctx, AV_LOG_ERROR, "vf_open() of %s with arg=%s failed\n", name, args);
        return -1;
    }

    return 0;
}
// Filter uninit: walk the vf chain (wrapped filter + dummy next_vf), let each
// filter clean up, then release the cached image slots it may have allocated
// via ff_vf_get_image().  ff_free_mp_image(NULL) is safe on empty slots.
static av_cold void uninit(AVFilterContext *ctx)
{
    MPContext *m = ctx->priv;
    vf_instance_t *vf = &m->vf;

    while(vf){
        vf_instance_t *next = vf->next;
        if(vf->uninit)
            vf->uninit(vf);
        ff_free_mp_image(vf->imgctx.static_images[0]);
        ff_free_mp_image(vf->imgctx.static_images[1]);
        ff_free_mp_image(vf->imgctx.temp_images[0]);
        ff_free_mp_image(vf->imgctx.export_images[0]);
        vf = next;
    }
}
// Build the supported format list by probing the wrapped filter with every
// IMGFMT in conversion_map and collecting the AVPixelFormat counterparts.
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *avfmts=NULL;
    MPContext *m = ctx->priv;
    enum AVPixelFormat lastpixfmt = AV_PIX_FMT_NONE;
    int i;

    for(i=0; conversion_map[i].fmt; i++){
        av_log(ctx, AV_LOG_DEBUG, "query: %X\n", conversion_map[i].fmt);
        if(m->vf.query_format(&m->vf, conversion_map[i].fmt)){
            av_log(ctx, AV_LOG_DEBUG, "supported,adding\n");
            // identical pix_fmts are adjacent in conversion_map, so comparing
            // with the previous entry suffices to skip duplicates
            if (conversion_map[i].pix_fmt != lastpixfmt) {
                ff_add_format(&avfmts, conversion_map[i].pix_fmt);
                lastpixfmt = conversion_map[i].pix_fmt;
            }
        }
    }

    if (!avfmts)
        return -1;

    //We assume all allowed input formats are also allowed output formats
    ff_set_common_formats(ctx, avfmts);
    return 0;
}
// Input link configured: translate the negotiated pixel format back to its
// IMGFMT code and run the wrapped filter's config() with the input size.
static int config_inprops(AVFilterLink *inlink)
{
    MPContext *m = inlink->dst->priv;
    int i;
    // reverse lookup: pix_fmt -> IMGFMT (must succeed; query_formats only
    // advertised mapped formats)
    for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++);

    av_assert0(conversion_map[i].fmt && inlink->w && inlink->h);

    m->vf.fmt.have_configured = 1;
    m->vf.fmt.orig_height = inlink->h;
    m->vf.fmt.orig_width = inlink->w;
    m->vf.fmt.orig_fmt = conversion_map[i].fmt;

    // config() returns <=0 on failure (MPlayer convention)
    if(m->vf.config(&m->vf, inlink->w, inlink->h, inlink->w, inlink->h, 0, conversion_map[i].fmt)<=0)
        return -1;

    return 0;
}
// Output link configured: publish the size the wrapped filter chain stored
// on the dummy downstream vf (see ff_vf_next_config()).
static int config_outprops(AVFilterLink *outlink)
{
    MPContext *m = outlink->src->priv;

    outlink->w = m->next_vf.w;
    outlink->h = m->next_vf.h;

    return 0;
}
// Pull input frames until the wrapped filter emits at least one output frame
// (put_image sets frame_returned) or the upstream request fails/EOFs.
static int request_frame(AVFilterLink *outlink)
{
    MPContext *m = outlink->src->priv;
    int ret;

    av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame\n");

    // loop body runs at least once, so ret is always initialized
    for(m->frame_returned=0; !m->frame_returned;){
        ret=ff_request_frame(outlink->src->inputs[0]);
        if(ret<0)
            break;
    }

    av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame ret=%d\n", ret);
    return ret;
}
  700. static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
  701. {
  702. MPContext *m = inlink->dst->priv;
  703. int i;
  704. double pts= MP_NOPTS_VALUE;
  705. mp_image_t* mpi = ff_new_mp_image(inpic->width, inpic->height);
  706. if(inpic->pts != AV_NOPTS_VALUE)
  707. pts= inpic->pts / av_q2d(inlink->time_base);
  708. for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++);
  709. ff_mp_image_setfmt(mpi,conversion_map[i].fmt);
  710. memcpy(mpi->planes, inpic->data, FFMIN(sizeof(inpic->data) , sizeof(mpi->planes)));
  711. memcpy(mpi->stride, inpic->linesize, FFMIN(sizeof(inpic->linesize), sizeof(mpi->stride)));
  712. //FIXME pass interleced & tff flags around
  713. // mpi->flags|=MP_IMGFLAG_ALLOCATED; ?
  714. mpi->flags |= MP_IMGFLAG_READABLE;
  715. if(!av_frame_is_writable(inpic))
  716. mpi->flags |= MP_IMGFLAG_PRESERVE;
  717. if(m->vf.put_image(&m->vf, mpi, pts) == 0){
  718. av_log(m->avfctx, AV_LOG_DEBUG, "put_image() says skip\n");
  719. }else{
  720. av_frame_free(&inpic);
  721. }
  722. ff_free_mp_image(mpi);
  723. return 0;
  724. }
// Input pad: receives frames (filter_frame) and configures the wrapped
// filter from the negotiated input properties (config_inprops).
static const AVFilterPad mp_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_inprops,
    },
    { NULL }
};

// Output pad: pulls input on demand (request_frame) and exposes the output
// size chosen by the wrapped filter (config_outprops).
static const AVFilterPad mp_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .config_props = config_outprops,
    },
    { NULL }
};
// Filter definition: wraps a libmpcodecs video filter selected via the
// "filter" option ("name[=args]").
AVFilter avfilter_vf_mp = {
    .name = "mp",
    .description = NULL_IF_CONFIG_SMALL("Apply a libmpcodecs filter to the input video."),
    .init = init,
    .uninit = uninit,
    .priv_size = sizeof(MPContext),
    .query_formats = query_formats,
    .inputs = mp_inputs,
    .outputs = mp_outputs,
    .priv_class = &mp_class,
};