/*
 * Copyright (c) 2017 Thomas Mundt <tmundt75@gmail.com>
 * Copyright (c) 2011 Stefano Sabatini
 * Copyright (c) 2010 Baptiste Coudurier
 * Copyright (c) 2003 Michael Zucchi <notzed@ximian.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * temporal field interlace filter, ported from MPlayer/libmpcodecs
 */

#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavutil/avassert.h"
#include "avfilter.h"
#include "internal.h"
#include "tinterlace.h"

#define OFFSET(x) offsetof(TInterlaceContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
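
/* The "mode" option selects the interlacing method; the named constants that
 * follow are aliases for its values. The "flags" option enables the optional
 * vertical low-pass filters and exact time base handling. */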
static const AVOption tinterlace_options[] = {
    {"mode",              "select interlace mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_MERGE}, 0, MODE_NB-1, FLAGS, "mode"},
    {"merge",             "merge fields",                                 0, AV_OPT_TYPE_CONST, {.i64=MODE_MERGE},             INT_MIN, INT_MAX, FLAGS, "mode"},
    {"drop_even",         "drop even fields",                             0, AV_OPT_TYPE_CONST, {.i64=MODE_DROP_EVEN},         INT_MIN, INT_MAX, FLAGS, "mode"},
    {"drop_odd",          "drop odd fields",                              0, AV_OPT_TYPE_CONST, {.i64=MODE_DROP_ODD},          INT_MIN, INT_MAX, FLAGS, "mode"},
    {"pad",               "pad alternate lines with black",               0, AV_OPT_TYPE_CONST, {.i64=MODE_PAD},               INT_MIN, INT_MAX, FLAGS, "mode"},
    {"interleave_top",    "interleave top and bottom fields",             0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE_TOP},    INT_MIN, INT_MAX, FLAGS, "mode"},
    {"interleave_bottom", "interleave bottom and top fields",             0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "mode"},
    {"interlacex2",       "interlace fields from two consecutive frames", 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLACEX2},       INT_MIN, INT_MAX, FLAGS, "mode"},
    {"mergex2",           "merge fields keeping same frame rate",         0, AV_OPT_TYPE_CONST, {.i64=MODE_MERGEX2},           INT_MIN, INT_MAX, FLAGS, "mode"},

    {"flags",             "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, 0, INT_MAX, 0, "flags" },
    {"low_pass_filter",   "enable vertical low-pass filter",              0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_VLPF},  INT_MIN, INT_MAX, FLAGS, "flags" },
    {"vlpf",              "enable vertical low-pass filter",              0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_VLPF},  INT_MIN, INT_MAX, FLAGS, "flags" },
    {"complex_filter",    "enable complex vertical low-pass filter",      0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_CVLPF}, INT_MIN, INT_MAX, FLAGS, "flags" },
    {"cvlpf",             "enable complex vertical low-pass filter",      0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_CVLPF}, INT_MIN, INT_MAX, FLAGS, "flags" },
    {"exact_tb",          "force a timebase which can represent timestamps exactly", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_EXACT_TB}, INT_MIN, INT_MAX, FLAGS, "flags" },

    {NULL}
};

AVFILTER_DEFINE_CLASS(tinterlace);

#define FULL_SCALE_YUVJ_FORMATS \
    AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P

static const enum AVPixelFormat full_scale_yuvj_pix_fmts[] = {
    FULL_SCALE_YUVJ_FORMATS, AV_PIX_FMT_NONE
};
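
/* Time bases the output link is allowed to keep. If the computed output time
 * base matches none of these, or the exact_tb flag is set, config_out_props()
 * falls back to the pre-output time base. */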
static const AVRational standard_tbs[] = {
    {1, 25},
    {1, 30},
    {1001, 30000},
};

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV440P,  AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
        AV_PIX_FMT_GRAY8, FULL_SCALE_YUVJ_FORMATS,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}
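
/* Simple vertical low-pass filter: each output pixel averages the current
 * line with the lines above (srcp + mref) and below (srcp + pref) using
 * weights 1/4, 1/2, 1/4. */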
static void lowpass_line_c(uint8_t *dstp, ptrdiff_t width, const uint8_t *srcp,
                           ptrdiff_t mref, ptrdiff_t pref)
{
    const uint8_t *srcp_above = srcp + mref;
    const uint8_t *srcp_below = srcp + pref;
    int i;
    for (i = 0; i < width; i++) {
        // this calculation is an integer representation of
        // '0.5 * current + 0.25 * above + 0.25 * below'
        // '1 +' is for rounding.
        dstp[i] = (1 + srcp[i] + srcp[i] + srcp_above[i] + srcp_below[i]) >> 2;
    }
}
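
/* Complex (5-tap) vertical low-pass filter: weights the two lines above, the
 * current line and the two lines below with (-1, 2, 6, 2, -1) / 8 and clips
 * the result to the 8-bit range. */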
static void lowpass_line_complex_c(uint8_t *dstp, ptrdiff_t width, const uint8_t *srcp,
                                   ptrdiff_t mref, ptrdiff_t pref)
{
    const uint8_t *srcp_above  = srcp + mref;
    const uint8_t *srcp_below  = srcp + pref;
    const uint8_t *srcp_above2 = srcp + mref * 2;
    const uint8_t *srcp_below2 = srcp + pref * 2;
    int i;
    for (i = 0; i < width; i++) {
        // this calculation is an integer representation of
        // '0.75 * current + 0.25 * above + 0.25 * below - 0.125 * above2 - 0.125 * below2'
        // '4 +' is for rounding.
        dstp[i] = av_clip_uint8((4 + (srcp[i] << 2)
                  + ((srcp[i] + srcp_above[i] + srcp_below[i]) << 1)
                  - srcp_above2[i] - srcp_below2[i]) >> 3);
    }
}
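
/* Release the buffered frames and the black padding picture. */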
static av_cold void uninit(AVFilterContext *ctx)
{
    TInterlaceContext *tinterlace = ctx->priv;

    av_frame_free(&tinterlace->cur );
    av_frame_free(&tinterlace->next);
    av_freep(&tinterlace->black_data[0]);
}
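
/* Configure the output link: set the output dimensions, sample aspect ratio,
 * frame rate and time base according to the selected mode, allocate the black
 * picture used by the pad mode, and pick the low-pass filtering routine. */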
static int config_out_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    TInterlaceContext *tinterlace = ctx->priv;
    int i;

    tinterlace->vsub = desc->log2_chroma_h;
    outlink->w = inlink->w;
    outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD || tinterlace->mode == MODE_MERGEX2?
        inlink->h*2 : inlink->h;
    if (tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD || tinterlace->mode == MODE_MERGEX2)
        outlink->sample_aspect_ratio = av_mul_q(inlink->sample_aspect_ratio,
                                                av_make_q(2, 1));

    if (tinterlace->mode == MODE_PAD) {
        uint8_t black[4] = { 16, 128, 128, 16 };
        int i, ret;
        if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts))
            black[0] = black[3] = 0;
        ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize,
                             outlink->w, outlink->h, outlink->format, 16);
        if (ret < 0)
            return ret;

        /* fill black picture with black */
        for (i = 0; i < 4 && tinterlace->black_data[i]; i++) {
            int h = i == 1 || i == 2 ? AV_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h) : outlink->h;
            memset(tinterlace->black_data[i], black[i],
                   tinterlace->black_linesize[i] * h);
        }
    }
    if ((tinterlace->flags & TINTERLACE_FLAG_VLPF
            || tinterlace->flags & TINTERLACE_FLAG_CVLPF)
            && !(tinterlace->mode == MODE_INTERLEAVE_TOP
              || tinterlace->mode == MODE_INTERLEAVE_BOTTOM)) {
        av_log(ctx, AV_LOG_WARNING, "low_pass_filter flags ignored with mode %d\n",
               tinterlace->mode);
        tinterlace->flags &= ~TINTERLACE_FLAG_VLPF;
        tinterlace->flags &= ~TINTERLACE_FLAG_CVLPF;
    }
    tinterlace->preout_time_base = inlink->time_base;
    if (tinterlace->mode == MODE_INTERLACEX2) {
        tinterlace->preout_time_base.den *= 2;
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){2,1});
        outlink->time_base  = av_mul_q(inlink->time_base , (AVRational){1,2});
    } else if (tinterlace->mode == MODE_MERGEX2) {
        outlink->frame_rate = inlink->frame_rate;
        outlink->time_base  = inlink->time_base;
    } else if (tinterlace->mode != MODE_PAD) {
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){1,2});
        outlink->time_base  = av_mul_q(inlink->time_base , (AVRational){2,1});
    }

    for (i = 0; i<FF_ARRAY_ELEMS(standard_tbs); i++){
        if (!av_cmp_q(standard_tbs[i], outlink->time_base))
            break;
    }
    if (i == FF_ARRAY_ELEMS(standard_tbs) ||
        (tinterlace->flags & TINTERLACE_FLAG_EXACT_TB))
        outlink->time_base = tinterlace->preout_time_base;

    if (tinterlace->flags & TINTERLACE_FLAG_CVLPF) {
        tinterlace->lowpass_line = lowpass_line_complex_c;
        if (ARCH_X86)
            ff_tinterlace_init_x86(tinterlace);
    } else if (tinterlace->flags & TINTERLACE_FLAG_VLPF) {
        tinterlace->lowpass_line = lowpass_line_c;
        if (ARCH_X86)
            ff_tinterlace_init_x86(tinterlace);
    }

    av_log(ctx, AV_LOG_VERBOSE, "mode:%d filter:%s h:%d -> h:%d\n", tinterlace->mode,
           (tinterlace->flags & TINTERLACE_FLAG_CVLPF) ? "complex" :
           (tinterlace->flags & TINTERLACE_FLAG_VLPF) ? "linear" : "off",
           inlink->h, outlink->h);

    return 0;
}

#define FIELD_UPPER           0
#define FIELD_LOWER           1
#define FIELD_UPPER_AND_LOWER 2

/**
 * Copy picture field from src to dst.
 *
 * @param src_field copy from upper, lower field or both
 * @param interleave leave a padding line between each copied line
 * @param dst_field copy to upper or lower field,
 *        only meaningful when interleave is selected
 * @param flags context flags
 */
static inline
void copy_picture_field(TInterlaceContext *tinterlace,
                        uint8_t *dst[4], int dst_linesize[4],
                        const uint8_t *src[4], int src_linesize[4],
                        enum AVPixelFormat format, int w, int src_h,
                        int src_field, int interleave, int dst_field,
                        int flags)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
    int hsub = desc->log2_chroma_w;
    int plane, vsub = desc->log2_chroma_h;
    int k = src_field == FIELD_UPPER_AND_LOWER ? 1 : 2;
    int h;

    for (plane = 0; plane < desc->nb_components; plane++) {
        int lines = plane == 1 || plane == 2 ? AV_CEIL_RSHIFT(src_h, vsub) : src_h;
        int cols  = plane == 1 || plane == 2 ? AV_CEIL_RSHIFT(    w, hsub) : w;
        uint8_t *dstp = dst[plane];
        const uint8_t *srcp = src[plane];

        lines = (lines + (src_field == FIELD_UPPER)) / k;
        if (src_field == FIELD_LOWER)
            srcp += src_linesize[plane];
        if (interleave && dst_field == FIELD_LOWER)
            dstp += dst_linesize[plane];
        // Low-pass filtering is required when creating an interlaced destination from
        // a progressive source which contains high-frequency vertical detail.
        // Filtering will reduce interlace 'twitter' and Moire patterning.
        if (flags & TINTERLACE_FLAG_CVLPF) {
            int srcp_linesize = src_linesize[plane] * k;
            int dstp_linesize = dst_linesize[plane] * (interleave ? 2 : 1);
            for (h = lines; h > 0; h--) {
                ptrdiff_t pref = src_linesize[plane];
                ptrdiff_t mref = -pref;
                if (h >= (lines - 1)) mref = 0;
                else if (h <= 2)      pref = 0;

                tinterlace->lowpass_line(dstp, cols, srcp, mref, pref);
                dstp += dstp_linesize;
                srcp += srcp_linesize;
            }
        } else if (flags & TINTERLACE_FLAG_VLPF) {
            int srcp_linesize = src_linesize[plane] * k;
            int dstp_linesize = dst_linesize[plane] * (interleave ? 2 : 1);
            for (h = lines; h > 0; h--) {
                ptrdiff_t pref = src_linesize[plane];
                ptrdiff_t mref = -pref;
                if (h == lines)  mref = 0; // there is no line above
                else if (h == 1) pref = 0; // there is no line below

                tinterlace->lowpass_line(dstp, cols, srcp, mref, pref);
                dstp += dstp_linesize;
                srcp += srcp_linesize;
            }
        } else {
            av_image_copy_plane(dstp, dst_linesize[plane] * (interleave ? 2 : 1),
                                srcp, src_linesize[plane]*k, cols, lines);
        }
    }
}
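
/* Buffer the incoming frame as "next" and build the output from the buffered
 * pair (cur, next) according to the selected mode. The first call only stores
 * the frame, since at least two frames are needed. */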
static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    TInterlaceContext *tinterlace = ctx->priv;
    AVFrame *cur, *next, *out;
    int field, tff, ret;

    av_frame_free(&tinterlace->cur);
    tinterlace->cur  = tinterlace->next;
    tinterlace->next = picref;

    cur  = tinterlace->cur;
    next = tinterlace->next;
    /* we need at least two frames */
    if (!tinterlace->cur)
        return 0;

    switch (tinterlace->mode) {
    case MODE_MERGEX2: /* move the odd frame into the upper field of the new image, even into
                        * the lower field, generating a double-height video at same framerate */
    case MODE_MERGE: /* move the odd frame into the upper field of the new image, even into
                      * the lower field, generating a double-height video at half framerate */
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, cur);
        out->height = outlink->h;
        out->interlaced_frame = 1;
        out->top_field_first = 1;
        out->sample_aspect_ratio = av_mul_q(cur->sample_aspect_ratio, av_make_q(2, 1));

        /* write odd frame lines into the upper field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, tinterlace->mode == MODE_MERGEX2 ? inlink->frame_count_out & 1 ? FIELD_LOWER : FIELD_UPPER : FIELD_UPPER, tinterlace->flags);
        /* write even frame lines into the lower field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)next->data, next->linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, tinterlace->mode == MODE_MERGEX2 ? inlink->frame_count_out & 1 ? FIELD_UPPER : FIELD_LOWER : FIELD_LOWER, tinterlace->flags);
        if (tinterlace->mode != MODE_MERGEX2)
            av_frame_free(&tinterlace->next);
        break;

    case MODE_DROP_ODD:  /* only output even frames, odd  frames are dropped; height unchanged, half framerate */
    case MODE_DROP_EVEN: /* only output odd  frames, even frames are dropped; height unchanged, half framerate */
        out = av_frame_clone(tinterlace->mode == MODE_DROP_EVEN ? cur : next);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_free(&tinterlace->next);
        break;

    case MODE_PAD: /* expand each frame to double height, but pad alternate
                    * lines with black; framerate unchanged */
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, cur);
        out->height = outlink->h;
        out->sample_aspect_ratio = av_mul_q(cur->sample_aspect_ratio, av_make_q(2, 1));

        field = (1 + tinterlace->frame) & 1 ? FIELD_UPPER : FIELD_LOWER;
        /* copy upper and lower fields */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, field, tinterlace->flags);
        /* pad with black the other field */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)tinterlace->black_data, tinterlace->black_linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, !field, tinterlace->flags);
        break;

        /* interleave upper/lower lines from odd frames with lower/upper lines from even frames,
         * halving the frame rate and preserving image height */
    case MODE_INTERLEAVE_TOP:    /* top    field first */
    case MODE_INTERLEAVE_BOTTOM: /* bottom field first */
        tff = tinterlace->mode == MODE_INTERLEAVE_TOP;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, cur);
        out->interlaced_frame = 1;
        out->top_field_first = tff;

        /* copy upper/lower field from cur */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER,
                           tinterlace->flags);
        /* copy lower/upper field from next */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)next->data, next->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER,
                           tinterlace->flags);
        av_frame_free(&tinterlace->next);
        break;
    case MODE_INTERLACEX2: /* re-interlace preserving image height, double frame rate */
        /* output current frame first */
        out = av_frame_clone(cur);
        if (!out)
            return AVERROR(ENOMEM);
        out->interlaced_frame = 1;
        if (cur->pts != AV_NOPTS_VALUE)
            out->pts = cur->pts*2;

        out->pts = av_rescale_q(out->pts, tinterlace->preout_time_base, outlink->time_base);
        if ((ret = ff_filter_frame(outlink, out)) < 0)
            return ret;

        /* output mix of current and next frame */
        tff = next->top_field_first;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, next);
        out->interlaced_frame = 1;
        out->top_field_first = !tff;

        if (next->pts != AV_NOPTS_VALUE && cur->pts != AV_NOPTS_VALUE)
            out->pts = cur->pts + next->pts;
        else
            out->pts = AV_NOPTS_VALUE;
        /* write current frame second field lines into the second field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER,
                           tinterlace->flags);
        /* write next frame first field lines into the first field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)next->data, next->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER,
                           tinterlace->flags);
        break;
    default:
        av_assert0(0);
    }

    out->pts = av_rescale_q(out->pts, tinterlace->preout_time_base, outlink->time_base);
    ret = ff_filter_frame(outlink, out);
    tinterlace->frame++;

    return ret;
}

static const AVFilterPad tinterlace_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad tinterlace_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_out_props,
    },
    { NULL }
};

AVFilter ff_vf_tinterlace = {
    .name          = "tinterlace",
    .description   = NULL_IF_CONFIG_SMALL("Perform temporal field interlacing."),
    .priv_size     = sizeof(TInterlaceContext),
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = tinterlace_inputs,
    .outputs       = tinterlace_outputs,
    .priv_class    = &tinterlace_class,
};