/*
 * Copyright (c) 2017 Thomas Mundt <tmundt75@gmail.com>
 * Copyright (c) 2011 Stefano Sabatini
 * Copyright (c) 2010 Baptiste Coudurier
 * Copyright (c) 2003 Michael Zucchi <notzed@ximian.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * temporal field interlace filter, ported from MPlayer/libmpcodecs
 */
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavutil/avassert.h"
#include "avfilter.h"
#include "internal.h"
#include "tinterlace.h"

#define OFFSET(x) offsetof(TInterlaceContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption tinterlace_options[] = {
    {"mode",              "select interlace mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_MERGE}, 0, MODE_NB-1, FLAGS, "mode"},
    {"merge",             "merge fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_MERGE}, INT_MIN, INT_MAX, FLAGS, "mode"},
    {"drop_even",         "drop even fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_DROP_EVEN}, INT_MIN, INT_MAX, FLAGS, "mode"},
    {"drop_odd",          "drop odd fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_DROP_ODD}, INT_MIN, INT_MAX, FLAGS, "mode"},
    {"pad",               "pad alternate lines with black", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PAD}, INT_MIN, INT_MAX, FLAGS, "mode"},
    {"interleave_top",    "interleave top and bottom fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE_TOP}, INT_MIN, INT_MAX, FLAGS, "mode"},
    {"interleave_bottom", "interleave bottom and top fields", 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLEAVE_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "mode"},
    {"interlacex2",       "interlace fields from two consecutive frames", 0, AV_OPT_TYPE_CONST, {.i64=MODE_INTERLACEX2}, INT_MIN, INT_MAX, FLAGS, "mode"},
    {"mergex2",           "merge fields keeping same frame rate", 0, AV_OPT_TYPE_CONST, {.i64=MODE_MERGEX2}, INT_MIN, INT_MAX, FLAGS, "mode"},

    {"flags",             "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, 0, INT_MAX, 0, "flags" },
    {"low_pass_filter",   "enable vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_VLPF}, INT_MIN, INT_MAX, FLAGS, "flags" },
    {"vlpf",              "enable vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_VLPF}, INT_MIN, INT_MAX, FLAGS, "flags" },
    {"complex_filter",    "enable complex vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_CVLPF}, INT_MIN, INT_MAX, FLAGS, "flags" },
    {"cvlpf",             "enable complex vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_CVLPF}, INT_MIN, INT_MAX, FLAGS, "flags" },
    {"exact_tb",          "force a timebase which can represent timestamps exactly", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_EXACT_TB}, INT_MIN, INT_MAX, FLAGS, "flags" },

    {NULL}
};

AVFILTER_DEFINE_CLASS(tinterlace);
#define FULL_SCALE_YUVJ_FORMATS \
    AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P

static const enum AVPixelFormat full_scale_yuvj_pix_fmts[] = {
    FULL_SCALE_YUVJ_FORMATS, AV_PIX_FMT_NONE
};

static const AVRational standard_tbs[] = {
    {1, 25},
    {1, 30},
    {1001, 30000},
};
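
/* Supported input/output pixel formats: planar YUV(A) at 8, 10 and 12 bits,
 * 8-bit gray and the full-range YUVJ variants listed above. */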
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV410P,      AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV420P,      AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV440P,      AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV420P10LE,  AV_PIX_FMT_YUV422P10LE,
        AV_PIX_FMT_YUV440P10LE,  AV_PIX_FMT_YUV444P10LE,
        AV_PIX_FMT_YUV420P12LE,  AV_PIX_FMT_YUV422P12LE,
        AV_PIX_FMT_YUV440P12LE,  AV_PIX_FMT_YUV444P12LE,
        AV_PIX_FMT_YUVA420P,     AV_PIX_FMT_YUVA422P,     AV_PIX_FMT_YUVA444P,
        AV_PIX_FMT_YUVA420P10LE, AV_PIX_FMT_YUVA422P10LE, AV_PIX_FMT_YUVA444P10LE,
        AV_PIX_FMT_GRAY8, FULL_SCALE_YUVJ_FORMATS,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}
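
/* Linear vertical low-pass filter for one line of 8-bit samples:
 * dst = (2*current + above + below + 1) / 4. mref and pref are the byte
 * offsets of the lines above and below the current one (the caller passes 0
 * at the picture borders). */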
static void lowpass_line_c(uint8_t *dstp, ptrdiff_t width, const uint8_t *srcp,
                           ptrdiff_t mref, ptrdiff_t pref, int clip_max)
{
    const uint8_t *srcp_above = srcp + mref;
    const uint8_t *srcp_below = srcp + pref;
    int i;

    for (i = 0; i < width; i++) {
        // this calculation is an integer representation of
        // '0.5 * current + 0.25 * above + 0.25 * below'
        // '1 +' is for rounding.
        dstp[i] = (1 + srcp[i] + srcp[i] + srcp_above[i] + srcp_below[i]) >> 2;
    }
}
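
/* Same linear low-pass filter for 10/12-bit little-endian samples. mref and
 * pref are byte offsets, hence the division by 2 when indexing uint16_t
 * pointers; av_le2ne16() converts between the stored LE layout and native
 * byte order. */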
static void lowpass_line_c_16(uint8_t *dst8, ptrdiff_t width, const uint8_t *src8,
                              ptrdiff_t mref, ptrdiff_t pref, int clip_max)
{
    uint16_t *dstp = (uint16_t *)dst8;
    const uint16_t *srcp = (const uint16_t *)src8;
    const uint16_t *srcp_above = srcp + mref / 2;
    const uint16_t *srcp_below = srcp + pref / 2;
    int i, src_x;

    for (i = 0; i < width; i++) {
        // this calculation is an integer representation of
        // '0.5 * current + 0.25 * above + 0.25 * below'
        // '1 +' is for rounding.
        src_x   = av_le2ne16(srcp[i]) << 1;
        dstp[i] = av_le2ne16((1 + src_x + av_le2ne16(srcp_above[i])
                              + av_le2ne16(srcp_below[i])) >> 2);
    }
}
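
/* Complex 5-tap vertical low-pass filter for 8-bit samples, kernel
 * [-1 2 6 2 -1] / 8 with rounding, followed by a clamp that keeps the result
 * from overshooting the source pixel (see the in-loop comment). */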
static void lowpass_line_complex_c(uint8_t *dstp, ptrdiff_t width, const uint8_t *srcp,
                                   ptrdiff_t mref, ptrdiff_t pref, int clip_max)
{
    const uint8_t *srcp_above  = srcp + mref;
    const uint8_t *srcp_below  = srcp + pref;
    const uint8_t *srcp_above2 = srcp + mref * 2;
    const uint8_t *srcp_below2 = srcp + pref * 2;
    int i, src_x, src_ab;

    for (i = 0; i < width; i++) {
        // this calculation is an integer representation of
        // '0.75 * current + 0.25 * above + 0.25 * below - 0.125 * above2 - 0.125 * below2'
        // '4 +' is for rounding.
        src_x   = srcp[i] << 1;
        src_ab  = srcp_above[i] + srcp_below[i];
        dstp[i] = av_clip_uint8((4 + ((srcp[i] + src_x + src_ab) << 1)
                                 - srcp_above2[i] - srcp_below2[i]) >> 3);
        // Prevent over-sharpening:
        // dst must not exceed src when the average of above and below
        // is less than src. And the other way around.
        if (src_ab > src_x) {
            if (dstp[i] < srcp[i])
                dstp[i] = srcp[i];
        } else if (dstp[i] > srcp[i])
            dstp[i] = srcp[i];
    }
}
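
/* 16-bit little-endian variant of the complex low-pass filter; clip_max is
 * the maximum sample value for the plane's bit depth. */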
static void lowpass_line_complex_c_16(uint8_t *dst8, ptrdiff_t width, const uint8_t *src8,
                                      ptrdiff_t mref, ptrdiff_t pref, int clip_max)
{
    uint16_t *dstp = (uint16_t *)dst8;
    const uint16_t *srcp = (const uint16_t *)src8;
    const uint16_t *srcp_above  = srcp + mref / 2;
    const uint16_t *srcp_below  = srcp + pref / 2;
    const uint16_t *srcp_above2 = srcp + mref;
    const uint16_t *srcp_below2 = srcp + pref;
    int i, dst_le, src_le, src_x, src_ab;

    for (i = 0; i < width; i++) {
        // this calculation is an integer representation of
        // '0.75 * current + 0.25 * above + 0.25 * below - 0.125 * above2 - 0.125 * below2'
        // '4 +' is for rounding.
        src_le = av_le2ne16(srcp[i]);
        src_x  = src_le << 1;
        src_ab = av_le2ne16(srcp_above[i]) + av_le2ne16(srcp_below[i]);
        dst_le = av_clip((4 + ((src_le + src_x + src_ab) << 1)
                          - av_le2ne16(srcp_above2[i])
                          - av_le2ne16(srcp_below2[i])) >> 3, 0, clip_max);
        // Prevent over-sharpening:
        // dst must not exceed src when the average of above and below
        // is less than src. And the other way around.
        if (src_ab > src_x) {
            if (dst_le < src_le)
                dstp[i] = av_le2ne16(src_le);
            else
                dstp[i] = av_le2ne16(dst_le);
        } else if (dst_le > src_le) {
            dstp[i] = av_le2ne16(src_le);
        } else
            dstp[i] = av_le2ne16(dst_le);
    }
}
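
/* Release the two buffered input frames and the black padding image. */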
static av_cold void uninit(AVFilterContext *ctx)
{
    TInterlaceContext *tinterlace = ctx->priv;

    av_frame_free(&tinterlace->cur );
    av_frame_free(&tinterlace->next);
    av_freep(&tinterlace->black_data[0]);
}
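
/* Configure the output link: set the output size and sample aspect ratio
 * (height and SAR are doubled for the merge/pad/mergex2 modes), build the
 * black padding image for mode=pad, derive the output frame rate and time
 * base from the selected mode, and pick the low-pass line function
 * (with x86 overrides where available). */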
static int config_out_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    TInterlaceContext *tinterlace = ctx->priv;
    int i;

    tinterlace->vsub = desc->log2_chroma_h;
    outlink->w = inlink->w;
    outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD || tinterlace->mode == MODE_MERGEX2 ?
        inlink->h*2 : inlink->h;
    if (tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD || tinterlace->mode == MODE_MERGEX2)
        outlink->sample_aspect_ratio = av_mul_q(inlink->sample_aspect_ratio,
                                                av_make_q(2, 1));

    if (tinterlace->mode == MODE_PAD) {
        uint8_t black[4] = { 0, 0, 0, 16 };
        int ret;
        ff_draw_init(&tinterlace->draw, outlink->format, 0);
        ff_draw_color(&tinterlace->draw, &tinterlace->color, black);
        if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts))
            tinterlace->color.comp[0].u8[0] = 0;
        ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize,
                             outlink->w, outlink->h, outlink->format, 16);
        if (ret < 0)
            return ret;

        ff_fill_rectangle(&tinterlace->draw, &tinterlace->color, tinterlace->black_data,
                          tinterlace->black_linesize, 0, 0, outlink->w, outlink->h);
    }
    if (tinterlace->flags & (TINTERLACE_FLAG_VLPF | TINTERLACE_FLAG_CVLPF)
            && !(tinterlace->mode == MODE_INTERLEAVE_TOP
              || tinterlace->mode == MODE_INTERLEAVE_BOTTOM)) {
        av_log(ctx, AV_LOG_WARNING, "low_pass_filter flags ignored with mode %d\n",
               tinterlace->mode);
        tinterlace->flags &= ~(TINTERLACE_FLAG_VLPF | TINTERLACE_FLAG_CVLPF);
    }
    tinterlace->preout_time_base = inlink->time_base;
    if (tinterlace->mode == MODE_INTERLACEX2) {
        tinterlace->preout_time_base.den *= 2;
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){2,1});
        outlink->time_base  = av_mul_q(inlink->time_base , (AVRational){1,2});
    } else if (tinterlace->mode == MODE_MERGEX2) {
        outlink->frame_rate = inlink->frame_rate;
        outlink->time_base  = inlink->time_base;
    } else if (tinterlace->mode != MODE_PAD) {
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){1,2});
        outlink->time_base  = av_mul_q(inlink->time_base , (AVRational){2,1});
    }

    for (i = 0; i < FF_ARRAY_ELEMS(standard_tbs); i++) {
        if (!av_cmp_q(standard_tbs[i], outlink->time_base))
            break;
    }
    if (i == FF_ARRAY_ELEMS(standard_tbs) ||
        (tinterlace->flags & TINTERLACE_FLAG_EXACT_TB))
        outlink->time_base = tinterlace->preout_time_base;

    tinterlace->csp = av_pix_fmt_desc_get(outlink->format);
    if (tinterlace->flags & TINTERLACE_FLAG_CVLPF) {
        if (tinterlace->csp->comp[0].depth > 8)
            tinterlace->lowpass_line = lowpass_line_complex_c_16;
        else
            tinterlace->lowpass_line = lowpass_line_complex_c;
        if (ARCH_X86)
            ff_tinterlace_init_x86(tinterlace);
    } else if (tinterlace->flags & TINTERLACE_FLAG_VLPF) {
        if (tinterlace->csp->comp[0].depth > 8)
            tinterlace->lowpass_line = lowpass_line_c_16;
        else
            tinterlace->lowpass_line = lowpass_line_c;
        if (ARCH_X86)
            ff_tinterlace_init_x86(tinterlace);
    }

    av_log(ctx, AV_LOG_VERBOSE, "mode:%d filter:%s h:%d -> h:%d\n", tinterlace->mode,
           (tinterlace->flags & TINTERLACE_FLAG_CVLPF) ? "complex" :
           (tinterlace->flags & TINTERLACE_FLAG_VLPF) ? "linear" : "off",
           inlink->h, outlink->h);

    return 0;
}
#define FIELD_UPPER           0
#define FIELD_LOWER           1
#define FIELD_UPPER_AND_LOWER 2

/**
 * Copy picture field from src to dst.
 *
 * @param src_field copy from upper, lower field or both
 * @param interleave leave a padding line between each copied line
 * @param dst_field copy to upper or lower field,
 *                  only meaningful when interleave is selected
 * @param flags context flags
 */
static inline
void copy_picture_field(TInterlaceContext *tinterlace,
                        uint8_t *dst[4], int dst_linesize[4],
                        const uint8_t *src[4], int src_linesize[4],
                        enum AVPixelFormat format, int w, int src_h,
                        int src_field, int interleave, int dst_field,
                        int flags)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
    int hsub = desc->log2_chroma_w;
    int plane, vsub = desc->log2_chroma_h;
    int k = src_field == FIELD_UPPER_AND_LOWER ? 1 : 2;
    int h;

    for (plane = 0; plane < desc->nb_components; plane++) {
        int lines = plane == 1 || plane == 2 ? AV_CEIL_RSHIFT(src_h, vsub) : src_h;
        int cols  = plane == 1 || plane == 2 ? AV_CEIL_RSHIFT(    w, hsub) : w;
        uint8_t *dstp = dst[plane];
        const uint8_t *srcp = src[plane];
        int srcp_linesize = src_linesize[plane] * k;
        int dstp_linesize = dst_linesize[plane] * (interleave ? 2 : 1);
        int clip_max = (1 << tinterlace->csp->comp[plane].depth) - 1;

        lines = (lines + (src_field == FIELD_UPPER)) / k;
        if (src_field == FIELD_LOWER)
            srcp += src_linesize[plane];
        if (interleave && dst_field == FIELD_LOWER)
            dstp += dst_linesize[plane];
        // Low-pass filtering is required when creating an interlaced destination from
        // a progressive source which contains high-frequency vertical detail.
        // Filtering will reduce interlace 'twitter' and Moire patterning.
        if (flags & (TINTERLACE_FLAG_VLPF | TINTERLACE_FLAG_CVLPF)) {
            int x = !!(flags & TINTERLACE_FLAG_CVLPF);
            for (h = lines; h > 0; h--) {
                ptrdiff_t pref = src_linesize[plane];
                ptrdiff_t mref = -pref;
                if (h >= (lines - x))  mref = 0; // there is no line above
                else if (h <= (1 + x)) pref = 0; // there is no line below

                tinterlace->lowpass_line(dstp, cols, srcp, mref, pref, clip_max);
                dstp += dstp_linesize;
                srcp += srcp_linesize;
            }
        } else {
            if (tinterlace->csp->comp[plane].depth > 8)
                cols *= 2;
            av_image_copy_plane(dstp, dstp_linesize, srcp, srcp_linesize, cols, lines);
        }
    }
}
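
/* Buffer the two most recent input frames in tinterlace->cur and
 * tinterlace->next and build the output according to the selected mode;
 * no frame is output until two input frames are available. */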
static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    TInterlaceContext *tinterlace = ctx->priv;
    AVFrame *cur, *next, *out;
    int field, tff, ret;

    av_frame_free(&tinterlace->cur);
    tinterlace->cur  = tinterlace->next;
    tinterlace->next = picref;

    cur  = tinterlace->cur;
    next = tinterlace->next;

    /* we need at least two frames */
    if (!tinterlace->cur)
        return 0;

    switch (tinterlace->mode) {
    case MODE_MERGEX2: /* move the odd frame into the upper field of the new image, even into
                        * the lower field, generating a double-height video at same framerate */
    case MODE_MERGE: /* move the odd frame into the upper field of the new image, even into
                      * the lower field, generating a double-height video at half framerate */
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, cur);
        out->height = outlink->h;
        out->interlaced_frame = 1;
        out->top_field_first = 1;
        out->sample_aspect_ratio = av_mul_q(cur->sample_aspect_ratio, av_make_q(2, 1));

        /* write odd frame lines into the upper field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, tinterlace->mode == MODE_MERGEX2 ? inlink->frame_count_out & 1 ? FIELD_LOWER : FIELD_UPPER : FIELD_UPPER, tinterlace->flags);
        /* write even frame lines into the lower field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)next->data, next->linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, tinterlace->mode == MODE_MERGEX2 ? inlink->frame_count_out & 1 ? FIELD_UPPER : FIELD_LOWER : FIELD_LOWER, tinterlace->flags);
        if (tinterlace->mode != MODE_MERGEX2)
            av_frame_free(&tinterlace->next);
        break;

    case MODE_DROP_ODD:  /* only output even frames, odd frames are dropped; height unchanged, half framerate */
    case MODE_DROP_EVEN: /* only output odd frames, even frames are dropped; height unchanged, half framerate */
        out = av_frame_clone(tinterlace->mode == MODE_DROP_EVEN ? cur : next);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_free(&tinterlace->next);
        break;

    case MODE_PAD: /* expand each frame to double height, but pad alternate
                    * lines with black; framerate unchanged */
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, cur);
        out->height = outlink->h;
        out->sample_aspect_ratio = av_mul_q(cur->sample_aspect_ratio, av_make_q(2, 1));

        field = (1 + tinterlace->frame) & 1 ? FIELD_UPPER : FIELD_LOWER;
        /* copy upper and lower fields */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, field, tinterlace->flags);
        /* pad with black the other field */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)tinterlace->black_data, tinterlace->black_linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, !field, tinterlace->flags);
        break;

        /* interleave upper/lower lines from odd frames with lower/upper lines from even frames,
         * halving the frame rate and preserving image height */
    case MODE_INTERLEAVE_TOP:    /* top field first */
    case MODE_INTERLEAVE_BOTTOM: /* bottom field first */
        tff = tinterlace->mode == MODE_INTERLEAVE_TOP;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, cur);
        out->interlaced_frame = 1;
        out->top_field_first = tff;

        /* copy upper/lower field from cur */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER,
                           tinterlace->flags);
        /* copy lower/upper field from next */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)next->data, next->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER,
                           tinterlace->flags);
        av_frame_free(&tinterlace->next);
        break;

    case MODE_INTERLACEX2: /* re-interlace preserving image height, double frame rate */
        /* output current frame first */
        out = av_frame_clone(cur);
        if (!out)
            return AVERROR(ENOMEM);
        out->interlaced_frame = 1;
        if (cur->pts != AV_NOPTS_VALUE)
            out->pts = cur->pts*2;

        out->pts = av_rescale_q(out->pts, tinterlace->preout_time_base, outlink->time_base);
        if ((ret = ff_filter_frame(outlink, out)) < 0)
            return ret;

        /* output mix of current and next frame */
        tff = next->top_field_first;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, next);
        out->interlaced_frame = 1;
        out->top_field_first = !tff;

        if (next->pts != AV_NOPTS_VALUE && cur->pts != AV_NOPTS_VALUE)
            out->pts = cur->pts + next->pts;
        else
            out->pts = AV_NOPTS_VALUE;
        /* write current frame second field lines into the second field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER,
                           tinterlace->flags);
        /* write next frame first field lines into the first field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)next->data, next->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER,
                           tinterlace->flags);
        break;
    default:
        av_assert0(0);
    }

    out->pts = av_rescale_q(out->pts, tinterlace->preout_time_base, outlink->time_base);
    ret = ff_filter_frame(outlink, out);
    tinterlace->frame++;

    return ret;
}
static const AVFilterPad tinterlace_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad tinterlace_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_out_props,
    },
    { NULL }
};

AVFilter ff_vf_tinterlace = {
    .name          = "tinterlace",
    .description   = NULL_IF_CONFIG_SMALL("Perform temporal field interlacing."),
    .priv_size     = sizeof(TInterlaceContext),
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = tinterlace_inputs,
    .outputs       = tinterlace_outputs,
    .priv_class    = &tinterlace_class,
};