/*
 * Copyright (c) 2012 Fredrik Mellbin
 * Copyright (c) 2013 Clément Bœsch
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Fieldmatching filter, ported from VFM filter (VapourSynth) by Clément.
 * Fredrik Mellbin is the author of the VIVTC/VFM filter, which is itself a
 * light clone of the TIVTC/TFM (AviSynth) filter written by Kevin Stone
 * (tritical), the original author.
 *
 * @see http://bengal.missouri.edu/~kes25c/
 * @see http://www.vapoursynth.com/about/
 */

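/*
 * Note: this filter only performs the field matching half of an inverse
 * telecine; a typical chain follows it with a decimation filter (for example
 * "fieldmatch,decimate"), optionally with a deinterlacer in between to clean
 * up the frames that are left combed.
 */
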
#include <inttypes.h>

#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/timestamp.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"

#define INPUT_MAIN     0
#define INPUT_CLEANSRC 1

enum fieldmatch_parity {
    FM_PARITY_AUTO   = -1,
    FM_PARITY_BOTTOM =  0,
    FM_PARITY_TOP    =  1,
};

enum matching_mode {
    MODE_PC,
    MODE_PC_N,
    MODE_PC_U,
    MODE_PC_N_UB,
    MODE_PCN,
    MODE_PCN_UB,
    NB_MODE
};

enum comb_matching_mode {
    COMBMATCH_NONE,
    COMBMATCH_SC,
    COMBMATCH_FULL,
    NB_COMBMATCH
};

enum comb_dbg {
    COMBDBG_NONE,
    COMBDBG_PCN,
    COMBDBG_PCNUB,
    NB_COMBDBG
};

typedef struct FieldMatchContext {
    const AVClass *class;

    AVFrame *prv, *src, *nxt;       ///< main sliding window of 3 frames
    AVFrame *prv2, *src2, *nxt2;    ///< sliding window of the optional second stream
    int got_frame[2];               ///< frame request flag for each input stream
    int hsub, vsub;                 ///< chroma subsampling values
    int bpc;                        ///< bytes per component
    uint32_t eof;                   ///< bitmask for end of stream
    int64_t lastscdiff;
    int64_t lastn;

    /* options */
    int order;
    int ppsrc;
    int mode;                       ///< matching_mode
    int field;
    int mchroma;
    int y0, y1;
    int64_t scthresh;
    double scthresh_flt;
    int combmatch;                  ///< comb_matching_mode
    int combdbg;
    int cthresh;
    int chroma;
    int blockx, blocky;
    int combpel;

    /* misc buffers */
    uint8_t *map_data[4];
    int map_linesize[4];
    uint8_t *cmask_data[4];
    int cmask_linesize[4];
    int *c_array;
    int tpitchy, tpitchuv;
    uint8_t *tbuffer;
} FieldMatchContext;

#define OFFSET(x) offsetof(FieldMatchContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption fieldmatch_options[] = {
    { "order", "specify the assumed field order", OFFSET(order), AV_OPT_TYPE_INT, {.i64=FM_PARITY_AUTO}, -1, 1, FLAGS, "order" },
        { "auto", "auto detect parity",        0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_AUTO},   INT_MIN, INT_MAX, FLAGS, "order" },
        { "bff",  "assume bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "order" },
        { "tff",  "assume top field first",    0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_TOP},    INT_MIN, INT_MAX, FLAGS, "order" },
    { "mode", "set the matching mode or strategy to use", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_PC_N}, MODE_PC, NB_MODE-1, FLAGS, "mode" },
        { "pc",      "2-way match (p/c)",                                                                   0, AV_OPT_TYPE_CONST, {.i64=MODE_PC},      INT_MIN, INT_MAX, FLAGS, "mode" },
        { "pc_n",    "2-way match + 3rd match on combed (p/c + u)",                                         0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_N},    INT_MIN, INT_MAX, FLAGS, "mode" },
        { "pc_u",    "2-way match + 3rd match (same order) on combed (p/c + u)",                            0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_U},    INT_MIN, INT_MAX, FLAGS, "mode" },
        { "pc_n_ub", "2-way match + 3rd match on combed + 4th/5th matches if still combed (p/c + u + u/b)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_N_UB}, INT_MIN, INT_MAX, FLAGS, "mode" },
        { "pcn",     "3-way match (p/c/n)",                                                                 0, AV_OPT_TYPE_CONST, {.i64=MODE_PCN},     INT_MIN, INT_MAX, FLAGS, "mode" },
        { "pcn_ub",  "3-way match + 4th/5th matches on combed (p/c/n + u/b)",                               0, AV_OPT_TYPE_CONST, {.i64=MODE_PCN_UB},  INT_MIN, INT_MAX, FLAGS, "mode" },
    { "ppsrc", "mark main input as a pre-processed input and activate clean source input stream", OFFSET(ppsrc), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { "field", "set the field to match from", OFFSET(field), AV_OPT_TYPE_INT, {.i64=FM_PARITY_AUTO}, -1, 1, FLAGS, "field" },
        { "auto",   "automatic (same value as 'order')", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_AUTO},   INT_MIN, INT_MAX, FLAGS, "field" },
        { "bottom", "bottom field",                      0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "field" },
        { "top",    "top field",                         0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_TOP},    INT_MIN, INT_MAX, FLAGS, "field" },
    { "mchroma", "set whether or not chroma is included during the match comparisons", OFFSET(mchroma), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
    { "y0", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y0), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
    { "y1", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y1), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
    { "scthresh", "set scene change detection threshold", OFFSET(scthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl=12}, 0, 100, FLAGS },
    { "combmatch", "set combmatching mode", OFFSET(combmatch), AV_OPT_TYPE_INT, {.i64=COMBMATCH_SC}, COMBMATCH_NONE, NB_COMBMATCH-1, FLAGS, "combmatching" },
        { "none", "disable combmatching",                     0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_NONE}, INT_MIN, INT_MAX, FLAGS, "combmatching" },
        { "sc",   "enable combmatching only on scene change", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_SC},   INT_MIN, INT_MAX, FLAGS, "combmatching" },
        { "full", "enable combmatching all the time",         0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_FULL}, INT_MIN, INT_MAX, FLAGS, "combmatching" },
    { "combdbg", "enable comb debug", OFFSET(combdbg), AV_OPT_TYPE_INT, {.i64=COMBDBG_NONE}, COMBDBG_NONE, NB_COMBDBG-1, FLAGS, "dbglvl" },
        { "none",  "no forced calculation", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_NONE},  INT_MIN, INT_MAX, FLAGS, "dbglvl" },
        { "pcn",   "calculate p/c/n",       0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_PCN},   INT_MIN, INT_MAX, FLAGS, "dbglvl" },
        { "pcnub", "calculate p/c/n/u/b",   0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_PCNUB}, INT_MIN, INT_MAX, FLAGS, "dbglvl" },
    { "cthresh", "set the area combing threshold used for combed frame detection", OFFSET(cthresh), AV_OPT_TYPE_INT, {.i64= 9}, -1, 0xff, FLAGS },
    { "chroma", "set whether or not chroma is considered in the combed frame decision", OFFSET(chroma), AV_OPT_TYPE_BOOL, {.i64= 0}, 0, 1, FLAGS },
    { "blockx", "set the x-axis size of the window used during combed frame detection", OFFSET(blockx), AV_OPT_TYPE_INT, {.i64=16}, 4, 1<<9, FLAGS },
    { "blocky", "set the y-axis size of the window used during combed frame detection", OFFSET(blocky), AV_OPT_TYPE_INT, {.i64=16}, 4, 1<<9, FLAGS },
    { "combpel", "set the number of combed pixels inside any of the blocky by blockx size blocks on the frame for the frame to be detected as combed", OFFSET(combpel), AV_OPT_TYPE_INT, {.i64=80}, 0, INT_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(fieldmatch);

static int get_width(const FieldMatchContext *fm, const AVFrame *f, int plane)
{
    return plane ? AV_CEIL_RSHIFT(f->width, fm->hsub) : f->width;
}

static int get_height(const FieldMatchContext *fm, const AVFrame *f, int plane)
{
    return plane ? AV_CEIL_RSHIFT(f->height, fm->vsub) : f->height;
}

static int64_t luma_abs_diff(const AVFrame *f1, const AVFrame *f2)
{
    int x, y;
    const uint8_t *srcp1 = f1->data[0];
    const uint8_t *srcp2 = f2->data[0];
    const int src1_linesize = f1->linesize[0];
    const int src2_linesize = f2->linesize[0];
    const int width  = f1->width;
    const int height = f1->height;
    int64_t acc = 0;

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
            acc += abs(srcp1[x] - srcp2[x]);
        srcp1 += src1_linesize;
        srcp2 += src2_linesize;
    }
    return acc;
}

static void fill_buf(uint8_t *data, int w, int h, int linesize, uint8_t v)
{
    int y;
    for (y = 0; y < h; y++) {
        memset(data, v, w);
        data += linesize;
    }
}

static int calc_combed_score(const FieldMatchContext *fm, const AVFrame *src)
{
    int x, y, plane, max_v = 0;
    const int cthresh = fm->cthresh;
    const int cthresh6 = cthresh * 6;

    for (plane = 0; plane < (fm->chroma ? 3 : 1); plane++) {
        const uint8_t *srcp = src->data[plane];
        const int src_linesize = src->linesize[plane];
        const int width  = get_width (fm, src, plane);
        const int height = get_height(fm, src, plane);
        uint8_t *cmkp = fm->cmask_data[plane];
        const int cmk_linesize = fm->cmask_linesize[plane];

        if (cthresh < 0) {
            fill_buf(cmkp, width, height, cmk_linesize, 0xff);
            continue;
        }
        fill_buf(cmkp, width, height, cmk_linesize, 0);

        /* [1 -3 4 -3 1] vertical filter */
#define FILTER(xm2, xm1, xp1, xp2) \
        abs( 4 * srcp[x] \
            -3 * (srcp[x + (xm1)*src_linesize] + srcp[x + (xp1)*src_linesize]) \
            +    (srcp[x + (xm2)*src_linesize] + srcp[x + (xp2)*src_linesize])) > cthresh6

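        /* For a pure comb, i.e. the two fields differing by a constant d, the
         * [1 -3 4 -3 1] kernel evaluates to 6*d, which is why the result is
         * compared against cthresh * 6: the effective per-pixel threshold on
         * the field difference stays cthresh. */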
        /* first line */
        for (x = 0; x < width; x++) {
            const int s1 = abs(srcp[x] - srcp[x + src_linesize]);
            if (s1 > cthresh && FILTER(2, 1, 1, 2))
                cmkp[x] = 0xff;
        }
        srcp += src_linesize;
        cmkp += cmk_linesize;

        /* second line */
        for (x = 0; x < width; x++) {
            const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
            const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
            if (s1 > cthresh && s2 > cthresh && FILTER(2, -1, 1, 2))
                cmkp[x] = 0xff;
        }
        srcp += src_linesize;
        cmkp += cmk_linesize;

        /* all lines minus first two and last two */
        for (y = 2; y < height-2; y++) {
            for (x = 0; x < width; x++) {
                const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
                const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
                if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, 2))
                    cmkp[x] = 0xff;
            }
            srcp += src_linesize;
            cmkp += cmk_linesize;
        }

        /* before-last line */
        for (x = 0; x < width; x++) {
            const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
            const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
            if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, -2))
                cmkp[x] = 0xff;
        }
        srcp += src_linesize;
        cmkp += cmk_linesize;

        /* last line */
        for (x = 0; x < width; x++) {
            const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
            if (s1 > cthresh && FILTER(-2, -1, -1, -2))
                cmkp[x] = 0xff;
        }
    }

    if (fm->chroma) {
        uint8_t *cmkp  = fm->cmask_data[0];
        uint8_t *cmkpU = fm->cmask_data[1];
        uint8_t *cmkpV = fm->cmask_data[2];
        const int width  = AV_CEIL_RSHIFT(src->width,  fm->hsub);
        const int height = AV_CEIL_RSHIFT(src->height, fm->vsub);
        const int cmk_linesize   = fm->cmask_linesize[0] << 1;
        const int cmk_linesizeUV = fm->cmask_linesize[2];
        uint8_t *cmkpp  = cmkp - (cmk_linesize>>1);
        uint8_t *cmkpn  = cmkp + (cmk_linesize>>1);
        uint8_t *cmkpnn = cmkp +  cmk_linesize;

        for (y = 1; y < height - 1; y++) {
            cmkpp  += cmk_linesize;
            cmkp   += cmk_linesize;
            cmkpn  += cmk_linesize;
            cmkpnn += cmk_linesize;
            cmkpV  += cmk_linesizeUV;
            cmkpU  += cmk_linesizeUV;
            for (x = 1; x < width - 1; x++) {
#define HAS_FF_AROUND(p, lz) (p[(x)-1 - (lz)] == 0xff || p[(x) - (lz)] == 0xff || p[(x)+1 - (lz)] == 0xff || \
                              p[(x)-1       ] == 0xff ||                          p[(x)+1       ] == 0xff || \
                              p[(x)-1 + (lz)] == 0xff || p[(x) + (lz)] == 0xff || p[(x)+1 + (lz)] == 0xff)
                if ((cmkpV[x] == 0xff && HAS_FF_AROUND(cmkpV, cmk_linesizeUV)) ||
                    (cmkpU[x] == 0xff && HAS_FF_AROUND(cmkpU, cmk_linesizeUV))) {
                    ((uint16_t*)cmkp)[x]  = 0xffff;
                    ((uint16_t*)cmkpn)[x] = 0xffff;
                    if (y&1) ((uint16_t*)cmkpp)[x]  = 0xffff;
                    else     ((uint16_t*)cmkpnn)[x] = 0xffff;
                }
            }
        }
    }

    {
        const int blockx = fm->blockx;
        const int blocky = fm->blocky;
        const int xhalf = blockx/2;
        const int yhalf = blocky/2;
        const int cmk_linesize = fm->cmask_linesize[0];
        const uint8_t *cmkp = fm->cmask_data[0] + cmk_linesize;
        const int width  = src->width;
        const int height = src->height;
        const int xblocks = ((width+xhalf)/blockx) + 1;
        const int xblocks4 = xblocks<<2;
        const int yblocks = ((height+yhalf)/blocky) + 1;
        int *c_array = fm->c_array;
        const int arraysize = (xblocks*yblocks)<<2;
        int heighta = (height/(blocky/2))*(blocky/2);
        const int widtha = (width /(blockx/2))*(blockx/2);

        if (heighta == height)
            heighta = height - yhalf;
        memset(c_array, 0, arraysize * sizeof(*c_array));

#define C_ARRAY_ADD(v) do {                         \
    const int box1 = (x / blockx) * 4;              \
    const int box2 = ((x + xhalf) / blockx) * 4;    \
    c_array[temp1 + box1    ] += v;                 \
    c_array[temp1 + box2 + 1] += v;                 \
    c_array[temp2 + box1 + 2] += v;                 \
    c_array[temp2 + box2 + 3] += v;                 \
} while (0)

#define VERTICAL_HALF(y_start, y_end) do {                   \
    for (y = y_start; y < y_end; y++) {                      \
        const int temp1 = (y / blocky) * xblocks4;           \
        const int temp2 = ((y + yhalf) / blocky) * xblocks4; \
        for (x = 0; x < width; x++)                          \
            if (cmkp[x - cmk_linesize] == 0xff &&            \
                cmkp[x               ] == 0xff &&            \
                cmkp[x + cmk_linesize] == 0xff)              \
                C_ARRAY_ADD(1);                              \
        cmkp += cmk_linesize;                                \
    }                                                        \
} while (0)

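        /* C_ARRAY_ADD() credits each combed pixel to four accumulators,
         * corresponding to block grids shifted by half a block horizontally
         * and/or vertically, so a combed area straddling a block boundary is
         * still fully counted in at least one of the grids. */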
        VERTICAL_HALF(1, yhalf);

        for (y = yhalf; y < heighta; y += yhalf) {
            const int temp1 = (y / blocky) * xblocks4;
            const int temp2 = ((y + yhalf) / blocky) * xblocks4;

            for (x = 0; x < widtha; x += xhalf) {
                const uint8_t *cmkp_tmp = cmkp + x;
                int u, v, sum = 0;
                for (u = 0; u < yhalf; u++) {
                    for (v = 0; v < xhalf; v++)
                        if (cmkp_tmp[v - cmk_linesize] == 0xff &&
                            cmkp_tmp[v               ] == 0xff &&
                            cmkp_tmp[v + cmk_linesize] == 0xff)
                            sum++;
                    cmkp_tmp += cmk_linesize;
                }
                if (sum)
                    C_ARRAY_ADD(sum);
            }

            for (x = widtha; x < width; x++) {
                const uint8_t *cmkp_tmp = cmkp + x;
                int u, sum = 0;
                for (u = 0; u < yhalf; u++) {
                    if (cmkp_tmp[-cmk_linesize] == 0xff &&
                        cmkp_tmp[            0] == 0xff &&
                        cmkp_tmp[ cmk_linesize] == 0xff)
                        sum++;
                    cmkp_tmp += cmk_linesize;
                }
                if (sum)
                    C_ARRAY_ADD(sum);
            }

            cmkp += cmk_linesize * yhalf;
        }

        VERTICAL_HALF(heighta, height - 1);

        for (x = 0; x < arraysize; x++)
            if (c_array[x] > max_v)
                max_v = c_array[x];
    }
    return max_v;
}

// the secret is that tbuffer is an interlaced, offset subset of all the lines
static void build_abs_diff_mask(const uint8_t *prvp, int prv_linesize,
                                const uint8_t *nxtp, int nxt_linesize,
                                uint8_t *tbuffer, int tbuf_linesize,
                                int width, int height)
{
    int y, x;

    prvp -= prv_linesize;
    nxtp -= nxt_linesize;
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
            tbuffer[x] = FFABS(prvp[x] - nxtp[x]);
        prvp += prv_linesize;
        nxtp += nxt_linesize;
        tbuffer += tbuf_linesize;
    }
}

/**
 * Build a map over which pixels differ a lot/a little
 */
static void build_diff_map(FieldMatchContext *fm,
                           const uint8_t *prvp, int prv_linesize,
                           const uint8_t *nxtp, int nxt_linesize,
                           uint8_t *dstp, int dst_linesize, int height,
                           int width, int plane)
{
    int x, y, u, diff, count;
    int tpitch = plane ? fm->tpitchuv : fm->tpitchy;
    const uint8_t *dp = fm->tbuffer + tpitch;

    build_abs_diff_mask(prvp, prv_linesize, nxtp, nxt_linesize,
                        fm->tbuffer, tpitch, width, height>>1);

    for (y = 2; y < height - 2; y += 2) {
        for (x = 1; x < width - 1; x++) {
            diff = dp[x];
            if (diff > 3) {
                for (count = 0, u = x-1; u < x+2 && count < 2; u++) {
                    count += dp[u-tpitch] > 3;
                    count += dp[u       ] > 3;
                    count += dp[u+tpitch] > 3;
                }
                if (count > 1) {
                    dstp[x] = 1;
                    if (diff > 19) {
                        int upper = 0, lower = 0;
                        for (count = 0, u = x-1; u < x+2 && count < 6; u++) {
                            if (dp[u-tpitch] > 19) { count++; upper = 1; }
                            if (dp[u       ] > 19)   count++;
                            if (dp[u+tpitch] > 19) { count++; lower = 1; }
                        }
                        if (count > 3) {
                            if (upper && lower) {
                                dstp[x] |= 1<<1;
                            } else {
                                int upper2 = 0, lower2 = 0;
                                for (u = FFMAX(x-4,0); u < FFMIN(x+5,width); u++) {
                                    if (y != 2        && dp[u-2*tpitch] > 19) upper2 = 1;
                                    if (                 dp[u-  tpitch] > 19) upper  = 1;
                                    if (                 dp[u+  tpitch] > 19) lower  = 1;
                                    if (y != height-4 && dp[u+2*tpitch] > 19) lower2 = 1;
                                }
                                if ((upper && (lower || upper2)) ||
                                    (lower && (upper || lower2)))
                                    dstp[x] |= 1<<1;
                                else if (count > 5)
                                    dstp[x] |= 1<<2;
                            }
                        }
                    }
                }
            }
        }
        dp += tpitch;
        dstp += dst_linesize;
    }
}

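/* The five match candidates: mP/mC/mN keep the current frame's non-matched
 * field and take the matched field from the previous/current/next frame
 * respectively, while mB/mU keep the matched field of the current frame and
 * take the opposite field from the previous/next frame (see
 * create_weave_frame() below). */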
enum { mP, mC, mN, mB, mU };

static int get_field_base(int match, int field)
{
    return match < 3 ? 2 - field : 1 + field;
}

static AVFrame *select_frame(FieldMatchContext *fm, int match)
{
    if      (match == mP || match == mB) return fm->prv;
    else if (match == mN || match == mU) return fm->nxt;
    else  /* match == mC */              return fm->src;
}

static int compare_fields(FieldMatchContext *fm, int match1, int match2, int field)
{
    int plane, ret;
    uint64_t accumPc = 0, accumPm = 0, accumPml = 0;
    uint64_t accumNc = 0, accumNm = 0, accumNml = 0;
    int norm1, norm2, mtn1, mtn2;
    float c1, c2, mr;
    const AVFrame *src = fm->src;

    for (plane = 0; plane < (fm->mchroma ? 3 : 1); plane++) {
        int x, y, temp1, temp2, fbase;
        const AVFrame *prev, *next;
        uint8_t *mapp = fm->map_data[plane];
        int map_linesize = fm->map_linesize[plane];
        const uint8_t *srcp = src->data[plane];
        const int src_linesize  = src->linesize[plane];
        const int srcf_linesize = src_linesize << 1;
        int prv_linesize,  nxt_linesize;
        int prvf_linesize, nxtf_linesize;
        const int width  = get_width (fm, src, plane);
        const int height = get_height(fm, src, plane);
        const int y0a = fm->y0 >> (plane ? fm->vsub : 0);
        const int y1a = fm->y1 >> (plane ? fm->vsub : 0);
        const int startx = (plane == 0 ? 8 : 8 >> fm->hsub);
        const int stopx  = width - startx;
        const uint8_t *srcpf, *srcf, *srcnf;
        const uint8_t *prvpf, *prvnf, *nxtpf, *nxtnf;

        fill_buf(mapp, width, height, map_linesize, 0);

        /* match1 */
        fbase = get_field_base(match1, field);
        srcf  = srcp + (fbase + 1) * src_linesize;
        srcpf = srcf - srcf_linesize;
        srcnf = srcf + srcf_linesize;
        mapp  = mapp + fbase * map_linesize;
        prev = select_frame(fm, match1);
        prv_linesize  = prev->linesize[plane];
        prvf_linesize = prv_linesize << 1;
        prvpf = prev->data[plane] + fbase * prv_linesize;   // previous frame, previous field
        prvnf = prvpf + prvf_linesize;                      // previous frame, next field

        /* match2 */
        fbase = get_field_base(match2, field);
        next = select_frame(fm, match2);
        nxt_linesize  = next->linesize[plane];
        nxtf_linesize = nxt_linesize << 1;
        nxtpf = next->data[plane] + fbase * nxt_linesize;   // next frame, previous field
        nxtnf = nxtpf + nxtf_linesize;                      // next frame, next field

        map_linesize <<= 1;
        if ((match1 >= 3 && field == 1) || (match1 < 3 && field != 1))
            build_diff_map(fm, prvpf, prvf_linesize, nxtpf, nxtf_linesize,
                           mapp, map_linesize, height, width, plane);
        else
            build_diff_map(fm, prvnf, prvf_linesize, nxtnf, nxtf_linesize,
                           mapp + map_linesize, map_linesize, height, width, plane);

        for (y = 2; y < height - 2; y += 2) {
            if (y0a == y1a || y < y0a || y > y1a) {
                for (x = startx; x < stopx; x++) {
                    if (mapp[x] > 0 || mapp[x + map_linesize] > 0) {
                        temp1 = srcpf[x] + (srcf[x] << 2) + srcnf[x]; // [1 4 1]
                        temp2 = abs(3 * (prvpf[x] + prvnf[x]) - temp1);
                        if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
                            accumPc += temp2;
                        if (temp2 > 42) {
                            if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
                                accumPm += temp2;
                            if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
                                accumPml += temp2;
                        }
                        temp2 = abs(3 * (nxtpf[x] + nxtnf[x]) - temp1);
                        if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
                            accumNc += temp2;
                        if (temp2 > 42) {
                            if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
                                accumNm += temp2;
                            if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
                                accumNml += temp2;
                        }
                    }
                }
            }
            prvpf += prvf_linesize;
            prvnf += prvf_linesize;
            srcpf += srcf_linesize;
            srcf  += srcf_linesize;
            srcnf += srcf_linesize;
            nxtpf += nxtf_linesize;
            nxtnf += nxtf_linesize;
            mapp  += map_linesize;
        }
    }

    if (accumPm < 500 && accumNm < 500 && (accumPml >= 500 || accumNml >= 500) &&
        FFMAX(accumPml,accumNml) > 3*FFMIN(accumPml,accumNml)) {
        accumPm = accumPml;
        accumNm = accumNml;
    }

    norm1 = (int)((accumPc / 6.0f) + 0.5f);
    norm2 = (int)((accumNc / 6.0f) + 0.5f);
    mtn1  = (int)((accumPm / 6.0f) + 0.5f);
    mtn2  = (int)((accumNm / 6.0f) + 0.5f);
    c1 = ((float)FFMAX(norm1,norm2)) / ((float)FFMAX(FFMIN(norm1,norm2),1));
    c2 = ((float)FFMAX(mtn1, mtn2))  / ((float)FFMAX(FFMIN(mtn1, mtn2), 1));
    mr = ((float)FFMAX(mtn1, mtn2))  / ((float)FFMAX(FFMAX(norm1,norm2),1));
    if (((mtn1 >= 500  || mtn2 >= 500)  && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1)) ||
        ((mtn1 >= 1000 || mtn2 >= 1000) && (mtn1*3 < mtn2*2 || mtn2*3 < mtn1*2)) ||
        ((mtn1 >= 2000 || mtn2 >= 2000) && (mtn1*5 < mtn2*4 || mtn2*5 < mtn1*4)) ||
        ((mtn1 >= 4000 || mtn2 >= 4000) && c2 > c1))
        ret = mtn1 > mtn2 ? match2 : match1;
    else if (mr > 0.005 && FFMAX(mtn1, mtn2) > 150 && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1))
        ret = mtn1 > mtn2 ? match2 : match1;
    else
        ret = norm1 > norm2 ? match2 : match1;
    return ret;
}

static void copy_fields(const FieldMatchContext *fm, AVFrame *dst,
                        const AVFrame *src, int field)
{
    int plane;
    for (plane = 0; plane < 4 && src->data[plane] && src->linesize[plane]; plane++) {
        const int plane_h = get_height(fm, src, plane);
        const int nb_copy_fields = (plane_h >> 1) + (field ? 0 : (plane_h & 1));
        av_image_copy_plane(dst->data[plane] + field*dst->linesize[plane], dst->linesize[plane] << 1,
                            src->data[plane] + field*src->linesize[plane], src->linesize[plane] << 1,
                            get_width(fm, src, plane) * fm->bpc, nb_copy_fields);
    }
}

static AVFrame *create_weave_frame(AVFilterContext *ctx, int match, int field,
                                   const AVFrame *prv, AVFrame *src, const AVFrame *nxt)
{
    AVFrame *dst;
    FieldMatchContext *fm = ctx->priv;

    if (match == mC) {
        dst = av_frame_clone(src);
    } else {
        AVFilterLink *outlink = ctx->outputs[0];

        dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!dst)
            return NULL;
        av_frame_copy_props(dst, src);

        switch (match) {
        case mP: copy_fields(fm, dst, src, 1-field); copy_fields(fm, dst, prv,   field); break;
        case mN: copy_fields(fm, dst, src, 1-field); copy_fields(fm, dst, nxt,   field); break;
        case mB: copy_fields(fm, dst, src,   field); copy_fields(fm, dst, prv, 1-field); break;
        case mU: copy_fields(fm, dst, src,   field); copy_fields(fm, dst, nxt, 1-field); break;
        default: av_assert0(0);
        }
    }
    return dst;
}

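/* Arbitrate between two match candidates m1 and m2: woven frames are only
 * generated (and scored) on demand, and m2 wins only when its combed score is
 * clearly lower than m1's and stays below the combpel limit. */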
static int checkmm(AVFilterContext *ctx, int *combs, int m1, int m2,
                   AVFrame **gen_frames, int field)
{
    const FieldMatchContext *fm = ctx->priv;

#define LOAD_COMB(mid) do {                                                 \
    if (combs[mid] < 0) {                                                   \
        if (!gen_frames[mid])                                               \
            gen_frames[mid] = create_weave_frame(ctx, mid, field,           \
                                                 fm->prv, fm->src, fm->nxt);\
        combs[mid] = calc_combed_score(fm, gen_frames[mid]);                \
    }                                                                       \
} while (0)

    LOAD_COMB(m1);
    LOAD_COMB(m2);

    if ((combs[m2] * 3 < combs[m1] || (combs[m2] * 2 < combs[m1] && combs[m1] > fm->combpel)) &&
        abs(combs[m2] - combs[m1]) >= 30 && combs[m2] < fm->combpel)
        return m2;
    else
        return m1;
}

static const int fxo0m[] = { mP, mC, mN, mB, mU };
static const int fxo1m[] = { mN, mC, mP, mU, mB };

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    FieldMatchContext *fm = ctx->priv;
    int combs[] = { -1, -1, -1, -1, -1 };
    int order, field, i, match, sc = 0, ret = 0;
    const int *fxo;
    AVFrame *gen_frames[] = { NULL, NULL, NULL, NULL, NULL };
    AVFrame *dst;

    /* update frames queue(s) */
#define SLIDING_FRAME_WINDOW(prv, src, nxt) do {                \
        if (prv != src) /* 2nd loop exception (1st has prv==src and we don't want to lose src) */ \
            av_frame_free(&prv);                                \
        prv = src;                                              \
        src = nxt;                                              \
        if (in)                                                 \
            nxt = in;                                           \
        if (!prv)                                               \
            prv = src;                                          \
        if (!prv) /* received only one frame at that point */   \
            return 0;                                           \
        av_assert0(prv && src && nxt);                          \
    } while (0)

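    /* The filter keeps a 3-frame sliding window (prv/src/nxt), so the decision
     * for src lags one frame behind the input; when ppsrc is set the window is
     * duplicated for the clean source stream (prv2/src2/nxt2). */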
    if (FF_INLINK_IDX(inlink) == INPUT_MAIN) {
        av_assert0(fm->got_frame[INPUT_MAIN] == 0);
        SLIDING_FRAME_WINDOW(fm->prv, fm->src, fm->nxt);
        fm->got_frame[INPUT_MAIN] = 1;
    } else {
        av_assert0(fm->got_frame[INPUT_CLEANSRC] == 0);
        SLIDING_FRAME_WINDOW(fm->prv2, fm->src2, fm->nxt2);
        fm->got_frame[INPUT_CLEANSRC] = 1;
    }
    if (!fm->got_frame[INPUT_MAIN] || (fm->ppsrc && !fm->got_frame[INPUT_CLEANSRC]))
        return 0;
    fm->got_frame[INPUT_MAIN] = fm->got_frame[INPUT_CLEANSRC] = 0;
    in = fm->src;

    /* parity */
    order = fm->order != FM_PARITY_AUTO ? fm->order : (in->interlaced_frame ? in->top_field_first : 1);
    field = fm->field != FM_PARITY_AUTO ? fm->field : order;
    av_assert0(order == 0 || order == 1 || field == 0 || field == 1);
    fxo = field ^ order ? fxo1m : fxo0m;

    /* debug mode: we generate all the fields combinations and their associated
     * combed score. XXX: inject as frame metadata? */
    if (fm->combdbg) {
        for (i = 0; i < FF_ARRAY_ELEMS(combs); i++) {
            if (i > mN && fm->combdbg == COMBDBG_PCN)
                break;
            gen_frames[i] = create_weave_frame(ctx, i, field, fm->prv, fm->src, fm->nxt);
            if (!gen_frames[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            combs[i] = calc_combed_score(fm, gen_frames[i]);
        }
        av_log(ctx, AV_LOG_INFO, "COMBS: %3d %3d %3d %3d %3d\n",
               combs[0], combs[1], combs[2], combs[3], combs[4]);
    } else {
        gen_frames[mC] = av_frame_clone(fm->src);
        if (!gen_frames[mC]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* p/c selection and optional 3-way p/c/n matches */
    match = compare_fields(fm, fxo[mC], fxo[mP], field);
    if (fm->mode == MODE_PCN || fm->mode == MODE_PCN_UB)
        match = compare_fields(fm, match, fxo[mN], field);

    /* scene change check */
    if (fm->combmatch == COMBMATCH_SC) {
        if (fm->lastn == outlink->frame_count_in - 1) {
            if (fm->lastscdiff > fm->scthresh)
                sc = 1;
        } else if (luma_abs_diff(fm->prv, fm->src) > fm->scthresh) {
            sc = 1;
        }

        if (!sc) {
            fm->lastn = outlink->frame_count_in;
            fm->lastscdiff = luma_abs_diff(fm->src, fm->nxt);
            sc = fm->lastscdiff > fm->scthresh;
        }
    }

    if (fm->combmatch == COMBMATCH_FULL || (fm->combmatch == COMBMATCH_SC && sc)) {
        switch (fm->mode) {
        /* 2-way p/c matches */
        case MODE_PC:
            match = checkmm(ctx, combs, match, match == fxo[mP] ? fxo[mC] : fxo[mP], gen_frames, field);
            break;
        case MODE_PC_N:
            match = checkmm(ctx, combs, match, fxo[mN], gen_frames, field);
            break;
        case MODE_PC_U:
            match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
            break;
        case MODE_PC_N_UB:
            match = checkmm(ctx, combs, match, fxo[mN], gen_frames, field);
            match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
            match = checkmm(ctx, combs, match, fxo[mB], gen_frames, field);
            break;
        /* 3-way p/c/n matches */
        case MODE_PCN:
            match = checkmm(ctx, combs, match, match == fxo[mP] ? fxo[mC] : fxo[mP], gen_frames, field);
            break;
        case MODE_PCN_UB:
            match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
            match = checkmm(ctx, combs, match, fxo[mB], gen_frames, field);
            break;
        default:
            av_assert0(0);
        }
    }

    /* get output frame and drop the others */
    if (fm->ppsrc) {
        /* field matching was based on a filtered/post-processed input, we now
         * pick the untouched fields from the clean source */
        dst = create_weave_frame(ctx, match, field, fm->prv2, fm->src2, fm->nxt2);
    } else {
        if (!gen_frames[match]) { // XXX: is that possible?
            dst = create_weave_frame(ctx, match, field, fm->prv, fm->src, fm->nxt);
        } else {
            dst = gen_frames[match];
            gen_frames[match] = NULL;
        }
    }
    if (!dst) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    /* mark the frame we are unable to match properly as interlaced so a proper
     * de-interlacer can take over */
    dst->interlaced_frame = combs[match] >= fm->combpel;
    if (dst->interlaced_frame) {
        av_log(ctx, AV_LOG_WARNING, "Frame #%"PRId64" at %s is still interlaced\n",
               outlink->frame_count_in, av_ts2timestr(in->pts, &inlink->time_base));
        dst->top_field_first = field;
    }

    av_log(ctx, AV_LOG_DEBUG, "SC:%d | COMBS: %3d %3d %3d %3d %3d (combpel=%d)"
           " match=%d combed=%s\n", sc, combs[0], combs[1], combs[2], combs[3], combs[4],
           fm->combpel, match, dst->interlaced_frame ? "YES" : "NO");

fail:
    for (i = 0; i < FF_ARRAY_ELEMS(gen_frames); i++)
        av_frame_free(&gen_frames[i]);

    if (ret >= 0)
        return ff_filter_frame(outlink, dst);
    return ret;
}

static int activate(AVFilterContext *ctx)
{
    FieldMatchContext *fm = ctx->priv;
    AVFrame *frame = NULL;
    int ret = 0, status;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

    if ((fm->got_frame[INPUT_MAIN] == 0) &&
        (ret = ff_inlink_consume_frame(ctx->inputs[INPUT_MAIN], &frame)) > 0) {
        ret = filter_frame(ctx->inputs[INPUT_MAIN], frame);
        if (ret < 0)
            return ret;
    }
    if (ret < 0)
        return ret;
    if (fm->ppsrc &&
        (fm->got_frame[INPUT_CLEANSRC] == 0) &&
        (ret = ff_inlink_consume_frame(ctx->inputs[INPUT_CLEANSRC], &frame)) > 0) {
        ret = filter_frame(ctx->inputs[INPUT_CLEANSRC], frame);
        if (ret < 0)
            return ret;
    }
    if (ret < 0) {
        return ret;
    } else if (ff_inlink_acknowledge_status(ctx->inputs[INPUT_MAIN], &status, &pts)) {
        if (status == AVERROR_EOF) { // flushing
            fm->eof |= 1 << INPUT_MAIN;
            ret = filter_frame(ctx->inputs[INPUT_MAIN], NULL);
        }
        ff_outlink_set_status(ctx->outputs[0], status, pts);
        return ret;
    } else if (fm->ppsrc && ff_inlink_acknowledge_status(ctx->inputs[INPUT_CLEANSRC], &status, &pts)) {
        if (status == AVERROR_EOF) { // flushing
            fm->eof |= 1 << INPUT_CLEANSRC;
            ret = filter_frame(ctx->inputs[INPUT_CLEANSRC], NULL);
        }
        ff_outlink_set_status(ctx->outputs[0], status, pts);
        return ret;
    } else {
        if (ff_outlink_frame_wanted(ctx->outputs[0])) {
            if (fm->got_frame[INPUT_MAIN] == 0)
                ff_inlink_request_frame(ctx->inputs[INPUT_MAIN]);
            if (fm->ppsrc && (fm->got_frame[INPUT_CLEANSRC] == 0))
                ff_inlink_request_frame(ctx->inputs[INPUT_CLEANSRC]);
        }
        return 0;
    }
}

static int query_formats(AVFilterContext *ctx)
{
    FieldMatchContext *fm = ctx->priv;

    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat unproc_pix_fmts[] = {
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_YUVJ411P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV440P10,
        AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
        AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_NONE
    };
    int ret;
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);

    if (!fmts_list)
        return AVERROR(ENOMEM);
    if (!fm->ppsrc) {
        return ff_set_common_formats(ctx, fmts_list);
    }

    if ((ret = ff_formats_ref(fmts_list, &ctx->inputs[INPUT_MAIN]->outcfg.formats)) < 0)
        return ret;
    fmts_list = ff_make_format_list(unproc_pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    if ((ret = ff_formats_ref(fmts_list, &ctx->outputs[0]->incfg.formats)) < 0)
        return ret;
    if ((ret = ff_formats_ref(fmts_list, &ctx->inputs[INPUT_CLEANSRC]->outcfg.formats)) < 0)
        return ret;
    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    int ret;
    AVFilterContext *ctx = inlink->dst;
    FieldMatchContext *fm = ctx->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
    const int w = inlink->w;
    const int h = inlink->h;

    fm->scthresh = (int64_t)((w * h * 255.0 * fm->scthresh_flt) / 100.0);

    if ((ret = av_image_alloc(fm->map_data,   fm->map_linesize,   w, h, inlink->format, 32)) < 0 ||
        (ret = av_image_alloc(fm->cmask_data, fm->cmask_linesize, w, h, inlink->format, 32)) < 0)
        return ret;

    fm->hsub = pix_desc->log2_chroma_w;
    fm->vsub = pix_desc->log2_chroma_h;

    fm->tpitchy  = FFALIGN(w,      16);
    fm->tpitchuv = FFALIGN(w >> 1, 16);

    fm->tbuffer = av_calloc((h/2 + 4) * fm->tpitchy, sizeof(*fm->tbuffer));
    fm->c_array = av_malloc_array((((w + fm->blockx/2)/fm->blockx)+1) *
                                  (((h + fm->blocky/2)/fm->blocky)+1),
                                  4 * sizeof(*fm->c_array));
    if (!fm->tbuffer || !fm->c_array)
        return AVERROR(ENOMEM);

    return 0;
}

static av_cold int fieldmatch_init(AVFilterContext *ctx)
{
    const FieldMatchContext *fm = ctx->priv;
    AVFilterPad pad = {
        .name         = "main",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    };
    int ret;

    if ((ret = ff_insert_inpad(ctx, INPUT_MAIN, &pad)) < 0)
        return ret;

    if (fm->ppsrc) {
        pad.name = "clean_src";
        pad.config_props = NULL;
        if ((ret = ff_insert_inpad(ctx, INPUT_CLEANSRC, &pad)) < 0)
            return ret;
    }

    if ((fm->blockx & (fm->blockx - 1)) ||
        (fm->blocky & (fm->blocky - 1))) {
        av_log(ctx, AV_LOG_ERROR, "blockx and blocky settings must be power of two\n");
        return AVERROR(EINVAL);
    }

    if (fm->combpel > fm->blockx * fm->blocky) {
        av_log(ctx, AV_LOG_ERROR, "Combed pixel should not be larger than blockx x blocky\n");
        return AVERROR(EINVAL);
    }

    return 0;
}

static av_cold void fieldmatch_uninit(AVFilterContext *ctx)
{
    FieldMatchContext *fm = ctx->priv;

    if (fm->prv != fm->src)
        av_frame_free(&fm->prv);
    if (fm->nxt != fm->src)
        av_frame_free(&fm->nxt);
    if (fm->prv2 != fm->src2)
        av_frame_free(&fm->prv2);
    if (fm->nxt2 != fm->src2)
        av_frame_free(&fm->nxt2);
    av_frame_free(&fm->src);
    av_frame_free(&fm->src2);
    av_freep(&fm->map_data[0]);
    av_freep(&fm->cmask_data[0]);
    av_freep(&fm->tbuffer);
    av_freep(&fm->c_array);
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FieldMatchContext *fm = ctx->priv;
    const AVFilterLink *inlink =
        ctx->inputs[fm->ppsrc ? INPUT_CLEANSRC : INPUT_MAIN];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    fm->bpc = (desc->comp[0].depth + 7) / 8;
    outlink->time_base = inlink->time_base;
    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
    outlink->frame_rate = inlink->frame_rate;
    outlink->w = inlink->w;
    outlink->h = inlink->h;
    return 0;
}

static const AVFilterPad fieldmatch_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};

AVFilter ff_vf_fieldmatch = {
    .name           = "fieldmatch",
    .description    = NULL_IF_CONFIG_SMALL("Field matching for inverse telecine."),
    .query_formats  = query_formats,
    .priv_size      = sizeof(FieldMatchContext),
    .init           = fieldmatch_init,
    .activate       = activate,
    .uninit         = fieldmatch_uninit,
    .inputs         = NULL,
    .outputs        = fieldmatch_outputs,
    .priv_class     = &fieldmatch_class,
    .flags          = AVFILTER_FLAG_DYNAMIC_INPUTS,
};