/*
 * Copyright (c) 2016 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
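
/**
 * @file
 * Compute and apply a lookup table from two video inputs (lut2) or from
 * two successive frames of a single input (tlut2).
 *
 * A usage sketch (file names are placeholders): the c0-c3 options hold
 * per-component expressions over "x" (a sample from the first input)
 * and "y" (a sample from the second input), e.g.
 *
 *     ffmpeg -i a.mp4 -i b.mp4 \
 *            -filter_complex "[0:v][1:v]lut2=c0='max(x,y)'" out.mp4
 */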

#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "framesync.h"

static const char *const var_names[] = {
    "w",   ///< width of the input video
    "h",   ///< height of the input video
    "x",   ///< input value for the pixel from input #1
    "y",   ///< input value for the pixel from input #2
    "bdx", ///< input #1 video bitdepth
    "bdy", ///< input #2 video bitdepth
    NULL
};

enum var_name {
    VAR_W,
    VAR_H,
    VAR_X,
    VAR_Y,
    VAR_BITDEPTHX,
    VAR_BITDEPTHY,
    VAR_VARS_NB
};

typedef struct LUT2Context {
    const AVClass *class;
    FFFrameSync fs;

    int odepth;
    char *comp_expr_str[4];

    AVExpr *comp_expr[4];
    double var_values[VAR_VARS_NB];
    uint16_t *lut[4];    ///< lookup table for each component
    int width[4], height[4];
    int widthx[4], heightx[4];
    int widthy[4], heighty[4];
    int nb_planesx;
    int nb_planesy;
    int nb_planes;
    int depth, depthx, depthy;
    int tlut2;
    AVFrame *prev_frame; /* only used with tlut2 */

    int (*lut2)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} LUT2Context;
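
/*
 * Each plane's LUT is a flat array of 1 << (depthx + depthy) uint16_t
 * entries; an (x, y) sample pair selects entry (y << depthx) | x. For
 * two 8-bit inputs that is 65536 entries, and x=3, y=2 maps to index
 * (2 << 8) | 3 = 515.
 */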

typedef struct ThreadData {
    AVFrame *out, *srcx, *srcy;
} ThreadData;

#define OFFSET(x) offsetof(LUT2Context, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
#define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption options[] = {
    { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = TFLAGS },
    { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = TFLAGS },
    { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = TFLAGS },
    { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = TFLAGS },
    { "d",  "set output depth",            OFFSET(odepth),           AV_OPT_TYPE_INT,    { .i64 = 0 }, 0, 16, .flags = FLAGS },
    { NULL }
};

static av_cold void uninit(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;
    int i;

    ff_framesync_uninit(&s->fs);
    av_frame_free(&s->prev_frame);

    for (i = 0; i < 4; i++) {
        av_expr_free(s->comp_expr[i]);
        s->comp_expr[i] = NULL;
        av_freep(&s->comp_expr_str[i]);
        av_freep(&s->lut[i]);
    }
}
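
/*
 * Pixel formats grouped by component bit depth. query_formats() offers
 * the combined list on the inputs and, when the "d" (output depth)
 * option is set, restricts the output to the list for that depth.
 */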

#define BIT8_FMTS \
    AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, \
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, \
    AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P, \
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, \
    AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, \
    AV_PIX_FMT_GRAY8, AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,

#define BIT9_FMTS \
    AV_PIX_FMT_GBRP9, AV_PIX_FMT_GRAY9, \
    AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9, \
    AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,

#define BIT10_FMTS \
    AV_PIX_FMT_GRAY10, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRAP10, \
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, \
    AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,

#define BIT12_FMTS \
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12, \
    AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12, \
    AV_PIX_FMT_GRAY12, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRP12,

#define BIT14_FMTS \
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14, \
    AV_PIX_FMT_GRAY14, AV_PIX_FMT_GBRP14,

#define BIT16_FMTS \
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16, \
    AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16, \
    AV_PIX_FMT_GBRP16, AV_PIX_FMT_GBRAP16, AV_PIX_FMT_GRAY16,

static int query_formats(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;
    static const enum AVPixelFormat all_pix_fmts[] = {
        BIT8_FMTS
        BIT9_FMTS
        BIT10_FMTS
        BIT12_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit8_pix_fmts[] = {
        BIT8_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit9_pix_fmts[] = {
        BIT9_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit10_pix_fmts[] = {
        BIT10_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit12_pix_fmts[] = {
        BIT12_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit14_pix_fmts[] = {
        BIT14_FMTS
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat bit16_pix_fmts[] = {
        BIT16_FMTS
        AV_PIX_FMT_NONE
    };
    const enum AVPixelFormat *pix_fmts;
    int ret;

    if (s->tlut2 || !s->odepth)
        return ff_set_common_formats(ctx, ff_make_format_list(all_pix_fmts));

    ret = ff_formats_ref(ff_make_format_list(all_pix_fmts), &ctx->inputs[0]->outcfg.formats);
    if (ret < 0)
        return ret;

    switch (s->odepth) {
    case 8:  pix_fmts = bit8_pix_fmts;  break;
    case 9:  pix_fmts = bit9_pix_fmts;  break;
    case 10: pix_fmts = bit10_pix_fmts; break;
    case 12: pix_fmts = bit12_pix_fmts; break;
    case 14: pix_fmts = bit14_pix_fmts; break;
    case 16: pix_fmts = bit16_pix_fmts; break;
    default: av_log(ctx, AV_LOG_ERROR, "Unsupported output bit depth %d.\n", s->odepth);
             return AVERROR(EINVAL);
    }

    return ff_formats_ref(ff_make_format_list(pix_fmts), &ctx->outputs[0]->incfg.formats);
}
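
/*
 * Input link configuration: record the per-plane dimensions (taking
 * chroma subsampling into account) and the component bit depth of each
 * input. For tlut2 there is only one input, so it supplies both the
 * "x" and "y" depths.
 */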
static int config_inputx(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;

    s->nb_planesx = av_pix_fmt_count_planes(inlink->format);
    s->heightx[1] = s->heightx[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->heightx[0] = s->heightx[3] = inlink->h;
    s->widthx[1]  = s->widthx[2]  = AV_CEIL_RSHIFT(inlink->w, hsub);
    s->widthx[0]  = s->widthx[3]  = inlink->w;

    s->var_values[VAR_W] = inlink->w;
    s->var_values[VAR_H] = inlink->h;
    s->depthx = desc->comp[0].depth;
    s->var_values[VAR_BITDEPTHX] = s->depthx;

    if (s->tlut2) {
        s->depthy = desc->comp[0].depth;
        s->var_values[VAR_BITDEPTHY] = s->depthy;
    }

    return 0;
}

static int config_inputy(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;

    s->nb_planesy = av_pix_fmt_count_planes(inlink->format);
    s->depthy = desc->comp[0].depth;
    s->var_values[VAR_BITDEPTHY] = s->depthy;

    s->heighty[1] = s->heighty[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->heighty[0] = s->heighty[3] = inlink->h;
    s->widthy[1]  = s->widthy[2]  = AV_CEIL_RSHIFT(inlink->w, hsub);
    s->widthy[0]  = s->widthy[3]  = inlink->w;

    return 0;
}

#define DEFINE_LUT2(zname, xname, yname, ztype, xtype, ytype, zdiv, xdiv, ydiv)   \
static int lut2_##zname##_##xname##_##yname(AVFilterContext *ctx,                 \
                                            void *arg,                            \
                                            int jobnr, int nb_jobs)               \
{                                                                                 \
    LUT2Context *s = ctx->priv;                                                   \
    ThreadData *td = arg;                                                         \
    AVFrame *out = td->out;                                                       \
    AVFrame *srcx = td->srcx;                                                     \
    AVFrame *srcy = td->srcy;                                                     \
    const int odepth = s->odepth;                                                 \
    int p, y, x;                                                                  \
                                                                                  \
    for (p = 0; p < s->nb_planes; p++) {                                          \
        const int slice_start = (s->heightx[p] * jobnr) / nb_jobs;                \
        const int slice_end = (s->heightx[p] * (jobnr+1)) / nb_jobs;              \
        const uint16_t *lut = s->lut[p];                                          \
        const xtype *srcxx;                                                       \
        const ytype *srcyy;                                                       \
        ztype *dst;                                                               \
                                                                                  \
        dst   = (ztype *)(out->data[p] + slice_start * out->linesize[p]);         \
        srcxx = (const xtype *)(srcx->data[p] + slice_start * srcx->linesize[p]); \
        srcyy = (const ytype *)(srcy->data[p] + slice_start * srcy->linesize[p]); \
                                                                                  \
        for (y = slice_start; y < slice_end; y++) {                               \
            for (x = 0; x < s->widthx[p]; x++) {                                  \
                dst[x] = av_clip_uintp2_c(lut[(srcyy[x] << s->depthx) | srcxx[x]], odepth); \
            }                                                                     \
                                                                                  \
            dst   += out->linesize[p] / zdiv;                                     \
            srcxx += srcx->linesize[p] / xdiv;                                    \
            srcyy += srcy->linesize[p] / ydiv;                                    \
        }                                                                         \
    }                                                                             \
    return 0;                                                                     \
}
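
/*
 * One slice worker per storage-size combination, named
 * lut2_<out>_<in1>_<in2> with 8 for byte and 16 for 16-bit samples;
 * config_output() picks the variant matching the negotiated depths.
 */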
DEFINE_LUT2(8,  8,  8,  uint8_t,  uint8_t,  uint8_t,  1, 1, 1)
DEFINE_LUT2(8,  8,  16, uint8_t,  uint8_t,  uint16_t, 1, 1, 2)
DEFINE_LUT2(8,  16, 8,  uint8_t,  uint16_t, uint8_t,  1, 2, 1)
DEFINE_LUT2(8,  16, 16, uint8_t,  uint16_t, uint16_t, 1, 2, 2)
DEFINE_LUT2(16, 8,  8,  uint16_t, uint8_t,  uint8_t,  2, 1, 1)
DEFINE_LUT2(16, 8,  16, uint16_t, uint8_t,  uint16_t, 2, 1, 2)
DEFINE_LUT2(16, 16, 8,  uint16_t, uint16_t, uint8_t,  2, 2, 1)
DEFINE_LUT2(16, 16, 16, uint16_t, uint16_t, uint16_t, 2, 2, 2)

static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    LUT2Context *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *srcx = NULL, *srcy = NULL;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &srcx, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &srcy, 0)) < 0)
        return ret;

    if (ctx->is_disabled || !srcy) {
        out = av_frame_clone(srcx);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        ThreadData td;

        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, srcx);

        td.out  = out;
        td.srcx = srcx;
        td.srcy = srcy;
        ctx->internal->execute(ctx, s->lut2, &td, NULL, FFMIN(s->heightx[1], ff_filter_get_nb_threads(ctx)));
    }

    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}
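
/*
 * Build the lookup tables: parse each component expression once, then
 * evaluate it for every possible (x, y) sample pair and store the
 * result at index (y << depthx) + x. For example, with two 8-bit
 * inputs and c0='(x+y)/2' this fills all 65536 entries of the first
 * plane's table with the rounded-down average of the two samples.
 */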
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LUT2Context *s = ctx->priv;
    int p, ret;

    s->depth = s->depthx + s->depthy;
    s->nb_planes = s->nb_planesx;

    s->lut2 = s->depth > 16 ? lut2_16_16_16 : lut2_8_8_8;
    if (s->odepth) {
        if (s->depthx == 8 && s->depthy == 8 && s->odepth > 8)
            s->lut2 = lut2_16_8_8;
        if (s->depthx > 8 && s->depthy == 8 && s->odepth > 8)
            s->lut2 = lut2_16_16_8;
        if (s->depthx == 8 && s->depthy > 8 && s->odepth > 8)
            s->lut2 = lut2_16_8_16;
        if (s->depthx == 8 && s->depthy == 8 && s->odepth == 8)
            s->lut2 = lut2_8_8_8;
        if (s->depthx > 8 && s->depthy == 8 && s->odepth == 8)
            s->lut2 = lut2_8_16_8;
        if (s->depthx == 8 && s->depthy > 8 && s->odepth == 8)
            s->lut2 = lut2_8_8_16;
        if (s->depthx > 8 && s->depthy > 8 && s->odepth == 8)
            s->lut2 = lut2_8_16_16;
    } else {
        s->odepth = s->depthx;
    }

    for (p = 0; p < s->nb_planes; p++) {
        if (!s->lut[p])
            s->lut[p] = av_malloc_array(1 << s->depth, sizeof(uint16_t));
        if (!s->lut[p])
            return AVERROR(ENOMEM);
    }

    for (p = 0; p < s->nb_planes; p++) {
        double res;
        int x, y;

        /* create the parsed expression */
        av_expr_free(s->comp_expr[p]);
        s->comp_expr[p] = NULL;
        ret = av_expr_parse(&s->comp_expr[p], s->comp_expr_str[p],
                            var_names, NULL, NULL, NULL, NULL, 0, ctx);
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Error when parsing the expression '%s' for the component %d.\n",
                   s->comp_expr_str[p], p);
            return AVERROR(EINVAL);
        }

        /* compute the lut */
        for (y = 0; y < (1 << s->depthy); y++) {
            s->var_values[VAR_Y] = y;
            for (x = 0; x < (1 << s->depthx); x++) {
                s->var_values[VAR_X] = x;
                res = av_expr_eval(s->comp_expr[p], s->var_values, s);
                if (isnan(res)) {
                    av_log(ctx, AV_LOG_ERROR,
                           "Error when evaluating the expression '%s' for the values %d and %d for the component %d.\n",
                           s->comp_expr_str[p], x, y, p);
                    return AVERROR(EINVAL);
                }

                s->lut[p][(y << s->depthx) + x] = res;
            }
        }
    }

    return 0;
}
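
/*
 * Output configuration for the two-input lut2 filter: check that the
 * inputs and the output agree on geometry and plane count, then set up
 * framesync. The first input gets the higher sync level
 * (in[0].sync = 2), so its frames drive the output timeline.
 */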
static int lut2_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LUT2Context *s = ctx->priv;
    AVFilterLink *srcx = ctx->inputs[0];
    AVFilterLink *srcy = ctx->inputs[1];
    FFFrameSyncIn *in;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;
    int ret;

    outlink->w = srcx->w;
    outlink->h = srcx->h;
    outlink->time_base = srcx->time_base;
    outlink->sample_aspect_ratio = srcx->sample_aspect_ratio;
    outlink->frame_rate = srcx->frame_rate;

    s->nb_planes = av_pix_fmt_count_planes(outlink->format);
    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(outlink->h, vsub);
    s->height[0] = s->height[3] = outlink->h;
    s->width[1]  = s->width[2]  = AV_CEIL_RSHIFT(outlink->w, hsub);
    s->width[0]  = s->width[3]  = outlink->w;

    if (!s->odepth && srcx->format != srcy->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    }

    if (srcx->w != srcy->w || srcx->h != srcy->h) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d) do not match the corresponding "
               "second input link %s parameters (size %dx%d)\n",
               ctx->input_pads[0].name, srcx->w, srcx->h,
               ctx->input_pads[1].name,
               srcy->w, srcy->h);
        return AVERROR(EINVAL);
    }

    if (s->nb_planesx != s->nb_planesy) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s number of planes "
               "(%d) do not match the corresponding "
               "second input link %s number of planes (%d)\n",
               ctx->input_pads[0].name, s->nb_planesx,
               ctx->input_pads[1].name, s->nb_planesy);
        return AVERROR(EINVAL);
    }

    if (s->nb_planesx != s->nb_planes) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s number of planes "
               "(%d) do not match the corresponding "
               "output link %s number of planes (%d)\n",
               ctx->input_pads[0].name, s->nb_planesx,
               ctx->output_pads[0].name, s->nb_planes);
        return AVERROR(EINVAL);
    }

    if (s->widthx[1] != s->widthy[1] || s->heightx[1] != s->heighty[1]) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s 2nd plane "
               "(size %dx%d) do not match the corresponding "
               "second input link %s 2nd plane (size %dx%d)\n",
               ctx->input_pads[0].name, s->widthx[1], s->heightx[1],
               ctx->input_pads[1].name,
               s->widthy[1], s->heighty[1]);
        return AVERROR(EINVAL);
    }

    if (s->widthx[2] != s->widthy[2] || s->heightx[2] != s->heighty[2]) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s 3rd plane "
               "(size %dx%d) do not match the corresponding "
               "second input link %s 3rd plane (size %dx%d)\n",
               ctx->input_pads[0].name, s->widthx[2], s->heightx[2],
               ctx->input_pads[1].name,
               s->widthy[2], s->heighty[2]);
        return AVERROR(EINVAL);
    }

    if (s->widthx[1] != s->width[1] || s->heightx[1] != s->height[1]) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s 2nd plane "
               "(size %dx%d) do not match the corresponding "
               "output link %s 2nd plane (size %dx%d)\n",
               ctx->input_pads[0].name, s->widthx[1], s->heightx[1],
               ctx->output_pads[0].name, s->width[1], s->height[1]);
        return AVERROR(EINVAL);
    }

    if (s->widthx[2] != s->width[2] || s->heightx[2] != s->height[2]) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s 3rd plane "
               "(size %dx%d) do not match the corresponding "
               "output link %s 3rd plane (size %dx%d)\n",
               ctx->input_pads[0].name, s->widthx[2], s->heightx[2],
               ctx->output_pads[0].name, s->width[2], s->height[2]);
        return AVERROR(EINVAL);
    }

    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
        return ret;

    in = s->fs.in;
    in[0].time_base = srcx->time_base;
    in[1].time_base = srcy->time_base;
    in[0].sync   = 2;
    in[0].before = EXT_STOP;
    in[0].after  = EXT_INFINITY;
    in[1].sync   = 1;
    in[1].before = EXT_STOP;
    in[1].after  = EXT_INFINITY;
    s->fs.opaque   = s;
    s->fs.on_event = process_frame;

    if ((ret = config_output(outlink)) < 0)
        return ret;

    ret = ff_framesync_configure(&s->fs);
    outlink->time_base = s->fs.time_base;

    return ret;
}

static int activate(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "srcx",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_inputx,
    },
    {
        .name         = "srcy",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_inputy,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = lut2_config_output,
    },
    { NULL }
};

static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    int ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);

    if (ret < 0)
        return ret;

    return config_output(ctx->outputs[0]);
}

#define lut2_options options

FRAMESYNC_DEFINE_CLASS(lut2, LUT2Context, fs);

AVFilter ff_vf_lut2 = {
    .name          = "lut2",
    .description   = NULL_IF_CONFIG_SMALL("Compute and apply a lookup table from two video inputs."),
    .preinit       = lut2_framesync_preinit,
    .priv_size     = sizeof(LUT2Context),
    .priv_class    = &lut2_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = inputs,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
                     AVFILTER_FLAG_SLICE_THREADS,
    .process_command = process_command,
};

#if CONFIG_TLUT2_FILTER

static av_cold int init(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;

    s->tlut2 = !strcmp(ctx->filter->name, "tlut2");

    return 0;
}
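
/*
 * tlut2 feeds consecutive frames of a single input through the same LUT
 * machinery: "x" is the current frame and "y" the previous one. The
 * very first frame is only buffered and produces no output.
 */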
static int tlut2_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    if (s->prev_frame) {
        AVFrame *out;

        if (ctx->is_disabled) {
            out = av_frame_clone(frame);
        } else {
            ThreadData td;

            out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
            if (!out) {
                av_frame_free(&s->prev_frame);
                s->prev_frame = frame;
                return AVERROR(ENOMEM);
            }

            av_frame_copy_props(out, frame);

            td.out  = out;
            td.srcx = frame;
            td.srcy = s->prev_frame;
            ctx->internal->execute(ctx, s->lut2, &td, NULL, FFMIN(s->heightx[1], ff_filter_get_nb_threads(ctx)));
        }
        av_frame_free(&s->prev_frame);
        s->prev_frame = frame;
        return ff_filter_frame(outlink, out);
    }
    s->prev_frame = frame;
    return 0;
}

static const AVOption tlut2_options[] = {
    { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = TFLAGS },
    { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = TFLAGS },
    { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = TFLAGS },
    { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = TFLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(tlut2);

static const AVFilterPad tlut2_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = tlut2_filter_frame,
        .config_props = config_inputx,
    },
    { NULL }
};

static const AVFilterPad tlut2_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_tlut2 = {
    .name          = "tlut2",
    .description   = NULL_IF_CONFIG_SMALL("Compute and apply a lookup table from two successive frames."),
    .priv_size     = sizeof(LUT2Context),
    .priv_class    = &tlut2_class,
    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,
    .inputs        = tlut2_inputs,
    .outputs       = tlut2_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
                     AVFILTER_FLAG_SLICE_THREADS,
    .process_command = process_command,
};

#endif /* CONFIG_TLUT2_FILTER */