You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

534 lines
18KB

  1. /*
  2. * Copyright (c) 2011 Stefano Sabatini
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * @file
  22. * Compute a look-up table for binding the input value to the output
  23. * value, and apply it to input video.
  24. */
  25. #include "libavutil/attributes.h"
  26. #include "libavutil/bswap.h"
  27. #include "libavutil/common.h"
  28. #include "libavutil/eval.h"
  29. #include "libavutil/opt.h"
  30. #include "libavutil/pixdesc.h"
  31. #include "avfilter.h"
  32. #include "drawutils.h"
  33. #include "formats.h"
  34. #include "internal.h"
  35. #include "video.h"
/* Names of the variables available in the component expressions,
 * parallel to enum var_name below. */
static const char *const var_names[] = {
    "w",       ///< width of the input video
    "h",       ///< height of the input video
    "val",     ///< input value for the pixel
    "maxval",  ///< max value for the pixel
    "minval",  ///< min value for the pixel
    "negval",  ///< negated value
    "clipval", ///< input value clipped to the [minval, maxval] range
    NULL
};
/* Indexes into LutContext.var_values[], kept in sync with var_names[]. */
enum var_name {
    VAR_W,
    VAR_H,
    VAR_VAL,
    VAR_MAXVAL,
    VAR_MINVAL,
    VAR_NEGVAL,
    VAR_CLIPVAL,
    VAR_VARS_NB  ///< number of variables, array size for var_values
};
typedef struct LutContext {
    const AVClass *class;
    uint16_t lut[4][256 * 256];     ///< lookup table for each component
    char *comp_expr_str[4];         ///< per-component expression strings (set via options)
    AVExpr *comp_expr[4];           ///< parsed per-component expressions
    int hsub, vsub;                 ///< chroma subsampling shifts of the input format
    double var_values[VAR_VARS_NB]; ///< current expression variable values
    int is_rgb, is_yuv;             ///< input format class flags (mutually exclusive)
    int is_16bit;                   ///< set when the input depth is > 8 bits per component
    int step;                       ///< bytes per packed pixel (packed RGB path only)
    int negate_alpha; /* only used by negate */
} LutContext;
/* Component indices: the same lut slots are addressed either as
 * Y/U/V/A for planar YUV or R/G/B/A for RGB formats. */
#define Y 0
#define U 1
#define V 2
#define R 0
#define G 1
#define B 2
#define A 3

/* Shorthands for the AVOption table below. */
#define OFFSET(x) offsetof(LutContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
/* Shared option table: c0-c3 are generic component expressions;
 * y/u/v and r/g/b/a are aliases targeting the same storage slots
 * (Y==R==0, U==G==1, V==B==2, A==3). Default for all is "val"
 * (identity mapping). */
static const AVOption options[] = {
    { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "y",  "set Y expression",            OFFSET(comp_expr_str[Y]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "u",  "set U expression",            OFFSET(comp_expr_str[U]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "v",  "set V expression",            OFFSET(comp_expr_str[V]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "r",  "set R expression",            OFFSET(comp_expr_str[R]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "g",  "set G expression",            OFFSET(comp_expr_str[G]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "b",  "set B expression",            OFFSET(comp_expr_str[B]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "a",  "set A expression",            OFFSET(comp_expr_str[A]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { NULL }
};
  91. static av_cold void uninit(AVFilterContext *ctx)
  92. {
  93. LutContext *s = ctx->priv;
  94. int i;
  95. for (i = 0; i < 4; i++) {
  96. av_expr_free(s->comp_expr[i]);
  97. s->comp_expr[i] = NULL;
  98. av_freep(&s->comp_expr_str[i]);
  99. }
  100. }
/* Limited-range planar YUV formats handled by the filters, including
 * alpha and high-bit-depth little-endian variants. */
#define YUV_FORMATS \
    AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV420P,  \
    AV_PIX_FMT_YUV411P,  AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUV440P,  \
    AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P, \
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, \
    AV_PIX_FMT_YUVJ440P, \
    AV_PIX_FMT_YUV444P9LE,  AV_PIX_FMT_YUV422P9LE,  AV_PIX_FMT_YUV420P9LE, \
    AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUV440P10LE, \
    AV_PIX_FMT_YUV444P12LE, AV_PIX_FMT_YUV422P12LE, AV_PIX_FMT_YUV420P12LE, AV_PIX_FMT_YUV440P12LE, \
    AV_PIX_FMT_YUV444P14LE, AV_PIX_FMT_YUV422P14LE, AV_PIX_FMT_YUV420P14LE, \
    AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUV420P16LE, \
    AV_PIX_FMT_YUVA444P16LE, AV_PIX_FMT_YUVA422P16LE, AV_PIX_FMT_YUVA420P16LE

/* Packed RGB formats handled by the filters (8 bits per component). */
#define RGB_FORMATS \
    AV_PIX_FMT_ARGB,  AV_PIX_FMT_RGBA,  \
    AV_PIX_FMT_ABGR,  AV_PIX_FMT_BGRA,  \
    AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24

static const enum AVPixelFormat yuv_pix_fmts[] = { YUV_FORMATS, AV_PIX_FMT_NONE };
static const enum AVPixelFormat rgb_pix_fmts[] = { RGB_FORMATS, AV_PIX_FMT_NONE };
static const enum AVPixelFormat all_pix_fmts[] = { RGB_FORMATS, YUV_FORMATS, AV_PIX_FMT_NONE };
  120. static int query_formats(AVFilterContext *ctx)
  121. {
  122. LutContext *s = ctx->priv;
  123. const enum AVPixelFormat *pix_fmts = s->is_rgb ? rgb_pix_fmts :
  124. s->is_yuv ? yuv_pix_fmts :
  125. all_pix_fmts;
  126. AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
  127. if (!fmts_list)
  128. return AVERROR(ENOMEM);
  129. return ff_set_common_formats(ctx, fmts_list);
  130. }
  131. /**
  132. * Clip value val in the minval - maxval range.
  133. */
  134. static double clip(void *opaque, double val)
  135. {
  136. LutContext *s = opaque;
  137. double minval = s->var_values[VAR_MINVAL];
  138. double maxval = s->var_values[VAR_MAXVAL];
  139. return av_clip(val, minval, maxval);
  140. }
  141. /**
  142. * Compute gamma correction for value val, assuming the minval-maxval
  143. * range, val is clipped to a value contained in the same interval.
  144. */
  145. static double compute_gammaval(void *opaque, double gamma)
  146. {
  147. LutContext *s = opaque;
  148. double val = s->var_values[VAR_CLIPVAL];
  149. double minval = s->var_values[VAR_MINVAL];
  150. double maxval = s->var_values[VAR_MAXVAL];
  151. return pow((val-minval)/(maxval-minval), gamma) * (maxval-minval)+minval;
  152. }
  153. /**
  154. * Compute ITU Rec.709 gamma correction of value val.
  155. */
  156. static double compute_gammaval709(void *opaque, double gamma)
  157. {
  158. LutContext *s = opaque;
  159. double val = s->var_values[VAR_CLIPVAL];
  160. double minval = s->var_values[VAR_MINVAL];
  161. double maxval = s->var_values[VAR_MAXVAL];
  162. double level = (val - minval) / (maxval - minval);
  163. level = level < 0.018 ? 4.5 * level
  164. : 1.099 * pow(level, 1.0 / gamma) - 0.099;
  165. return level * (maxval - minval) + minval;
  166. }
/* Custom functions made available to av_expr_parse(), parallel to
 * funcs1_names below. */
static double (* const funcs1[])(void *, double) = {
    (void *)clip,
    (void *)compute_gammaval,
    (void *)compute_gammaval709,
    NULL
};

static const char * const funcs1_names[] = {
    "clip",
    "gammaval",
    "gammaval709",
    NULL
};
  179. static int config_props(AVFilterLink *inlink)
  180. {
  181. AVFilterContext *ctx = inlink->dst;
  182. LutContext *s = ctx->priv;
  183. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
  184. uint8_t rgba_map[4]; /* component index -> RGBA color index map */
  185. int min[4], max[4];
  186. int val, color, ret;
  187. s->hsub = desc->log2_chroma_w;
  188. s->vsub = desc->log2_chroma_h;
  189. s->var_values[VAR_W] = inlink->w;
  190. s->var_values[VAR_H] = inlink->h;
  191. s->is_16bit = desc->comp[0].depth_minus1 > 7;
  192. switch (inlink->format) {
  193. case AV_PIX_FMT_YUV410P:
  194. case AV_PIX_FMT_YUV411P:
  195. case AV_PIX_FMT_YUV420P:
  196. case AV_PIX_FMT_YUV422P:
  197. case AV_PIX_FMT_YUV440P:
  198. case AV_PIX_FMT_YUV444P:
  199. case AV_PIX_FMT_YUVA420P:
  200. case AV_PIX_FMT_YUVA422P:
  201. case AV_PIX_FMT_YUVA444P:
  202. case AV_PIX_FMT_YUV420P9LE:
  203. case AV_PIX_FMT_YUV422P9LE:
  204. case AV_PIX_FMT_YUV444P9LE:
  205. case AV_PIX_FMT_YUVA420P9LE:
  206. case AV_PIX_FMT_YUVA422P9LE:
  207. case AV_PIX_FMT_YUVA444P9LE:
  208. case AV_PIX_FMT_YUV420P10LE:
  209. case AV_PIX_FMT_YUV422P10LE:
  210. case AV_PIX_FMT_YUV440P10LE:
  211. case AV_PIX_FMT_YUV444P10LE:
  212. case AV_PIX_FMT_YUVA420P10LE:
  213. case AV_PIX_FMT_YUVA422P10LE:
  214. case AV_PIX_FMT_YUVA444P10LE:
  215. case AV_PIX_FMT_YUV420P12LE:
  216. case AV_PIX_FMT_YUV422P12LE:
  217. case AV_PIX_FMT_YUV440P12LE:
  218. case AV_PIX_FMT_YUV444P12LE:
  219. case AV_PIX_FMT_YUV420P14LE:
  220. case AV_PIX_FMT_YUV422P14LE:
  221. case AV_PIX_FMT_YUV444P14LE:
  222. case AV_PIX_FMT_YUV420P16LE:
  223. case AV_PIX_FMT_YUV422P16LE:
  224. case AV_PIX_FMT_YUV444P16LE:
  225. case AV_PIX_FMT_YUVA420P16LE:
  226. case AV_PIX_FMT_YUVA422P16LE:
  227. case AV_PIX_FMT_YUVA444P16LE:
  228. min[Y] = 16 * (1 << (desc->comp[0].depth_minus1 - 7));
  229. min[U] = 16 * (1 << (desc->comp[1].depth_minus1 - 7));
  230. min[V] = 16 * (1 << (desc->comp[2].depth_minus1 - 7));
  231. min[A] = 0;
  232. max[Y] = 235 * (1 << (desc->comp[0].depth_minus1 - 7));
  233. max[U] = 240 * (1 << (desc->comp[1].depth_minus1 - 7));
  234. max[V] = 240 * (1 << (desc->comp[2].depth_minus1 - 7));
  235. max[A] = (1 << (desc->comp[3].depth_minus1 + 1)) - 1;
  236. break;
  237. default:
  238. min[0] = min[1] = min[2] = min[3] = 0;
  239. max[0] = max[1] = max[2] = max[3] = 255;
  240. }
  241. s->is_yuv = s->is_rgb = 0;
  242. if (ff_fmt_is_in(inlink->format, yuv_pix_fmts)) s->is_yuv = 1;
  243. else if (ff_fmt_is_in(inlink->format, rgb_pix_fmts)) s->is_rgb = 1;
  244. if (s->is_rgb) {
  245. ff_fill_rgba_map(rgba_map, inlink->format);
  246. s->step = av_get_bits_per_pixel(desc) >> 3;
  247. }
  248. for (color = 0; color < desc->nb_components; color++) {
  249. double res;
  250. int comp = s->is_rgb ? rgba_map[color] : color;
  251. /* create the parsed expression */
  252. av_expr_free(s->comp_expr[color]);
  253. s->comp_expr[color] = NULL;
  254. ret = av_expr_parse(&s->comp_expr[color], s->comp_expr_str[color],
  255. var_names, funcs1_names, funcs1, NULL, NULL, 0, ctx);
  256. if (ret < 0) {
  257. av_log(ctx, AV_LOG_ERROR,
  258. "Error when parsing the expression '%s' for the component %d and color %d.\n",
  259. s->comp_expr_str[comp], comp, color);
  260. return AVERROR(EINVAL);
  261. }
  262. /* compute the lut */
  263. s->var_values[VAR_MAXVAL] = max[color];
  264. s->var_values[VAR_MINVAL] = min[color];
  265. for (val = 0; val < (1 << (desc->comp[0].depth_minus1 + 1)); val++) {
  266. s->var_values[VAR_VAL] = val;
  267. s->var_values[VAR_CLIPVAL] = av_clip(val, min[color], max[color]);
  268. s->var_values[VAR_NEGVAL] =
  269. av_clip(min[color] + max[color] - s->var_values[VAR_VAL],
  270. min[color], max[color]);
  271. res = av_expr_eval(s->comp_expr[color], s->var_values, s);
  272. if (isnan(res)) {
  273. av_log(ctx, AV_LOG_ERROR,
  274. "Error when evaluating the expression '%s' for the value %d for the component %d.\n",
  275. s->comp_expr_str[color], val, comp);
  276. return AVERROR(EINVAL);
  277. }
  278. s->lut[comp][val] = av_clip((int)res, min[color], max[color]);
  279. av_log(ctx, AV_LOG_DEBUG, "val[%d][%d] = %d\n", comp, val, s->lut[comp][val]);
  280. }
  281. }
  282. return 0;
  283. }
/**
 * Apply the precomputed lookup tables to one input frame.
 *
 * Three data paths: packed RGB (per-pixel, step bytes per pixel),
 * planar YUV with >8-bit depth (16-bit samples, byte-swapped on
 * big-endian hosts since only LE formats are negotiated), and planar
 * 8-bit. Works in-place when the input frame is writable.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    LutContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int i, j, plane, direct = 0;

    if (av_frame_is_writable(in)) {
        /* in-place: reuse the input frame as output */
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    if (s->is_rgb) {
        /* packed */
        uint8_t *inrow, *outrow, *inrow0, *outrow0;
        const int w = inlink->w;
        const int h = in->height;
        const uint16_t (*tab)[256*256] = (const uint16_t (*)[256*256])s->lut;
        const int in_linesize = in->linesize[0];
        const int out_linesize = out->linesize[0];
        const int step = s->step;

        inrow0 = in ->data[0];
        outrow0 = out->data[0];
        for (i = 0; i < h; i ++) {
            inrow = inrow0;
            outrow = outrow0;
            for (j = 0; j < w; j++) {
                /* map each component of the pixel through its lut;
                 * smaller steps process fewer components */
                switch (step) {
                case 4: outrow[3] = tab[3][inrow[3]]; // Fall-through
                case 3: outrow[2] = tab[2][inrow[2]]; // Fall-through
                case 2: outrow[1] = tab[1][inrow[1]]; // Fall-through
                default: outrow[0] = tab[0][inrow[0]];
                }
                outrow += step;
                inrow += step;
            }
            inrow0 += in_linesize;
            outrow0 += out_linesize;
        }
    } else if (s->is_16bit) {
        // planar yuv >8 bit depth
        uint16_t *inrow, *outrow;

        for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
            /* chroma planes (1, 2) are subsampled */
            int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
            int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
            int h = FF_CEIL_RSHIFT(inlink->h, vsub);
            int w = FF_CEIL_RSHIFT(inlink->w, hsub);
            const uint16_t *tab = s->lut[plane];
            /* linesizes are in bytes; samples are 16 bit */
            const int in_linesize = in->linesize[plane] / 2;
            const int out_linesize = out->linesize[plane] / 2;

            inrow = (uint16_t *)in ->data[plane];
            outrow = (uint16_t *)out->data[plane];
            for (i = 0; i < h; i++) {
                for (j = 0; j < w; j++) {
#if HAVE_BIGENDIAN
                    /* formats are LE; swap to index the lut, swap back */
                    outrow[j] = av_bswap16(tab[av_bswap16(inrow[j])]);
#else
                    outrow[j] = tab[inrow[j]];
#endif
                }
                inrow += in_linesize;
                outrow += out_linesize;
            }
        }
    } else {
        /* planar 8bit depth */
        uint8_t *inrow, *outrow;

        for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
            /* chroma planes (1, 2) are subsampled */
            int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
            int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
            int h = FF_CEIL_RSHIFT(inlink->h, vsub);
            int w = FF_CEIL_RSHIFT(inlink->w, hsub);
            const uint16_t *tab = s->lut[plane];
            const int in_linesize = in->linesize[plane];
            const int out_linesize = out->linesize[plane];

            inrow = in ->data[plane];
            outrow = out->data[plane];
            for (i = 0; i < h; i++) {
                for (j = 0; j < w; j++)
                    outrow[j] = tab[inrow[j]];
                inrow += in_linesize;
                outrow += out_linesize;
            }
        }
    }

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
/* Single video input pad shared by all the lut-family filters. */
static const AVFilterPad inputs[] = {
    { .name          = "default",
      .type          = AVMEDIA_TYPE_VIDEO,
      .filter_frame  = filter_frame,
      .config_props  = config_props,
    },
    { NULL }
};
/* Single video output pad shared by all the lut-family filters. */
static const AVFilterPad outputs[] = {
    { .name = "default",
      .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
/**
 * Define a lut-family filter named "name_" with the given description.
 * Requires name_##_class (from AVFILTER_DEFINE_CLASS) and
 * name_##_init to be declared before use.
 */
#define DEFINE_LUT_FILTER(name_, description_) \
    AVFilter ff_vf_##name_ = { \
        .name          = #name_, \
        .description   = NULL_IF_CONFIG_SMALL(description_), \
        .priv_size     = sizeof(LutContext), \
        .priv_class    = &name_ ## _class, \
        .init          = name_##_init, \
        .uninit        = uninit, \
        .query_formats = query_formats, \
        .inputs        = inputs, \
        .outputs       = outputs, \
        .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, \
    }
  406. #if CONFIG_LUT_FILTER
  407. #define lut_options options
  408. AVFILTER_DEFINE_CLASS(lut);
  409. static int lut_init(AVFilterContext *ctx)
  410. {
  411. return 0;
  412. }
  413. DEFINE_LUT_FILTER(lut, "Compute and apply a lookup table to the RGB/YUV input video.");
  414. #endif
#if CONFIG_LUTYUV_FILTER

#define lutyuv_options options
AVFILTER_DEFINE_CLASS(lutyuv);

/* Restrict format negotiation to the YUV subset. */
static av_cold int lutyuv_init(AVFilterContext *ctx)
{
    LutContext *s = ctx->priv;

    s->is_yuv = 1;

    return 0;
}

DEFINE_LUT_FILTER(lutyuv, "Compute and apply a lookup table to the YUV input video.");
#endif
#if CONFIG_LUTRGB_FILTER

#define lutrgb_options options
AVFILTER_DEFINE_CLASS(lutrgb);

/* Restrict format negotiation to the RGB subset. */
static av_cold int lutrgb_init(AVFilterContext *ctx)
{
    LutContext *s = ctx->priv;

    s->is_rgb = 1;

    return 0;
}

DEFINE_LUT_FILTER(lutrgb, "Compute and apply a lookup table to the RGB input video.");
#endif
  437. #if CONFIG_NEGATE_FILTER
  438. static const AVOption negate_options[] = {
  439. { "negate_alpha", NULL, OFFSET(negate_alpha), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
  440. { NULL }
  441. };
  442. AVFILTER_DEFINE_CLASS(negate);
  443. static av_cold int negate_init(AVFilterContext *ctx)
  444. {
  445. LutContext *s = ctx->priv;
  446. int i;
  447. av_log(ctx, AV_LOG_DEBUG, "negate_alpha:%d\n", s->negate_alpha);
  448. for (i = 0; i < 4; i++) {
  449. s->comp_expr_str[i] = av_strdup((i == 3 && !s->negate_alpha) ?
  450. "val" : "negval");
  451. if (!s->comp_expr_str[i]) {
  452. uninit(ctx);
  453. return AVERROR(ENOMEM);
  454. }
  455. }
  456. return 0;
  457. }
  458. DEFINE_LUT_FILTER(negate, "Negate input video.");
  459. #endif