/*
 * Copyright (c) 2013 Clément Bœsch
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
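/**
 * @file
 * Make or reverse a natural vignetting effect: each pixel is attenuated by a
 * factor derived from its distance to a configurable center, following a
 * cos^4 falloff controlled by the "angle" option.
 */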
#include <float.h>  /* DBL_MAX */

#include "libavutil/opt.h"
#include "libavutil/eval.h"
#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

static const char *const var_names[] = {
    "w",    // stream width
    "h",    // stream height
    "n",    // frame count
    "pts",  // presentation timestamp expressed in AV_TIME_BASE units
    "r",    // frame rate
    "t",    // timestamp expressed in seconds
    "tb",   // timebase
    NULL
};

enum var_name {
    VAR_W,
    VAR_H,
    VAR_N,
    VAR_PTS,
    VAR_R,
    VAR_T,
    VAR_TB,
    VAR_NB
};

enum EvalMode {
    EVAL_MODE_INIT,
    EVAL_MODE_FRAME,
    EVAL_MODE_NB
};

typedef struct VignetteContext {
    const AVClass *class;
    const AVPixFmtDescriptor *desc;
    int backward;
    int eval_mode;                      ///< EvalMode
#define DEF_EXPR_FIELDS(name) AVExpr *name##_pexpr; char *name##_expr; double name
    DEF_EXPR_FIELDS(angle);
    DEF_EXPR_FIELDS(x0);
    DEF_EXPR_FIELDS(y0);
    double var_values[VAR_NB];
    float *fmap;
    int fmap_linesize;
    double dmax;
    float xscale, yscale;
    uint32_t dither;
    int do_dither;
    AVRational aspect;
    AVRational scale;
} VignetteContext;

#define OFFSET(x) offsetof(VignetteContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption vignette_options[] = {
    { "angle", "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS },
    { "a",     "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS },
    { "x0", "set circle center position on x-axis", OFFSET(x0_expr), AV_OPT_TYPE_STRING, {.str="w/2"}, .flags = FLAGS },
    { "y0", "set circle center position on y-axis", OFFSET(y0_expr), AV_OPT_TYPE_STRING, {.str="h/2"}, .flags = FLAGS },
    { "mode", "set forward/backward mode", OFFSET(backward), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS, "mode" },
        { "forward",  NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, INT_MIN, INT_MAX, FLAGS, "mode"},
        { "backward", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, INT_MIN, INT_MAX, FLAGS, "mode"},
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
        { "init",  "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT},  .flags = FLAGS, .unit = "eval" },
        { "frame", "eval expressions for each frame",             0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
    { "dither", "set dithering", OFFSET(do_dither), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
    { "aspect", "set aspect ratio", OFFSET(aspect), AV_OPT_TYPE_RATIONAL, {.dbl = 1}, 0, DBL_MAX, .flags = FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(vignette);

static av_cold int init(AVFilterContext *ctx)
{
    VignetteContext *s = ctx->priv;

#define PARSE_EXPR(name) do {                                               \
    int ret = av_expr_parse(&s->name##_pexpr, s->name##_expr, var_names,    \
                            NULL, NULL, NULL, NULL, 0, ctx);                \
    if (ret < 0) {                                                          \
        av_log(ctx, AV_LOG_ERROR, "Unable to parse expression for '"        \
               AV_STRINGIFY(name) "'\n");                                   \
        return ret;                                                         \
    }                                                                       \
} while (0)

    PARSE_EXPR(angle);
    PARSE_EXPR(x0);
    PARSE_EXPR(y0);
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    VignetteContext *s = ctx->priv;

    av_freep(&s->fmap);
    av_expr_free(s->angle_pexpr);
    av_expr_free(s->x0_pexpr);
    av_expr_free(s->y0_pexpr);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_RGB24,   AV_PIX_FMT_BGR24,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}
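/* Per-pixel attenuation factor following the "natural" vignetting law: the
 * cos^4 falloff of illumination with the angle off the lens axis, evaluated
 * here on the normalized distance from the configured center. Pixels beyond
 * the normalized radius of 1 are fully darkened. */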
static double get_natural_factor(const VignetteContext *s, int x, int y)
{
    const int xx = (x - s->x0) * s->xscale;
    const int yy = (y - s->y0) * s->yscale;
    const double dnorm = hypot(xx, yy) / s->dmax;

    if (dnorm > 1) {
        return 0;
    } else {
        const double c = cos(s->angle * dnorm);
        return (c*c)*(c*c); // do not remove braces, it helps compilers
    }
}
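/* Re-evaluate the angle/x0/y0 expressions and rebuild the per-pixel factor
 * map. In backward mode the reciprocal of each factor is stored so that
 * applying the filter removes an existing vignette instead of creating one.
 * If an expression yields NaN (e.g. it uses per-frame variables while being
 * evaluated at init), evaluation is switched to per-frame mode. */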
static void update_context(VignetteContext *s, AVFilterLink *inlink, AVFrame *frame)
{
    int x, y;
    float *dst = s->fmap;
    int dst_linesize = s->fmap_linesize;

    if (frame) {
        s->var_values[VAR_N]   = inlink->frame_count_out;
        s->var_values[VAR_T]   = TS2T(frame->pts, inlink->time_base);
        s->var_values[VAR_PTS] = TS2D(frame->pts);
    } else {
        s->var_values[VAR_N]   = NAN;
        s->var_values[VAR_T]   = NAN;
        s->var_values[VAR_PTS] = NAN;
    }

    s->angle = av_expr_eval(s->angle_pexpr, s->var_values, NULL);
    s->x0 = av_expr_eval(s->x0_pexpr, s->var_values, NULL);
    s->y0 = av_expr_eval(s->y0_pexpr, s->var_values, NULL);

    if (isnan(s->x0) || isnan(s->y0) || isnan(s->angle))
        s->eval_mode = EVAL_MODE_FRAME;

    s->angle = av_clipf(s->angle, 0, M_PI_2);

    if (s->backward) {
        for (y = 0; y < inlink->h; y++) {
            for (x = 0; x < inlink->w; x++)
                dst[x] = 1. / get_natural_factor(s, x, y);
            dst += dst_linesize;
        }
    } else {
        for (y = 0; y < inlink->h; y++) {
            for (x = 0; x < inlink->w; x++)
                dst[x] = get_natural_factor(s, x, y);
            dst += dst_linesize;
        }
    }
}
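/* Return a dither offset in [0, 1) from a simple 32-bit linear congruential
 * generator (the Numerical Recipes constants), used to soften banding when
 * the attenuated values are rounded back to 8 bits. */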
static inline double get_dither_value(VignetteContext *s)
{
    double dv = 0;
    if (s->do_dither) {
        dv = s->dither / (double)(1LL<<32);
        s->dither = s->dither * 1664525 + 1013904223;
    }
    return dv;
}
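/* Apply the factor map to one frame, in place when the input is writable.
 * Packed RGB scales all three components of each pixel; planar formats scale
 * the luma/gray plane directly and rescale chroma around its mid value,
 * indexing the map at luma resolution to account for chroma subsampling. */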
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    unsigned x, y, direct = 0;
    AVFilterContext *ctx = inlink->dst;
    VignetteContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    if (s->eval_mode == EVAL_MODE_FRAME)
        update_context(s, inlink, in);

    if (s->desc->flags & AV_PIX_FMT_FLAG_RGB) {
        uint8_t       *dst = out->data[0];
        const uint8_t *src = in ->data[0];
        const float *fmap = s->fmap;
        const int dst_linesize = out->linesize[0];
        const int src_linesize = in ->linesize[0];
        const int fmap_linesize = s->fmap_linesize;

        for (y = 0; y < inlink->h; y++) {
            uint8_t       *dstp = dst;
            const uint8_t *srcp = src;

            for (x = 0; x < inlink->w; x++, dstp += 3, srcp += 3) {
                const float f = fmap[x];

                dstp[0] = av_clip_uint8(srcp[0] * f + get_dither_value(s));
                dstp[1] = av_clip_uint8(srcp[1] * f + get_dither_value(s));
                dstp[2] = av_clip_uint8(srcp[2] * f + get_dither_value(s));
            }
            dst += dst_linesize;
            src += src_linesize;
            fmap += fmap_linesize;
        }
    } else {
        int plane;

        for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
            uint8_t       *dst = out->data[plane];
            const uint8_t *src = in ->data[plane];
            const float *fmap = s->fmap;
            const int dst_linesize = out->linesize[plane];
            const int src_linesize = in ->linesize[plane];
            const int fmap_linesize = s->fmap_linesize;
            const int chroma = plane == 1 || plane == 2;
            const int hsub = chroma ? s->desc->log2_chroma_w : 0;
            const int vsub = chroma ? s->desc->log2_chroma_h : 0;
            const int w = AV_CEIL_RSHIFT(inlink->w, hsub);
            const int h = AV_CEIL_RSHIFT(inlink->h, vsub);

            for (y = 0; y < h; y++) {
                uint8_t *dstp = dst;
                const uint8_t *srcp = src;

                for (x = 0; x < w; x++) {
                    const double dv = get_dither_value(s);
                    if (chroma) *dstp++ = av_clip_uint8(fmap[x << hsub] * (*srcp++ - 127) + 127 + dv);
                    else        *dstp++ = av_clip_uint8(fmap[x        ] *  *srcp++            + dv);
                }
                dst += dst_linesize;
                src += src_linesize;
                fmap += fmap_linesize << vsub;
            }
        }
    }

    if (!direct)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
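/* Per-link setup: allocate the factor map, derive the x/y scale factors from
 * the input sample aspect ratio and the "aspect" option, and precompute the
 * map once when expressions are only evaluated at init. */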
static int config_props(AVFilterLink *inlink)
{
    VignetteContext *s = inlink->dst->priv;
    AVRational sar = inlink->sample_aspect_ratio;

    s->desc = av_pix_fmt_desc_get(inlink->format);
    s->var_values[VAR_W]  = inlink->w;
    s->var_values[VAR_H]  = inlink->h;
    s->var_values[VAR_TB] = av_q2d(inlink->time_base);
    s->var_values[VAR_R]  = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ?
        NAN : av_q2d(inlink->frame_rate);

    if (!sar.num || !sar.den)
        sar.num = sar.den = 1;
    if (sar.num > sar.den) {
        s->xscale = av_q2d(av_div_q(sar, s->aspect));
        s->yscale = 1;
    } else {
        s->yscale = av_q2d(av_div_q(s->aspect, sar));
        s->xscale = 1;
    }
    s->dmax = hypot(inlink->w / 2., inlink->h / 2.);
    av_log(s, AV_LOG_DEBUG, "xscale=%f yscale=%f dmax=%f\n",
           s->xscale, s->yscale, s->dmax);

    s->fmap_linesize = FFALIGN(inlink->w, 32);
    s->fmap = av_malloc_array(s->fmap_linesize, inlink->h * sizeof(*s->fmap));
    if (!s->fmap)
        return AVERROR(ENOMEM);

    if (s->eval_mode == EVAL_MODE_INIT)
        update_context(s, inlink, NULL);

    return 0;
}

static const AVFilterPad vignette_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
    { NULL }
};

static const AVFilterPad vignette_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_vignette = {
    .name          = "vignette",
    .description   = NULL_IF_CONFIG_SMALL("Make or reverse a vignette effect."),
    .priv_size     = sizeof(VignetteContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = vignette_inputs,
    .outputs       = vignette_outputs,
    .priv_class    = &vignette_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};