/*
 * Copyright (c) 2013 Clément Bœsch
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h> /* DBL_MAX */

#include "libavutil/opt.h"
#include "libavutil/eval.h"
#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

static const char *const var_names[] = {
    "w",   // stream width
    "h",   // stream height
    "n",   // frame count
    "pts", // presentation timestamp expressed in AV_TIME_BASE units
    "r",   // frame rate
    "t",   // timestamp expressed in seconds
    "tb",  // timebase
    NULL
};

enum var_name {
    VAR_W,
    VAR_H,
    VAR_N,
    VAR_PTS,
    VAR_R,
    VAR_T,
    VAR_TB,
    VAR_NB
};

enum EvalMode {
    EVAL_MODE_INIT,
    EVAL_MODE_FRAME,
    EVAL_MODE_NB
};

typedef struct {
    const AVClass *class;
    const AVPixFmtDescriptor *desc;
    int backward;
    int eval_mode;                      ///< EvalMode
#define DEF_EXPR_FIELDS(name) AVExpr *name##_pexpr; char *name##_expr; double name
    DEF_EXPR_FIELDS(angle);
    DEF_EXPR_FIELDS(x0);
    DEF_EXPR_FIELDS(y0);
    double var_values[VAR_NB];
    float *fmap;
    int fmap_linesize;
    double dmax;
    float xscale, yscale;
    uint32_t dither;
    int do_dither;
    AVRational aspect;
    AVRational scale;
} VignetteContext;

#define OFFSET(x) offsetof(VignetteContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption vignette_options[] = {
    { "angle", "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS },
    { "a",     "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS },
    { "x0", "set circle center position on x-axis", OFFSET(x0_expr), AV_OPT_TYPE_STRING, {.str="w/2"}, .flags = FLAGS },
    { "y0", "set circle center position on y-axis", OFFSET(y0_expr), AV_OPT_TYPE_STRING, {.str="h/2"}, .flags = FLAGS },
    { "mode", "set forward/backward mode", OFFSET(backward), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS, "mode" },
        { "forward",  NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, INT_MIN, INT_MAX, FLAGS, "mode"},
        { "backward", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, INT_MIN, INT_MAX, FLAGS, "mode"},
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
        { "init",  "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT},  .flags = FLAGS, .unit = "eval" },
        { "frame", "eval expressions for each frame",             0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
    { "dither", "set dithering", OFFSET(do_dither), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS },
    { "aspect", "set aspect ratio", OFFSET(aspect), AV_OPT_TYPE_RATIONAL, {.dbl = 1}, 0, DBL_MAX, .flags = FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(vignette);

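/* Illustrative invocation built from the option names above (the
 * ffmpeg-filters documentation is authoritative for the exact syntax):
 *
 *   ffmpeg -i in.mp4 -vf "vignette=angle=PI/4:x0=w/2:y0=h/2:mode=forward" out.mp4
 */
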
static av_cold int init(AVFilterContext *ctx)
{
    VignetteContext *s = ctx->priv;

#define PARSE_EXPR(name) do {                                              \
    int ret = av_expr_parse(&s->name##_pexpr, s->name##_expr, var_names,   \
                            NULL, NULL, NULL, NULL, 0, ctx);               \
    if (ret < 0) {                                                         \
        av_log(ctx, AV_LOG_ERROR, "Unable to parse expression for '"       \
               AV_STRINGIFY(name) "'\n");                                  \
        return ret;                                                        \
    }                                                                      \
} while (0)

    PARSE_EXPR(angle);
    PARSE_EXPR(x0);
    PARSE_EXPR(y0);
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    VignetteContext *s = ctx->priv;

    av_freep(&s->fmap);
    av_expr_free(s->angle_pexpr);
    av_expr_free(s->x0_pexpr);
    av_expr_free(s->y0_pexpr);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_RGB24,   AV_PIX_FMT_BGR24,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

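/* Per-pixel attenuation: the distance from the configured center (scaled by
 * xscale/yscale to compensate for aspect ratio) is normalized against dmax
 * and mapped through cos^4(angle * dnorm), i.e. the cosine-fourth falloff of
 * natural vignetting. Pixels beyond the unit distance get a factor of 0. */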
static double get_natural_factor(const VignetteContext *s, int x, int y)
{
    const int xx = (x - s->x0) * s->xscale;
    const int yy = (y - s->y0) * s->yscale;
    const double dnorm = hypot(xx, yy) / s->dmax;

    if (dnorm > 1) {
        return 0;
    } else {
        const double c = cos(s->angle * dnorm);
        return (c*c)*(c*c); // do not remove braces, it helps compilers
    }
}

#define TS2D(ts)     ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))

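/* Re-evaluate the angle/x0/y0 expressions with the current frame variables
 * (or with defaults when called without a frame at configuration time) and
 * rebuild the per-pixel float gain map. In backward mode the reciprocal of
 * the factor is stored, so applying the map undoes a vignette instead of
 * creating one. */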
static void update_context(VignetteContext *s, AVFilterLink *inlink, AVFrame *frame)
{
    int x, y;
    float *dst = s->fmap;
    int dst_linesize = s->fmap_linesize;

    if (frame) {
        s->var_values[VAR_N]   = inlink->frame_count;
        s->var_values[VAR_T]   = TS2T(frame->pts, inlink->time_base);
        s->var_values[VAR_PTS] = TS2D(frame->pts);
    } else {
        s->var_values[VAR_N]   = 0;
        s->var_values[VAR_T]   = NAN;
        s->var_values[VAR_PTS] = NAN;
    }

    s->angle = av_clipf(av_expr_eval(s->angle_pexpr, s->var_values, NULL), 0, M_PI_2);
    s->x0 = av_expr_eval(s->x0_pexpr, s->var_values, NULL);
    s->y0 = av_expr_eval(s->y0_pexpr, s->var_values, NULL);

    if (s->backward) {
        for (y = 0; y < inlink->h; y++) {
            for (x = 0; x < inlink->w; x++)
                dst[x] = 1. / get_natural_factor(s, x, y);
            dst += dst_linesize;
        }
    } else {
        for (y = 0; y < inlink->h; y++) {
            for (x = 0; x < inlink->w; x++)
                dst[x] = get_natural_factor(s, x, y);
            dst += dst_linesize;
        }
    }
}

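/* Small pseudo-random offset in [0, 1) added before truncation to 8 bits to
 * reduce banding. The 32-bit state advances with the linear congruential
 * generator constants 1664525 / 1013904223 (the Numerical Recipes LCG). */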
static inline double get_dither_value(VignetteContext *s)
{
    double dv = 0;

    if (s->do_dither) {
        dv = s->dither / (double)(1LL<<32);
        s->dither = s->dither * 1664525 + 1013904223;
    }
    return dv;
}

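/* Apply the precomputed gain map to one frame. The packed RGB path scales all
 * three components toward black; the planar path scales luma/gray toward
 * black and chroma around the neutral value 127, sampling the map at luma
 * resolution (x << hsub, fmap_linesize << vsub) for subsampled planes. */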
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    unsigned x, y, direct = 0;
    AVFilterContext *ctx = inlink->dst;
    VignetteContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    if (s->eval_mode == EVAL_MODE_FRAME)
        update_context(s, inlink, in);

    if (s->desc->flags & AV_PIX_FMT_FLAG_RGB) {
        uint8_t       *dst = out->data[0];
        const uint8_t *src = in ->data[0];
        const float *fmap = s->fmap;
        const int dst_linesize = out->linesize[0];
        const int src_linesize = in ->linesize[0];
        const int fmap_linesize = s->fmap_linesize;

        for (y = 0; y < inlink->h; y++) {
            uint8_t       *dstp = dst;
            const uint8_t *srcp = src;

            for (x = 0; x < inlink->w; x++, dstp += 3, srcp += 3) {
                const float f = fmap[x];

                dstp[0] = av_clip_uint8(srcp[0] * f + get_dither_value(s));
                dstp[1] = av_clip_uint8(srcp[1] * f + get_dither_value(s));
                dstp[2] = av_clip_uint8(srcp[2] * f + get_dither_value(s));
            }
            dst  += dst_linesize;
            src  += src_linesize;
            fmap += fmap_linesize;
        }
    } else {
        int plane;

        for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
            uint8_t       *dst = out->data[plane];
            const uint8_t *src = in ->data[plane];
            const float *fmap = s->fmap;
            const int dst_linesize = out->linesize[plane];
            const int src_linesize = in ->linesize[plane];
            const int fmap_linesize = s->fmap_linesize;
            const int chroma = plane == 1 || plane == 2;
            const int hsub = chroma ? s->desc->log2_chroma_w : 0;
            const int vsub = chroma ? s->desc->log2_chroma_h : 0;
            const int w = FF_CEIL_RSHIFT(inlink->w, hsub);
            const int h = FF_CEIL_RSHIFT(inlink->h, vsub);

            for (y = 0; y < h; y++) {
                uint8_t *dstp = dst;
                const uint8_t *srcp = src;

                for (x = 0; x < w; x++) {
                    const double dv = get_dither_value(s);
                    if (chroma) *dstp++ = av_clip_uint8(fmap[x << hsub] * (*srcp++ - 127) + 127 + dv);
                    else        *dstp++ = av_clip_uint8(fmap[x        ] *  *srcp++             + dv);
                }
                dst  += dst_linesize;
                src  += src_linesize;
                fmap += fmap_linesize << vsub;
            }
        }
    }

    if (!direct)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

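/* Input link configuration: expose w/h/tb/r to the expressions, derive
 * xscale/yscale from the input sample aspect ratio and the "aspect" option,
 * set dmax to half the frame diagonal, and allocate the float gain map with a
 * 32-aligned linesize. */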
static int config_props(AVFilterLink *inlink)
{
    VignetteContext *s = inlink->dst->priv;
    AVRational sar = inlink->sample_aspect_ratio;

    s->desc = av_pix_fmt_desc_get(inlink->format);
    s->var_values[VAR_W]  = inlink->w;
    s->var_values[VAR_H]  = inlink->h;
    s->var_values[VAR_TB] = av_q2d(inlink->time_base);
    s->var_values[VAR_R]  = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ?
        NAN : av_q2d(inlink->frame_rate);

    if (!sar.num || !sar.den)
        sar.num = sar.den = 1;
    if (sar.num > sar.den) {
        s->xscale = av_q2d(av_div_q(sar, s->aspect));
        s->yscale = 1;
    } else {
        s->yscale = av_q2d(av_div_q(s->aspect, sar));
        s->xscale = 1;
    }
    s->dmax = hypot(inlink->w / 2., inlink->h / 2.);
    av_log(s, AV_LOG_DEBUG, "xscale=%f yscale=%f dmax=%f\n",
           s->xscale, s->yscale, s->dmax);

    s->fmap_linesize = FFALIGN(inlink->w, 32);
    s->fmap = av_malloc_array(s->fmap_linesize, inlink->h * sizeof(*s->fmap));
    if (!s->fmap)
        return AVERROR(ENOMEM);

    if (s->eval_mode == EVAL_MODE_INIT)
        update_context(s, inlink, NULL);

    return 0;
}

static const AVFilterPad vignette_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
    { NULL }
};

static const AVFilterPad vignette_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_vignette = {
    .name          = "vignette",
    .description   = NULL_IF_CONFIG_SMALL("Make or reverse a vignette effect."),
    .priv_size     = sizeof(VignetteContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = vignette_inputs,
    .outputs       = vignette_outputs,
    .priv_class    = &vignette_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};