/*
 * Copyright (c) 2013 Clément Bœsch
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/opt.h"
#include "libavutil/eval.h"
#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

static const char *const var_names[] = {
    "w",    // stream width
    "h",    // stream height
    "n",    // frame count
    "pts",  // presentation timestamp expressed in AV_TIME_BASE units
    "r",    // frame rate
    "t",    // timestamp expressed in seconds
    "tb",   // timebase
    NULL
};

enum var_name {
    VAR_W,
    VAR_H,
    VAR_N,
    VAR_PTS,
    VAR_R,
    VAR_T,
    VAR_TB,
    VAR_NB
};
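
/* Filter private context: parsed angle/x0/y0 expressions and their current
 * values, the per-pixel float gain map (fmap, fmap_linesize floats per row),
 * and the PRNG state used for optional dithering. */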
typedef struct {
    const AVClass *class;
    const AVPixFmtDescriptor *desc;
    int backward;
    enum EvalMode { EVAL_MODE_INIT, EVAL_MODE_FRAME, EVAL_MODE_NB } eval_mode;
#define DEF_EXPR_FIELDS(name) AVExpr *name##_pexpr; char *name##_expr; double name
    DEF_EXPR_FIELDS(angle);
    DEF_EXPR_FIELDS(x0);
    DEF_EXPR_FIELDS(y0);
    double var_values[VAR_NB];
    float *fmap;
    int fmap_linesize;
    double dmax;
    float xscale, yscale;
    uint32_t dither;
    int do_dither;
} VignetteContext;

#define OFFSET(x) offsetof(VignetteContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption vignette_options[] = {
    { "angle", "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS },
    { "a",     "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS },
    { "x0", "set circle center position on x-axis", OFFSET(x0_expr), AV_OPT_TYPE_STRING, {.str="w/2"}, .flags = FLAGS },
    { "y0", "set circle center position on y-axis", OFFSET(y0_expr), AV_OPT_TYPE_STRING, {.str="h/2"}, .flags = FLAGS },
    { "mode", "set forward/backward mode", OFFSET(backward), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS, "mode" },
        { "forward",  NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, INT_MIN, INT_MAX, FLAGS, "mode"},
        { "backward", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, INT_MIN, INT_MAX, FLAGS, "mode"},
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
        { "init",  "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT},  .flags = FLAGS, .unit = "eval" },
        { "frame", "eval expressions for each frame",             0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
    { "dither", "set dithering", OFFSET(do_dither), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(vignette);
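
/* Parse the angle/x0/y0 option strings into expressions once; they are
 * evaluated at configure time or per frame depending on the "eval" option. */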
static av_cold int init(AVFilterContext *ctx)
{
    VignetteContext *s = ctx->priv;

#define PARSE_EXPR(name) do {                                               \
    int ret = av_expr_parse(&s->name##_pexpr, s->name##_expr, var_names,    \
                            NULL, NULL, NULL, NULL, 0, ctx);                \
    if (ret < 0) {                                                          \
        av_log(ctx, AV_LOG_ERROR, "Unable to parse expression for '"        \
               AV_STRINGIFY(name) "'\n");                                   \
        return ret;                                                         \
    }                                                                       \
} while (0)

    PARSE_EXPR(angle);
    PARSE_EXPR(x0);
    PARSE_EXPR(y0);
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    VignetteContext *s = ctx->priv;

    av_freep(&s->fmap);
    av_expr_free(s->angle_pexpr);
    av_expr_free(s->x0_pexpr);
    av_expr_free(s->y0_pexpr);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_RGB24,   AV_PIX_FMT_BGR24,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
    return 0;
}
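
/* Natural vignetting factor for pixel (x, y): cos^4 of the lens angle scaled
 * by the distance to the configured center, normalized by dmax; pixels beyond
 * dmax get a factor of 0. */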
static double get_natural_factor(const VignetteContext *s, int x, int y)
{
    const int xx = (x - s->x0) * s->xscale;
    const int yy = (y - s->y0) * s->yscale;
    const double dnorm = hypot(xx, yy) / s->dmax;

    if (dnorm > 1) {
        return 0;
    } else {
        const double c = cos(s->angle * dnorm);
        return (c*c)*(c*c); // do not remove braces, it helps compilers
    }
}

#define TS2D(ts)     ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
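
/* Re-evaluate the angle/x0/y0 expressions and rebuild the per-pixel gain map.
 * In backward mode the factor is inverted, which the filter uses to reverse an
 * existing vignette instead of creating one. */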
static void update_context(VignetteContext *s, AVFilterLink *inlink, AVFrame *frame)
{
    int x, y;
    float *dst = s->fmap;
    int dst_linesize = s->fmap_linesize;

    if (frame) {
        s->var_values[VAR_N]   = inlink->frame_count;
        s->var_values[VAR_T]   = TS2T(frame->pts, inlink->time_base);
        s->var_values[VAR_PTS] = TS2D(frame->pts);
    } else {
        s->var_values[VAR_N]   = 0;
        s->var_values[VAR_T]   = NAN;
        s->var_values[VAR_PTS] = NAN;
    }

    s->angle = av_clipf(av_expr_eval(s->angle_pexpr, s->var_values, NULL), 0, M_PI_2);
    s->x0 = av_expr_eval(s->x0_pexpr, s->var_values, NULL);
    s->y0 = av_expr_eval(s->y0_pexpr, s->var_values, NULL);

    if (s->backward) {
        for (y = 0; y < inlink->h; y++) {
            for (x = 0; x < inlink->w; x++)
                dst[x] = 1. / get_natural_factor(s, x, y);
            dst += dst_linesize;
        }
    } else {
        for (y = 0; y < inlink->h; y++) {
            for (x = 0; x < inlink->w; x++)
                dst[x] = get_natural_factor(s, x, y);
            dst += dst_linesize;
        }
    }
}
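
/* Optional dithering value in [0, 1): the state is advanced with a 32-bit
 * linear congruential generator and mapped to a double before each output
 * sample is quantized back to 8 bits. */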
static inline double get_dither_value(VignetteContext *s)
{
    double dv = 0;

    if (s->do_dither) {
        dv = s->dither / (double)(1LL<<32);
        s->dither = s->dither * 1664525 + 1013904223;
    }
    return dv;
}
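
/* Apply the gain map to a frame: packed RGB is scaled per component; planar
 * formats scale luma/gray directly and scale chroma around the 127 midpoint,
 * indexing the map with the chroma subsampling shifts. */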
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    unsigned x, y;
    AVFilterContext *ctx = inlink->dst;
    VignetteContext *s = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    if (s->eval_mode == EVAL_MODE_FRAME)
        update_context(s, inlink, in);

    if (s->desc->flags & PIX_FMT_RGB) {
        uint8_t       *dst = out->data[0];
        const uint8_t *src = in ->data[0];
        const float *fmap = s->fmap;
        const int dst_linesize = out->linesize[0];
        const int src_linesize = in ->linesize[0];
        const int fmap_linesize = s->fmap_linesize;

        for (y = 0; y < inlink->h; y++) {
            uint8_t       *dstp = dst;
            const uint8_t *srcp = src;

            for (x = 0; x < inlink->w; x++, dstp += 3, srcp += 3) {
                const float f = fmap[x];

                dstp[0] = av_clip_uint8(srcp[0] * f + get_dither_value(s));
                dstp[1] = av_clip_uint8(srcp[1] * f + get_dither_value(s));
                dstp[2] = av_clip_uint8(srcp[2] * f + get_dither_value(s));
            }
            dst  += dst_linesize;
            src  += src_linesize;
            fmap += fmap_linesize;
        }
    } else {
        int plane;

        for (plane = 0; plane < 4 && in->data[plane]; plane++) {
            uint8_t       *dst = out->data[plane];
            const uint8_t *src = in ->data[plane];
            const float *fmap = s->fmap;
            const int dst_linesize = out->linesize[plane];
            const int src_linesize = in ->linesize[plane];
            const int fmap_linesize = s->fmap_linesize;
            const int chroma = plane == 1 || plane == 2;
            const int hsub = chroma ? s->desc->log2_chroma_w : 0;
            const int vsub = chroma ? s->desc->log2_chroma_h : 0;
            const int w = FF_CEIL_RSHIFT(inlink->w, hsub);
            const int h = FF_CEIL_RSHIFT(inlink->h, vsub);

            for (y = 0; y < h; y++) {
                uint8_t *dstp = dst;
                const uint8_t *srcp = src;

                for (x = 0; x < w; x++) {
                    const double dv = get_dither_value(s);
                    if (chroma) *dstp++ = av_clip_uint8(fmap[x << hsub] * (*srcp++ - 127) + 127 + dv);
                    else        *dstp++ = av_clip_uint8(fmap[x        ] *  *srcp++             + dv);
                }
                dst  += dst_linesize;
                src  += src_linesize;
                fmap += fmap_linesize << vsub;
            }
        }
    }

    return ff_filter_frame(outlink, out);
}
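
/* Per-link setup: publish the constant expression variables, derive the x/y
 * scales and maximum distance from the sample aspect ratio, allocate the gain
 * map (row stride padded to a multiple of 32 floats), and evaluate the
 * expressions once when eval=init. */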
static int config_props(AVFilterLink *inlink)
{
    VignetteContext *s = inlink->dst->priv;
    AVRational sar = inlink->sample_aspect_ratio;

    s->desc = av_pix_fmt_desc_get(inlink->format);
    s->var_values[VAR_W]  = inlink->w;
    s->var_values[VAR_H]  = inlink->h;
    s->var_values[VAR_TB] = av_q2d(inlink->time_base);
    s->var_values[VAR_R]  = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ?
        NAN : av_q2d(inlink->frame_rate);

    if (!sar.num || !sar.den)
        sar.num = sar.den = 1;
    if (sar.num > sar.den) {
        s->xscale = av_q2d(sar);
        s->yscale = 1;
        s->dmax = hypot(inlink->w / 2., s->yscale * inlink->h / 2.);
    } else {
        s->yscale = av_q2d(av_inv_q(sar));
        s->xscale = 1;
        s->dmax = hypot(s->xscale * inlink->w / 2., inlink->h / 2.);
    }

    s->fmap_linesize = FFALIGN(inlink->w, 32);
    s->fmap = av_malloc(s->fmap_linesize * inlink->h * sizeof(*s->fmap));
    if (!s->fmap)
        return AVERROR(ENOMEM);

    if (s->eval_mode == EVAL_MODE_INIT)
        update_context(s, inlink, NULL);

    return 0;
}

static const AVFilterPad vignette_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
    { NULL }
};

static const AVFilterPad vignette_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter avfilter_vf_vignette = {
    .name          = "vignette",
    .description   = NULL_IF_CONFIG_SMALL("Make or reverse a vignette effect."),
    .priv_size     = sizeof(VignetteContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = vignette_inputs,
    .outputs       = vignette_outputs,
    .priv_class    = &vignette_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};