You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

348 lines
11KB

  1. /*
  2. * Copyright (c) 2015 Paul B Mahol
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include "float.h"
  21. #include "libavutil/eval.h"
  22. #include "libavutil/intreadwrite.h"
  23. #include "libavutil/opt.h"
  24. #include "avfilter.h"
  25. #include "formats.h"
  26. #include "internal.h"
  27. #include "video.h"
/* Per-filter-instance state for drawgraph/adrawgraph. */
typedef struct DrawGraphContext {
    const AVClass *class;

    char *key[4];          // metadata keys to look up, one per data series
    float min, max;        // value range; samples are clipped to [min, max]
    char *fg_str[4];       // foreground color expressions (textual, from options)
    AVExpr *fg_expr[4];    // parsed foreground color expressions
    uint8_t bg[4];         // background color as 4 RGBA bytes
    int mode;              // drawing mode: 0=bar, 1=dot, 2=line
    int slide;             // slide mode: 0=frame, 1=replace, 2=scroll
    int w, h;              // graph (output frame) size

    AVFrame *out;          // persistent canvas, reused across input frames
    int x;                 // current drawing column
    int prev_y[4];         // previous y per series, for connecting lines in mode 2
    int first;             // nonzero until the first sample has been drawn
} DrawGraphContext;
#define OFFSET(x) offsetof(DrawGraphContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Options shared by drawgraph and adrawgraph (aliased below). */
static const AVOption drawgraph_options[] = {
    { "m1", "set 1st metadata key", OFFSET(key[0]), AV_OPT_TYPE_STRING, {.str=""}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "fg1", "set 1st foreground color expression", OFFSET(fg_str[0]), AV_OPT_TYPE_STRING, {.str="0xffff0000"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "m2", "set 2nd metadata key", OFFSET(key[1]), AV_OPT_TYPE_STRING, {.str=""}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "fg2", "set 2nd foreground color expression", OFFSET(fg_str[1]), AV_OPT_TYPE_STRING, {.str="0xff00ff00"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "m3", "set 3rd metadata key", OFFSET(key[2]), AV_OPT_TYPE_STRING, {.str=""}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "fg3", "set 3rd foreground color expression", OFFSET(fg_str[2]), AV_OPT_TYPE_STRING, {.str="0xffff00ff"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "m4", "set 4th metadata key", OFFSET(key[3]), AV_OPT_TYPE_STRING, {.str=""}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "fg4", "set 4th foreground color expression", OFFSET(fg_str[3]), AV_OPT_TYPE_STRING, {.str="0xffffff00"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "bg", "set background color", OFFSET(bg), AV_OPT_TYPE_COLOR, {.str="white"}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "min", "set minimal value", OFFSET(min), AV_OPT_TYPE_FLOAT, {.dbl=-1.}, INT_MIN, INT_MAX, FLAGS },
    { "max", "set maximal value", OFFSET(max), AV_OPT_TYPE_FLOAT, {.dbl=1.}, INT_MIN, INT_MAX, FLAGS },
    { "mode", "set graph mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=2}, 0, 2, FLAGS, "mode" },
        {"bar", "draw bars", OFFSET(mode), AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "mode"},
        {"dot", "draw dots", OFFSET(mode), AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "mode"},
        {"line", "draw lines", OFFSET(mode), AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "mode"},
    { "slide", "set slide mode", OFFSET(slide), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, FLAGS, "slide" },
        {"frame", "draw new frames", OFFSET(slide), AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "slide"},
        {"replace", "replace old columns with new", OFFSET(slide), AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "slide"},
        {"scroll", "scroll from right to left", OFFSET(slide), AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "slide"},
    /* "size" and "s" both write OFFSET(w); AV_OPT_TYPE_IMAGE_SIZE fills w and h together. */
    { "size", "set graph size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="900x256"}, 0, 0, FLAGS },
    { "s", "set graph size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="900x256"}, 0, 0, FLAGS },
    { NULL }
};
/* Variables available inside the fg1..fg4 color expressions; order matches the enum. */
static const char *const var_names[] = { "MAX", "MIN", "VAL", NULL };
enum { VAR_MAX, VAR_MIN, VAR_VAL, VAR_VARS_NB };
  71. static av_cold int init(AVFilterContext *ctx)
  72. {
  73. DrawGraphContext *s = ctx->priv;
  74. int ret, i;
  75. if (s->max <= s->min) {
  76. av_log(ctx, AV_LOG_ERROR, "max is same or lower than min\n");
  77. return AVERROR(EINVAL);
  78. }
  79. for (i = 0; i < 4; i++) {
  80. if (s->fg_str[i]) {
  81. ret = av_expr_parse(&s->fg_expr[i], s->fg_str[i], var_names,
  82. NULL, NULL, NULL, NULL, 0, ctx);
  83. if (ret < 0)
  84. return ret;
  85. }
  86. }
  87. s->first = 1;
  88. return 0;
  89. }
  90. static int query_formats(AVFilterContext *ctx)
  91. {
  92. AVFilterLink *outlink = ctx->outputs[0];
  93. static const enum AVPixelFormat pix_fmts[] = {
  94. AV_PIX_FMT_RGBA,
  95. AV_PIX_FMT_NONE
  96. };
  97. AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
  98. if (!fmts_list)
  99. return AVERROR(ENOMEM);
  100. ff_formats_ref(fmts_list, &outlink->in_formats);
  101. return 0;
  102. }
  103. static void clear_image(DrawGraphContext *s, AVFrame *out, AVFilterLink *outlink)
  104. {
  105. int i, j;
  106. int bg = AV_RN32(s->bg);
  107. for (i = 0; i < out->height; i++)
  108. for (j = 0; j < out->width; j++)
  109. AV_WN32(out->data[0] + i * out->linesize[0] + j * 4, bg);
  110. }
  111. static inline void draw_dot(int fg, int x, int y, AVFrame *out)
  112. {
  113. AV_WN32(out->data[0] + y * out->linesize[0] + x * 4, fg);
  114. }
/**
 * Read up to four metadata values from the input frame and plot them onto
 * a persistent RGBA canvas, then emit a clone of that canvas downstream.
 * The input frame is always consumed (freed) here.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    DrawGraphContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVDictionary *metadata;
    AVDictionaryEntry *e;
    AVFrame *out = s->out;
    int i;

    /* (Re)allocate the persistent canvas if missing or the output size changed. */
    if (!s->out || s->out->width != outlink->w ||
        s->out->height != outlink->h) {
        av_frame_free(&s->out);
        s->out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        out = s->out;
        if (!s->out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        clear_image(s, out, outlink);
    }
    av_frame_copy_props(out, in);

    metadata = av_frame_get_metadata(in);

    for (i = 0; i < 4; i++) {
        double values[VAR_VARS_NB];
        int j, y, x, old;
        uint32_t fg, bg;
        float vf;

        /* Skip series whose metadata key is absent or has no value.
         * NOTE(review): unset keys default to "" — presumably "" never
         * matches a real metadata entry; confirm av_dict_get semantics. */
        e = av_dict_get(metadata, s->key[i], NULL, 0);
        if (!e || !e->value)
            continue;

        if (sscanf(e->value, "%f", &vf) != 1)
            continue;

        /* Clamp the sample into the configured range before scaling. */
        vf = av_clipf(vf, s->min, s->max);

        values[VAR_MIN] = s->min;
        values[VAR_MAX] = s->max;
        values[VAR_VAL] = vf;

        /* Foreground color may depend on MIN/MAX/VAL via the expression. */
        fg = av_expr_eval(s->fg_expr[i], values, NULL);
        bg = AV_RN32(s->bg);

        /* Column wrap handling, done once per frame (only for series 0). */
        if (i == 0 && s->x >= outlink->w) {
            if (s->slide == 0 || s->slide == 1)
                s->x = 0;

            if (s->slide == 2) {
                /* scroll: shift every row left by one pixel, draw in last column */
                s->x = outlink->w - 1;
                for (j = 0; j < outlink->h; j++) {
                    memmove(out->data[0] + j * out->linesize[0] ,
                            out->data[0] + j * out->linesize[0] + 4,
                            (outlink->w - 1) * 4);
                }
            } else if (s->slide == 0) {
                /* frame mode: start over on a cleared canvas */
                clear_image(s, out, outlink);
            }
        }

        x = s->x;
        /* Map [min, max] to [h-1, 0]: larger values plot nearer the top. */
        y = (outlink->h - 1) * (1 - ((vf - s->min) / (s->max - s->min)));

        switch (s->mode) {
        case 0: /* bar: fill from y down, stopping where a previous bar begins */
            if (i == 0 && (s->slide == 1 || s->slide == 2))
                for (j = 0; j < outlink->h; j++)
                    draw_dot(bg, x, j, out);

            old = AV_RN32(out->data[0] + y * out->linesize[0] + x * 4);
            for (j = y; j < outlink->h; j++) {
                /* NOTE(review): && binds tighter than ||, so the old != bg
                 * guard only covers the first comparison — verify the
                 * intended grouping before restructuring. */
                if (old != bg &&
                    (AV_RN32(out->data[0] + j * out->linesize[0] + x * 4) != old) ||
                    AV_RN32(out->data[0] + FFMIN(j+1, outlink->h - 1) * out->linesize[0] + x * 4) != old) {
                    draw_dot(fg, x, j, out);
                    break;
                }
                draw_dot(fg, x, j, out);
            }
            break;
        case 1: /* dot: single pixel at the sample position */
            if (i == 0 && (s->slide == 1 || s->slide == 2))
                for (j = 0; j < outlink->h; j++)
                    draw_dot(bg, x, j, out);

            draw_dot(fg, x, y, out);
            break;
        case 2: /* line: vertical segment connecting to the previous sample */
            if (s->first) {
                s->first = 0;
                s->prev_y[i] = y;
            }

            /* In replace/scroll modes, wipe the column except the new segment. */
            if (i == 0 && (s->slide == 1 || s->slide == 2)) {
                for (j = 0; j < y; j++)
                    draw_dot(bg, x, j, out);
                for (j = outlink->h - 1; j > y; j--)
                    draw_dot(bg, x, j, out);
            }

            if (y <= s->prev_y[i]) {
                for (j = y; j <= s->prev_y[i]; j++)
                    draw_dot(fg, x, j, out);
            } else {
                for (j = s->prev_y[i]; j <= y; j++)
                    draw_dot(fg, x, j, out);
            }
            s->prev_y[i] = y;
            break;
        }
    }

    s->x++;

    av_frame_free(&in);
    /* Keep the canvas; send a clone so downstream may modify/free its copy. */
    return ff_filter_frame(outlink, av_frame_clone(s->out));
}
  217. static int config_output(AVFilterLink *outlink)
  218. {
  219. DrawGraphContext *s = outlink->src->priv;
  220. outlink->w = s->w;
  221. outlink->h = s->h;
  222. outlink->sample_aspect_ratio = (AVRational){1,1};
  223. return 0;
  224. }
  225. static av_cold void uninit(AVFilterContext *ctx)
  226. {
  227. DrawGraphContext *s = ctx->priv;
  228. int i;
  229. for (i = 0; i < 4; i++)
  230. av_expr_free(s->fg_expr[i]);
  231. av_frame_free(&s->out);
  232. }
#if CONFIG_DRAWGRAPH_FILTER

AVFILTER_DEFINE_CLASS(drawgraph);

/* Video-input variant: one video pad in, one video pad out. */
static const AVFilterPad drawgraph_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad drawgraph_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_drawgraph = {
    .name = "drawgraph",
    .description = NULL_IF_CONFIG_SMALL("Draw a graph using input video metadata."),
    .priv_size = sizeof(DrawGraphContext),
    .priv_class = &drawgraph_class,
    .query_formats = query_formats,
    .init = init,
    .uninit = uninit,
    .inputs = drawgraph_inputs,
    .outputs = drawgraph_outputs,
};
#endif // CONFIG_DRAWGRAPH_FILTER
#if CONFIG_ADRAWGRAPH_FILTER

/* adrawgraph shares all options and callbacks with drawgraph; only the
 * input pad media type differs (audio in, video out). */
#define adrawgraph_options drawgraph_options
AVFILTER_DEFINE_CLASS(adrawgraph);

static const AVFilterPad adrawgraph_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad adrawgraph_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_avf_adrawgraph = {
    .name = "adrawgraph",
    .description = NULL_IF_CONFIG_SMALL("Draw a graph using input audio metadata."),
    .priv_size = sizeof(DrawGraphContext),
    .priv_class = &adrawgraph_class,
    .query_formats = query_formats,
    .init = init,
    .uninit = uninit,
    .inputs = adrawgraph_inputs,
    .outputs = adrawgraph_outputs,
};
#endif // CONFIG_ADRAWGRAPH_FILTER