/*
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

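/**
 * @file
 * drawgraph/adrawgraph filter: renders per-frame metadata values as a graph.
 *
 * Illustrative usage (an assumption for documentation purposes, not part of
 * the original source): another filter must first export a numeric value
 * into frame metadata, e.g. signalstats exports lavfi.signalstats.YAVG:
 *
 *   ffmpeg -i in.mp4 \
 *          -vf "signalstats,drawgraph=m1=lavfi.signalstats.YAVG:min=0:max=255" \
 *          out.mp4
 */
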
#include <float.h>

#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

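/*
 * Per-instance state. Up to four metadata keys can be plotted at once;
 * the [4] arrays hold the per-key color expression, previous y position,
 * and (for the "picture" slide mode) the buffered sample values.
 */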
typedef struct DrawGraphContext {
    const AVClass *class;

    char          *key[4];
    float          min, max;
    char          *fg_str[4];
    AVExpr        *fg_expr[4];
    uint8_t        bg[4];
    int            mode;
    int            slide;
    int            w, h;
    AVRational     frame_rate;

    AVFrame       *out;
    int            x;
    int            prev_y[4];
    int            first[4];
    float         *values[4];
    int            values_size[4];
    int            nb_values;
    int64_t        prev_pts;
} DrawGraphContext;

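/*
 * Options: m1..m4 select the frame metadata keys to plot, fg1..fg4 are
 * color expressions evaluated with MIN, MAX and VAL available, and
 * mode/slide select the drawing style and how the graph advances.
 */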
#define OFFSET(x) offsetof(DrawGraphContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption drawgraph_options[] = {
    { "m1", "set 1st metadata key", OFFSET(key[0]), AV_OPT_TYPE_STRING, {.str=""}, 0, 0, FLAGS },
    { "fg1", "set 1st foreground color expression", OFFSET(fg_str[0]), AV_OPT_TYPE_STRING, {.str="0xffff0000"}, 0, 0, FLAGS },
    { "m2", "set 2nd metadata key", OFFSET(key[1]), AV_OPT_TYPE_STRING, {.str=""}, 0, 0, FLAGS },
    { "fg2", "set 2nd foreground color expression", OFFSET(fg_str[1]), AV_OPT_TYPE_STRING, {.str="0xff00ff00"}, 0, 0, FLAGS },
    { "m3", "set 3rd metadata key", OFFSET(key[2]), AV_OPT_TYPE_STRING, {.str=""}, 0, 0, FLAGS },
    { "fg3", "set 3rd foreground color expression", OFFSET(fg_str[2]), AV_OPT_TYPE_STRING, {.str="0xffff00ff"}, 0, 0, FLAGS },
    { "m4", "set 4th metadata key", OFFSET(key[3]), AV_OPT_TYPE_STRING, {.str=""}, 0, 0, FLAGS },
    { "fg4", "set 4th foreground color expression", OFFSET(fg_str[3]), AV_OPT_TYPE_STRING, {.str="0xffffff00"}, 0, 0, FLAGS },
    { "bg", "set background color", OFFSET(bg), AV_OPT_TYPE_COLOR, {.str="white"}, 0, 0, FLAGS },
    { "min", "set minimal value", OFFSET(min), AV_OPT_TYPE_FLOAT, {.dbl=-1.}, INT_MIN, INT_MAX, FLAGS },
    { "max", "set maximal value", OFFSET(max), AV_OPT_TYPE_FLOAT, {.dbl=1.}, INT_MIN, INT_MAX, FLAGS },
    { "mode", "set graph mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=2}, 0, 2, FLAGS, "mode" },
    { "bar", "draw bars", OFFSET(mode), AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "mode" },
    { "dot", "draw dots", OFFSET(mode), AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "mode" },
    { "line", "draw lines", OFFSET(mode), AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "mode" },
    { "slide", "set slide mode", OFFSET(slide), AV_OPT_TYPE_INT, {.i64=0}, 0, 4, FLAGS, "slide" },
    { "frame", "draw new frames", OFFSET(slide), AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "slide" },
    { "replace", "replace old columns with new", OFFSET(slide), AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "slide" },
    { "scroll", "scroll from right to left", OFFSET(slide), AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "slide" },
    { "rscroll", "scroll from left to right", OFFSET(slide), AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, FLAGS, "slide" },
    { "picture", "display graph in single frame", OFFSET(slide), AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, FLAGS, "slide" },
    { "size", "set graph size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="900x256"}, 0, 0, FLAGS },
    { "s", "set graph size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="900x256"}, 0, 0, FLAGS },
    { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
    { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
    { NULL }
};

static const char *const var_names[] = { "MAX", "MIN", "VAL", NULL };
enum                                   { VAR_MAX, VAR_MIN, VAR_VAL, VAR_VARS_NB };

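/*
 * init(): validates min/max, parses the four foreground color expressions,
 * and, for the "picture" slide mode, allocates the initial sample buffers.
 */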
static av_cold int init(AVFilterContext *ctx)
{
    DrawGraphContext *s = ctx->priv;
    int ret, i;

    if (s->max <= s->min) {
        av_log(ctx, AV_LOG_ERROR, "max is same or lower than min\n");
        return AVERROR(EINVAL);
    }

    for (i = 0; i < 4; i++) {
        if (s->fg_str[i]) {
            ret = av_expr_parse(&s->fg_expr[i], s->fg_str[i], var_names,
                                NULL, NULL, NULL, NULL, 0, ctx);
            if (ret < 0)
                return ret;
        }
    }

    s->first[0] = s->first[1] = s->first[2] = s->first[3] = 1;

    if (s->slide == 4) {
        s->values[0] = av_fast_realloc(NULL, &s->values_size[0], 2000);
        s->values[1] = av_fast_realloc(NULL, &s->values_size[1], 2000);
        s->values[2] = av_fast_realloc(NULL, &s->values_size[2], 2000);
        s->values[3] = av_fast_realloc(NULL, &s->values_size[3], 2000);

        if (!s->values[0] || !s->values[1] ||
            !s->values[2] || !s->values[3]) {
            return AVERROR(ENOMEM);
        }
    }

    return 0;
}

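/* The graph is always rendered into an RGBA output frame. */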
static int query_formats(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGBA,
        AV_PIX_FMT_NONE
    };
    int ret;

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(fmts_list, &outlink->incfg.formats)) < 0)
        return ret;

    return 0;
}

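/* Fill the whole output frame with the background color (one 32-bit RGBA
 * pixel per write), and draw a single pixel, respectively. */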
static void clear_image(DrawGraphContext *s, AVFrame *out, AVFilterLink *outlink)
{
    int i, j;
    int bg = AV_RN32(s->bg);

    for (i = 0; i < out->height; i++)
        for (j = 0; j < out->width; j++)
            AV_WN32(out->data[0] + i * out->linesize[0] + j * 4, bg);
}

static inline void draw_dot(int fg, int x, int y, AVFrame *out)
{
    AV_WN32(out->data[0] + y * out->linesize[0] + x * 4, fg);
}

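/*
 * filter_frame(): reads the configured metadata keys from the input frame,
 * maps each value from [min, max] to a y position, and draws one column of
 * the graph at x according to mode and slide. In "picture" mode (slide == 4)
 * values are only buffered here and rendered at EOF in request_frame();
 * otherwise a clone of the accumulated graph frame is emitted per output
 * timestamp.
 */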
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    DrawGraphContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVDictionary *metadata;
    AVDictionaryEntry *e;
    AVFrame *out = s->out;
    AVFrame *clone = NULL;
    int64_t in_pts, out_pts;
    int i;

    if (s->slide == 4 && s->nb_values >= s->values_size[0] / sizeof(float)) {
        float *ptr;

        ptr = av_fast_realloc(s->values[0], &s->values_size[0], s->values_size[0] * 2);
        if (!ptr)
            return AVERROR(ENOMEM);
        s->values[0] = ptr;

        ptr = av_fast_realloc(s->values[1], &s->values_size[1], s->values_size[1] * 2);
        if (!ptr)
            return AVERROR(ENOMEM);
        s->values[1] = ptr;

        ptr = av_fast_realloc(s->values[2], &s->values_size[2], s->values_size[2] * 2);
        if (!ptr)
            return AVERROR(ENOMEM);
        s->values[2] = ptr;

        ptr = av_fast_realloc(s->values[3], &s->values_size[3], s->values_size[3] * 2);
        if (!ptr)
            return AVERROR(ENOMEM);
        s->values[3] = ptr;
    }

    if (s->slide != 4 || s->nb_values == 0) {
        if (!s->out || s->out->width  != outlink->w ||
                       s->out->height != outlink->h) {
            av_frame_free(&s->out);
            s->out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
            out = s->out;
            if (!s->out) {
                av_frame_free(&in);
                return AVERROR(ENOMEM);
            }

            clear_image(s, out, outlink);
        }
        av_frame_copy_props(out, in);
    }

    metadata = in->metadata;

    for (i = 0; i < 4; i++) {
        double values[VAR_VARS_NB];
        int j, y, x, old;
        uint32_t fg, bg;
        float vf;

        if (s->slide == 4)
            s->values[i][s->nb_values] = NAN;

        e = av_dict_get(metadata, s->key[i], NULL, 0);
        if (!e || !e->value)
            continue;

        if (av_sscanf(e->value, "%f", &vf) != 1)
            continue;

        vf = av_clipf(vf, s->min, s->max);

        if (s->slide == 4) {
            s->values[i][s->nb_values] = vf;
            continue;
        }

        values[VAR_MIN] = s->min;
        values[VAR_MAX] = s->max;
        values[VAR_VAL] = vf;

        fg = av_expr_eval(s->fg_expr[i], values, NULL);
        bg = AV_RN32(s->bg);

        if (i == 0 && (s->x >= outlink->w || s->slide == 3)) {
            if (s->slide == 0 || s->slide == 1)
                s->x = 0;

            if (s->slide == 2) {
                s->x = outlink->w - 1;
                for (j = 0; j < outlink->h; j++) {
                    memmove(out->data[0] + j * out->linesize[0],
                            out->data[0] + j * out->linesize[0] + 4,
                            (outlink->w - 1) * 4);
                }
            } else if (s->slide == 3) {
                s->x = 0;
                for (j = 0; j < outlink->h; j++) {
                    memmove(out->data[0] + j * out->linesize[0] + 4,
                            out->data[0] + j * out->linesize[0],
                            (outlink->w - 1) * 4);
                }
            } else if (s->slide == 0) {
                clear_image(s, out, outlink);
            }
        }

        x = s->x;
        y = (outlink->h - 1) * (1 - ((vf - s->min) / (s->max - s->min)));

        switch (s->mode) {
        case 0:
            if (i == 0 && (s->slide > 0))
                for (j = 0; j < outlink->h; j++)
                    draw_dot(bg, x, j, out);

            old = AV_RN32(out->data[0] + y * out->linesize[0] + x * 4);
            for (j = y; j < outlink->h; j++) {
                if (old != bg &&
                    (AV_RN32(out->data[0] + j * out->linesize[0] + x * 4) != old) ||
                    AV_RN32(out->data[0] + FFMIN(j+1, outlink->h - 1) * out->linesize[0] + x * 4) != old) {
                    draw_dot(fg, x, j, out);
                    break;
                }
                draw_dot(fg, x, j, out);
            }
            break;
        case 1:
            if (i == 0 && (s->slide > 0))
                for (j = 0; j < outlink->h; j++)
                    draw_dot(bg, x, j, out);

            draw_dot(fg, x, y, out);
            break;
        case 2:
            if (s->first[i]) {
                s->first[i] = 0;
                s->prev_y[i] = y;
            }

            if (i == 0 && (s->slide > 0)) {
                for (j = 0; j < y; j++)
                    draw_dot(bg, x, j, out);
                for (j = outlink->h - 1; j > y; j--)
                    draw_dot(bg, x, j, out);
            }

            if (y <= s->prev_y[i]) {
                for (j = y; j <= s->prev_y[i]; j++)
                    draw_dot(fg, x, j, out);
            } else {
                for (j = s->prev_y[i]; j <= y; j++)
                    draw_dot(fg, x, j, out);
            }
            s->prev_y[i] = y;
            break;
        }
    }

    s->nb_values++;
    s->x++;

    in_pts = in->pts;

    av_frame_free(&in);

    if (s->slide == 4)
        return 0;

    out_pts = av_rescale_q(in_pts, inlink->time_base, outlink->time_base);

    if (out_pts == s->prev_pts)
        return 0;

    clone = av_frame_clone(s->out);
    if (!clone)
        return AVERROR(ENOMEM);

    clone->pts = s->prev_pts = out_pts;
    return ff_filter_frame(outlink, clone);
}

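/*
 * request_frame(): in "picture" mode the buffered samples are rendered into
 * a single frame once the input reaches EOF; x advances only every
 * step = ceil(nb_values / w) samples so the whole sequence fits into the
 * graph width.
 */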
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    DrawGraphContext *s = ctx->priv;
    AVFrame *out = s->out;
    int ret, i, k, step, l;

    ret = ff_request_frame(ctx->inputs[0]);

    if (s->slide == 4 && ret == AVERROR_EOF && s->nb_values > 0) {
        s->x = l = 0;
        step = ceil(s->nb_values / (float)s->w);

        for (k = 0; k < s->nb_values; k++) {
            for (i = 0; i < 4; i++) {
                double values[VAR_VARS_NB];
                int j, y, x, old;
                uint32_t fg, bg;
                float vf = s->values[i][k];

                if (isnan(vf))
                    continue;

                values[VAR_MIN] = s->min;
                values[VAR_MAX] = s->max;
                values[VAR_VAL] = vf;

                fg = av_expr_eval(s->fg_expr[i], values, NULL);
                bg = AV_RN32(s->bg);

                x = s->x;
                y = (outlink->h - 1) * (1 - ((vf - s->min) / (s->max - s->min)));

                switch (s->mode) {
                case 0:
                    old = AV_RN32(out->data[0] + y * out->linesize[0] + x * 4);
                    for (j = y; j < outlink->h; j++) {
                        if (old != bg &&
                            (AV_RN32(out->data[0] + j * out->linesize[0] + x * 4) != old) ||
                            AV_RN32(out->data[0] + FFMIN(j+1, outlink->h - 1) * out->linesize[0] + x * 4) != old) {
                            draw_dot(fg, x, j, out);
                            break;
                        }
                        draw_dot(fg, x, j, out);
                    }
                    break;
                case 1:
                    draw_dot(fg, x, y, out);
                    break;
                case 2:
                    if (s->first[i]) {
                        s->first[i] = 0;
                        s->prev_y[i] = y;
                    }

                    if (y <= s->prev_y[i]) {
                        for (j = y; j <= s->prev_y[i]; j++)
                            draw_dot(fg, x, j, out);
                    } else {
                        for (j = s->prev_y[i]; j <= y; j++)
                            draw_dot(fg, x, j, out);
                    }
                    s->prev_y[i] = y;
                    break;
                }
            }

            l++;
            if (l >= step) {
                l = 0;
                s->x++;
            }
        }

        s->nb_values = 0;
        out->pts = 0;
        ret = ff_filter_frame(ctx->outputs[0], s->out);
    }

    return ret;
}

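/* Output geometry, frame rate and time base come straight from the filter
 * options; prev_pts is reset so the first output frame is always emitted. */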
static int config_output(AVFilterLink *outlink)
{
    DrawGraphContext *s = outlink->src->priv;

    outlink->w = s->w;
    outlink->h = s->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};
    outlink->frame_rate = s->frame_rate;
    outlink->time_base = av_inv_q(outlink->frame_rate);
    s->prev_pts = AV_NOPTS_VALUE;

    return 0;
}

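/* Free parsed expressions, the sample buffers, and the cached output frame.
 * In "picture" mode the frame is not freed here because request_frame()
 * already handed it to ff_filter_frame(), which takes ownership. */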
static av_cold void uninit(AVFilterContext *ctx)
{
    DrawGraphContext *s = ctx->priv;
    int i;

    for (i = 0; i < 4; i++)
        av_expr_free(s->fg_expr[i]);

    if (s->slide != 4)
        av_frame_free(&s->out);

    av_freep(&s->values[0]);
    av_freep(&s->values[1]);
    av_freep(&s->values[2]);
    av_freep(&s->values[3]);
}

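/*
 * Two filters are registered from this file: drawgraph (video input) and
 * adrawgraph (audio input). They share the implementation and options and
 * differ only in the input pad's media type.
 */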
#if CONFIG_DRAWGRAPH_FILTER

AVFILTER_DEFINE_CLASS(drawgraph);

static const AVFilterPad drawgraph_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad drawgraph_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_vf_drawgraph = {
    .name          = "drawgraph",
    .description   = NULL_IF_CONFIG_SMALL("Draw a graph using input video metadata."),
    .priv_size     = sizeof(DrawGraphContext),
    .priv_class    = &drawgraph_class,
    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,
    .inputs        = drawgraph_inputs,
    .outputs       = drawgraph_outputs,
};

#endif // CONFIG_DRAWGRAPH_FILTER

#if CONFIG_ADRAWGRAPH_FILTER

#define adrawgraph_options drawgraph_options

AVFILTER_DEFINE_CLASS(adrawgraph);

static const AVFilterPad adrawgraph_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad adrawgraph_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_avf_adrawgraph = {
    .name          = "adrawgraph",
    .description   = NULL_IF_CONFIG_SMALL("Draw a graph using input audio metadata."),
    .priv_size     = sizeof(DrawGraphContext),
    .priv_class    = &adrawgraph_class,
    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,
    .inputs        = adrawgraph_inputs,
    .outputs       = adrawgraph_outputs,
};

#endif // CONFIG_ADRAWGRAPH_FILTER