You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

906 lines
31KB

  1. /*
  2. * Copyright (c) 2012 Stefano Sabatini
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * @file
  22. * audio to video multimedia filter
  23. */
  24. #include "libavutil/avassert.h"
  25. #include "libavutil/avstring.h"
  26. #include "libavutil/channel_layout.h"
  27. #include "libavutil/opt.h"
  28. #include "libavutil/parseutils.h"
  29. #include "avfilter.h"
  30. #include "filters.h"
  31. #include "formats.h"
  32. #include "audio.h"
  33. #include "video.h"
  34. #include "internal.h"
/* Waveform drawing style, selected by the "mode" option. */
enum ShowWavesMode {
    MODE_POINT,         ///< one pixel per sample
    MODE_LINE,          ///< vertical line from the mid row to the sample
    MODE_P2P,           ///< point per sample, joined to the previous sample
    MODE_CENTERED_LINE, ///< vertical line centered on the mid row
    MODE_NB,            ///< number of modes, keep last
};
/* Amplitude-to-height mapping, selected by the "scale" option. */
enum ShowWavesScale {
    SCALE_LIN,  ///< linear
    SCALE_LOG,  ///< logarithmic
    SCALE_SQRT, ///< square root
    SCALE_CBRT, ///< cubic root
    SCALE_NB,   ///< number of scales, keep last
};
/* Pixel writing strategy, selected by the "draw" option. */
enum ShowWavesDrawMode {
    DRAW_SCALE, ///< additive: scaled color values accumulate per pixel
    DRAW_FULL,  ///< overwrite: full color value written directly
    DRAW_NB,    ///< number of draw modes, keep last
};
/* Per-column sample reduction for showwavespic, "filter" option. */
enum ShowWavesFilterMode {
    FILTER_AVERAGE, ///< average the absolute sample values in a column
    FILTER_PEAK,    ///< keep the peak absolute sample value in a column
    FILTER_NB,      ///< number of filter modes, keep last
};
/* Node of the singly-linked list in which showwavespic queues all input
 * audio frames until EOF, when the single picture is rendered (see uninit()
 * for the matching teardown). */
struct frame_node {
    AVFrame *frame;          ///< queued audio frame, owned by the list
    struct frame_node *next; ///< next node, or NULL at the tail
};
typedef struct ShowWavesContext {
    const AVClass *class;
    int w, h;                ///< output video size ("size" option)
    AVRational rate;         ///< output frame rate ("rate" option)
    char *colors;            ///< '|'-separated per-channel color names
    int buf_idx;             ///< current column (x position) being drawn
    int16_t *buf_idy;        /* y coordinate of previous sample for each channel */
    AVFrame *outpicref;      ///< picture currently being drawn, NULL when none
    int n;                   ///< number of samples folded into one column
    int pixstep;             ///< bytes per pixel: 1 for gray8, 4 for RGBA
    int sample_count_mod;    ///< samples drawn so far in the current column
    int mode;                ///< ShowWavesMode
    int scale;               ///< ShowWavesScale
    int draw_mode;           ///< ShowWavesDrawMode
    int split_channels;      ///< draw each channel in its own horizontal band
    int filter_mode;         ///< ShowWavesFilterMode (showwavespic only)
    uint8_t *fg;             ///< per-channel foreground color, 4 bytes per channel
    int (*get_h)(int16_t sample, int height);          ///< amplitude -> height mapping
    void (*draw_sample)(uint8_t *buf, int height, int linesize,
                        int16_t *prev_y, const uint8_t color[4], int h); ///< pixel writer

    /* single picture */
    int single_pic;                  ///< set when running as showwavespic
    struct frame_node *audio_frames; ///< head of the queued-frames list
    struct frame_node *last_frame;   ///< tail of the queued-frames list
    int64_t total_samples;           ///< total samples queued across all frames
    int64_t *sum; /* abs sum of the samples per channel */
} ShowWavesContext;
#define OFFSET(x) offsetof(ShowWavesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* User options of the showwaves filter; the named constants belong to the
 * "mode", "scale" and "draw" option units respectively. */
static const AVOption showwaves_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
        { "point", "draw a point for each sample",         0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT},         .flags=FLAGS, .unit="mode"},
        { "line",  "draw a line for each sample",          0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE},          .flags=FLAGS, .unit="mode"},
        { "p2p",   "draw a line between samples",          0, AV_OPT_TYPE_CONST, {.i64=MODE_P2P},           .flags=FLAGS, .unit="mode"},
        { "cline", "draw a centered line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_CENTERED_LINE}, .flags=FLAGS, .unit="mode"},
    { "n",    "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
    { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
    { "r",    "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
    { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
        { "lin",  "linear",         0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN},  .flags=FLAGS, .unit="scale"},
        { "log",  "logarithmic",    0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG},  .flags=FLAGS, .unit="scale"},
        { "sqrt", "square root",    0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
        { "cbrt", "cubic root",     0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
    { "draw", "set draw mode", OFFSET(draw_mode), AV_OPT_TYPE_INT, {.i64 = DRAW_SCALE}, 0, DRAW_NB-1, FLAGS, .unit="draw" },
        { "scale", "scale pixel values for each drawn sample", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_SCALE}, .flags=FLAGS, .unit="draw"},
        { "full",  "draw every pixel for sample directly",     0, AV_OPT_TYPE_CONST, {.i64=DRAW_FULL},  .flags=FLAGS, .unit="draw"},
    { NULL }
};

AVFILTER_DEFINE_CLASS(showwaves);
  116. static av_cold void uninit(AVFilterContext *ctx)
  117. {
  118. ShowWavesContext *showwaves = ctx->priv;
  119. av_frame_free(&showwaves->outpicref);
  120. av_freep(&showwaves->buf_idy);
  121. av_freep(&showwaves->fg);
  122. if (showwaves->single_pic) {
  123. struct frame_node *node = showwaves->audio_frames;
  124. while (node) {
  125. struct frame_node *tmp = node;
  126. node = node->next;
  127. av_frame_free(&tmp->frame);
  128. av_freep(&tmp);
  129. }
  130. av_freep(&showwaves->sum);
  131. showwaves->last_frame = NULL;
  132. }
  133. }
  134. static int query_formats(AVFilterContext *ctx)
  135. {
  136. AVFilterFormats *formats = NULL;
  137. AVFilterChannelLayouts *layouts = NULL;
  138. AVFilterLink *inlink = ctx->inputs[0];
  139. AVFilterLink *outlink = ctx->outputs[0];
  140. static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
  141. static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
  142. int ret;
  143. /* set input audio formats */
  144. formats = ff_make_format_list(sample_fmts);
  145. if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0)
  146. return ret;
  147. layouts = ff_all_channel_layouts();
  148. if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0)
  149. return ret;
  150. formats = ff_all_samplerates();
  151. if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
  152. return ret;
  153. /* set output video format */
  154. formats = ff_make_format_list(pix_fmts);
  155. if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
  156. return ret;
  157. return 0;
  158. }
  159. static int get_lin_h(int16_t sample, int height)
  160. {
  161. return height/2 - av_rescale(sample, height/2, INT16_MAX);
  162. }
  163. static int get_lin_h2(int16_t sample, int height)
  164. {
  165. return av_rescale(FFABS(sample), height, INT16_MAX);
  166. }
  167. static int get_log_h(int16_t sample, int height)
  168. {
  169. return height/2 - FFSIGN(sample) * (log10(1 + FFABS(sample)) * (height/2) / log10(1 + INT16_MAX));
  170. }
  171. static int get_log_h2(int16_t sample, int height)
  172. {
  173. return log10(1 + FFABS(sample)) * height / log10(1 + INT16_MAX);
  174. }
  175. static int get_sqrt_h(int16_t sample, int height)
  176. {
  177. return height/2 - FFSIGN(sample) * (sqrt(FFABS(sample)) * (height/2) / sqrt(INT16_MAX));
  178. }
  179. static int get_sqrt_h2(int16_t sample, int height)
  180. {
  181. return sqrt(FFABS(sample)) * height / sqrt(INT16_MAX);
  182. }
  183. static int get_cbrt_h(int16_t sample, int height)
  184. {
  185. return height/2 - FFSIGN(sample) * (cbrt(FFABS(sample)) * (height/2) / cbrt(INT16_MAX));
  186. }
  187. static int get_cbrt_h2(int16_t sample, int height)
  188. {
  189. return cbrt(FFABS(sample)) * height / cbrt(INT16_MAX);
  190. }
  191. static void draw_sample_point_rgba_scale(uint8_t *buf, int height, int linesize,
  192. int16_t *prev_y,
  193. const uint8_t color[4], int h)
  194. {
  195. if (h >= 0 && h < height) {
  196. buf[h * linesize + 0] += color[0];
  197. buf[h * linesize + 1] += color[1];
  198. buf[h * linesize + 2] += color[2];
  199. buf[h * linesize + 3] += color[3];
  200. }
  201. }
  202. static void draw_sample_point_rgba_full(uint8_t *buf, int height, int linesize,
  203. int16_t *prev_y,
  204. const uint8_t color[4], int h)
  205. {
  206. if (h >= 0 && h < height) {
  207. buf[h * linesize + 0] = color[0];
  208. buf[h * linesize + 1] = color[1];
  209. buf[h * linesize + 2] = color[2];
  210. buf[h * linesize + 3] = color[3];
  211. }
  212. }
  213. static void draw_sample_line_rgba_scale(uint8_t *buf, int height, int linesize,
  214. int16_t *prev_y,
  215. const uint8_t color[4], int h)
  216. {
  217. int k;
  218. int start = height/2;
  219. int end = av_clip(h, 0, height-1);
  220. if (start > end)
  221. FFSWAP(int16_t, start, end);
  222. for (k = start; k < end; k++) {
  223. buf[k * linesize + 0] += color[0];
  224. buf[k * linesize + 1] += color[1];
  225. buf[k * linesize + 2] += color[2];
  226. buf[k * linesize + 3] += color[3];
  227. }
  228. }
  229. static void draw_sample_line_rgba_full(uint8_t *buf, int height, int linesize,
  230. int16_t *prev_y,
  231. const uint8_t color[4], int h)
  232. {
  233. int k;
  234. int start = height/2;
  235. int end = av_clip(h, 0, height-1);
  236. if (start > end)
  237. FFSWAP(int16_t, start, end);
  238. for (k = start; k < end; k++) {
  239. buf[k * linesize + 0] = color[0];
  240. buf[k * linesize + 1] = color[1];
  241. buf[k * linesize + 2] = color[2];
  242. buf[k * linesize + 3] = color[3];
  243. }
  244. }
/* Point-to-point drawing (RGBA, additive mode): plot the sample at row h and
 * additively fill the vertical gap to the previous sample's row, endpoints
 * excluded. Updates *prev_y for the next call. */
static void draw_sample_p2p_rgba_scale(uint8_t *buf, int height, int linesize,
                                       int16_t *prev_y,
                                       const uint8_t color[4], int h)
{
    int k;
    if (h >= 0 && h < height) {
        buf[h * linesize + 0] += color[0];
        buf[h * linesize + 1] += color[1];
        buf[h * linesize + 2] += color[2];
        buf[h * linesize + 3] += color[3];
        /* NOTE(review): *prev_y == 0 doubles as "no previous sample", so a
         * previous point exactly on row 0 is never connected — confirm intended */
        if (*prev_y && h != *prev_y) {
            int start = *prev_y;
            int end = av_clip(h, 0, height-1);
            if (start > end)
                FFSWAP(int16_t, start, end);
            for (k = start + 1; k < end; k++) {
                buf[k * linesize + 0] += color[0];
                buf[k * linesize + 1] += color[1];
                buf[k * linesize + 2] += color[2];
                buf[k * linesize + 3] += color[3];
            }
        }
    }
    *prev_y = h; /* remember this sample's row for the next call */
}
/* Point-to-point drawing (RGBA, overwrite mode): plot the sample at row h and
 * overwrite the vertical gap to the previous sample's row, endpoints excluded.
 * Updates *prev_y for the next call. */
static void draw_sample_p2p_rgba_full(uint8_t *buf, int height, int linesize,
                                      int16_t *prev_y,
                                      const uint8_t color[4], int h)
{
    int k;
    if (h >= 0 && h < height) {
        buf[h * linesize + 0] = color[0];
        buf[h * linesize + 1] = color[1];
        buf[h * linesize + 2] = color[2];
        buf[h * linesize + 3] = color[3];
        /* NOTE(review): *prev_y == 0 doubles as "no previous sample" — see
         * the scale variant; same caveat applies */
        if (*prev_y && h != *prev_y) {
            int start = *prev_y;
            int end = av_clip(h, 0, height-1);
            if (start > end)
                FFSWAP(int16_t, start, end);
            for (k = start + 1; k < end; k++) {
                buf[k * linesize + 0] = color[0];
                buf[k * linesize + 1] = color[1];
                buf[k * linesize + 2] = color[2];
                buf[k * linesize + 3] = color[3];
            }
        }
    }
    *prev_y = h; /* remember this sample's row for the next call */
}
  295. static void draw_sample_cline_rgba_scale(uint8_t *buf, int height, int linesize,
  296. int16_t *prev_y,
  297. const uint8_t color[4], int h)
  298. {
  299. int k;
  300. const int start = (height - h) / 2;
  301. const int end = start + h;
  302. for (k = start; k < end; k++) {
  303. buf[k * linesize + 0] += color[0];
  304. buf[k * linesize + 1] += color[1];
  305. buf[k * linesize + 2] += color[2];
  306. buf[k * linesize + 3] += color[3];
  307. }
  308. }
  309. static void draw_sample_cline_rgba_full(uint8_t *buf, int height, int linesize,
  310. int16_t *prev_y,
  311. const uint8_t color[4], int h)
  312. {
  313. int k;
  314. const int start = (height - h) / 2;
  315. const int end = start + h;
  316. for (k = start; k < end; k++) {
  317. buf[k * linesize + 0] = color[0];
  318. buf[k * linesize + 1] = color[1];
  319. buf[k * linesize + 2] = color[2];
  320. buf[k * linesize + 3] = color[3];
  321. }
  322. }
  323. static void draw_sample_point_gray(uint8_t *buf, int height, int linesize,
  324. int16_t *prev_y,
  325. const uint8_t color[4], int h)
  326. {
  327. if (h >= 0 && h < height)
  328. buf[h * linesize] += color[0];
  329. }
  330. static void draw_sample_line_gray(uint8_t *buf, int height, int linesize,
  331. int16_t *prev_y,
  332. const uint8_t color[4], int h)
  333. {
  334. int k;
  335. int start = height/2;
  336. int end = av_clip(h, 0, height-1);
  337. if (start > end)
  338. FFSWAP(int16_t, start, end);
  339. for (k = start; k < end; k++)
  340. buf[k * linesize] += color[0];
  341. }
/* Point-to-point drawing (gray): plot the sample at row h and additively fill
 * the vertical gap to the previous sample's row, endpoints excluded.
 * Updates *prev_y for the next call. */
static void draw_sample_p2p_gray(uint8_t *buf, int height, int linesize,
                                 int16_t *prev_y,
                                 const uint8_t color[4], int h)
{
    int k;
    if (h >= 0 && h < height) {
        buf[h * linesize] += color[0];
        /* NOTE(review): *prev_y == 0 doubles as "no previous sample", so a
         * previous point exactly on row 0 is never connected — confirm intended */
        if (*prev_y && h != *prev_y) {
            int start = *prev_y;
            int end = av_clip(h, 0, height-1);
            if (start > end)
                FFSWAP(int16_t, start, end);
            for (k = start + 1; k < end; k++)
                buf[k * linesize] += color[0];
        }
    }
    *prev_y = h; /* remember this sample's row for the next call */
}
  360. static void draw_sample_cline_gray(uint8_t *buf, int height, int linesize,
  361. int16_t *prev_y,
  362. const uint8_t color[4], int h)
  363. {
  364. int k;
  365. const int start = (height - h) / 2;
  366. const int end = start + h;
  367. for (k = start; k < end; k++)
  368. buf[k * linesize] += color[0];
  369. }
/* Output link configuration: derive per-column sample count, allocate
 * per-channel state, and select the draw/scale callbacks matching the
 * negotiated pixel format and the user options. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int nb_channels = inlink->channels;
    char *colors, *saveptr = NULL;
    uint8_t x;
    int ch;

    /* showwavespic always accumulates into a single picture column by column */
    if (showwaves->single_pic)
        showwaves->n = 1;

    /* default n: samples per column so that w columns match the frame rate */
    if (!showwaves->n)
        showwaves->n = FFMAX(1, av_rescale_q(inlink->sample_rate, av_make_q(1, showwaves->w), showwaves->rate));

    showwaves->buf_idx = 0;
    if (!(showwaves->buf_idy = av_mallocz_array(nb_channels, sizeof(*showwaves->buf_idy)))) {
        av_log(ctx, AV_LOG_ERROR, "Could not allocate showwaves buffer\n");
        return AVERROR(ENOMEM);
    }
    outlink->w = showwaves->w;
    outlink->h = showwaves->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};

    /* effective rate: sample_rate / (n * w) frames per second */
    outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n},
                                   (AVRational){showwaves->w,1});

    av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n",
           showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n);

    /* select the pixel writer for the negotiated format / mode / draw mode */
    switch (outlink->format) {
    case AV_PIX_FMT_GRAY8:
        switch (showwaves->mode) {
        case MODE_POINT:         showwaves->draw_sample = draw_sample_point_gray; break;
        case MODE_LINE:          showwaves->draw_sample = draw_sample_line_gray;  break;
        case MODE_P2P:           showwaves->draw_sample = draw_sample_p2p_gray;   break;
        case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline_gray; break;
        default:
            return AVERROR_BUG;
        }
        showwaves->pixstep = 1;
        break;
    case AV_PIX_FMT_RGBA:
        switch (showwaves->mode) {
        case MODE_POINT:         showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_point_rgba_scale : draw_sample_point_rgba_full; break;
        case MODE_LINE:          showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_line_rgba_scale  : draw_sample_line_rgba_full;  break;
        case MODE_P2P:           showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_p2p_rgba_scale   : draw_sample_p2p_rgba_full;   break;
        case MODE_CENTERED_LINE: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_cline_rgba_scale : draw_sample_cline_rgba_full; break;
        default:
            return AVERROR_BUG;
        }
        showwaves->pixstep = 4;
        break;
    }

    /* select the amplitude -> height mapping for the chosen scale/mode */
    switch (showwaves->scale) {
    case SCALE_LIN:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_lin_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_lin_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    case SCALE_LOG:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_log_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_log_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    case SCALE_SQRT:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_sqrt_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_sqrt_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    case SCALE_CBRT:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_cbrt_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_cbrt_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    }

    showwaves->fg = av_malloc_array(nb_channels, 4 * sizeof(*showwaves->fg));
    if (!showwaves->fg)
        return AVERROR(ENOMEM);

    colors = av_strdup(showwaves->colors);
    if (!colors)
        return AVERROR(ENOMEM);

    if (showwaves->draw_mode == DRAW_SCALE) {
        /* multiplication factor, pre-computed to avoid in-loop divisions */
        /* NOTE(review): integer division can yield 0 for large n*nb_channels,
         * making the waveform invisible — confirm acceptable */
        x = 255 / ((showwaves->split_channels ? 1 : nb_channels) * showwaves->n);
    } else {
        x = 255;
    }
    if (outlink->format == AV_PIX_FMT_RGBA) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };

        for (ch = 0; ch < nb_channels; ch++) {
            char *color;

            /* parse the '|'-separated color list; a parse failure keeps
             * the previously-parsed (or default white) color */
            color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
            if (color)
                av_parse_color(fg, color, -1, ctx);
            showwaves->fg[4*ch + 0] = fg[0] * x / 255.;
            showwaves->fg[4*ch + 1] = fg[1] * x / 255.;
            showwaves->fg[4*ch + 2] = fg[2] * x / 255.;
            showwaves->fg[4*ch + 3] = fg[3] * x / 255.;
        }
    } else {
        for (ch = 0; ch < nb_channels; ch++)
            showwaves->fg[4 * ch + 0] = x;
    }
    av_free(colors);
    return 0;
}
  492. inline static int push_frame(AVFilterLink *outlink)
  493. {
  494. AVFilterContext *ctx = outlink->src;
  495. AVFilterLink *inlink = ctx->inputs[0];
  496. ShowWavesContext *showwaves = outlink->src->priv;
  497. int nb_channels = inlink->channels;
  498. int ret, i;
  499. ret = ff_filter_frame(outlink, showwaves->outpicref);
  500. showwaves->outpicref = NULL;
  501. showwaves->buf_idx = 0;
  502. for (i = 0; i < nb_channels; i++)
  503. showwaves->buf_idy[i] = 0;
  504. return ret;
  505. }
  506. static int push_single_pic(AVFilterLink *outlink)
  507. {
  508. AVFilterContext *ctx = outlink->src;
  509. AVFilterLink *inlink = ctx->inputs[0];
  510. ShowWavesContext *showwaves = ctx->priv;
  511. int64_t n = 0, column_max_samples = showwaves->total_samples / outlink->w;
  512. int64_t remaining_samples = showwaves->total_samples - (column_max_samples * outlink->w);
  513. int64_t last_column_samples = column_max_samples + remaining_samples;
  514. AVFrame *out = showwaves->outpicref;
  515. struct frame_node *node;
  516. const int nb_channels = inlink->channels;
  517. const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
  518. const int linesize = out->linesize[0];
  519. const int pixstep = showwaves->pixstep;
  520. int col = 0;
  521. int64_t *sum = showwaves->sum;
  522. if (column_max_samples == 0) {
  523. av_log(ctx, AV_LOG_ERROR, "Too few samples\n");
  524. return AVERROR(EINVAL);
  525. }
  526. av_log(ctx, AV_LOG_DEBUG, "Create frame averaging %"PRId64" samples per column\n", column_max_samples);
  527. memset(sum, 0, nb_channels);
  528. for (node = showwaves->audio_frames; node; node = node->next) {
  529. int i;
  530. const AVFrame *frame = node->frame;
  531. const int16_t *p = (const int16_t *)frame->data[0];
  532. for (i = 0; i < frame->nb_samples; i++) {
  533. int64_t max_samples = col == outlink->w - 1 ? last_column_samples: column_max_samples;
  534. int ch;
  535. switch (showwaves->filter_mode) {
  536. case FILTER_AVERAGE:
  537. for (ch = 0; ch < nb_channels; ch++)
  538. sum[ch] += abs(p[ch + i*nb_channels]) << 1;
  539. break;
  540. case FILTER_PEAK:
  541. for (ch = 0; ch < nb_channels; ch++)
  542. sum[ch] = FFMAX(sum[ch], abs(p[ch + i*nb_channels]));
  543. break;
  544. }
  545. n++;
  546. if (n == max_samples) {
  547. for (ch = 0; ch < nb_channels; ch++) {
  548. int16_t sample = sum[ch] / (showwaves->filter_mode == FILTER_AVERAGE ? max_samples : 1);
  549. uint8_t *buf = out->data[0] + col * pixstep;
  550. int h;
  551. if (showwaves->split_channels)
  552. buf += ch*ch_height*linesize;
  553. av_assert0(col < outlink->w);
  554. h = showwaves->get_h(sample, ch_height);
  555. showwaves->draw_sample(buf, ch_height, linesize, &showwaves->buf_idy[ch], &showwaves->fg[ch * 4], h);
  556. sum[ch] = 0;
  557. }
  558. col++;
  559. n = 0;
  560. }
  561. }
  562. }
  563. return push_frame(outlink);
  564. }
  565. static int request_frame(AVFilterLink *outlink)
  566. {
  567. ShowWavesContext *showwaves = outlink->src->priv;
  568. AVFilterLink *inlink = outlink->src->inputs[0];
  569. int ret;
  570. ret = ff_request_frame(inlink);
  571. if (ret == AVERROR_EOF && showwaves->outpicref) {
  572. if (showwaves->single_pic)
  573. push_single_pic(outlink);
  574. else
  575. push_frame(outlink);
  576. }
  577. return ret;
  578. }
  579. static int alloc_out_frame(ShowWavesContext *showwaves, const int16_t *p,
  580. const AVFilterLink *inlink, AVFilterLink *outlink,
  581. const AVFrame *in)
  582. {
  583. if (!showwaves->outpicref) {
  584. int j;
  585. AVFrame *out = showwaves->outpicref =
  586. ff_get_video_buffer(outlink, outlink->w, outlink->h);
  587. if (!out)
  588. return AVERROR(ENOMEM);
  589. out->width = outlink->w;
  590. out->height = outlink->h;
  591. out->pts = in->pts + av_rescale_q((p - (int16_t *)in->data[0]) / inlink->channels,
  592. av_make_q(1, inlink->sample_rate),
  593. outlink->time_base);
  594. for (j = 0; j < outlink->h; j++)
  595. memset(out->data[0] + j*out->linesize[0], 0, outlink->w * showwaves->pixstep);
  596. }
  597. return 0;
  598. }
  599. static av_cold int init(AVFilterContext *ctx)
  600. {
  601. ShowWavesContext *showwaves = ctx->priv;
  602. if (!strcmp(ctx->filter->name, "showwavespic")) {
  603. showwaves->single_pic = 1;
  604. showwaves->mode = MODE_CENTERED_LINE;
  605. }
  606. return 0;
  607. }
#if CONFIG_SHOWWAVES_FILTER

/* Draw insamples into the current output picture, pushing completed pictures
 * downstream as columns fill up. Takes ownership of insamples (always freed
 * before returning). Returns 0 or a negative AVERROR code. */
static int showwaves_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    const int nb_samples = insamples->nb_samples;
    AVFrame *outpicref = showwaves->outpicref;
    int16_t *p = (int16_t *)insamples->data[0]; /* interleaved s16 samples */
    int nb_channels = inlink->channels;
    int i, j, ret = 0;
    const int pixstep = showwaves->pixstep;
    const int n = showwaves->n;                 /* samples per output column */
    const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;

    /* draw data in the buffer */
    for (i = 0; i < nb_samples; i++) {

        ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
        if (ret < 0)
            goto end;
        outpicref = showwaves->outpicref;

        for (j = 0; j < nb_channels; j++) {
            uint8_t *buf = outpicref->data[0] + showwaves->buf_idx * pixstep;
            const int linesize = outpicref->linesize[0];
            int h;

            if (showwaves->split_channels)
                buf += j*ch_height*linesize; /* each channel in its own band */
            h = showwaves->get_h(*p++, ch_height);
            showwaves->draw_sample(buf, ch_height, linesize,
                                   &showwaves->buf_idy[j], &showwaves->fg[j * 4], h);
        }

        /* advance one column every n samples */
        showwaves->sample_count_mod++;
        if (showwaves->sample_count_mod == n) {
            showwaves->sample_count_mod = 0;
            showwaves->buf_idx++;
        }
        /* push when the picture is full, or at the last sample once the
         * input link has a status set (e.g. EOF) */
        if (showwaves->buf_idx == showwaves->w ||
            (ff_outlink_get_status(inlink) && i == nb_samples - 1))
            if ((ret = push_frame(outlink)) < 0)
                break;
        outpicref = showwaves->outpicref;
    }

end:
    av_frame_free(&insamples);
    return ret;
}
/* Activate callback: consume exactly n * w samples at a time (one full
 * output picture worth) and forward status/wanted flags between the links. */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    AVFrame *in;
    const int nb_samples = showwaves->n * outlink->w;
    int ret;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* min == max == nb_samples: only full-picture chunks are consumed */
    ret = ff_inlink_consume_samples(inlink, nb_samples, nb_samples, &in);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return showwaves_filter_frame(inlink, in);

    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}
/* Pad and filter definitions for showwaves (continuous waveform video). */
static const AVFilterPad showwaves_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

static const AVFilterPad showwaves_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_avf_showwaves = {
    .name = "showwaves",
    .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
    .init = init,
    .uninit = uninit,
    .query_formats = query_formats,
    .priv_size = sizeof(ShowWavesContext),
    .inputs = showwaves_inputs,
    .activate = activate,
    .outputs = showwaves_outputs,
    .priv_class = &showwaves_class,
};

#endif // CONFIG_SHOWWAVES_FILTER
#if CONFIG_SHOWWAVESPIC_FILTER

#define OFFSET(x) offsetof(ShowWavesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* User options of showwavespic; a subset of showwaves plus "filter",
 * which controls the per-column sample reduction. */
static const AVOption showwavespic_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
    { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
        { "lin",  "linear",      0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN},  .flags=FLAGS, .unit="scale"},
        { "log",  "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG},  .flags=FLAGS, .unit="scale"},
        { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
        { "cbrt", "cubic root",  0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
    { "draw", "set draw mode", OFFSET(draw_mode), AV_OPT_TYPE_INT, {.i64 = DRAW_SCALE}, 0, DRAW_NB-1, FLAGS, .unit="draw" },
        { "scale", "scale pixel values for each drawn sample", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_SCALE}, .flags=FLAGS, .unit="draw"},
        { "full",  "draw every pixel for sample directly",     0, AV_OPT_TYPE_CONST, {.i64=DRAW_FULL},  .flags=FLAGS, .unit="draw"},
    { "filter", "set filter mode", OFFSET(filter_mode), AV_OPT_TYPE_INT, {.i64 = FILTER_AVERAGE}, 0, FILTER_NB-1, FLAGS, .unit="filter" },
        { "average", "use average samples", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_AVERAGE}, .flags=FLAGS, .unit="filter"},
        { "peak",    "use peak samples",    0, AV_OPT_TYPE_CONST, {.i64=FILTER_PEAK},    .flags=FLAGS, .unit="filter"},
    { NULL }
};

AVFILTER_DEFINE_CLASS(showwavespic);
  721. static int showwavespic_config_input(AVFilterLink *inlink)
  722. {
  723. AVFilterContext *ctx = inlink->dst;
  724. ShowWavesContext *showwaves = ctx->priv;
  725. if (showwaves->single_pic) {
  726. showwaves->sum = av_mallocz_array(inlink->channels, sizeof(*showwaves->sum));
  727. if (!showwaves->sum)
  728. return AVERROR(ENOMEM);
  729. }
  730. return 0;
  731. }
/* showwavespic input callback: queue the audio frame for rendering later in
 * push_single_pic(). On success ownership of insamples transfers to the
 * queue; on any error path (or when single_pic is unset) it is freed here. */
static int showwavespic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int16_t *p = (int16_t *)insamples->data[0];
    int ret = 0;

    if (showwaves->single_pic) {
        struct frame_node *f;

        /* make sure the output canvas exists before queuing */
        ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
        if (ret < 0)
            goto end;

        /* queue the audio frame */
        f = av_malloc(sizeof(*f));
        if (!f) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        f->frame = insamples;
        f->next = NULL;

        /* append at the tail of the list */
        if (!showwaves->last_frame) {
            showwaves->audio_frames =
            showwaves->last_frame = f;
        } else {
            showwaves->last_frame->next = f;
            showwaves->last_frame = f;
        }
        showwaves->total_samples += insamples->nb_samples;

        /* insamples now owned by the queue — do not free */
        return 0;
    }

end:
    av_frame_free(&insamples);
    return ret;
}
/* Pad and filter definitions for showwavespic (single-picture waveform). */
static const AVFilterPad showwavespic_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = showwavespic_config_input,
        .filter_frame = showwavespic_filter_frame,
    },
    { NULL }
};

static const AVFilterPad showwavespic_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showwavespic = {
    .name = "showwavespic",
    .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output single picture."),
    .init = init,
    .uninit = uninit,
    .query_formats = query_formats,
    .priv_size = sizeof(ShowWavesContext),
    .inputs = showwavespic_inputs,
    .outputs = showwavespic_outputs,
    .priv_class = &showwavespic_class,
};

#endif // CONFIG_SHOWWAVESPIC_FILTER