You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

856 lines
29KB

  1. /*
  2. * Copyright (c) 2012 Stefano Sabatini
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * @file
  22. * audio to video multimedia filter
  23. */
  24. #include "libavutil/avassert.h"
  25. #include "libavutil/avstring.h"
  26. #include "libavutil/channel_layout.h"
  27. #include "libavutil/opt.h"
  28. #include "libavutil/parseutils.h"
  29. #include "avfilter.h"
  30. #include "formats.h"
  31. #include "audio.h"
  32. #include "video.h"
  33. #include "internal.h"
/* Waveform rendering mode; selects the draw_sample_* implementation. */
enum ShowWavesMode {
    MODE_POINT,         // one pixel per sample
    MODE_LINE,          // vertical line from mid-height to the sample
    MODE_P2P,           // pixel per sample, connected to the previous sample's y
    MODE_CENTERED_LINE, // vertically centered line of the sample's magnitude
    MODE_NB,            // number of modes; sentinel, keep last
};
/* Amplitude-to-height mapping; selects the get_*_h / get_*_h2 function. */
enum ShowWavesScale {
    SCALE_LIN,  // linear
    SCALE_LOG,  // logarithmic
    SCALE_SQRT, // square root
    SCALE_CBRT, // cubic root
    SCALE_NB,   // number of scales; sentinel, keep last
};
/* Pixel write mode: DRAW_SCALE blends pre-scaled colors additively (+=),
 * DRAW_FULL overwrites pixels with the full color value (=). */
enum ShowWavesDrawMode {
    DRAW_SCALE, // scale pixel values for each drawn sample
    DRAW_FULL,  // draw every pixel directly at full intensity
    DRAW_NB,    // sentinel, keep last
};
/* Singly linked list node used by showwavespic to queue every input audio
 * frame until EOF, when the single output picture is drawn. */
struct frame_node {
    AVFrame *frame;          // owned reference; freed in uninit()
    struct frame_node *next; // next queued frame, NULL at the tail
};
/* Private context shared by the showwaves and showwavespic filters. */
typedef struct ShowWavesContext {
    const AVClass *class;
    int w, h;              // output video size ("size"/"s" option)
    AVRational rate;       // requested output frame rate ("rate"/"r" option)
    char *colors;          // '|'-separated per-channel color names ("colors" option)
    int buf_idx;           // current output column being drawn
    int16_t *buf_idy;      /* y coordinate of previous sample for each channel */
    AVFrame *outpicref;    // video frame currently being filled, NULL between pictures
    int n;                 // samples folded into one output column ("n" option; computed if 0)
    int pixstep;           // bytes per pixel: 1 (GRAY8) or 4 (RGBA)
    int sample_count_mod;  // samples drawn into the current column so far (modulo n)
    int mode;              ///< ShowWavesMode
    int scale;             ///< ShowWavesScale
    int draw_mode;         ///< ShowWavesDrawMode
    int split_channels;    // nonzero: each channel drawn in its own horizontal band
    uint8_t *fg;           // per-channel foreground color, 4 bytes per channel (pre-scaled in config_output)
    int (*get_h)(int16_t sample, int height);              // amplitude -> pixel height mapping
    void (*draw_sample)(uint8_t *buf, int height, int linesize,
                        int16_t *prev_y, const uint8_t color[4], int h);
    /* single picture */
    int single_pic;                  // set by init() for the showwavespic variant
    struct frame_node *audio_frames; // queued input frames (single_pic only)
    struct frame_node *last_frame;   // tail of audio_frames for O(1) append
    int64_t total_samples;           // total queued samples (single_pic only)
    int64_t *sum;          /* abs sum of the samples per channel */
} ShowWavesContext;
  83. #define OFFSET(x) offsetof(ShowWavesContext, x)
  84. #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
/* Options for the streaming showwaves filter.
 * "size"/"s" and "rate"/"r" are aliases of the same fields. */
static const AVOption showwaves_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
        { "point", "draw a point for each sample",         0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT},         .flags=FLAGS, .unit="mode"},
        { "line",  "draw a line for each sample",          0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE},          .flags=FLAGS, .unit="mode"},
        { "p2p",   "draw a line between samples",          0, AV_OPT_TYPE_CONST, {.i64=MODE_P2P},           .flags=FLAGS, .unit="mode"},
        { "cline", "draw a centered line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_CENTERED_LINE}, .flags=FLAGS, .unit="mode"},
    /* n == 0 means "derive from size and rate" (see config_output) */
    { "n",    "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
    { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
    { "r",    "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
    { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
        { "lin",  "linear",         0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN},  .flags=FLAGS, .unit="scale"},
        { "log",  "logarithmic",    0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG},  .flags=FLAGS, .unit="scale"},
        { "sqrt", "square root",    0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
        { "cbrt", "cubic root",     0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
    { "draw", "set draw mode", OFFSET(draw_mode), AV_OPT_TYPE_INT, {.i64 = DRAW_SCALE}, 0, DRAW_NB-1, FLAGS, .unit="draw" },
        { "scale", "scale pixel values for each drawn sample", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_SCALE}, .flags=FLAGS, .unit="draw"},
        { "full",  "draw every pixel for sample directly",     0, AV_OPT_TYPE_CONST, {.i64=DRAW_FULL},  .flags=FLAGS, .unit="draw"},
    { NULL }
};
  108. AVFILTER_DEFINE_CLASS(showwaves);
  109. static av_cold void uninit(AVFilterContext *ctx)
  110. {
  111. ShowWavesContext *showwaves = ctx->priv;
  112. av_frame_free(&showwaves->outpicref);
  113. av_freep(&showwaves->buf_idy);
  114. av_freep(&showwaves->fg);
  115. if (showwaves->single_pic) {
  116. struct frame_node *node = showwaves->audio_frames;
  117. while (node) {
  118. struct frame_node *tmp = node;
  119. node = node->next;
  120. av_frame_free(&tmp->frame);
  121. av_freep(&tmp);
  122. }
  123. av_freep(&showwaves->sum);
  124. showwaves->last_frame = NULL;
  125. }
  126. }
/* Negotiate interleaved s16 audio (any layout, any rate) on the input and
 * RGBA or GRAY8 video on the output. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
    int ret;

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
        return ret;

    layouts = ff_all_channel_layouts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
        return ret;

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
        return ret;

    return 0;
}
  152. static int get_lin_h(int16_t sample, int height)
  153. {
  154. return height/2 - av_rescale(sample, height/2, INT16_MAX);
  155. }
  156. static int get_lin_h2(int16_t sample, int height)
  157. {
  158. return av_rescale(FFABS(sample), height, INT16_MAX);
  159. }
  160. static int get_log_h(int16_t sample, int height)
  161. {
  162. return height/2 - FFSIGN(sample) * (log10(1 + FFABS(sample)) * (height/2) / log10(1 + INT16_MAX));
  163. }
  164. static int get_log_h2(int16_t sample, int height)
  165. {
  166. return log10(1 + FFABS(sample)) * height / log10(1 + INT16_MAX);
  167. }
  168. static int get_sqrt_h(int16_t sample, int height)
  169. {
  170. return height/2 - FFSIGN(sample) * (sqrt(FFABS(sample)) * (height/2) / sqrt(INT16_MAX));
  171. }
  172. static int get_sqrt_h2(int16_t sample, int height)
  173. {
  174. return sqrt(FFABS(sample)) * height / sqrt(INT16_MAX);
  175. }
  176. static int get_cbrt_h(int16_t sample, int height)
  177. {
  178. return height/2 - FFSIGN(sample) * (cbrt(FFABS(sample)) * (height/2) / cbrt(INT16_MAX));
  179. }
  180. static int get_cbrt_h2(int16_t sample, int height)
  181. {
  182. return cbrt(FFABS(sample)) * height / cbrt(INT16_MAX);
  183. }
  184. static void draw_sample_point_rgba_scale(uint8_t *buf, int height, int linesize,
  185. int16_t *prev_y,
  186. const uint8_t color[4], int h)
  187. {
  188. if (h >= 0 && h < height) {
  189. buf[h * linesize + 0] += color[0];
  190. buf[h * linesize + 1] += color[1];
  191. buf[h * linesize + 2] += color[2];
  192. buf[h * linesize + 3] += color[3];
  193. }
  194. }
  195. static void draw_sample_point_rgba_full(uint8_t *buf, int height, int linesize,
  196. int16_t *prev_y,
  197. const uint8_t color[4], int h)
  198. {
  199. if (h >= 0 && h < height) {
  200. buf[h * linesize + 0] = color[0];
  201. buf[h * linesize + 1] = color[1];
  202. buf[h * linesize + 2] = color[2];
  203. buf[h * linesize + 3] = color[3];
  204. }
  205. }
  206. static void draw_sample_line_rgba_scale(uint8_t *buf, int height, int linesize,
  207. int16_t *prev_y,
  208. const uint8_t color[4], int h)
  209. {
  210. int k;
  211. int start = height/2;
  212. int end = av_clip(h, 0, height-1);
  213. if (start > end)
  214. FFSWAP(int16_t, start, end);
  215. for (k = start; k < end; k++) {
  216. buf[k * linesize + 0] += color[0];
  217. buf[k * linesize + 1] += color[1];
  218. buf[k * linesize + 2] += color[2];
  219. buf[k * linesize + 3] += color[3];
  220. }
  221. }
  222. static void draw_sample_line_rgba_full(uint8_t *buf, int height, int linesize,
  223. int16_t *prev_y,
  224. const uint8_t color[4], int h)
  225. {
  226. int k;
  227. int start = height/2;
  228. int end = av_clip(h, 0, height-1);
  229. if (start > end)
  230. FFSWAP(int16_t, start, end);
  231. for (k = start; k < end; k++) {
  232. buf[k * linesize + 0] = color[0];
  233. buf[k * linesize + 1] = color[1];
  234. buf[k * linesize + 2] = color[2];
  235. buf[k * linesize + 3] = color[3];
  236. }
  237. }
  238. static void draw_sample_p2p_rgba_scale(uint8_t *buf, int height, int linesize,
  239. int16_t *prev_y,
  240. const uint8_t color[4], int h)
  241. {
  242. int k;
  243. if (h >= 0 && h < height) {
  244. buf[h * linesize + 0] += color[0];
  245. buf[h * linesize + 1] += color[1];
  246. buf[h * linesize + 2] += color[2];
  247. buf[h * linesize + 3] += color[3];
  248. if (*prev_y && h != *prev_y) {
  249. int start = *prev_y;
  250. int end = av_clip(h, 0, height-1);
  251. if (start > end)
  252. FFSWAP(int16_t, start, end);
  253. for (k = start + 1; k < end; k++) {
  254. buf[k * linesize + 0] += color[0];
  255. buf[k * linesize + 1] += color[1];
  256. buf[k * linesize + 2] += color[2];
  257. buf[k * linesize + 3] += color[3];
  258. }
  259. }
  260. }
  261. *prev_y = h;
  262. }
  263. static void draw_sample_p2p_rgba_full(uint8_t *buf, int height, int linesize,
  264. int16_t *prev_y,
  265. const uint8_t color[4], int h)
  266. {
  267. int k;
  268. if (h >= 0 && h < height) {
  269. buf[h * linesize + 0] = color[0];
  270. buf[h * linesize + 1] = color[1];
  271. buf[h * linesize + 2] = color[2];
  272. buf[h * linesize + 3] = color[3];
  273. if (*prev_y && h != *prev_y) {
  274. int start = *prev_y;
  275. int end = av_clip(h, 0, height-1);
  276. if (start > end)
  277. FFSWAP(int16_t, start, end);
  278. for (k = start + 1; k < end; k++) {
  279. buf[k * linesize + 0] = color[0];
  280. buf[k * linesize + 1] = color[1];
  281. buf[k * linesize + 2] = color[2];
  282. buf[k * linesize + 3] = color[3];
  283. }
  284. }
  285. }
  286. *prev_y = h;
  287. }
  288. static void draw_sample_cline_rgba_scale(uint8_t *buf, int height, int linesize,
  289. int16_t *prev_y,
  290. const uint8_t color[4], int h)
  291. {
  292. int k;
  293. const int start = (height - h) / 2;
  294. const int end = start + h;
  295. for (k = start; k < end; k++) {
  296. buf[k * linesize + 0] += color[0];
  297. buf[k * linesize + 1] += color[1];
  298. buf[k * linesize + 2] += color[2];
  299. buf[k * linesize + 3] += color[3];
  300. }
  301. }
  302. static void draw_sample_cline_rgba_full(uint8_t *buf, int height, int linesize,
  303. int16_t *prev_y,
  304. const uint8_t color[4], int h)
  305. {
  306. int k;
  307. const int start = (height - h) / 2;
  308. const int end = start + h;
  309. for (k = start; k < end; k++) {
  310. buf[k * linesize + 0] = color[0];
  311. buf[k * linesize + 1] = color[1];
  312. buf[k * linesize + 2] = color[2];
  313. buf[k * linesize + 3] = color[3];
  314. }
  315. }
  316. static void draw_sample_point_gray(uint8_t *buf, int height, int linesize,
  317. int16_t *prev_y,
  318. const uint8_t color[4], int h)
  319. {
  320. if (h >= 0 && h < height)
  321. buf[h * linesize] += color[0];
  322. }
  323. static void draw_sample_line_gray(uint8_t *buf, int height, int linesize,
  324. int16_t *prev_y,
  325. const uint8_t color[4], int h)
  326. {
  327. int k;
  328. int start = height/2;
  329. int end = av_clip(h, 0, height-1);
  330. if (start > end)
  331. FFSWAP(int16_t, start, end);
  332. for (k = start; k < end; k++)
  333. buf[k * linesize] += color[0];
  334. }
  335. static void draw_sample_p2p_gray(uint8_t *buf, int height, int linesize,
  336. int16_t *prev_y,
  337. const uint8_t color[4], int h)
  338. {
  339. int k;
  340. if (h >= 0 && h < height) {
  341. buf[h * linesize] += color[0];
  342. if (*prev_y && h != *prev_y) {
  343. int start = *prev_y;
  344. int end = av_clip(h, 0, height-1);
  345. if (start > end)
  346. FFSWAP(int16_t, start, end);
  347. for (k = start + 1; k < end; k++)
  348. buf[k * linesize] += color[0];
  349. }
  350. }
  351. *prev_y = h;
  352. }
  353. static void draw_sample_cline_gray(uint8_t *buf, int height, int linesize,
  354. int16_t *prev_y,
  355. const uint8_t color[4], int h)
  356. {
  357. int k;
  358. const int start = (height - h) / 2;
  359. const int end = start + h;
  360. for (k = start; k < end; k++)
  361. buf[k * linesize] += color[0];
  362. }
/**
 * Configure the video output: compute samples-per-column (n), allocate
 * per-channel state, select the draw_sample/get_h implementations matching
 * the negotiated pixel format, mode, scale and draw mode, and pre-scale the
 * per-channel foreground colors.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int nb_channels = inlink->channels;
    char *colors, *saveptr = NULL;
    uint8_t x;
    int ch;

    /* showwavespic integrates over the whole stream in push_single_pic();
     * every sample is drawn, so force n = 1 */
    if (showwaves->single_pic)
        showwaves->n = 1;

    /* n == 0 means auto: round(sample_rate / (w * rate)) samples per column */
    if (!showwaves->n)
        showwaves->n = FFMAX(1, ((double)inlink->sample_rate / (showwaves->w * av_q2d(showwaves->rate))) + 0.5);

    showwaves->buf_idx = 0;
    if (!(showwaves->buf_idy = av_mallocz_array(nb_channels, sizeof(*showwaves->buf_idy)))) {
        av_log(ctx, AV_LOG_ERROR, "Could not allocate showwaves buffer\n");
        return AVERROR(ENOMEM);
    }
    outlink->w = showwaves->w;
    outlink->h = showwaves->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};

    /* effective frame rate after rounding n: sample_rate / (n * w) */
    outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n},
                                   (AVRational){showwaves->w,1});

    av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n",
           showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n);

    /* pick the per-sample drawing routine for the negotiated pixel format;
     * RGBA additionally honors the draw mode (additive blend vs overwrite) */
    switch (outlink->format) {
    case AV_PIX_FMT_GRAY8:
        switch (showwaves->mode) {
        case MODE_POINT:         showwaves->draw_sample = draw_sample_point_gray; break;
        case MODE_LINE:          showwaves->draw_sample = draw_sample_line_gray;  break;
        case MODE_P2P:           showwaves->draw_sample = draw_sample_p2p_gray;   break;
        case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline_gray; break;
        default:
            return AVERROR_BUG;
        }
        showwaves->pixstep = 1;
        break;
    case AV_PIX_FMT_RGBA:
        switch (showwaves->mode) {
        case MODE_POINT:         showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_point_rgba_scale : draw_sample_point_rgba_full; break;
        case MODE_LINE:          showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_line_rgba_scale  : draw_sample_line_rgba_full;  break;
        case MODE_P2P:           showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_p2p_rgba_scale   : draw_sample_p2p_rgba_full;   break;
        case MODE_CENTERED_LINE: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_cline_rgba_scale : draw_sample_cline_rgba_full; break;
        default:
            return AVERROR_BUG;
        }
        showwaves->pixstep = 4;
        break;
    }

    /* pick the amplitude -> height mapping; point/line/p2p use the signed
     * variants (around mid-height), centered line uses the magnitude-only
     * *_h2 variants */
    switch (showwaves->scale) {
    case SCALE_LIN:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_lin_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_lin_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    case SCALE_LOG:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_log_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_log_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    case SCALE_SQRT:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_sqrt_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_sqrt_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    case SCALE_CBRT:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_cbrt_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_cbrt_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    }

    showwaves->fg = av_malloc_array(nb_channels, 4 * sizeof(*showwaves->fg));
    if (!showwaves->fg)
        return AVERROR(ENOMEM);

    colors = av_strdup(showwaves->colors);
    if (!colors)
        return AVERROR(ENOMEM);

    if (showwaves->draw_mode == DRAW_SCALE) {
        /* multiplication factor, pre-computed to avoid in-loop divisions */
        x = 255 / ((showwaves->split_channels ? 1 : nb_channels) * showwaves->n);
    } else {
        x = 255;
    }
    if (outlink->format == AV_PIX_FMT_RGBA) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };

        /* parse one color per channel; channels beyond the list reuse the
         * last parsed color (fg keeps its previous value) */
        for (ch = 0; ch < nb_channels; ch++) {
            char *color;

            color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
            if (color)
                av_parse_color(fg, color, -1, ctx);
            showwaves->fg[4*ch + 0] = fg[0] * x / 255.;
            showwaves->fg[4*ch + 1] = fg[1] * x / 255.;
            showwaves->fg[4*ch + 2] = fg[2] * x / 255.;
            showwaves->fg[4*ch + 3] = fg[3] * x / 255.;
        }
    } else {
        for (ch = 0; ch < nb_channels; ch++)
            showwaves->fg[4 * ch + 0] = x;
    }
    av_free(colors);
    return 0;
}
  485. inline static int push_frame(AVFilterLink *outlink)
  486. {
  487. AVFilterContext *ctx = outlink->src;
  488. AVFilterLink *inlink = ctx->inputs[0];
  489. ShowWavesContext *showwaves = outlink->src->priv;
  490. int nb_channels = inlink->channels;
  491. int ret, i;
  492. ret = ff_filter_frame(outlink, showwaves->outpicref);
  493. showwaves->outpicref = NULL;
  494. showwaves->buf_idx = 0;
  495. for (i = 0; i < nb_channels; i++)
  496. showwaves->buf_idy[i] = 0;
  497. return ret;
  498. }
  499. static int push_single_pic(AVFilterLink *outlink)
  500. {
  501. AVFilterContext *ctx = outlink->src;
  502. AVFilterLink *inlink = ctx->inputs[0];
  503. ShowWavesContext *showwaves = ctx->priv;
  504. int64_t n = 0, max_samples = showwaves->total_samples / outlink->w;
  505. AVFrame *out = showwaves->outpicref;
  506. struct frame_node *node;
  507. const int nb_channels = inlink->channels;
  508. const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
  509. const int linesize = out->linesize[0];
  510. const int pixstep = showwaves->pixstep;
  511. int col = 0;
  512. int64_t *sum = showwaves->sum;
  513. if (max_samples == 0) {
  514. av_log(ctx, AV_LOG_ERROR, "Too few samples\n");
  515. return AVERROR(EINVAL);
  516. }
  517. av_log(ctx, AV_LOG_DEBUG, "Create frame averaging %"PRId64" samples per column\n", max_samples);
  518. memset(sum, 0, nb_channels);
  519. for (node = showwaves->audio_frames; node; node = node->next) {
  520. int i;
  521. const AVFrame *frame = node->frame;
  522. const int16_t *p = (const int16_t *)frame->data[0];
  523. for (i = 0; i < frame->nb_samples; i++) {
  524. int ch;
  525. for (ch = 0; ch < nb_channels; ch++)
  526. sum[ch] += abs(p[ch + i*nb_channels]) << 1;
  527. if (n++ == max_samples) {
  528. for (ch = 0; ch < nb_channels; ch++) {
  529. int16_t sample = sum[ch] / max_samples;
  530. uint8_t *buf = out->data[0] + col * pixstep;
  531. int h;
  532. if (showwaves->split_channels)
  533. buf += ch*ch_height*linesize;
  534. av_assert0(col < outlink->w);
  535. h = showwaves->get_h(sample, ch_height);
  536. showwaves->draw_sample(buf, ch_height, linesize, &showwaves->buf_idy[ch], &showwaves->fg[ch * 4], h);
  537. sum[ch] = 0;
  538. }
  539. col++;
  540. n = 0;
  541. }
  542. }
  543. }
  544. return push_frame(outlink);
  545. }
  546. static int request_frame(AVFilterLink *outlink)
  547. {
  548. ShowWavesContext *showwaves = outlink->src->priv;
  549. AVFilterLink *inlink = outlink->src->inputs[0];
  550. int ret;
  551. ret = ff_request_frame(inlink);
  552. if (ret == AVERROR_EOF && showwaves->outpicref) {
  553. if (showwaves->single_pic)
  554. push_single_pic(outlink);
  555. else
  556. push_frame(outlink);
  557. }
  558. return ret;
  559. }
  560. static int alloc_out_frame(ShowWavesContext *showwaves, const int16_t *p,
  561. const AVFilterLink *inlink, AVFilterLink *outlink,
  562. const AVFrame *in)
  563. {
  564. if (!showwaves->outpicref) {
  565. int j;
  566. AVFrame *out = showwaves->outpicref =
  567. ff_get_video_buffer(outlink, outlink->w, outlink->h);
  568. if (!out)
  569. return AVERROR(ENOMEM);
  570. out->width = outlink->w;
  571. out->height = outlink->h;
  572. out->pts = in->pts + av_rescale_q((p - (int16_t *)in->data[0]) / inlink->channels,
  573. av_make_q(1, inlink->sample_rate),
  574. outlink->time_base);
  575. for (j = 0; j < outlink->h; j++)
  576. memset(out->data[0] + j*out->linesize[0], 0, outlink->w * showwaves->pixstep);
  577. }
  578. return 0;
  579. }
  580. static av_cold int init(AVFilterContext *ctx)
  581. {
  582. ShowWavesContext *showwaves = ctx->priv;
  583. if (!strcmp(ctx->filter->name, "showwavespic")) {
  584. showwaves->single_pic = 1;
  585. showwaves->mode = MODE_CENTERED_LINE;
  586. }
  587. return 0;
  588. }
  589. #if CONFIG_SHOWWAVES_FILTER
/**
 * Draw each incoming s16 sample into the pending output picture; a video
 * frame is emitted every time all w columns have been filled. Consumes
 * (frees) insamples in all cases.
 */
static int showwaves_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    const int nb_samples = insamples->nb_samples;
    AVFrame *outpicref = showwaves->outpicref;
    int16_t *p = (int16_t *)insamples->data[0];
    int nb_channels = inlink->channels;
    int i, j, ret = 0;
    const int pixstep = showwaves->pixstep;
    const int n = showwaves->n;
    const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;

    /* draw data in the buffer */
    for (i = 0; i < nb_samples; i++) {
        /* (re)allocate the output picture lazily — needed on the first
         * sample and again after each push_frame() */
        ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
        if (ret < 0)
            goto end;
        outpicref = showwaves->outpicref;

        for (j = 0; j < nb_channels; j++) {
            uint8_t *buf = outpicref->data[0] + showwaves->buf_idx * pixstep;
            const int linesize = outpicref->linesize[0];
            int h;

            /* in split mode each channel draws into its own band */
            if (showwaves->split_channels)
                buf += j*ch_height*linesize;
            h = showwaves->get_h(*p++, ch_height);
            showwaves->draw_sample(buf, ch_height, linesize,
                                   &showwaves->buf_idy[j], &showwaves->fg[j * 4], h);
        }

        /* advance one output column every n samples */
        showwaves->sample_count_mod++;
        if (showwaves->sample_count_mod == n) {
            showwaves->sample_count_mod = 0;
            showwaves->buf_idx++;
        }
        /* picture full: push it downstream (push_frame resets buf_idx) */
        if (showwaves->buf_idx == showwaves->w)
            if ((ret = push_frame(outlink)) < 0)
                break;
        outpicref = showwaves->outpicref;
    }

end:
    av_frame_free(&insamples);
    return ret;
}
/* Audio input pad: every incoming frame is rendered by showwaves_filter_frame(). */
static const AVFilterPad showwaves_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = showwaves_filter_frame,
    },
    { NULL }
};
/* Video output pad: geometry/callbacks set up in config_output(),
 * frames pulled via request_frame(). */
static const AVFilterPad showwaves_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
/* showwaves: streaming audio -> waveform video filter. */
AVFilter ff_avf_showwaves = {
    .name          = "showwaves",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowWavesContext),
    .inputs        = showwaves_inputs,
    .outputs       = showwaves_outputs,
    .priv_class    = &showwaves_class,
};
  661. #endif // CONFIG_SHOWWAVES_FILTER
  662. #if CONFIG_SHOWWAVESPIC_FILTER
  663. #define OFFSET(x) offsetof(ShowWavesContext, x)
  664. #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
/* Options for showwavespic (single picture): subset of the showwaves
 * options — mode, n, rate and draw are fixed by the variant. */
static const AVOption showwavespic_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
    { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
        { "lin",  "linear",      0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN},  .flags=FLAGS, .unit="scale"},
        { "log",  "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG},  .flags=FLAGS, .unit="scale"},
        { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
        { "cbrt", "cubic root",  0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
    { NULL }
};
  677. AVFILTER_DEFINE_CLASS(showwavespic);
  678. static int showwavespic_config_input(AVFilterLink *inlink)
  679. {
  680. AVFilterContext *ctx = inlink->dst;
  681. ShowWavesContext *showwaves = ctx->priv;
  682. if (showwaves->single_pic) {
  683. showwaves->sum = av_mallocz_array(inlink->channels, sizeof(*showwaves->sum));
  684. if (!showwaves->sum)
  685. return AVERROR(ENOMEM);
  686. }
  687. return 0;
  688. }
/**
 * Queue the incoming audio frame; actual drawing is deferred to
 * push_single_pic() at EOF. On success ownership of insamples moves into
 * the queue; on any error (or if single_pic is unset) it is freed here.
 */
static int showwavespic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int16_t *p = (int16_t *)insamples->data[0];
    int ret = 0;

    if (showwaves->single_pic) {
        struct frame_node *f;

        /* make sure the (single) output picture exists */
        ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
        if (ret < 0)
            goto end;

        /* queue the audio frame */
        f = av_malloc(sizeof(*f));
        if (!f) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        f->frame = insamples;
        f->next  = NULL;
        if (!showwaves->last_frame) {
            showwaves->audio_frames =
            showwaves->last_frame   = f;
        } else {
            showwaves->last_frame->next = f;
            showwaves->last_frame = f;
        }
        showwaves->total_samples += insamples->nb_samples;

        /* insamples now owned by the queue — do not free */
        return 0;
    }

end:
    av_frame_free(&insamples);
    return ret;
}
/* Audio input pad: frames are queued by showwavespic_filter_frame();
 * per-channel accumulators are allocated in showwavespic_config_input(). */
static const AVFilterPad showwavespic_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = showwavespic_config_input,
        .filter_frame = showwavespic_filter_frame,
    },
    { NULL }
};
/* Video output pad: the single picture is produced at EOF via
 * request_frame() -> push_single_pic(). */
static const AVFilterPad showwavespic_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
/* showwavespic: whole-stream audio -> single waveform picture. */
AVFilter ff_avf_showwavespic = {
    .name          = "showwavespic",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a video output single picture."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowWavesContext),
    .inputs        = showwavespic_inputs,
    .outputs       = showwavespic_outputs,
    .priv_class    = &showwavespic_class,
};
  752. #endif // CONFIG_SHOWWAVESPIC_FILTER