You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

671 lines
22KB

  1. /*
  2. * Copyright (c) 2012 Stefano Sabatini
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * @file
  22. * audio to video multimedia filter
  23. */
  24. #include "libavutil/avassert.h"
  25. #include "libavutil/avstring.h"
  26. #include "libavutil/channel_layout.h"
  27. #include "libavutil/opt.h"
  28. #include "libavutil/parseutils.h"
  29. #include "avfilter.h"
  30. #include "formats.h"
  31. #include "audio.h"
  32. #include "video.h"
  33. #include "internal.h"
/* Waveform rendering modes selected by the "mode" option. */
enum ShowWavesMode {
    MODE_POINT,         ///< one pixel per sample
    MODE_LINE,          ///< vertical line from the center to each sample
    MODE_P2P,           ///< line connecting successive samples
    MODE_CENTERED_LINE, ///< line of height |sample|, centered vertically
    MODE_NB,            ///< number of modes, must be last
};
/* Node of the singly linked list used by showwavespic to queue every
 * input audio frame until EOF, when the single output picture is drawn. */
struct frame_node {
    AVFrame *frame;          ///< queued audio frame (owned by the list)
    struct frame_node *next; ///< next queued frame, NULL at the tail
};
typedef struct {
    const AVClass *class;
    int w, h;            ///< output frame size ("size" option; w also aliased by "s")
    AVRational rate;     ///< output frame rate ("rate" option)
    char *colors;        ///< '|'- or space-separated per-channel color names
    int buf_idx;         ///< next output column to draw into
    int16_t *buf_idy;    /* y coordinate of previous sample for each channel */
    AVFrame *outpicref;  ///< video frame currently being filled, NULL between frames
    int n;               ///< number of input samples aggregated per output column
    int pixstep;         ///< bytes per pixel (1 for gray8, 4 for rgba)
    int sample_count_mod; ///< samples consumed so far within the current column
    int mode;            ///< ShowWavesMode
    int split_channels;  ///< draw each channel in its own horizontal band
    uint8_t *fg;         ///< per-channel foreground color, 4 bytes each, pre-scaled
    /* per-format/per-mode sample renderer, chosen in config_output() */
    void (*draw_sample)(uint8_t *buf, int height, int linesize,
                        int16_t sample, int16_t *prev_y, const uint8_t color[4]);

    /* single picture */
    int single_pic;                  ///< set when running as "showwavespic"
    struct frame_node *audio_frames; ///< head of the queued-frame list
    struct frame_node *last_frame;   ///< tail of the queued-frame list
    int64_t total_samples;           ///< sum of nb_samples over queued frames
    int64_t *sum;        /* abs sum of the samples per channel */
} ShowWavesContext;
#define OFFSET(x) offsetof(ShowWavesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* Options for the streaming "showwaves" filter; the picture variant
 * declares its own, smaller table below. */
static const AVOption showwaves_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
        { "point", "draw a point for each sample",         0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT},         .flags=FLAGS, .unit="mode"},
        { "line",  "draw a line for each sample",          0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE},          .flags=FLAGS, .unit="mode"},
        { "p2p",   "draw a line between samples",          0, AV_OPT_TYPE_CONST, {.i64=MODE_P2P},           .flags=FLAGS, .unit="mode"},
        { "cline", "draw a centered line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_CENTERED_LINE}, .flags=FLAGS, .unit="mode"},
    /* n == 0 means "derive from size and rate" in config_output() */
    { "n",    "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
    { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
    { "r",    "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
    { NULL }
};
  85. AVFILTER_DEFINE_CLASS(showwaves);
  86. static av_cold void uninit(AVFilterContext *ctx)
  87. {
  88. ShowWavesContext *showwaves = ctx->priv;
  89. av_frame_free(&showwaves->outpicref);
  90. av_freep(&showwaves->buf_idy);
  91. av_freep(&showwaves->fg);
  92. if (showwaves->single_pic) {
  93. struct frame_node *node = showwaves->audio_frames;
  94. while (node) {
  95. struct frame_node *tmp = node;
  96. node = node->next;
  97. av_frame_free(&tmp->frame);
  98. av_freep(&tmp);
  99. }
  100. av_freep(&showwaves->sum);
  101. showwaves->last_frame = NULL;
  102. }
  103. }
/**
 * Negotiate formats: s16 packed audio at any rate/layout on the input,
 * RGBA or GRAY8 video on the output.
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
    int ret;

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
        return ret;

    layouts = ff_all_channel_layouts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
        return ret;

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
        return ret;

    return 0;
}
  129. static void draw_sample_point_rgba(uint8_t *buf, int height, int linesize,
  130. int16_t sample, int16_t *prev_y,
  131. const uint8_t color[4])
  132. {
  133. const int h = height/2 - av_rescale(sample, height/2, INT16_MAX);
  134. if (h >= 0 && h < height) {
  135. buf[h * linesize + 0] += color[0];
  136. buf[h * linesize + 1] += color[1];
  137. buf[h * linesize + 2] += color[2];
  138. buf[h * linesize + 3] += color[3];
  139. }
  140. }
  141. static void draw_sample_line_rgba(uint8_t *buf, int height, int linesize,
  142. int16_t sample, int16_t *prev_y,
  143. const uint8_t color[4])
  144. {
  145. int k;
  146. const int h = height/2 - av_rescale(sample, height/2, INT16_MAX);
  147. int start = height/2;
  148. int end = av_clip(h, 0, height-1);
  149. if (start > end)
  150. FFSWAP(int16_t, start, end);
  151. for (k = start; k < end; k++) {
  152. buf[k * linesize + 0] += color[0];
  153. buf[k * linesize + 1] += color[1];
  154. buf[k * linesize + 2] += color[2];
  155. buf[k * linesize + 3] += color[3];
  156. }
  157. }
/**
 * Draw the sample's RGBA pixel and connect it with a vertical line to the
 * previous sample's row (*prev_y), giving a continuous trace.
 * NOTE(review): *prev_y == 0 is treated as "no previous sample", so a
 * previous sample that landed exactly on row 0 draws no connecting line.
 */
static void draw_sample_p2p_rgba(uint8_t *buf, int height, int linesize,
                                 int16_t sample, int16_t *prev_y,
                                 const uint8_t color[4])
{
    int k;
    const int h = height/2 - av_rescale(sample, height/2, INT16_MAX);

    if (h >= 0 && h < height) {
        buf[h * linesize + 0] += color[0];
        buf[h * linesize + 1] += color[1];
        buf[h * linesize + 2] += color[2];
        buf[h * linesize + 3] += color[3];
        if (*prev_y && h != *prev_y) {
            int start = *prev_y;
            int end = av_clip(h, 0, height-1);
            if (start > end)
                FFSWAP(int16_t, start, end);
            /* start+1: the endpoints themselves are drawn as points */
            for (k = start + 1; k < end; k++) {
                buf[k * linesize + 0] += color[0];
                buf[k * linesize + 1] += color[1];
                buf[k * linesize + 2] += color[2];
                buf[k * linesize + 3] += color[3];
            }
        }
    }
    /* remember this row for the next sample of the same channel */
    *prev_y = h;
}
  184. static void draw_sample_cline_rgba(uint8_t *buf, int height, int linesize,
  185. int16_t sample, int16_t *prev_y,
  186. const uint8_t color[4])
  187. {
  188. int k;
  189. const int h = av_rescale(abs(sample), height, INT16_MAX);
  190. const int start = (height - h) / 2;
  191. const int end = start + h;
  192. for (k = start; k < end; k++) {
  193. buf[k * linesize + 0] += color[0];
  194. buf[k * linesize + 1] += color[1];
  195. buf[k * linesize + 2] += color[2];
  196. buf[k * linesize + 3] += color[3];
  197. }
  198. }
  199. static void draw_sample_point_gray(uint8_t *buf, int height, int linesize,
  200. int16_t sample, int16_t *prev_y,
  201. const uint8_t color[4])
  202. {
  203. const int h = height/2 - av_rescale(sample, height/2, INT16_MAX);
  204. if (h >= 0 && h < height)
  205. buf[h * linesize] += color[0];
  206. }
  207. static void draw_sample_line_gray(uint8_t *buf, int height, int linesize,
  208. int16_t sample, int16_t *prev_y,
  209. const uint8_t color[4])
  210. {
  211. int k;
  212. const int h = height/2 - av_rescale(sample, height/2, INT16_MAX);
  213. int start = height/2;
  214. int end = av_clip(h, 0, height-1);
  215. if (start > end)
  216. FFSWAP(int16_t, start, end);
  217. for (k = start; k < end; k++)
  218. buf[k * linesize] += color[0];
  219. }
/**
 * Gray variant of draw_sample_p2p_rgba: plot the sample's pixel and a
 * vertical line connecting it to the previous sample's row.
 * NOTE(review): *prev_y == 0 is treated as "no previous sample".
 */
static void draw_sample_p2p_gray(uint8_t *buf, int height, int linesize,
                                 int16_t sample, int16_t *prev_y,
                                 const uint8_t color[4])
{
    int k;
    const int h = height/2 - av_rescale(sample, height/2, INT16_MAX);

    if (h >= 0 && h < height) {
        buf[h * linesize] += color[0];
        if (*prev_y && h != *prev_y) {
            int start = *prev_y;
            int end = av_clip(h, 0, height-1);
            if (start > end)
                FFSWAP(int16_t, start, end);
            /* endpoints are drawn as points, so start at start+1 */
            for (k = start + 1; k < end; k++)
                buf[k * linesize] += color[0];
        }
    }
    *prev_y = h;
}
  239. static void draw_sample_cline_gray(uint8_t *buf, int height, int linesize,
  240. int16_t sample, int16_t *prev_y,
  241. const uint8_t color[4])
  242. {
  243. int k;
  244. const int h = av_rescale(abs(sample), height, INT16_MAX);
  245. const int start = (height - h) / 2;
  246. const int end = start + h;
  247. for (k = start; k < end; k++)
  248. buf[k * linesize] += color[0];
  249. }
/**
 * Configure the video output: derive samples-per-column (n), allocate the
 * per-channel state, set output geometry/rate, pick the draw callback for
 * the negotiated pixel format, and pre-scale the per-channel colors so the
 * additive drawing saturates at 255 after n samples.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int nb_channels = inlink->channels;
    char *colors, *saveptr = NULL;
    uint8_t x;
    int ch;

    /* showwavespic averages in push_single_pic(), so draw 1 sample/column */
    if (showwaves->single_pic)
        showwaves->n = 1;

    /* n == 0: derive from sample rate, width and frame rate (rounded) */
    if (!showwaves->n)
        showwaves->n = FFMAX(1, ((double)inlink->sample_rate / (showwaves->w * av_q2d(showwaves->rate))) + 0.5);

    showwaves->buf_idx = 0;
    if (!(showwaves->buf_idy = av_mallocz_array(nb_channels, sizeof(*showwaves->buf_idy)))) {
        av_log(ctx, AV_LOG_ERROR, "Could not allocate showwaves buffer\n");
        return AVERROR(ENOMEM);
    }
    outlink->w = showwaves->w;
    outlink->h = showwaves->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};

    /* actual rate = sample_rate / (n * w); may differ from the requested one */
    outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n},
                                   (AVRational){showwaves->w,1});

    av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n",
           showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n);

    switch (outlink->format) {
    case AV_PIX_FMT_GRAY8:
        switch (showwaves->mode) {
        case MODE_POINT:         showwaves->draw_sample = draw_sample_point_gray; break;
        case MODE_LINE:          showwaves->draw_sample = draw_sample_line_gray;  break;
        case MODE_P2P:           showwaves->draw_sample = draw_sample_p2p_gray;   break;
        case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline_gray; break;
        default:
            return AVERROR_BUG;
        }
        showwaves->pixstep = 1;
        break;
    case AV_PIX_FMT_RGBA:
        switch (showwaves->mode) {
        case MODE_POINT:         showwaves->draw_sample = draw_sample_point_rgba; break;
        case MODE_LINE:          showwaves->draw_sample = draw_sample_line_rgba;  break;
        case MODE_P2P:           showwaves->draw_sample = draw_sample_p2p_rgba;   break;
        case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline_rgba; break;
        default:
            return AVERROR_BUG;
        }
        showwaves->pixstep = 4;
        break;
    }

    showwaves->fg = av_malloc_array(nb_channels, 4 * sizeof(*showwaves->fg));
    if (!showwaves->fg)
        return AVERROR(ENOMEM);

    colors = av_strdup(showwaves->colors);
    if (!colors)
        return AVERROR(ENOMEM);

    /* multiplication factor, pre-computed to avoid in-loop divisions */
    x = 255 / ((showwaves->split_channels ? 1 : nb_channels) * showwaves->n);
    if (outlink->format == AV_PIX_FMT_RGBA) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };

        for (ch = 0; ch < nb_channels; ch++) {
            char *color;

            /* channels beyond the color list reuse the last parsed color */
            color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
            if (color)
                av_parse_color(fg, color, -1, ctx);
            showwaves->fg[4*ch + 0] = fg[0] * x / 255.;
            showwaves->fg[4*ch + 1] = fg[1] * x / 255.;
            showwaves->fg[4*ch + 2] = fg[2] * x / 255.;
            showwaves->fg[4*ch + 3] = fg[3] * x / 255.;
        }
    } else {
        for (ch = 0; ch < nb_channels; ch++)
            showwaves->fg[4 * ch + 0] = x;
    }
    av_free(colors);
    return 0;
}
  326. inline static int push_frame(AVFilterLink *outlink)
  327. {
  328. AVFilterContext *ctx = outlink->src;
  329. AVFilterLink *inlink = ctx->inputs[0];
  330. ShowWavesContext *showwaves = outlink->src->priv;
  331. int nb_channels = inlink->channels;
  332. int ret, i;
  333. ret = ff_filter_frame(outlink, showwaves->outpicref);
  334. showwaves->outpicref = NULL;
  335. showwaves->buf_idx = 0;
  336. for (i = 0; i < nb_channels; i++)
  337. showwaves->buf_idy[i] = 0;
  338. return ret;
  339. }
  340. static int push_single_pic(AVFilterLink *outlink)
  341. {
  342. AVFilterContext *ctx = outlink->src;
  343. AVFilterLink *inlink = ctx->inputs[0];
  344. ShowWavesContext *showwaves = ctx->priv;
  345. int64_t n = 0, max_samples = showwaves->total_samples / outlink->w;
  346. AVFrame *out = showwaves->outpicref;
  347. struct frame_node *node;
  348. const int nb_channels = inlink->channels;
  349. const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
  350. const int linesize = out->linesize[0];
  351. const int pixstep = showwaves->pixstep;
  352. int col = 0;
  353. int64_t *sum = showwaves->sum;
  354. if (max_samples == 0) {
  355. av_log(ctx, AV_LOG_ERROR, "Too few samples\n");
  356. return AVERROR(EINVAL);
  357. }
  358. av_log(ctx, AV_LOG_DEBUG, "Create frame averaging %"PRId64" samples per column\n", max_samples);
  359. memset(sum, 0, nb_channels);
  360. for (node = showwaves->audio_frames; node; node = node->next) {
  361. int i;
  362. const AVFrame *frame = node->frame;
  363. const int16_t *p = (const int16_t *)frame->data[0];
  364. for (i = 0; i < frame->nb_samples; i++) {
  365. int ch;
  366. for (ch = 0; ch < nb_channels; ch++)
  367. sum[ch] += abs(p[ch + i*nb_channels]) << 1;
  368. if (n++ == max_samples) {
  369. for (ch = 0; ch < nb_channels; ch++) {
  370. int16_t sample = sum[ch] / max_samples;
  371. uint8_t *buf = out->data[0] + col * pixstep;
  372. if (showwaves->split_channels)
  373. buf += ch*ch_height*linesize;
  374. av_assert0(col < outlink->w);
  375. showwaves->draw_sample(buf, ch_height, linesize, sample, &showwaves->buf_idy[ch], &showwaves->fg[ch * 4]);
  376. sum[ch] = 0;
  377. }
  378. col++;
  379. n = 0;
  380. }
  381. }
  382. }
  383. return push_frame(outlink);
  384. }
  385. static int request_frame(AVFilterLink *outlink)
  386. {
  387. ShowWavesContext *showwaves = outlink->src->priv;
  388. AVFilterLink *inlink = outlink->src->inputs[0];
  389. int ret;
  390. ret = ff_request_frame(inlink);
  391. if (ret == AVERROR_EOF && showwaves->outpicref) {
  392. if (showwaves->single_pic)
  393. push_single_pic(outlink);
  394. else
  395. push_frame(outlink);
  396. }
  397. return ret;
  398. }
/**
 * Lazily allocate and zero the next output video frame, if none is pending.
 * The frame's pts is derived from the position of @p within @in's data,
 * i.e. the timestamp of the first sample that will land in this frame.
 * Returns 0 or AVERROR(ENOMEM).
 */
static int alloc_out_frame(ShowWavesContext *showwaves, const int16_t *p,
                           const AVFilterLink *inlink, AVFilterLink *outlink,
                           const AVFrame *in)
{
    if (!showwaves->outpicref) {
        int j;
        AVFrame *out = showwaves->outpicref =
            ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        out->width  = outlink->w;
        out->height = outlink->h;
        /* offset of p into the input, in frames, rescaled to the output tb */
        out->pts = in->pts + av_rescale_q((p - (int16_t *)in->data[0]) / inlink->channels,
                                          av_make_q(1, inlink->sample_rate),
                                          outlink->time_base);
        /* clear the canvas; drawing is additive */
        for (j = 0; j < outlink->h; j++)
            memset(out->data[0] + j*out->linesize[0], 0, outlink->w * showwaves->pixstep);
    }
    return 0;
}
  419. static av_cold int init(AVFilterContext *ctx)
  420. {
  421. ShowWavesContext *showwaves = ctx->priv;
  422. if (!strcmp(ctx->filter->name, "showwavespic")) {
  423. showwaves->single_pic = 1;
  424. showwaves->mode = MODE_CENTERED_LINE;
  425. }
  426. return 0;
  427. }
  428. #if CONFIG_SHOWWAVES_FILTER
/**
 * Streaming path: draw every input sample into the current output frame,
 * advancing one column every n samples and pushing the frame once all
 * w columns are filled. Takes ownership of (and frees) @insamples.
 */
static int showwaves_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    const int nb_samples = insamples->nb_samples;
    AVFrame *outpicref = showwaves->outpicref;
    int16_t *p = (int16_t *)insamples->data[0];
    int nb_channels = inlink->channels;
    int i, j, ret = 0;
    const int pixstep = showwaves->pixstep;
    const int n = showwaves->n;
    const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;

    /* draw data in the buffer */
    for (i = 0; i < nb_samples; i++) {

        /* (re)allocate the output frame if the previous one was pushed */
        ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
        if (ret < 0)
            goto end;
        outpicref = showwaves->outpicref;

        for (j = 0; j < nb_channels; j++) {
            uint8_t *buf = outpicref->data[0] + showwaves->buf_idx * pixstep;
            const int linesize = outpicref->linesize[0];

            if (showwaves->split_channels)
                buf += j*ch_height*linesize;
            /* p walks the packed s16 data: one sample per channel per step */
            showwaves->draw_sample(buf, ch_height, linesize, *p++,
                                   &showwaves->buf_idy[j], &showwaves->fg[j * 4]);
        }

        /* advance to the next column every n samples */
        showwaves->sample_count_mod++;
        if (showwaves->sample_count_mod == n) {
            showwaves->sample_count_mod = 0;
            showwaves->buf_idx++;
        }
        if (showwaves->buf_idx == showwaves->w)
            if ((ret = push_frame(outlink)) < 0)
                break;
        /* push_frame cleared outpicref; refresh the local alias */
        outpicref = showwaves->outpicref;
    }

end:
    av_frame_free(&insamples);
    return ret;
}
/* Pad and filter definitions for the streaming "showwaves" filter. */
static const AVFilterPad showwaves_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = showwaves_filter_frame,
    },
    { NULL }
};

static const AVFilterPad showwaves_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showwaves = {
    .name          = "showwaves",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowWavesContext),
    .inputs        = showwaves_inputs,
    .outputs       = showwaves_outputs,
    .priv_class    = &showwaves_class,
};

#endif // CONFIG_SHOWWAVES_FILTER
#if CONFIG_SHOWWAVESPIC_FILTER

/* Identical redefinitions of the option helpers (benign per C11 6.10.3p2);
 * kept so this half of the file stands alone under its config guard. */
#define OFFSET(x) offsetof(ShowWavesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* showwavespic has no mode/rate/n options: it always uses cline and n=1. */
static const AVOption showwavespic_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showwavespic);
  510. static int showwavespic_config_input(AVFilterLink *inlink)
  511. {
  512. AVFilterContext *ctx = inlink->dst;
  513. ShowWavesContext *showwaves = ctx->priv;
  514. if (showwaves->single_pic) {
  515. showwaves->sum = av_mallocz_array(inlink->channels, sizeof(*showwaves->sum));
  516. if (!showwaves->sum)
  517. return AVERROR(ENOMEM);
  518. }
  519. return 0;
  520. }
/**
 * Picture path: queue the whole input frame (ownership is kept by the
 * queue on success — note the early return 0 that skips the free) and
 * accumulate the sample count. Drawing is deferred to push_single_pic().
 */
static int showwavespic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int16_t *p = (int16_t *)insamples->data[0];
    int ret = 0;

    if (showwaves->single_pic) {
        struct frame_node *f;

        /* allocate the (single) output canvas on first audio frame */
        ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
        if (ret < 0)
            goto end;

        /* queue the audio frame */
        f = av_malloc(sizeof(*f));
        if (!f) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        f->frame = insamples;
        f->next  = NULL;

        /* append to the list, handling the empty-list case */
        if (!showwaves->last_frame) {
            showwaves->audio_frames =
            showwaves->last_frame   = f;
        } else {
            showwaves->last_frame->next = f;
            showwaves->last_frame = f;
        }
        showwaves->total_samples += insamples->nb_samples;

        return 0;   /* insamples is now owned by the queue */
    }

end:
    av_frame_free(&insamples);
    return ret;
}
/* Pad and filter definitions for the single-picture "showwavespic" filter. */
static const AVFilterPad showwavespic_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = showwavespic_config_input,
        .filter_frame = showwavespic_filter_frame,
    },
    { NULL }
};

static const AVFilterPad showwavespic_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showwavespic = {
    .name          = "showwavespic",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a video output single picture."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowWavesContext),
    .inputs        = showwavespic_inputs,
    .outputs       = showwavespic_outputs,
    .priv_class    = &showwavespic_class,
};

#endif // CONFIG_SHOWWAVESPIC_FILTER