/*
 * Copyright (c) 2012 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * audio to video multimedia filter
 */
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "formats.h"
#include "audio.h"
#include "video.h"
#include "internal.h"
enum ShowWavesMode {
    MODE_POINT,
    MODE_LINE,
    MODE_P2P,
    MODE_CENTERED_LINE,
    MODE_NB,
};

enum ShowWavesScale {
    SCALE_LIN,
    SCALE_LOG,
    SCALE_SQRT,
    SCALE_CBRT,
    SCALE_NB,
};

struct frame_node {
    AVFrame *frame;
    struct frame_node *next;
};

typedef struct ShowWavesContext {
    const AVClass *class;
    int w, h;
    AVRational rate;
    char *colors;
    int buf_idx;
    int16_t *buf_idy;    /* y coordinate of previous sample for each channel */
    AVFrame *outpicref;
    int n;
    int pixstep;
    int sample_count_mod;
    int mode;                   ///< ShowWavesMode
    int scale;                  ///< ShowWavesScale
    int split_channels;
    uint8_t *fg;

    int (*get_h)(int16_t sample, int height);
    void (*draw_sample)(uint8_t *buf, int height, int linesize,
                        int16_t *prev_y, const uint8_t color[4], int h);

    /* single picture */
    int single_pic;
    struct frame_node *audio_frames;
    struct frame_node *last_frame;
    int64_t total_samples;
    int64_t *sum; /* abs sum of the samples per channel */
} ShowWavesContext;
#define OFFSET(x) offsetof(ShowWavesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption showwaves_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
    { "point", "draw a point for each sample",         0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT},         .flags=FLAGS, .unit="mode"},
    { "line",  "draw a line for each sample",          0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE},          .flags=FLAGS, .unit="mode"},
    { "p2p",   "draw a line between samples",          0, AV_OPT_TYPE_CONST, {.i64=MODE_P2P},           .flags=FLAGS, .unit="mode"},
    { "cline", "draw a centered line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_CENTERED_LINE}, .flags=FLAGS, .unit="mode"},
    { "n",    "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
    { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
    { "r",    "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
    { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
    { "lin",  "linear",      0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN},  .flags=FLAGS, .unit="scale"},
    { "log",  "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG},  .flags=FLAGS, .unit="scale"},
    { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
    { "cbrt", "cubic root",  0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
    { NULL }
};

AVFILTER_DEFINE_CLASS(showwaves);
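
/*
 * Usage sketch (illustrative, not taken from this file): the options
 * above map directly to a filtergraph string, e.g. rendering a 1280x240
 * line-mode waveform with square-root amplitude scaling:
 *
 *   ffmpeg -i input.wav -filter_complex "showwaves=s=1280x240:mode=line:scale=sqrt" out.mp4
 */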
static av_cold void uninit(AVFilterContext *ctx)
{
    ShowWavesContext *showwaves = ctx->priv;

    av_frame_free(&showwaves->outpicref);
    av_freep(&showwaves->buf_idy);
    av_freep(&showwaves->fg);

    if (showwaves->single_pic) {
        struct frame_node *node = showwaves->audio_frames;
        while (node) {
            struct frame_node *tmp = node;

            node = node->next;
            av_frame_free(&tmp->frame);
            av_freep(&tmp);
        }
        av_freep(&showwaves->sum);
        showwaves->last_frame = NULL;
    }
}
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
    int ret;

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
        return ret;

    layouts = ff_all_channel_layouts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
        return ret;

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
        return ret;

    return 0;
}
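
/*
 * The get_*_h helpers below map a signed 16-bit sample to a row
 * coordinate. The plain variants return a y position around the
 * mid-line (a full-scale positive sample lands on row 0, silence on
 * height/2) and serve the point, line and p2p modes; the *_h2 variants
 * return a non-negative bar height and serve the centered-line (cline)
 * mode, as wired up in config_output() below.
 */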
static int get_lin_h(int16_t sample, int height)
{
    return height/2 - av_rescale(sample, height/2, INT16_MAX);
}

static int get_lin_h2(int16_t sample, int height)
{
    return av_rescale(FFABS(sample), height, INT16_MAX);
}

static int get_log_h(int16_t sample, int height)
{
    return height/2 - FFSIGN(sample) * (log10(1 + FFABS(sample)) * (height/2) / log10(1 + INT16_MAX));
}

static int get_log_h2(int16_t sample, int height)
{
    return log10(1 + FFABS(sample)) * height / log10(1 + INT16_MAX);
}

static int get_sqrt_h(int16_t sample, int height)
{
    return height/2 - FFSIGN(sample) * (sqrt(FFABS(sample)) * (height/2) / sqrt(INT16_MAX));
}

static int get_sqrt_h2(int16_t sample, int height)
{
    return sqrt(FFABS(sample)) * height / sqrt(INT16_MAX);
}

static int get_cbrt_h(int16_t sample, int height)
{
    return height/2 - FFSIGN(sample) * (cbrt(FFABS(sample)) * (height/2) / cbrt(INT16_MAX));
}

static int get_cbrt_h2(int16_t sample, int height)
{
    return cbrt(FFABS(sample)) * height / cbrt(INT16_MAX);
}
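
/*
 * The draw_sample_* helpers below add the foreground color into the
 * frame instead of overwriting it, so samples falling on the same pixel
 * accumulate brightness. config_output() pre-scales fg[] by
 * x = 255 / (nb_channels * n) (255 / n when split_channels is set), so
 * the accumulated value stays within a uint8_t component.
 */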
static void draw_sample_point_rgba(uint8_t *buf, int height, int linesize,
                                   int16_t *prev_y,
                                   const uint8_t color[4], int h)
{
    if (h >= 0 && h < height) {
        buf[h * linesize + 0] += color[0];
        buf[h * linesize + 1] += color[1];
        buf[h * linesize + 2] += color[2];
        buf[h * linesize + 3] += color[3];
    }
}

static void draw_sample_line_rgba(uint8_t *buf, int height, int linesize,
                                  int16_t *prev_y,
                                  const uint8_t color[4], int h)
{
    int k;
    int start = height/2;
    int end   = av_clip(h, 0, height-1);
    if (start > end)
        FFSWAP(int16_t, start, end);
    for (k = start; k < end; k++) {
        buf[k * linesize + 0] += color[0];
        buf[k * linesize + 1] += color[1];
        buf[k * linesize + 2] += color[2];
        buf[k * linesize + 3] += color[3];
    }
}

static void draw_sample_p2p_rgba(uint8_t *buf, int height, int linesize,
                                 int16_t *prev_y,
                                 const uint8_t color[4], int h)
{
    int k;
    if (h >= 0 && h < height) {
        buf[h * linesize + 0] += color[0];
        buf[h * linesize + 1] += color[1];
        buf[h * linesize + 2] += color[2];
        buf[h * linesize + 3] += color[3];
        if (*prev_y && h != *prev_y) {
            int start = *prev_y;
            int end = av_clip(h, 0, height-1);
            if (start > end)
                FFSWAP(int16_t, start, end);
            for (k = start + 1; k < end; k++) {
                buf[k * linesize + 0] += color[0];
                buf[k * linesize + 1] += color[1];
                buf[k * linesize + 2] += color[2];
                buf[k * linesize + 3] += color[3];
            }
        }
    }
    *prev_y = h;
}

static void draw_sample_cline_rgba(uint8_t *buf, int height, int linesize,
                                   int16_t *prev_y,
                                   const uint8_t color[4], int h)
{
    int k;
    const int start = (height - h) / 2;
    const int end   = start + h;
    for (k = start; k < end; k++) {
        buf[k * linesize + 0] += color[0];
        buf[k * linesize + 1] += color[1];
        buf[k * linesize + 2] += color[2];
        buf[k * linesize + 3] += color[3];
    }
}

static void draw_sample_point_gray(uint8_t *buf, int height, int linesize,
                                   int16_t *prev_y,
                                   const uint8_t color[4], int h)
{
    if (h >= 0 && h < height)
        buf[h * linesize] += color[0];
}

static void draw_sample_line_gray(uint8_t *buf, int height, int linesize,
                                  int16_t *prev_y,
                                  const uint8_t color[4], int h)
{
    int k;
    int start = height/2;
    int end   = av_clip(h, 0, height-1);
    if (start > end)
        FFSWAP(int16_t, start, end);
    for (k = start; k < end; k++)
        buf[k * linesize] += color[0];
}

static void draw_sample_p2p_gray(uint8_t *buf, int height, int linesize,
                                 int16_t *prev_y,
                                 const uint8_t color[4], int h)
{
    int k;
    if (h >= 0 && h < height) {
        buf[h * linesize] += color[0];
        if (*prev_y && h != *prev_y) {
            int start = *prev_y;
            int end = av_clip(h, 0, height-1);
            if (start > end)
                FFSWAP(int16_t, start, end);
            for (k = start + 1; k < end; k++)
                buf[k * linesize] += color[0];
        }
    }
    *prev_y = h;
}

static void draw_sample_cline_gray(uint8_t *buf, int height, int linesize,
                                   int16_t *prev_y,
                                   const uint8_t color[4], int h)
{
    int k;
    const int start = (height - h) / 2;
    const int end   = start + h;
    for (k = start; k < end; k++)
        buf[k * linesize] += color[0];
}
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int nb_channels = inlink->channels;
    char *colors, *saveptr = NULL;
    uint8_t x;
    int ch;

    if (showwaves->single_pic)
        showwaves->n = 1;

    if (!showwaves->n)
        showwaves->n = FFMAX(1, ((double)inlink->sample_rate / (showwaves->w * av_q2d(showwaves->rate))) + 0.5);

    showwaves->buf_idx = 0;
    if (!(showwaves->buf_idy = av_mallocz_array(nb_channels, sizeof(*showwaves->buf_idy)))) {
        av_log(ctx, AV_LOG_ERROR, "Could not allocate showwaves buffer\n");
        return AVERROR(ENOMEM);
    }
    outlink->w = showwaves->w;
    outlink->h = showwaves->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};

    outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n},
                                   (AVRational){showwaves->w,1});

    av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n",
           showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n);

    switch (outlink->format) {
    case AV_PIX_FMT_GRAY8:
        switch (showwaves->mode) {
        case MODE_POINT:         showwaves->draw_sample = draw_sample_point_gray; break;
        case MODE_LINE:          showwaves->draw_sample = draw_sample_line_gray;  break;
        case MODE_P2P:           showwaves->draw_sample = draw_sample_p2p_gray;   break;
        case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline_gray; break;
        default:
            return AVERROR_BUG;
        }
        showwaves->pixstep = 1;
        break;
    case AV_PIX_FMT_RGBA:
        switch (showwaves->mode) {
        case MODE_POINT:         showwaves->draw_sample = draw_sample_point_rgba; break;
        case MODE_LINE:          showwaves->draw_sample = draw_sample_line_rgba;  break;
        case MODE_P2P:           showwaves->draw_sample = draw_sample_p2p_rgba;   break;
        case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline_rgba; break;
        default:
            return AVERROR_BUG;
        }
        showwaves->pixstep = 4;
        break;
    }

    switch (showwaves->scale) {
    case SCALE_LIN:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_lin_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_lin_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    case SCALE_LOG:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_log_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_log_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    case SCALE_SQRT:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_sqrt_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_sqrt_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    case SCALE_CBRT:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_cbrt_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_cbrt_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    }

    showwaves->fg = av_malloc_array(nb_channels, 4 * sizeof(*showwaves->fg));
    if (!showwaves->fg)
        return AVERROR(ENOMEM);

    colors = av_strdup(showwaves->colors);
    if (!colors)
        return AVERROR(ENOMEM);

    /* multiplication factor, pre-computed to avoid in-loop divisions */
    x = 255 / ((showwaves->split_channels ? 1 : nb_channels) * showwaves->n);
    if (outlink->format == AV_PIX_FMT_RGBA) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };

        for (ch = 0; ch < nb_channels; ch++) {
            char *color;

            color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
            if (color)
                av_parse_color(fg, color, -1, ctx);
            showwaves->fg[4*ch + 0] = fg[0] * x / 255.;
            showwaves->fg[4*ch + 1] = fg[1] * x / 255.;
            showwaves->fg[4*ch + 2] = fg[2] * x / 255.;
            showwaves->fg[4*ch + 3] = fg[3] * x / 255.;
        }
    } else {
        for (ch = 0; ch < nb_channels; ch++)
            showwaves->fg[4 * ch + 0] = x;
    }
    av_free(colors);

    return 0;
}
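
/*
 * Worked example (illustrative numbers, not from this file): with a
 * 44100 Hz input and the defaults w=600, rate=25, the code above picks
 * n = (int)(44100 / (600 * 25) + 0.5) = 3 samples per column, and the
 * actual frame rate becomes (44100 / 3) / 600 = 24.5 fps; this is why
 * outlink->frame_rate is recomputed from n rather than taken verbatim
 * from the "rate" option.
 */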
inline static int push_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowWavesContext *showwaves = outlink->src->priv;
    int nb_channels = inlink->channels;
    int ret, i;

    ret = ff_filter_frame(outlink, showwaves->outpicref);
    showwaves->outpicref = NULL;
    showwaves->buf_idx = 0;
    for (i = 0; i < nb_channels; i++)
        showwaves->buf_idy[i] = 0;
    return ret;
}
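
/*
 * showwavespic path: once all audio has been queued, every output
 * column is drawn from the per-channel accumulation of
 * max_samples = total_samples / w consecutive samples (absolute values,
 * doubled in the loop below, then divided back by max_samples), so the
 * whole stream is compressed into a single image.
 */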
static int push_single_pic(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int64_t n = 0, max_samples = showwaves->total_samples / outlink->w;
    AVFrame *out = showwaves->outpicref;
    struct frame_node *node;
    const int nb_channels = inlink->channels;
    const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
    const int linesize = out->linesize[0];
    const int pixstep = showwaves->pixstep;
    int col = 0;
    int64_t *sum = showwaves->sum;

    if (max_samples == 0) {
        av_log(ctx, AV_LOG_ERROR, "Too few samples\n");
        return AVERROR(EINVAL);
    }

    av_log(ctx, AV_LOG_DEBUG, "Create frame averaging %"PRId64" samples per column\n", max_samples);

    /* clear the per-channel accumulators; size must be in bytes, not elements */
    memset(sum, 0, nb_channels * sizeof(*sum));

    for (node = showwaves->audio_frames; node; node = node->next) {
        int i;
        const AVFrame *frame = node->frame;
        const int16_t *p = (const int16_t *)frame->data[0];

        for (i = 0; i < frame->nb_samples; i++) {
            int ch;

            for (ch = 0; ch < nb_channels; ch++)
                sum[ch] += abs(p[ch + i*nb_channels]) << 1;
            if (n++ == max_samples) {
                for (ch = 0; ch < nb_channels; ch++) {
                    int16_t sample = sum[ch] / max_samples;
                    uint8_t *buf = out->data[0] + col * pixstep;
                    int h;

                    if (showwaves->split_channels)
                        buf += ch*ch_height*linesize;
                    av_assert0(col < outlink->w);
                    h = showwaves->get_h(sample, ch_height);
                    showwaves->draw_sample(buf, ch_height, linesize, &showwaves->buf_idy[ch], &showwaves->fg[ch * 4], h);
                    sum[ch] = 0;
                }
                col++;
                n = 0;
            }
        }
    }

    return push_frame(outlink);
}
static int request_frame(AVFilterLink *outlink)
{
    ShowWavesContext *showwaves = outlink->src->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    int ret;

    ret = ff_request_frame(inlink);
    if (ret == AVERROR_EOF && showwaves->outpicref) {
        if (showwaves->single_pic)
            push_single_pic(outlink);
        else
            push_frame(outlink);
    }

    return ret;
}
static int alloc_out_frame(ShowWavesContext *showwaves, const int16_t *p,
                           const AVFilterLink *inlink, AVFilterLink *outlink,
                           const AVFrame *in)
{
    if (!showwaves->outpicref) {
        int j;
        AVFrame *out = showwaves->outpicref =
            ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        out->width  = outlink->w;
        out->height = outlink->h;
        out->pts = in->pts + av_rescale_q((p - (int16_t *)in->data[0]) / inlink->channels,
                                          av_make_q(1, inlink->sample_rate),
                                          outlink->time_base);
        for (j = 0; j < outlink->h; j++)
            memset(out->data[0] + j*out->linesize[0], 0, outlink->w * showwaves->pixstep);
    }
    return 0;
}
static av_cold int init(AVFilterContext *ctx)
{
    ShowWavesContext *showwaves = ctx->priv;

    if (!strcmp(ctx->filter->name, "showwavespic")) {
        showwaves->single_pic = 1;
        showwaves->mode = MODE_CENTERED_LINE;
    }

    return 0;
}
#if CONFIG_SHOWWAVES_FILTER

static int showwaves_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    const int nb_samples = insamples->nb_samples;
    AVFrame *outpicref = showwaves->outpicref;
    int16_t *p = (int16_t *)insamples->data[0];
    int nb_channels = inlink->channels;
    int i, j, ret = 0;
    const int pixstep = showwaves->pixstep;
    const int n = showwaves->n;
    const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;

    /* draw data in the buffer */
    for (i = 0; i < nb_samples; i++) {

        ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
        if (ret < 0)
            goto end;
        outpicref = showwaves->outpicref;

        for (j = 0; j < nb_channels; j++) {
            uint8_t *buf = outpicref->data[0] + showwaves->buf_idx * pixstep;
            const int linesize = outpicref->linesize[0];
            int h;

            if (showwaves->split_channels)
                buf += j*ch_height*linesize;
            h = showwaves->get_h(*p++, ch_height);
            showwaves->draw_sample(buf, ch_height, linesize,
                                   &showwaves->buf_idy[j], &showwaves->fg[j * 4], h);
        }

        showwaves->sample_count_mod++;
        if (showwaves->sample_count_mod == n) {
            showwaves->sample_count_mod = 0;
            showwaves->buf_idx++;
        }
        if (showwaves->buf_idx == showwaves->w)
            if ((ret = push_frame(outlink)) < 0)
                break;
        outpicref = showwaves->outpicref;
    }

end:
    av_frame_free(&insamples);
    return ret;
}

static const AVFilterPad showwaves_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = showwaves_filter_frame,
    },
    { NULL }
};

static const AVFilterPad showwaves_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showwaves = {
    .name          = "showwaves",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowWavesContext),
    .inputs        = showwaves_inputs,
    .outputs       = showwaves_outputs,
    .priv_class    = &showwaves_class,
};

#endif // CONFIG_SHOWWAVES_FILTER
#if CONFIG_SHOWWAVESPIC_FILTER

#define OFFSET(x) offsetof(ShowWavesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption showwavespic_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
    { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
    { "lin",  "linear",      0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN},  .flags=FLAGS, .unit="scale"},
    { "log",  "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG},  .flags=FLAGS, .unit="scale"},
    { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
    { "cbrt", "cubic root",  0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
    { NULL }
};

AVFILTER_DEFINE_CLASS(showwavespic);
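
/*
 * Usage sketch (illustrative, not taken from this file): write the
 * whole input as one waveform image, one horizontal band per channel:
 *
 *   ffmpeg -i input.flac -filter_complex "showwavespic=s=1024x240:split_channels=1" -frames:v 1 waveform.png
 */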
static int showwavespic_config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ShowWavesContext *showwaves = ctx->priv;

    if (showwaves->single_pic) {
        showwaves->sum = av_mallocz_array(inlink->channels, sizeof(*showwaves->sum));
        if (!showwaves->sum)
            return AVERROR(ENOMEM);
    }

    return 0;
}

static int showwavespic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int16_t *p = (int16_t *)insamples->data[0];
    int ret = 0;

    if (showwaves->single_pic) {
        struct frame_node *f;

        ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
        if (ret < 0)
            goto end;

        /* queue the audio frame */
        f = av_malloc(sizeof(*f));
        if (!f) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        f->frame = insamples;
        f->next  = NULL;
        if (!showwaves->last_frame) {
            showwaves->audio_frames =
            showwaves->last_frame   = f;
        } else {
            showwaves->last_frame->next = f;
            showwaves->last_frame = f;
        }
        showwaves->total_samples += insamples->nb_samples;

        return 0;
    }

end:
    av_frame_free(&insamples);
    return ret;
}

static const AVFilterPad showwavespic_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = showwavespic_config_input,
        .filter_frame = showwavespic_filter_frame,
    },
    { NULL }
};

static const AVFilterPad showwavespic_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showwavespic = {
    .name          = "showwavespic",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a video output single picture."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowWavesContext),
    .inputs        = showwavespic_inputs,
    .outputs       = showwavespic_outputs,
    .priv_class    = &showwavespic_class,
};

#endif // CONFIG_SHOWWAVESPIC_FILTER