You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

944 lines
40KB

  1. /*
  2. * Copyright (c) 2012-2013 Clément Bœsch
  3. * Copyright (c) 2013 Rudolf Polzer <divverent@xonotic.org>
  4. * Copyright (c) 2015 Paul B Mahol
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * audio to spectrum (video) transmedia filter, based on ffplay rdft showmode
  25. * (by Michael Niedermayer) and lavfi/avf_showwaves (by Stefano Sabatini).
  26. */
  27. #include <math.h>
  28. #include "libavcodec/avfft.h"
  29. #include "libavutil/audio_fifo.h"
  30. #include "libavutil/avassert.h"
  31. #include "libavutil/channel_layout.h"
  32. #include "libavutil/opt.h"
  33. #include "audio.h"
  34. #include "video.h"
  35. #include "avfilter.h"
  36. #include "internal.h"
  37. #include "window_func.h"
/* How multiple channels share the canvas: overlaid or stacked. */
enum DisplayMode  { COMBINED, SEPARATE, NB_MODES };
/* Amplitude-to-brightness mapping applied before color lookup. */
enum DisplayScale { LINEAR, SQRT, CBRT, LOG, FOURTHRT, FIFTHRT, NB_SCALES };
/* Color scheme; all but CHANNEL/INTENSITY index into color_table[] below. */
enum ColorMode    { CHANNEL, INTENSITY, RAINBOW, MORELAND, NEBULAE, FIRE, FIERY, FRUIT, NB_CLMODES };
/* How new spectrum columns are placed into the output frame over time. */
enum SlideMode    { REPLACE, SCROLL, FULLFRAME, RSCROLL, NB_SLIDES };
/* Direction of the frequency axis (columns vs. rows). */
enum Orientation  { VERTICAL, HORIZONTAL, NB_ORIENTATIONS };
typedef struct {
    const AVClass *class;
    int w, h;                   ///< output video size (from the "size"/"s" option)
    AVFrame *outpicref;         ///< persistent output frame the columns are drawn into
    int nb_display_channels;    ///< number of input channels being rendered
    int orientation;            ///< VERTICAL or HORIZONTAL frequency axis
    int channel_width;          ///< per-channel width (w/channels in separate+horizontal mode)
    int channel_height;         ///< per-channel height (h/channels in separate+vertical mode)
    int sliding;                ///< 1 if sliding mode, 0 otherwise
    int mode;                   ///< channel display mode
    int color_mode;             ///< display color scheme
    int scale;                  ///< amplitude scale (enum DisplayScale)
    float saturation;           ///< color saturation multiplier
    int xpos;                   ///< x position (current column)
    FFTContext *fft;            ///< Fast Fourier Transform context
    int fft_bits;               ///< number of bits (FFT window size = 1<<fft_bits)
    FFTComplex **fft_data;      ///< bins holder for each (displayed) channels
    float *window_func_lut;     ///< Window function LUT
    float **magnitudes;         ///< per-channel magnitude buffer (one bin per output row/col)
    int win_func;               ///< window function id (WFUNC_*)
    int win_size;               ///< FFT window size in samples (1 << fft_bits)
    double win_scale;           ///< 1/sqrt(sum of squared window coefficients)
    float overlap;              ///< window overlap fraction in [0,1]
    float gain;                 ///< amplitude gain applied before scaling
    int skip_samples;           ///< samples to advance between consecutive windows
    float *combine_buffer;      ///< color combining buffer (3 * h items)
    AVAudioFifo *fifo;          ///< queue of input samples awaiting a full window
    int64_t pts;                ///< pts assigned to the next analysis window
    int single_pic;             ///< set when running as "showspectrumpic" (one output frame)
} ShowSpectrumContext;
#define OFFSET(x) offsetof(ShowSpectrumContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* Options of the real-time "showspectrum" filter; the single-picture
 * variant (showspectrumpic) declares its own table further below. */
static const AVOption showspectrum_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
    { "slide", "set sliding mode", OFFSET(sliding), AV_OPT_TYPE_INT, {.i64 = 0}, 0, NB_SLIDES-1, FLAGS, "slide" },
        { "replace", "replace old columns with new", 0, AV_OPT_TYPE_CONST, {.i64=REPLACE}, 0, 0, FLAGS, "slide" },
        { "scroll", "scroll from right to left", 0, AV_OPT_TYPE_CONST, {.i64=SCROLL}, 0, 0, FLAGS, "slide" },
        { "rscroll", "scroll from left to right", 0, AV_OPT_TYPE_CONST, {.i64=RSCROLL}, 0, 0, FLAGS, "slide" },
        { "fullframe", "return full frames", 0, AV_OPT_TYPE_CONST, {.i64=FULLFRAME}, 0, 0, FLAGS, "slide" },
    { "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, COMBINED, NB_MODES-1, FLAGS, "mode" },
        { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
        { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
    { "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=CHANNEL}, CHANNEL, NB_CLMODES-1, FLAGS, "color" },
        { "channel", "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL}, 0, 0, FLAGS, "color" },
        { "intensity", "intensity based coloring", 0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
        { "rainbow", "rainbow based coloring", 0, AV_OPT_TYPE_CONST, {.i64=RAINBOW}, 0, 0, FLAGS, "color" },
        { "moreland", "moreland based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MORELAND}, 0, 0, FLAGS, "color" },
        { "nebulae", "nebulae based coloring", 0, AV_OPT_TYPE_CONST, {.i64=NEBULAE}, 0, 0, FLAGS, "color" },
        { "fire", "fire based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIRE}, 0, 0, FLAGS, "color" },
        { "fiery", "fiery based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIERY}, 0, 0, FLAGS, "color" },
        { "fruit", "fruit based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FRUIT}, 0, 0, FLAGS, "color" },
    { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=SQRT}, LINEAR, NB_SCALES-1, FLAGS, "scale" },
        { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
        { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
        { "4thrt","4th root", 0, AV_OPT_TYPE_CONST, {.i64=FOURTHRT}, 0, 0, FLAGS, "scale" },
        { "5thrt","5th root", 0, AV_OPT_TYPE_CONST, {.i64=FIFTHRT}, 0, 0, FLAGS, "scale" },
        { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
        { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
    { "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
    { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = WFUNC_HANNING}, 0, NB_WFUNC-1, FLAGS, "win_func" },
        { "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, FLAGS, "win_func" },
        { "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
        { "hann", "Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
        { "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
        { "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, FLAGS, "win_func" },
        { "blackman", "Blackman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
        { "welch", "Welch", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH}, 0, 0, FLAGS, "win_func" },
        { "flattop", "Flat-top", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP}, 0, 0, FLAGS, "win_func" },
        { "bharris", "Blackman-Harris", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS}, 0, 0, FLAGS, "win_func" },
        { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
        { "bhann", "Bartlett-Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN}, 0, 0, FLAGS, "win_func" },
        { "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, FLAGS, "win_func" },
        { "nuttall", "Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL}, 0, 0, FLAGS, "win_func" },
        { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS}, 0, 0, FLAGS, "win_func" },
        { "gauss", "Gauss", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS}, 0, 0, FLAGS, "win_func" },
        { "tukey", "Tukey", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY}, 0, 0, FLAGS, "win_func" },
    { "orientation", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=VERTICAL}, 0, NB_ORIENTATIONS-1, FLAGS, "orientation" },
        { "vertical", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VERTICAL}, 0, 0, FLAGS, "orientation" },
        { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=HORIZONTAL}, 0, 0, FLAGS, "orientation" },
    { "overlap", "set window overlap", OFFSET(overlap), AV_OPT_TYPE_FLOAT, {.dbl = 0}, 0, 1, FLAGS },
    { "gain", "set scale gain", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl = 1}, 0, 128, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showspectrum);
/* Color-map anchor points, indexed by enum ColorMode.
 * Each entry is { a, Y, U, V }: 'a' is the normalized amplitude position of
 * the anchor in [0,1]; Y/U/V are normalized color components, with chroma
 * stored as a signed offset around the 128 midpoint scaled by 1/256.
 * pick_color() linearly interpolates between the two anchors bracketing the
 * input amplitude. */
static const struct ColorTable {
    float a, y, u, v;
} color_table[][8] = {
    [INTENSITY] = {
    {       0,                  0,                  0,                   0 },
    {  0.13, .03587126228984074,  .1573300977624594, -.02548747583751842 },
    {  0.30, .18572281794568020,  .1772436246393981,  .17475554840414750 },
    {  0.60, .28184980583656130, -.1593064119945782,  .47132074554608920 },
    {  0.73, .65830621175547810, -.3716070802232764,  .24352759331252930 },
    {  0.78, .76318535758242900, -.4307467689263783,  .16866496622310430 },
    {  0.91, .95336363636363640, -.2045454545454546,  .03313636363636363 },
    {     1,                  1,                  0,                   0 }},
    [RAINBOW] = {
    {       0,                0,                 0,                 0 },
    {  0.13,  44/256.,  (189-128)/256.,  (138-128)/256. },
    {  0.25,  29/256.,  (186-128)/256.,  (119-128)/256. },
    {  0.38, 119/256.,  (194-128)/256.,   (53-128)/256. },
    {  0.60, 111/256.,   (73-128)/256.,   (59-128)/256. },
    {  0.73, 205/256.,   (19-128)/256.,  (149-128)/256. },
    {  0.86, 135/256.,   (83-128)/256.,  (200-128)/256. },
    {     1,  73/256.,   (95-128)/256.,  (225-128)/256. }},
    [MORELAND] = {
    {       0,  44/256.,  (181-128)/256.,  (112-128)/256. },
    {  0.13, 126/256.,  (177-128)/256.,  (106-128)/256. },
    {  0.25, 164/256.,  (163-128)/256.,  (109-128)/256. },
    {  0.38, 200/256.,  (140-128)/256.,  (120-128)/256. },
    {  0.60, 201/256.,  (117-128)/256.,  (141-128)/256. },
    {  0.73, 177/256.,  (103-128)/256.,  (165-128)/256. },
    {  0.86, 136/256.,  (100-128)/256.,  (183-128)/256. },
    {     1,  68/256.,  (117-128)/256.,  (203-128)/256. }},
    [NEBULAE] = {
    {       0,  10/256.,  (134-128)/256.,  (132-128)/256. },
    {  0.23,  21/256.,  (137-128)/256.,  (130-128)/256. },
    {  0.45,  35/256.,  (134-128)/256.,  (134-128)/256. },
    {  0.57,  51/256.,  (130-128)/256.,  (139-128)/256. },
    {  0.67, 104/256.,  (116-128)/256.,  (162-128)/256. },
    {  0.77, 120/256.,  (105-128)/256.,  (188-128)/256. },
    {  0.87, 140/256.,  (105-128)/256.,  (188-128)/256. },
    {     1,        1,               0,               0 }},
    [FIRE] = {
    {       0,        0,               0,               0 },
    {  0.23,  44/256.,  (132-128)/256.,  (127-128)/256. },
    {  0.45,  62/256.,  (116-128)/256.,  (140-128)/256. },
    {  0.57,  75/256.,  (105-128)/256.,  (152-128)/256. },
    {  0.67,  95/256.,   (91-128)/256.,  (166-128)/256. },
    {  0.77, 126/256.,   (74-128)/256.,  (172-128)/256. },
    {  0.87, 164/256.,   (73-128)/256.,  (162-128)/256. },
    {     1,        1,               0,               0 }},
    [FIERY] = {
    {       0,        0,               0,               0 },
    {  0.23,  36/256.,  (116-128)/256.,  (163-128)/256. },
    {  0.45,  52/256.,  (102-128)/256.,  (200-128)/256. },
    {  0.57, 116/256.,   (84-128)/256.,  (196-128)/256. },
    {  0.67, 157/256.,   (67-128)/256.,  (181-128)/256. },
    {  0.77, 193/256.,   (40-128)/256.,  (155-128)/256. },
    {  0.87, 221/256.,  (101-128)/256.,  (134-128)/256. },
    {     1,        1,               0,               0 }},
    [FRUIT] = {
    {       0,        0,               0,               0 },
    {  0.20,  29/256.,  (136-128)/256.,  (119-128)/256. },
    {  0.30,  60/256.,  (119-128)/256.,   (90-128)/256. },
    {  0.40,  85/256.,   (91-128)/256.,   (85-128)/256. },
    {  0.50, 116/256.,   (70-128)/256.,  (105-128)/256. },
    {  0.60, 151/256.,   (50-128)/256.,  (146-128)/256. },
    {  0.70, 191/256.,   (63-128)/256.,  (178-128)/256. },
    {     1,  98/256.,   (80-128)/256.,  (221-128)/256. }},
};
  195. static av_cold void uninit(AVFilterContext *ctx)
  196. {
  197. ShowSpectrumContext *s = ctx->priv;
  198. int i;
  199. av_freep(&s->combine_buffer);
  200. av_fft_end(s->fft);
  201. if (s->fft_data) {
  202. for (i = 0; i < s->nb_display_channels; i++)
  203. av_freep(&s->fft_data[i]);
  204. }
  205. av_freep(&s->fft_data);
  206. av_freep(&s->window_func_lut);
  207. if (s->magnitudes) {
  208. for (i = 0; i < s->nb_display_channels; i++)
  209. av_freep(&s->magnitudes[i]);
  210. }
  211. av_freep(&s->magnitudes);
  212. av_frame_free(&s->outpicref);
  213. av_audio_fifo_free(s->fifo);
  214. }
/**
 * Negotiate formats: planar-float audio (any layout/rate) on the input,
 * full-range YUV 4:4:4 video on the output.
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_NONE };
    int ret;

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
        return ret;

    /* accept any channel layout and any sample rate */
    layouts = ff_all_channel_layouts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
        return ret;

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
        return ret;

    return 0;
}
/**
 * Configure the video output and (re)allocate all per-configuration state:
 * FFT context, per-channel FFT/magnitude buffers, window LUT, initial black
 * output frame, combine buffer and the input sample FIFO.
 * Re-entrant: re-running with an unchanged FFT size skips the reallocation.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowSpectrumContext *s = ctx->priv;
    int i, fft_bits, h, w;
    float overlap;

    /* the single-picture variant shares this callback and is detected by name */
    if (!strcmp(ctx->filter->name, "showspectrumpic"))
        s->single_pic = 1;

    outlink->w = s->w;
    outlink->h = s->h;

    /* in SEPARATE mode the frequency axis is split evenly among channels */
    h = (s->mode == COMBINED || s->orientation == HORIZONTAL) ? outlink->h : outlink->h / inlink->channels;
    w = (s->mode == COMBINED || s->orientation == VERTICAL)   ? outlink->w : outlink->w / inlink->channels;
    s->channel_height = h;
    s->channel_width  = w;

    if (s->orientation == VERTICAL) {
        /* FFT window size (precision) according to the requested output frame height */
        for (fft_bits = 1; 1 << fft_bits < 2 * h; fft_bits++);
    } else {
        /* FFT window size (precision) according to the requested output frame width */
        for (fft_bits = 1; 1 << fft_bits < 2 * w; fft_bits++);
    }
    s->win_size = 1 << fft_bits;

    /* (re-)configuration if the video output changed (or first init) */
    if (fft_bits != s->fft_bits) {
        AVFrame *outpicref;

        av_fft_end(s->fft);
        s->fft = av_fft_init(fft_bits, 0);
        if (!s->fft) {
            av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
                   "The window size might be too high.\n");
            return AVERROR(EINVAL);
        }
        s->fft_bits = fft_bits;

        /* FFT buffers: x2 for each (display) channel buffer.
         * Note: we use free and malloc instead of a realloc-like function to
         * make sure the buffer is aligned in memory for the FFT functions. */
        for (i = 0; i < s->nb_display_channels; i++)
            av_freep(&s->fft_data[i]);
        av_freep(&s->fft_data);

        s->nb_display_channels = inlink->channels;

        s->magnitudes = av_calloc(s->nb_display_channels, sizeof(*s->magnitudes));
        if (!s->magnitudes)
            return AVERROR(ENOMEM);
        for (i = 0; i < s->nb_display_channels; i++) {
            /* one magnitude slot per pixel along the frequency axis */
            s->magnitudes[i] = av_calloc(s->orientation == VERTICAL ? s->h : s->w, sizeof(**s->magnitudes));
            if (!s->magnitudes[i])
                return AVERROR(ENOMEM);
        }

        s->fft_data = av_calloc(s->nb_display_channels, sizeof(*s->fft_data));
        if (!s->fft_data)
            return AVERROR(ENOMEM);
        for (i = 0; i < s->nb_display_channels; i++) {
            s->fft_data[i] = av_calloc(s->win_size, sizeof(**s->fft_data));
            if (!s->fft_data[i])
                return AVERROR(ENOMEM);
        }

        /* pre-calc windowing function */
        s->window_func_lut =
            av_realloc_f(s->window_func_lut, s->win_size,
                         sizeof(*s->window_func_lut));
        if (!s->window_func_lut)
            return AVERROR(ENOMEM);
        ff_generate_window_func(s->window_func_lut, s->win_size, s->win_func, &overlap);
        /* overlap == 1 selects the window function's recommended overlap */
        if (s->overlap == 1)
            s->overlap = overlap;
        s->skip_samples = (1. - s->overlap) * s->win_size;
        if (s->skip_samples < 1) {
            av_log(ctx, AV_LOG_ERROR, "overlap %f too big\n", s->overlap);
            return AVERROR(EINVAL);
        }

        /* normalization factor: inverse RMS energy of the window */
        for (s->win_scale = 0, i = 0; i < s->win_size; i++) {
            s->win_scale += s->window_func_lut[i] * s->window_func_lut[i];
        }
        s->win_scale = 1. / sqrt(s->win_scale);

        /* prepare the initial picref buffer (black frame) */
        av_frame_free(&s->outpicref);
        s->outpicref = outpicref =
            ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!outpicref)
            return AVERROR(ENOMEM);
        outlink->sample_aspect_ratio = (AVRational){1,1};
        for (i = 0; i < outlink->h; i++) {
            /* Y=0, U=V=128 is black in unsigned YUV */
            memset(outpicref->data[0] + i * outpicref->linesize[0],   0, outlink->w);
            memset(outpicref->data[1] + i * outpicref->linesize[1], 128, outlink->w);
            memset(outpicref->data[2] + i * outpicref->linesize[2], 128, outlink->w);
        }
    }

    /* reset the drawing cursor if it fell outside the (new) frame */
    if ((s->orientation == VERTICAL   && s->xpos >= outlink->w) ||
        (s->orientation == HORIZONTAL && s->xpos >= outlink->h))
        s->xpos = 0;

    /* one output frame per analysis hop; FULLFRAME emits one per full canvas */
    outlink->frame_rate = av_make_q(inlink->sample_rate, s->win_size * (1.-s->overlap));
    if (s->orientation == VERTICAL && s->sliding == FULLFRAME)
        outlink->frame_rate.den *= outlink->w;
    if (s->orientation == HORIZONTAL && s->sliding == FULLFRAME)
        outlink->frame_rate.den *= outlink->h;

    /* 3 floats (Y, U, V) per pixel along the frequency axis */
    if (s->orientation == VERTICAL) {
        s->combine_buffer =
            av_realloc_f(s->combine_buffer, outlink->h * 3,
                         sizeof(*s->combine_buffer));
    } else {
        s->combine_buffer =
            av_realloc_f(s->combine_buffer, outlink->w * 3,
                         sizeof(*s->combine_buffer));
    }

    av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d FFT window size:%d\n",
           s->w, s->h, s->win_size);

    av_audio_fifo_free(s->fifo);
    s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->win_size);
    if (!s->fifo)
        return AVERROR(ENOMEM);
    return 0;
}
  353. static void run_fft(ShowSpectrumContext *s, AVFrame *fin)
  354. {
  355. int ch, n;
  356. /* fill FFT input with the number of samples available */
  357. for (ch = 0; ch < s->nb_display_channels; ch++) {
  358. const float *p = (float *)fin->extended_data[ch];
  359. for (n = 0; n < s->win_size; n++) {
  360. s->fft_data[ch][n].re = p[n] * s->window_func_lut[n];
  361. s->fft_data[ch][n].im = 0;
  362. }
  363. }
  364. /* run FFT on each samples set */
  365. for (ch = 0; ch < s->nb_display_channels; ch++) {
  366. av_fft_permute(s->fft, s->fft_data[ch]);
  367. av_fft_calc(s->fft, s->fft_data[ch]);
  368. }
  369. }
/* Accessors for bin y of channel ch in the FFT output, and its magnitude. */
#define RE(y, ch) s->fft_data[ch][y].re
#define IM(y, ch) s->fft_data[ch][y].im
#define MAGNITUDE(y, ch) hypot(RE(y, ch), IM(y, ch))
  373. static void calc_magnitudes(ShowSpectrumContext *s)
  374. {
  375. int ch, y, h = s->orientation == VERTICAL ? s->h : s->w;
  376. for (ch = 0; ch < s->nb_display_channels; ch++) {
  377. float *magnitudes = s->magnitudes[ch];
  378. for (y = 0; y < h; y++)
  379. magnitudes[y] = MAGNITUDE(y, ch);
  380. }
  381. }
  382. static void acalc_magnitudes(ShowSpectrumContext *s)
  383. {
  384. int ch, y, h = s->orientation == VERTICAL ? s->h : s->w;
  385. for (ch = 0; ch < s->nb_display_channels; ch++) {
  386. float *magnitudes = s->magnitudes[ch];
  387. for (y = 0; y < h; y++)
  388. magnitudes[y] += MAGNITUDE(y, ch);
  389. }
  390. }
  391. static void scale_magnitudes(ShowSpectrumContext *s, float scale)
  392. {
  393. int ch, y, h = s->orientation == VERTICAL ? s->h : s->w;
  394. for (ch = 0; ch < s->nb_display_channels; ch++) {
  395. float *magnitudes = s->magnitudes[ch];
  396. for (y = 0; y < h; y++)
  397. magnitudes[y] *= scale;
  398. }
  399. }
  400. static void pick_color(ShowSpectrumContext *s,
  401. float yf, float uf, float vf,
  402. float a, float *out)
  403. {
  404. if (s->color_mode > CHANNEL) {
  405. const int cm = s->color_mode;
  406. float y, u, v;
  407. int i;
  408. for (i = 1; i < FF_ARRAY_ELEMS(color_table[cm]) - 1; i++)
  409. if (color_table[cm][i].a >= a)
  410. break;
  411. // i now is the first item >= the color
  412. // now we know to interpolate between item i - 1 and i
  413. if (a <= color_table[cm][i - 1].a) {
  414. y = color_table[cm][i - 1].y;
  415. u = color_table[cm][i - 1].u;
  416. v = color_table[cm][i - 1].v;
  417. } else if (a >= color_table[cm][i].a) {
  418. y = color_table[cm][i].y;
  419. u = color_table[cm][i].u;
  420. v = color_table[cm][i].v;
  421. } else {
  422. float start = color_table[cm][i - 1].a;
  423. float end = color_table[cm][i].a;
  424. float lerpfrac = (a - start) / (end - start);
  425. y = color_table[cm][i - 1].y * (1.0f - lerpfrac)
  426. + color_table[cm][i].y * lerpfrac;
  427. u = color_table[cm][i - 1].u * (1.0f - lerpfrac)
  428. + color_table[cm][i].u * lerpfrac;
  429. v = color_table[cm][i - 1].v * (1.0f - lerpfrac)
  430. + color_table[cm][i].v * lerpfrac;
  431. }
  432. out[0] += y * yf;
  433. out[1] += u * uf;
  434. out[2] += v * vf;
  435. } else {
  436. out[0] += a * yf;
  437. out[1] += a * uf;
  438. out[2] += a * vf;
  439. }
  440. }
  441. static void clear_combine_buffer(ShowSpectrumContext *s, int size)
  442. {
  443. int y;
  444. for (y = 0; y < size; y++) {
  445. s->combine_buffer[3 * y ] = 0;
  446. s->combine_buffer[3 * y + 1] = 127.5;
  447. s->combine_buffer[3 * y + 2] = 127.5;
  448. }
  449. }
  450. static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples)
  451. {
  452. int ret;
  453. AVFilterContext *ctx = inlink->dst;
  454. AVFilterLink *outlink = ctx->outputs[0];
  455. ShowSpectrumContext *s = ctx->priv;
  456. AVFrame *outpicref = s->outpicref;
  457. const double w = s->win_scale;
  458. const float g = s->gain;
  459. int h = s->orientation == VERTICAL ? s->channel_height : s->channel_width;
  460. int ch, plane, x, y;
  461. /* fill a new spectrum column */
  462. /* initialize buffer for combining to black */
  463. clear_combine_buffer(s, s->orientation == VERTICAL ? outlink->h : outlink->w);
  464. for (ch = 0; ch < s->nb_display_channels; ch++) {
  465. float *magnitudes = s->magnitudes[ch];
  466. float yf, uf, vf;
  467. /* decide color range */
  468. switch (s->mode) {
  469. case COMBINED:
  470. // reduce range by channel count
  471. yf = 256.0f / s->nb_display_channels;
  472. switch (s->color_mode) {
  473. case RAINBOW:
  474. case MORELAND:
  475. case NEBULAE:
  476. case FIRE:
  477. case FIERY:
  478. case FRUIT:
  479. case INTENSITY:
  480. uf = yf;
  481. vf = yf;
  482. break;
  483. case CHANNEL:
  484. /* adjust saturation for mixed UV coloring */
  485. /* this factor is correct for infinite channels, an approximation otherwise */
  486. uf = yf * M_PI;
  487. vf = yf * M_PI;
  488. break;
  489. default:
  490. av_assert0(0);
  491. }
  492. break;
  493. case SEPARATE:
  494. // full range
  495. yf = 256.0f;
  496. uf = 256.0f;
  497. vf = 256.0f;
  498. break;
  499. default:
  500. av_assert0(0);
  501. }
  502. if (s->color_mode == CHANNEL) {
  503. if (s->nb_display_channels > 1) {
  504. uf *= 0.5 * sin((2 * M_PI * ch) / s->nb_display_channels);
  505. vf *= 0.5 * cos((2 * M_PI * ch) / s->nb_display_channels);
  506. } else {
  507. uf = 0.0f;
  508. vf = 0.0f;
  509. }
  510. }
  511. uf *= s->saturation;
  512. vf *= s->saturation;
  513. /* draw the channel */
  514. for (y = 0; y < h; y++) {
  515. int row = (s->mode == COMBINED) ? y : ch * h + y;
  516. float *out = &s->combine_buffer[3 * row];
  517. /* get magnitude */
  518. float a = g * w * magnitudes[y];
  519. /* apply scale */
  520. switch (s->scale) {
  521. case LINEAR:
  522. break;
  523. case SQRT:
  524. a = sqrt(a);
  525. break;
  526. case CBRT:
  527. a = cbrt(a);
  528. break;
  529. case FOURTHRT:
  530. a = pow(a, 0.25);
  531. break;
  532. case FIFTHRT:
  533. a = pow(a, 0.20);
  534. break;
  535. case LOG:
  536. a = 1 + log10(av_clipd(a * w, 1e-6, 1)) / 6; // zero = -120dBFS
  537. break;
  538. default:
  539. av_assert0(0);
  540. }
  541. pick_color(s, yf, uf, vf, a, out);
  542. }
  543. }
  544. av_frame_make_writable(s->outpicref);
  545. /* copy to output */
  546. if (s->orientation == VERTICAL) {
  547. if (s->sliding == SCROLL) {
  548. for (plane = 0; plane < 3; plane++) {
  549. for (y = 0; y < outlink->h; y++) {
  550. uint8_t *p = outpicref->data[plane] +
  551. y * outpicref->linesize[plane];
  552. memmove(p, p + 1, outlink->w - 1);
  553. }
  554. }
  555. s->xpos = outlink->w - 1;
  556. } else if (s->sliding == RSCROLL) {
  557. for (plane = 0; plane < 3; plane++) {
  558. for (y = 0; y < outlink->h; y++) {
  559. uint8_t *p = outpicref->data[plane] +
  560. y * outpicref->linesize[plane];
  561. memmove(p + 1, p, outlink->w - 1);
  562. }
  563. }
  564. s->xpos = 0;
  565. }
  566. for (plane = 0; plane < 3; plane++) {
  567. uint8_t *p = outpicref->data[plane] +
  568. (outlink->h - 1) * outpicref->linesize[plane] +
  569. s->xpos;
  570. for (y = 0; y < outlink->h; y++) {
  571. *p = lrintf(av_clipf(s->combine_buffer[3 * y + plane], 0, 255));
  572. p -= outpicref->linesize[plane];
  573. }
  574. }
  575. } else {
  576. if (s->sliding == SCROLL) {
  577. for (plane = 0; plane < 3; plane++) {
  578. for (y = 1; y < outlink->h; y++) {
  579. memmove(outpicref->data[plane] + (y-1) * outpicref->linesize[plane],
  580. outpicref->data[plane] + (y ) * outpicref->linesize[plane],
  581. outlink->w);
  582. }
  583. }
  584. s->xpos = outlink->h - 1;
  585. } else if (s->sliding == RSCROLL) {
  586. for (plane = 0; plane < 3; plane++) {
  587. for (y = outlink->h - 1; y >= 1; y--) {
  588. memmove(outpicref->data[plane] + (y ) * outpicref->linesize[plane],
  589. outpicref->data[plane] + (y-1) * outpicref->linesize[plane],
  590. outlink->w);
  591. }
  592. }
  593. s->xpos = 0;
  594. }
  595. for (plane = 0; plane < 3; plane++) {
  596. uint8_t *p = outpicref->data[plane] +
  597. s->xpos * outpicref->linesize[plane];
  598. for (x = 0; x < outlink->w; x++) {
  599. *p = lrintf(av_clipf(s->combine_buffer[3 * x + plane], 0, 255));
  600. p++;
  601. }
  602. }
  603. }
  604. if (s->sliding != FULLFRAME || s->xpos == 0)
  605. outpicref->pts = insamples->pts;
  606. s->xpos++;
  607. if (s->orientation == VERTICAL && s->xpos >= outlink->w)
  608. s->xpos = 0;
  609. if (s->orientation == HORIZONTAL && s->xpos >= outlink->h)
  610. s->xpos = 0;
  611. if (!s->single_pic && (s->sliding != FULLFRAME || s->xpos == 0)) {
  612. ret = ff_filter_frame(outlink, av_frame_clone(s->outpicref));
  613. if (ret < 0)
  614. return ret;
  615. }
  616. return s->win_size;
  617. }
  618. #if CONFIG_SHOWSPECTRUM_FILTER
/**
 * Output request handler. On EOF in fullframe mode, a partially drawn
 * canvas may still be pending: blank the not-yet-drawn region and push
 * the frame downstream before propagating EOF.
 */
static int request_frame(AVFilterLink *outlink)
{
    ShowSpectrumContext *s = outlink->src->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    unsigned i;
    int ret;

    ret = ff_request_frame(inlink);
    if (ret == AVERROR_EOF && s->sliding == FULLFRAME && s->xpos > 0 &&
        s->outpicref) {
        if (s->orientation == VERTICAL) {
            /* blank the columns to the right of the last drawn one
             * (Y=0, U=V=128 is black) */
            for (i = 0; i < outlink->h; i++) {
                memset(s->outpicref->data[0] + i * s->outpicref->linesize[0] + s->xpos,   0, outlink->w - s->xpos);
                memset(s->outpicref->data[1] + i * s->outpicref->linesize[1] + s->xpos, 128, outlink->w - s->xpos);
                memset(s->outpicref->data[2] + i * s->outpicref->linesize[2] + s->xpos, 128, outlink->w - s->xpos);
            }
        } else {
            /* blank the rows below the last drawn one */
            for (i = s->xpos; i < outlink->h; i++) {
                memset(s->outpicref->data[0] + i * s->outpicref->linesize[0],   0, outlink->w);
                memset(s->outpicref->data[1] + i * s->outpicref->linesize[1], 128, outlink->w);
                memset(s->outpicref->data[2] + i * s->outpicref->linesize[2], 128, outlink->w);
            }
        }
        /* ownership of outpicref passes to ff_filter_frame() */
        ret = ff_filter_frame(outlink, s->outpicref);
        s->outpicref = NULL;
    }
    return ret;
}
  646. static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
  647. {
  648. AVFilterContext *ctx = inlink->dst;
  649. ShowSpectrumContext *s = ctx->priv;
  650. AVFrame *fin = NULL;
  651. int ret = 0;
  652. av_audio_fifo_write(s->fifo, (void **)insamples->extended_data, insamples->nb_samples);
  653. av_frame_free(&insamples);
  654. while (av_audio_fifo_size(s->fifo) >= s->win_size) {
  655. fin = ff_get_audio_buffer(inlink, s->win_size);
  656. if (!fin) {
  657. ret = AVERROR(ENOMEM);
  658. goto fail;
  659. }
  660. fin->pts = s->pts;
  661. s->pts += s->skip_samples;
  662. ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data, s->win_size);
  663. if (ret < 0)
  664. goto fail;
  665. av_assert0(fin->nb_samples == s->win_size);
  666. run_fft(s, fin);
  667. calc_magnitudes(s);
  668. ret = plot_spectrum_column(inlink, fin);
  669. av_frame_free(&fin);
  670. av_audio_fifo_drain(s->fifo, s->skip_samples);
  671. if (ret < 0)
  672. goto fail;
  673. }
  674. fail:
  675. av_frame_free(&fin);
  676. return ret;
  677. }
/* Audio input pad: all processing happens in filter_frame(). */
static const AVFilterPad showspectrum_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

/* Video output pad: configured in config_output(); request_frame()
 * flushes the pending fullframe canvas at EOF. */
static const AVFilterPad showspectrum_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
/* Filter registration for the real-time showspectrum variant. */
AVFilter ff_avf_showspectrum = {
    .name          = "showspectrum",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output."),
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowSpectrumContext),
    .inputs        = showspectrum_inputs,
    .outputs       = showspectrum_outputs,
    .priv_class    = &showspectrum_class,
};
  705. #endif // CONFIG_SHOWSPECTRUM_FILTER
  706. #if CONFIG_SHOWSPECTRUMPIC_FILTER
/* Options for the single-picture variant of the filter. Unlike showspectrum,
 * the default scale is LOG, saturation defaults to 1 and the whole input is
 * rendered into one frame of the configured size (default 4096x2048). */
static const AVOption showspectrumpic_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "4096x2048"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "4096x2048"}, 0, 0, FLAGS },
    /* channel layout on the picture: all channels overlaid or stacked */
    { "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, 0, NB_MODES-1, FLAGS, "mode" },
        { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
        { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
    /* palette used to map magnitude (and channel) to color */
    { "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=INTENSITY}, 0, NB_CLMODES-1, FLAGS, "color" },
        { "channel",   "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL},   0, 0, FLAGS, "color" },
        { "intensity", "intensity based coloring",        0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
        { "rainbow",   "rainbow based coloring",          0, AV_OPT_TYPE_CONST, {.i64=RAINBOW},   0, 0, FLAGS, "color" },
        { "moreland",  "moreland based coloring",         0, AV_OPT_TYPE_CONST, {.i64=MORELAND},  0, 0, FLAGS, "color" },
        { "nebulae",   "nebulae based coloring",          0, AV_OPT_TYPE_CONST, {.i64=NEBULAE},   0, 0, FLAGS, "color" },
        { "fire",      "fire based coloring",             0, AV_OPT_TYPE_CONST, {.i64=FIRE},      0, 0, FLAGS, "color" },
        { "fiery",     "fiery based coloring",            0, AV_OPT_TYPE_CONST, {.i64=FIERY},     0, 0, FLAGS, "color" },
        { "fruit",     "fruit based coloring",            0, AV_OPT_TYPE_CONST, {.i64=FRUIT},     0, 0, FLAGS, "color" },
    /* amplitude-to-brightness mapping curve */
    { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=LOG}, 0, NB_SCALES-1, FLAGS, "scale" },
        { "sqrt",  "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT},     0, 0, FLAGS, "scale" },
        { "cbrt",  "cubic root",  0, AV_OPT_TYPE_CONST, {.i64=CBRT},     0, 0, FLAGS, "scale" },
        { "4thrt", "4th root",    0, AV_OPT_TYPE_CONST, {.i64=FOURTHRT}, 0, 0, FLAGS, "scale" },
        { "5thrt", "5th root",    0, AV_OPT_TYPE_CONST, {.i64=FIFTHRT},  0, 0, FLAGS, "scale" },
        { "log",   "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG},      0, 0, FLAGS, "scale" },
        { "lin",   "linear",      0, AV_OPT_TYPE_CONST, {.i64=LINEAR},   0, 0, FLAGS, "scale" },
    { "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
    /* analysis window applied before the FFT; see window_func.h */
    { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = WFUNC_HANNING}, 0, NB_WFUNC-1, FLAGS, "win_func" },
        { "rect",     "Rectangular",      0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT},     0, 0, FLAGS, "win_func" },
        { "bartlett", "Bartlett",         0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
        { "hann",     "Hann",             0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING},  0, 0, FLAGS, "win_func" },
        { "hanning",  "Hanning",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING},  0, 0, FLAGS, "win_func" },
        { "hamming",  "Hamming",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING},  0, 0, FLAGS, "win_func" },
        { "blackman", "Blackman",         0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
        { "welch",    "Welch",            0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH},    0, 0, FLAGS, "win_func" },
        { "flattop",  "Flat-top",         0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP},  0, 0, FLAGS, "win_func" },
        { "bharris",  "Blackman-Harris",  0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS},  0, 0, FLAGS, "win_func" },
        { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
        { "bhann",    "Bartlett-Hann",    0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN},    0, 0, FLAGS, "win_func" },
        { "sine",     "Sine",             0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE},     0, 0, FLAGS, "win_func" },
        { "nuttall",  "Nuttall",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL},  0, 0, FLAGS, "win_func" },
        { "lanczos",  "Lanczos",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS},  0, 0, FLAGS, "win_func" },
        { "gauss",    "Gauss",            0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS},    0, 0, FLAGS, "win_func" },
        { "tukey",    "Tukey",            0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY},    0, 0, FLAGS, "win_func" },
    /* time axis direction: vertical = time on X, horizontal = time on Y */
    { "orientation", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=VERTICAL}, 0, NB_ORIENTATIONS-1, FLAGS, "orientation" },
        { "vertical",   NULL, 0, AV_OPT_TYPE_CONST, {.i64=VERTICAL},   0, 0, FLAGS, "orientation" },
        { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=HORIZONTAL}, 0, 0, FLAGS, "orientation" },
    { "gain", "set scale gain", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl = 1}, 0, 128, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showspectrumpic);
  754. static int showspectrumpic_request_frame(AVFilterLink *outlink)
  755. {
  756. ShowSpectrumContext *s = outlink->src->priv;
  757. AVFilterLink *inlink = outlink->src->inputs[0];
  758. int ret;
  759. ret = ff_request_frame(inlink);
  760. if (ret == AVERROR_EOF && s->outpicref) {
  761. int samples = av_audio_fifo_size(s->fifo);
  762. int consumed = 0;
  763. int x = 0, sz = s->orientation == VERTICAL ? s->w : s->h;
  764. int ch, spf, spb;
  765. AVFrame *fin;
  766. spf = s->win_size * (samples / ((s->win_size * sz) * ceil(samples / (float)(s->win_size * sz))));
  767. spb = (samples / (spf * sz)) * spf;
  768. fin = ff_get_audio_buffer(inlink, s->win_size);
  769. if (!fin)
  770. return AVERROR(ENOMEM);
  771. while (x < sz) {
  772. ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data, s->win_size);
  773. if (ret < 0) {
  774. av_frame_free(&fin);
  775. return ret;
  776. }
  777. av_audio_fifo_drain(s->fifo, spf);
  778. if (ret < s->win_size) {
  779. for (ch = 0; ch < s->nb_display_channels; ch++) {
  780. memset(fin->extended_data[ch] + ret * sizeof(float), 0,
  781. (s->win_size - ret) * sizeof(float));
  782. }
  783. }
  784. run_fft(s, fin);
  785. acalc_magnitudes(s);
  786. consumed += spf;
  787. if (consumed >= spb) {
  788. int h = s->orientation == VERTICAL ? s->h : s->w;
  789. scale_magnitudes(s, 1. / (consumed / spf));
  790. plot_spectrum_column(inlink, fin);
  791. consumed = 0;
  792. x++;
  793. for (ch = 0; ch < s->nb_display_channels; ch++)
  794. memset(s->magnitudes[ch], 0, h * sizeof(float));
  795. }
  796. }
  797. av_frame_free(&fin);
  798. s->outpicref->pts = 0;
  799. ret = ff_filter_frame(outlink, s->outpicref);
  800. s->outpicref = NULL;
  801. }
  802. return ret;
  803. }
  804. static int showspectrumpic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
  805. {
  806. AVFilterContext *ctx = inlink->dst;
  807. ShowSpectrumContext *s = ctx->priv;
  808. int ret;
  809. ret = av_audio_fifo_write(s->fifo, (void **)insamples->extended_data, insamples->nb_samples);
  810. av_frame_free(&insamples);
  811. return ret;
  812. }
/* Single audio input pad: frames are accumulated by
 * showspectrumpic_filter_frame() until EOF. */
static const AVFilterPad showspectrumpic_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = showspectrumpic_filter_frame,
    },
    { NULL }
};
/* Single video output pad: the whole spectrum picture is emitted from
 * showspectrumpic_request_frame() once the input reaches EOF. */
static const AVFilterPad showspectrumpic_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = showspectrumpic_request_frame,
    },
    { NULL }
};
/* Filter definition: audio in, one spectrum picture out.
 * Shares init-less setup, uninit and query_formats with showspectrum. */
AVFilter ff_avf_showspectrumpic = {
    .name          = "showspectrumpic",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output single picture."),
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowSpectrumContext),
    .inputs        = showspectrumpic_inputs,
    .outputs       = showspectrumpic_outputs,
    .priv_class    = &showspectrumpic_class,
};
  840. #endif // CONFIG_SHOWSPECTRUMPIC_FILTER