/*
 * Copyright (c) 2012-2013 Clément Bœsch
 * Copyright (c) 2013 Rudolf Polzer <divverent@xonotic.org>
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * audio to spectrum (video) transmedia filter, based on ffplay rdft showmode
 * (by Michael Niedermayer) and lavfi/avf_showwaves (by Stefano Sabatini).
 */

#include <math.h>

#include "libavcodec/avfft.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/xga_font_data.h"
#include "audio.h"
#include "video.h"
#include "avfilter.h"
#include "internal.h"
#include "window_func.h"

enum DisplayMode  { COMBINED, SEPARATE, NB_MODES };
enum DataMode     { D_MAGNITUDE, D_PHASE, NB_DMODES };
enum DisplayScale { LINEAR, SQRT, CBRT, LOG, FOURTHRT, FIFTHRT, NB_SCALES };
enum ColorMode    { CHANNEL, INTENSITY, RAINBOW, MORELAND, NEBULAE, FIRE, FIERY, FRUIT, COOL, MAGMA, NB_CLMODES };
enum SlideMode    { REPLACE, SCROLL, FULLFRAME, RSCROLL, NB_SLIDES };
enum Orientation  { VERTICAL, HORIZONTAL, NB_ORIENTATIONS };

typedef struct ShowSpectrumContext {
    const AVClass *class;
    int w, h;
    AVFrame *outpicref;
    int nb_display_channels;
    int orientation;
    int channel_width;
    int channel_height;
    int sliding;                ///< 1 if sliding mode, 0 otherwise
    int mode;                   ///< channel display mode
    int color_mode;             ///< display color scheme
    int scale;
    float saturation;           ///< color saturation multiplier
    float rotation;             ///< color rotation
    int data;
    int xpos;                   ///< x position (current column)
    FFTContext **fft;           ///< Fast Fourier Transform context
    int fft_bits;               ///< number of bits (FFT window size = 1<<fft_bits)
    FFTComplex **fft_data;      ///< bins holder for each (displayed) channels
    float *window_func_lut;     ///< Window function LUT
    float **magnitudes;
    float **phases;
    int win_func;
    int win_size;
    double win_scale;
    float overlap;
    float gain;
    int hop_size;
    float *combine_buffer;      ///< color combining buffer (3 * h items)
    float **color_buffer;       ///< color buffer (3 * h * ch items)
    AVAudioFifo *fifo;
    int64_t pts;
    int single_pic;
    int legend;
    int start_x, start_y;
} ShowSpectrumContext;

#define OFFSET(x) offsetof(ShowSpectrumContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption showspectrum_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
    { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
    { "slide", "set sliding mode", OFFSET(sliding), AV_OPT_TYPE_INT, {.i64 = 0}, 0, NB_SLIDES-1, FLAGS, "slide" },
    { "replace", "replace old columns with new", 0, AV_OPT_TYPE_CONST, {.i64=REPLACE}, 0, 0, FLAGS, "slide" },
    { "scroll", "scroll from right to left", 0, AV_OPT_TYPE_CONST, {.i64=SCROLL}, 0, 0, FLAGS, "slide" },
    { "fullframe", "return full frames", 0, AV_OPT_TYPE_CONST, {.i64=FULLFRAME}, 0, 0, FLAGS, "slide" },
    { "rscroll", "scroll from left to right", 0, AV_OPT_TYPE_CONST, {.i64=RSCROLL}, 0, 0, FLAGS, "slide" },
    { "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, COMBINED, NB_MODES-1, FLAGS, "mode" },
    { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
    { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
    { "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=CHANNEL}, CHANNEL, NB_CLMODES-1, FLAGS, "color" },
    { "channel", "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL}, 0, 0, FLAGS, "color" },
    { "intensity", "intensity based coloring", 0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
    { "rainbow", "rainbow based coloring", 0, AV_OPT_TYPE_CONST, {.i64=RAINBOW}, 0, 0, FLAGS, "color" },
    { "moreland", "moreland based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MORELAND}, 0, 0, FLAGS, "color" },
    { "nebulae", "nebulae based coloring", 0, AV_OPT_TYPE_CONST, {.i64=NEBULAE}, 0, 0, FLAGS, "color" },
    { "fire", "fire based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIRE}, 0, 0, FLAGS, "color" },
    { "fiery", "fiery based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIERY}, 0, 0, FLAGS, "color" },
    { "fruit", "fruit based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FRUIT}, 0, 0, FLAGS, "color" },
    { "cool", "cool based coloring", 0, AV_OPT_TYPE_CONST, {.i64=COOL}, 0, 0, FLAGS, "color" },
    { "magma", "magma based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MAGMA}, 0, 0, FLAGS, "color" },
    { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=SQRT}, LINEAR, NB_SCALES-1, FLAGS, "scale" },
    { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
    { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
    { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
    { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
    { "4thrt","4th root", 0, AV_OPT_TYPE_CONST, {.i64=FOURTHRT}, 0, 0, FLAGS, "scale" },
    { "5thrt","5th root", 0, AV_OPT_TYPE_CONST, {.i64=FIFTHRT}, 0, 0, FLAGS, "scale" },
    { "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
    { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = WFUNC_HANNING}, 0, NB_WFUNC-1, FLAGS, "win_func" },
    { "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, FLAGS, "win_func" },
    { "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
    { "hann", "Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
    { "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
    { "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, FLAGS, "win_func" },
    { "blackman", "Blackman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
    { "welch", "Welch", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH}, 0, 0, FLAGS, "win_func" },
    { "flattop", "Flat-top", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP}, 0, 0, FLAGS, "win_func" },
    { "bharris", "Blackman-Harris", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS}, 0, 0, FLAGS, "win_func" },
    { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
    { "bhann", "Bartlett-Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN}, 0, 0, FLAGS, "win_func" },
    { "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, FLAGS, "win_func" },
    { "nuttall", "Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL}, 0, 0, FLAGS, "win_func" },
    { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS}, 0, 0, FLAGS, "win_func" },
    { "gauss", "Gauss", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS}, 0, 0, FLAGS, "win_func" },
    { "tukey", "Tukey", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY}, 0, 0, FLAGS, "win_func" },
    { "dolph", "Dolph-Chebyshev", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_DOLPH}, 0, 0, FLAGS, "win_func" },
    { "cauchy", "Cauchy", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY}, 0, 0, FLAGS, "win_func" },
    { "parzen", "Parzen", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN}, 0, 0, FLAGS, "win_func" },
    { "poisson", "Poisson", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON}, 0, 0, FLAGS, "win_func" },
    { "orientation", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=VERTICAL}, 0, NB_ORIENTATIONS-1, FLAGS, "orientation" },
    { "vertical", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VERTICAL}, 0, 0, FLAGS, "orientation" },
    { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=HORIZONTAL}, 0, 0, FLAGS, "orientation" },
    { "overlap", "set window overlap", OFFSET(overlap), AV_OPT_TYPE_FLOAT, {.dbl = 0}, 0, 1, FLAGS },
    { "gain", "set scale gain", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl = 1}, 0, 128, FLAGS },
    { "data", "set data mode", OFFSET(data), AV_OPT_TYPE_INT, {.i64 = 0}, 0, NB_DMODES-1, FLAGS, "data" },
    { "magnitude", NULL, 0, AV_OPT_TYPE_CONST, {.i64=D_MAGNITUDE}, 0, 0, FLAGS, "data" },
    { "phase", NULL, 0, AV_OPT_TYPE_CONST, {.i64=D_PHASE}, 0, 0, FLAGS, "data" },
    { "rotation", "color rotation", OFFSET(rotation), AV_OPT_TYPE_FLOAT, {.dbl = 0}, -1, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showspectrum);

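/* Example usage (a sketch, assuming a standard FFmpeg build with this filter
 * enabled; the option names are the ones declared in showspectrum_options
 * above):
 *   ffmpeg -i in.wav -filter_complex "showspectrum=s=640x512:slide=scroll:color=intensity" out.mkv
 */
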
static const struct ColorTable {
    float a, y, u, v;
} color_table[][8] = {
    [INTENSITY] = {
    { 0, 0, 0, 0 },
    { 0.13, .03587126228984074, .1573300977624594, -.02548747583751842 },
    { 0.30, .18572281794568020, .1772436246393981, .17475554840414750 },
    { 0.60, .28184980583656130, -.1593064119945782, .47132074554608920 },
    { 0.73, .65830621175547810, -.3716070802232764, .24352759331252930 },
    { 0.78, .76318535758242900, -.4307467689263783, .16866496622310430 },
    { 0.91, .95336363636363640, -.2045454545454546, .03313636363636363 },
    { 1, 1, 0, 0 }},
    [RAINBOW] = {
    { 0, 0, 0, 0 },
    { 0.13, 44/256., (189-128)/256., (138-128)/256. },
    { 0.25, 29/256., (186-128)/256., (119-128)/256. },
    { 0.38, 119/256., (194-128)/256., (53-128)/256. },
    { 0.60, 111/256., (73-128)/256., (59-128)/256. },
    { 0.73, 205/256., (19-128)/256., (149-128)/256. },
    { 0.86, 135/256., (83-128)/256., (200-128)/256. },
    { 1, 73/256., (95-128)/256., (225-128)/256. }},
    [MORELAND] = {
    { 0, 44/256., (181-128)/256., (112-128)/256. },
    { 0.13, 126/256., (177-128)/256., (106-128)/256. },
    { 0.25, 164/256., (163-128)/256., (109-128)/256. },
    { 0.38, 200/256., (140-128)/256., (120-128)/256. },
    { 0.60, 201/256., (117-128)/256., (141-128)/256. },
    { 0.73, 177/256., (103-128)/256., (165-128)/256. },
    { 0.86, 136/256., (100-128)/256., (183-128)/256. },
    { 1, 68/256., (117-128)/256., (203-128)/256. }},
    [NEBULAE] = {
    { 0, 10/256., (134-128)/256., (132-128)/256. },
    { 0.23, 21/256., (137-128)/256., (130-128)/256. },
    { 0.45, 35/256., (134-128)/256., (134-128)/256. },
    { 0.57, 51/256., (130-128)/256., (139-128)/256. },
    { 0.67, 104/256., (116-128)/256., (162-128)/256. },
    { 0.77, 120/256., (105-128)/256., (188-128)/256. },
    { 0.87, 140/256., (105-128)/256., (188-128)/256. },
    { 1, 1, 0, 0 }},
    [FIRE] = {
    { 0, 0, 0, 0 },
    { 0.23, 44/256., (132-128)/256., (127-128)/256. },
    { 0.45, 62/256., (116-128)/256., (140-128)/256. },
    { 0.57, 75/256., (105-128)/256., (152-128)/256. },
    { 0.67, 95/256., (91-128)/256., (166-128)/256. },
    { 0.77, 126/256., (74-128)/256., (172-128)/256. },
    { 0.87, 164/256., (73-128)/256., (162-128)/256. },
    { 1, 1, 0, 0 }},
    [FIERY] = {
    { 0, 0, 0, 0 },
    { 0.23, 36/256., (116-128)/256., (163-128)/256. },
    { 0.45, 52/256., (102-128)/256., (200-128)/256. },
    { 0.57, 116/256., (84-128)/256., (196-128)/256. },
    { 0.67, 157/256., (67-128)/256., (181-128)/256. },
    { 0.77, 193/256., (40-128)/256., (155-128)/256. },
    { 0.87, 221/256., (101-128)/256., (134-128)/256. },
    { 1, 1, 0, 0 }},
    [FRUIT] = {
    { 0, 0, 0, 0 },
    { 0.20, 29/256., (136-128)/256., (119-128)/256. },
    { 0.30, 60/256., (119-128)/256., (90-128)/256. },
    { 0.40, 85/256., (91-128)/256., (85-128)/256. },
    { 0.50, 116/256., (70-128)/256., (105-128)/256. },
    { 0.60, 151/256., (50-128)/256., (146-128)/256. },
    { 0.70, 191/256., (63-128)/256., (178-128)/256. },
    { 1, 98/256., (80-128)/256., (221-128)/256. }},
    [COOL] = {
    { 0, 0, 0, 0 },
    { .15, 0, .5, -.5 },
    { 1, 1, -.5, .5 }},
    [MAGMA] = {
    { 0, 0, 0, 0 },
    { 0.10, 23/256., (175-128)/256., (120-128)/256. },
    { 0.23, 43/256., (158-128)/256., (144-128)/256. },
    { 0.35, 85/256., (138-128)/256., (179-128)/256. },
    { 0.48, 96/256., (128-128)/256., (189-128)/256. },
    { 0.64, 128/256., (103-128)/256., (214-128)/256. },
    { 0.78, 167/256., (85-128)/256., (174-128)/256. },
    { 1, 205/256., (80-128)/256., (152-128)/256. }},
};

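/* Each row above is a gradient control point: 'a' is the normalized input
 * intensity in [0,1] and y/u/v are the corresponding YUV factors.
 * pick_color() below finds the two control points that bracket a given
 * intensity and linearly interpolates between them. */
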
static av_cold void uninit(AVFilterContext *ctx)
{
    ShowSpectrumContext *s = ctx->priv;
    int i;

    av_freep(&s->combine_buffer);
    if (s->fft) {
        for (i = 0; i < s->nb_display_channels; i++)
            av_fft_end(s->fft[i]);
    }
    av_freep(&s->fft);
    if (s->fft_data) {
        for (i = 0; i < s->nb_display_channels; i++)
            av_freep(&s->fft_data[i]);
    }
    av_freep(&s->fft_data);
    if (s->color_buffer) {
        for (i = 0; i < s->nb_display_channels; i++)
            av_freep(&s->color_buffer[i]);
    }
    av_freep(&s->color_buffer);
    av_freep(&s->window_func_lut);
    if (s->magnitudes) {
        for (i = 0; i < s->nb_display_channels; i++)
            av_freep(&s->magnitudes[i]);
    }
    av_freep(&s->magnitudes);
    av_frame_free(&s->outpicref);
    av_audio_fifo_free(s->fifo);
    if (s->phases) {
        for (i = 0; i < s->nb_display_channels; i++)
            av_freep(&s->phases[i]);
    }
    av_freep(&s->phases);
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_NONE };
    int ret;

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
        return ret;

    layouts = ff_all_channel_layouts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
        return ret;

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
        return ret;

    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowSpectrumContext *s = ctx->priv;
    int i, fft_bits, h, w;
    float overlap;

    s->pts = AV_NOPTS_VALUE;

    if (!strcmp(ctx->filter->name, "showspectrumpic"))
        s->single_pic = 1;

    outlink->w = s->w;
    outlink->h = s->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};

    if (s->legend) {
        s->start_x = log10(inlink->sample_rate) * 25;
        s->start_y = 64;
        outlink->w += s->start_x * 2;
        outlink->h += s->start_y * 2;
    }

    h = (s->mode == COMBINED || s->orientation == HORIZONTAL) ? s->h : s->h / inlink->channels;
    w = (s->mode == COMBINED || s->orientation == VERTICAL)   ? s->w : s->w / inlink->channels;
    s->channel_height = h;
    s->channel_width  = w;

    if (s->orientation == VERTICAL) {
        /* FFT window size (precision) according to the requested output frame height */
        for (fft_bits = 1; 1 << fft_bits < 2 * h; fft_bits++);
    } else {
        /* FFT window size (precision) according to the requested output frame width */
        for (fft_bits = 1; 1 << fft_bits < 2 * w; fft_bits++);
    }
    s->win_size = 1 << fft_bits;

    if (!s->fft) {
        s->fft = av_calloc(inlink->channels, sizeof(*s->fft));
        if (!s->fft)
            return AVERROR(ENOMEM);
    }

    /* (re-)configuration if the video output changed (or first init) */
    if (fft_bits != s->fft_bits) {
        AVFrame *outpicref;

        s->fft_bits = fft_bits;

        /* FFT buffers: x2 for each (display) channel buffer.
         * Note: we use free and malloc instead of a realloc-like function to
         * make sure the buffer is aligned in memory for the FFT functions. */
        for (i = 0; i < s->nb_display_channels; i++) {
            av_fft_end(s->fft[i]);
            av_freep(&s->fft_data[i]);
        }
        av_freep(&s->fft_data);

        s->nb_display_channels = inlink->channels;
        for (i = 0; i < s->nb_display_channels; i++) {
            s->fft[i] = av_fft_init(fft_bits, 0);
            if (!s->fft[i]) {
                av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
                       "The window size might be too high.\n");
                return AVERROR(EINVAL);
            }
        }

        s->magnitudes = av_calloc(s->nb_display_channels, sizeof(*s->magnitudes));
        if (!s->magnitudes)
            return AVERROR(ENOMEM);
        for (i = 0; i < s->nb_display_channels; i++) {
            s->magnitudes[i] = av_calloc(s->orientation == VERTICAL ? s->h : s->w, sizeof(**s->magnitudes));
            if (!s->magnitudes[i])
                return AVERROR(ENOMEM);
        }

        s->phases = av_calloc(s->nb_display_channels, sizeof(*s->phases));
        if (!s->phases)
            return AVERROR(ENOMEM);
        for (i = 0; i < s->nb_display_channels; i++) {
            s->phases[i] = av_calloc(s->orientation == VERTICAL ? s->h : s->w, sizeof(**s->phases));
            if (!s->phases[i])
                return AVERROR(ENOMEM);
        }

        av_freep(&s->color_buffer);
        s->color_buffer = av_calloc(s->nb_display_channels, sizeof(*s->color_buffer));
        if (!s->color_buffer)
            return AVERROR(ENOMEM);
        for (i = 0; i < s->nb_display_channels; i++) {
            s->color_buffer[i] = av_calloc(s->orientation == VERTICAL ? s->h * 3 : s->w * 3, sizeof(**s->color_buffer));
            if (!s->color_buffer[i])
                return AVERROR(ENOMEM);
        }

        s->fft_data = av_calloc(s->nb_display_channels, sizeof(*s->fft_data));
        if (!s->fft_data)
            return AVERROR(ENOMEM);
        for (i = 0; i < s->nb_display_channels; i++) {
            s->fft_data[i] = av_calloc(s->win_size, sizeof(**s->fft_data));
            if (!s->fft_data[i])
                return AVERROR(ENOMEM);
        }

        /* pre-calc windowing function */
        s->window_func_lut =
            av_realloc_f(s->window_func_lut, s->win_size,
                         sizeof(*s->window_func_lut));
        if (!s->window_func_lut)
            return AVERROR(ENOMEM);
        generate_window_func(s->window_func_lut, s->win_size, s->win_func, &overlap);
        if (s->overlap == 1)
            s->overlap = overlap;
        s->hop_size = (1. - s->overlap) * s->win_size;
        if (s->hop_size < 1) {
            av_log(ctx, AV_LOG_ERROR, "overlap %f too big\n", s->overlap);
            return AVERROR(EINVAL);
        }

        for (s->win_scale = 0, i = 0; i < s->win_size; i++) {
            s->win_scale += s->window_func_lut[i] * s->window_func_lut[i];
        }
        s->win_scale = 1. / sqrt(s->win_scale);

        /* prepare the initial picref buffer (black frame) */
        av_frame_free(&s->outpicref);
        s->outpicref = outpicref =
            ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!outpicref)
            return AVERROR(ENOMEM);
        outpicref->sample_aspect_ratio = (AVRational){1,1};
        for (i = 0; i < outlink->h; i++) {
            memset(outpicref->data[0] + i * outpicref->linesize[0],   0, outlink->w);
            memset(outpicref->data[1] + i * outpicref->linesize[1], 128, outlink->w);
            memset(outpicref->data[2] + i * outpicref->linesize[2], 128, outlink->w);
        }
        outpicref->color_range = AVCOL_RANGE_JPEG;
    }

    if ((s->orientation == VERTICAL   && s->xpos >= s->w) ||
        (s->orientation == HORIZONTAL && s->xpos >= s->h))
        s->xpos = 0;

    outlink->frame_rate = av_make_q(inlink->sample_rate, s->win_size * (1.-s->overlap));
    if (s->orientation == VERTICAL && s->sliding == FULLFRAME)
        outlink->frame_rate.den *= s->w;
    if (s->orientation == HORIZONTAL && s->sliding == FULLFRAME)
        outlink->frame_rate.den *= s->h;

    if (s->orientation == VERTICAL) {
        s->combine_buffer =
            av_realloc_f(s->combine_buffer, s->h * 3,
                         sizeof(*s->combine_buffer));
    } else {
        s->combine_buffer =
            av_realloc_f(s->combine_buffer, s->w * 3,
                         sizeof(*s->combine_buffer));
    }

    av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d FFT window size:%d\n",
           s->w, s->h, s->win_size);

    av_audio_fifo_free(s->fifo);
    s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->win_size);
    if (!s->fifo)
        return AVERROR(ENOMEM);
    return 0;
}

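/* Worked example for the sizing logic above (illustrative numbers, not taken
 * from a specific run): with the default 640x512 output, vertical orientation
 * and combined mode, h = 512, so the fft_bits loop stops at 10 and
 * win_size = 1024; with overlap = 0 and a 44100 Hz input the output frame
 * rate becomes 44100/1024, i.e. about 43 fps. */
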
static int run_channel_fft(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ShowSpectrumContext *s = ctx->priv;
    const float *window_func_lut = s->window_func_lut;
    AVFrame *fin = arg;
    const int ch = jobnr;
    int n;

    /* fill FFT input with the number of samples available */
    const float *p = (float *)fin->extended_data[ch];
    for (n = 0; n < s->win_size; n++) {
        s->fft_data[ch][n].re = p[n] * window_func_lut[n];
        s->fft_data[ch][n].im = 0;
    }

    /* run FFT on each samples set */
    av_fft_permute(s->fft[ch], s->fft_data[ch]);
    av_fft_calc(s->fft[ch], s->fft_data[ch]);

    return 0;
}

#define RE(y, ch) s->fft_data[ch][y].re
#define IM(y, ch) s->fft_data[ch][y].im
#define MAGNITUDE(y, ch) hypot(RE(y, ch), IM(y, ch))
#define PHASE(y, ch) atan2(IM(y, ch), RE(y, ch))

static int calc_channel_magnitudes(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ShowSpectrumContext *s = ctx->priv;
    const double w = s->win_scale * (s->scale == LOG ? s->win_scale : 1);
    int y, h = s->orientation == VERTICAL ? s->h : s->w;
    const float f = s->gain * w;
    const int ch = jobnr;
    float *magnitudes = s->magnitudes[ch];

    for (y = 0; y < h; y++)
        magnitudes[y] = MAGNITUDE(y, ch) * f;

    return 0;
}

static int calc_channel_phases(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ShowSpectrumContext *s = ctx->priv;
    const int h = s->orientation == VERTICAL ? s->h : s->w;
    const int ch = jobnr;
    float *phases = s->phases[ch];
    int y;

    for (y = 0; y < h; y++)
        phases[y] = (PHASE(y, ch) / M_PI + 1) / 2;

    return 0;
}

static void acalc_magnitudes(ShowSpectrumContext *s)
{
    const double w = s->win_scale * (s->scale == LOG ? s->win_scale : 1);
    int ch, y, h = s->orientation == VERTICAL ? s->h : s->w;
    const float f = s->gain * w;

    for (ch = 0; ch < s->nb_display_channels; ch++) {
        float *magnitudes = s->magnitudes[ch];

        for (y = 0; y < h; y++)
            magnitudes[y] += MAGNITUDE(y, ch) * f;
    }
}

static void scale_magnitudes(ShowSpectrumContext *s, float scale)
{
    int ch, y, h = s->orientation == VERTICAL ? s->h : s->w;

    for (ch = 0; ch < s->nb_display_channels; ch++) {
        float *magnitudes = s->magnitudes[ch];

        for (y = 0; y < h; y++)
            magnitudes[y] *= scale;
    }
}

static void color_range(ShowSpectrumContext *s, int ch,
                        float *yf, float *uf, float *vf)
{
    switch (s->mode) {
    case COMBINED:
        // reduce range by channel count
        *yf = 256.0f / s->nb_display_channels;
        switch (s->color_mode) {
        case RAINBOW:
        case MORELAND:
        case NEBULAE:
        case FIRE:
        case FIERY:
        case FRUIT:
        case COOL:
        case MAGMA:
        case INTENSITY:
            *uf = *yf;
            *vf = *yf;
            break;
        case CHANNEL:
            /* adjust saturation for mixed UV coloring */
            /* this factor is correct for infinite channels, an approximation otherwise */
            *uf = *yf * M_PI;
            *vf = *yf * M_PI;
            break;
        default:
            av_assert0(0);
        }
        break;
    case SEPARATE:
        // full range
        *yf = 256.0f;
        *uf = 256.0f;
        *vf = 256.0f;
        break;
    default:
        av_assert0(0);
    }

    if (s->color_mode == CHANNEL) {
        if (s->nb_display_channels > 1) {
            *uf *= 0.5 * sin((2 * M_PI * ch) / s->nb_display_channels + M_PI * s->rotation);
            *vf *= 0.5 * cos((2 * M_PI * ch) / s->nb_display_channels + M_PI * s->rotation);
        } else {
            *uf *= 0.5 * sin(M_PI * s->rotation);
            *vf *= 0.5 * cos(M_PI * s->rotation + M_PI_2);
        }
    } else {
        *uf += *uf * sin(M_PI * s->rotation);
        *vf += *vf * cos(M_PI * s->rotation + M_PI_2);
    }

    *uf *= s->saturation;
    *vf *= s->saturation;
}

static void pick_color(ShowSpectrumContext *s,
                       float yf, float uf, float vf,
                       float a, float *out)
{
    if (s->color_mode > CHANNEL) {
        const int cm = s->color_mode;
        float y, u, v;
        int i;

        for (i = 1; i < FF_ARRAY_ELEMS(color_table[cm]) - 1; i++)
            if (color_table[cm][i].a >= a)
                break;
        // i now is the first item >= the color
        // now we know to interpolate between item i - 1 and i
        if (a <= color_table[cm][i - 1].a) {
            y = color_table[cm][i - 1].y;
            u = color_table[cm][i - 1].u;
            v = color_table[cm][i - 1].v;
        } else if (a >= color_table[cm][i].a) {
            y = color_table[cm][i].y;
            u = color_table[cm][i].u;
            v = color_table[cm][i].v;
        } else {
            float start = color_table[cm][i - 1].a;
            float end = color_table[cm][i].a;
            float lerpfrac = (a - start) / (end - start);
            y = color_table[cm][i - 1].y * (1.0f - lerpfrac)
              + color_table[cm][i].y * lerpfrac;
            u = color_table[cm][i - 1].u * (1.0f - lerpfrac)
              + color_table[cm][i].u * lerpfrac;
            v = color_table[cm][i - 1].v * (1.0f - lerpfrac)
              + color_table[cm][i].v * lerpfrac;
        }

        out[0] = y * yf;
        out[1] = u * uf;
        out[2] = v * vf;
    } else {
        out[0] = a * yf;
        out[1] = a * uf;
        out[2] = a * vf;
    }
}

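/* Worked example of the interpolation above (illustrative): with the
 * INTENSITY table and a = 0.5, the bracketing control points are a = 0.30 and
 * a = 0.60, so lerpfrac = (0.5 - 0.30) / (0.60 - 0.30) = 2/3 and y/u/v end up
 * two thirds of the way from the 0.30 entry towards the 0.60 entry. */
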
static void clear_combine_buffer(ShowSpectrumContext *s, int size)
{
    int y;

    for (y = 0; y < size; y++) {
        s->combine_buffer[3 * y    ] = 0;
        s->combine_buffer[3 * y + 1] = 127.5;
        s->combine_buffer[3 * y + 2] = 127.5;
    }
}

static int plot_channel(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ShowSpectrumContext *s = ctx->priv;
    const int h = s->orientation == VERTICAL ? s->channel_height : s->channel_width;
    const int ch = jobnr;
    float *magnitudes = s->magnitudes[ch];
    float *phases = s->phases[ch];
    float yf, uf, vf;
    int y;

    /* decide color range */
    color_range(s, ch, &yf, &uf, &vf);

    /* draw the channel */
    for (y = 0; y < h; y++) {
        int row = (s->mode == COMBINED) ? y : ch * h + y;
        float *out = &s->color_buffer[ch][3 * row];
        float a;

        switch (s->data) {
        case D_MAGNITUDE:
            /* get magnitude */
            a = magnitudes[y];
            break;
        case D_PHASE:
            /* get phase */
            a = phases[y];
            break;
        default:
            av_assert0(0);
        }

        /* apply scale */
        switch (s->scale) {
        case LINEAR:
            a = av_clipf(a, 0, 1);
            break;
        case SQRT:
            a = av_clipf(sqrt(a), 0, 1);
            break;
        case CBRT:
            a = av_clipf(cbrt(a), 0, 1);
            break;
        case FOURTHRT:
            a = av_clipf(sqrt(sqrt(a)), 0, 1);
            break;
        case FIFTHRT:
            a = av_clipf(pow(a, 0.20), 0, 1);
            break;
        case LOG:
            a = 1 + log10(av_clipd(a, 1e-6, 1)) / 6; // zero = -120dBFS
            break;
        default:
            av_assert0(0);
        }

        pick_color(s, yf, uf, vf, a, out);
    }

    return 0;
}

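/* Worked example of the LOG mapping above (illustrative): a magnitude of 1e-3
 * (-60 dBFS) becomes 1 + log10(1e-3)/6 = 0.5, i.e. mid scale; values at or
 * below 1e-6 (-120 dBFS) clamp to 0 and a full-scale magnitude of 1 maps
 * to 1. */
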
static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowSpectrumContext *s = ctx->priv;
    AVFrame *outpicref = s->outpicref;
    int ret, plane, x, y, z = s->orientation == VERTICAL ? s->h : s->w;

    /* fill a new spectrum column */
    /* initialize buffer for combining to black */
    clear_combine_buffer(s, z);

    ctx->internal->execute(ctx, plot_channel, NULL, NULL, s->nb_display_channels);

    for (y = 0; y < z * 3; y++) {
        for (x = 0; x < s->nb_display_channels; x++) {
            s->combine_buffer[y] += s->color_buffer[x][y];
        }
    }

    av_frame_make_writable(s->outpicref);
    /* copy to output */
    if (s->orientation == VERTICAL) {
        if (s->sliding == SCROLL) {
            for (plane = 0; plane < 3; plane++) {
                for (y = 0; y < s->h; y++) {
                    uint8_t *p = outpicref->data[plane] +
                                 y * outpicref->linesize[plane];
                    memmove(p, p + 1, s->w - 1);
                }
            }
            s->xpos = s->w - 1;
        } else if (s->sliding == RSCROLL) {
            for (plane = 0; plane < 3; plane++) {
                for (y = 0; y < s->h; y++) {
                    uint8_t *p = outpicref->data[plane] +
                                 y * outpicref->linesize[plane];
                    memmove(p + 1, p, s->w - 1);
                }
            }
            s->xpos = 0;
        }
        for (plane = 0; plane < 3; plane++) {
            uint8_t *p = outpicref->data[plane] + s->start_x +
                         (outlink->h - 1 - s->start_y) * outpicref->linesize[plane] +
                         s->xpos;
            for (y = 0; y < s->h; y++) {
                *p = lrintf(av_clipf(s->combine_buffer[3 * y + plane], 0, 255));
                p -= outpicref->linesize[plane];
            }
        }
    } else {
        if (s->sliding == SCROLL) {
            for (plane = 0; plane < 3; plane++) {
                for (y = 1; y < s->h; y++) {
                    memmove(outpicref->data[plane] + (y-1) * outpicref->linesize[plane],
                            outpicref->data[plane] + (y  ) * outpicref->linesize[plane],
                            s->w);
                }
            }
            s->xpos = s->h - 1;
        } else if (s->sliding == RSCROLL) {
            for (plane = 0; plane < 3; plane++) {
                for (y = s->h - 1; y >= 1; y--) {
                    memmove(outpicref->data[plane] + (y  ) * outpicref->linesize[plane],
                            outpicref->data[plane] + (y-1) * outpicref->linesize[plane],
                            s->w);
                }
            }
            s->xpos = 0;
        }
        for (plane = 0; plane < 3; plane++) {
            uint8_t *p = outpicref->data[plane] + s->start_x +
                         (s->xpos + s->start_y) * outpicref->linesize[plane];
            for (x = 0; x < s->w; x++) {
                *p = lrintf(av_clipf(s->combine_buffer[3 * x + plane], 0, 255));
                p++;
            }
        }
    }

    if (s->sliding != FULLFRAME || s->xpos == 0)
        outpicref->pts = insamples->pts;

    s->xpos++;
    if (s->orientation == VERTICAL && s->xpos >= s->w)
        s->xpos = 0;
    if (s->orientation == HORIZONTAL && s->xpos >= s->h)
        s->xpos = 0;
    if (!s->single_pic && (s->sliding != FULLFRAME || s->xpos == 0)) {
        ret = ff_filter_frame(outlink, av_frame_clone(s->outpicref));
        if (ret < 0)
            return ret;
    }

    return s->win_size;
}

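/* Note on the sliding modes handled above: "replace" simply overwrites the
 * column (or row, when horizontal) at xpos and advances; "scroll"/"rscroll"
 * shift the whole picture by one column (or row) first and draw at a fixed
 * edge; "fullframe" draws like "replace" but only emits a frame once xpos
 * wraps back to 0, so every output frame contains an entirely new spectrum. */
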
#if CONFIG_SHOWSPECTRUM_FILTER

static int request_frame(AVFilterLink *outlink)
{
    ShowSpectrumContext *s = outlink->src->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    unsigned i;
    int ret;

    ret = ff_request_frame(inlink);
    if (ret == AVERROR_EOF && s->sliding == FULLFRAME && s->xpos > 0 &&
        s->outpicref) {
        if (s->orientation == VERTICAL) {
            for (i = 0; i < outlink->h; i++) {
                memset(s->outpicref->data[0] + i * s->outpicref->linesize[0] + s->xpos,   0, outlink->w - s->xpos);
                memset(s->outpicref->data[1] + i * s->outpicref->linesize[1] + s->xpos, 128, outlink->w - s->xpos);
                memset(s->outpicref->data[2] + i * s->outpicref->linesize[2] + s->xpos, 128, outlink->w - s->xpos);
            }
        } else {
            for (i = s->xpos; i < outlink->h; i++) {
                memset(s->outpicref->data[0] + i * s->outpicref->linesize[0],   0, outlink->w);
                memset(s->outpicref->data[1] + i * s->outpicref->linesize[1], 128, outlink->w);
                memset(s->outpicref->data[2] + i * s->outpicref->linesize[2], 128, outlink->w);
            }
        }
        ret = ff_filter_frame(outlink, s->outpicref);
        s->outpicref = NULL;
    }
    return ret;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    ShowSpectrumContext *s = ctx->priv;
    AVFrame *fin = NULL;
    int ret = 0, consumed = 0;

    if (s->pts == AV_NOPTS_VALUE)
        s->pts = insamples->pts - av_audio_fifo_size(s->fifo);

    av_audio_fifo_write(s->fifo, (void **)insamples->extended_data, insamples->nb_samples);
    av_frame_free(&insamples);
    while (av_audio_fifo_size(s->fifo) >= s->win_size) {
        fin = ff_get_audio_buffer(inlink, s->win_size);
        if (!fin) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        fin->pts = s->pts + consumed;
        consumed += s->hop_size;
        ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data, s->win_size);
        if (ret < 0)
            goto fail;

        av_assert0(fin->nb_samples == s->win_size);

        ctx->internal->execute(ctx, run_channel_fft, fin, NULL, s->nb_display_channels);

        if (s->data == D_MAGNITUDE)
            ctx->internal->execute(ctx, calc_channel_magnitudes, NULL, NULL, s->nb_display_channels);

        if (s->data == D_PHASE)
            ctx->internal->execute(ctx, calc_channel_phases, NULL, NULL, s->nb_display_channels);

        ret = plot_spectrum_column(inlink, fin);
        av_frame_free(&fin);
        av_audio_fifo_drain(s->fifo, s->hop_size);
        if (ret < 0)
            goto fail;
    }

fail:
    s->pts = AV_NOPTS_VALUE;
    av_frame_free(&fin);
    return ret;
}

static const AVFilterPad showspectrum_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad showspectrum_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showspectrum = {
    .name          = "showspectrum",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output."),
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowSpectrumContext),
    .inputs        = showspectrum_inputs,
    .outputs       = showspectrum_outputs,
    .priv_class    = &showspectrum_class,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
};
#endif // CONFIG_SHOWSPECTRUM_FILTER

#if CONFIG_SHOWSPECTRUMPIC_FILTER

static const AVOption showspectrumpic_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "4096x2048"}, 0, 0, FLAGS },
    { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "4096x2048"}, 0, 0, FLAGS },
    { "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, 0, NB_MODES-1, FLAGS, "mode" },
    { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
    { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
    { "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=INTENSITY}, 0, NB_CLMODES-1, FLAGS, "color" },
    { "channel", "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL}, 0, 0, FLAGS, "color" },
    { "intensity", "intensity based coloring", 0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
    { "rainbow", "rainbow based coloring", 0, AV_OPT_TYPE_CONST, {.i64=RAINBOW}, 0, 0, FLAGS, "color" },
    { "moreland", "moreland based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MORELAND}, 0, 0, FLAGS, "color" },
    { "nebulae", "nebulae based coloring", 0, AV_OPT_TYPE_CONST, {.i64=NEBULAE}, 0, 0, FLAGS, "color" },
    { "fire", "fire based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIRE}, 0, 0, FLAGS, "color" },
    { "fiery", "fiery based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIERY}, 0, 0, FLAGS, "color" },
    { "fruit", "fruit based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FRUIT}, 0, 0, FLAGS, "color" },
    { "cool", "cool based coloring", 0, AV_OPT_TYPE_CONST, {.i64=COOL}, 0, 0, FLAGS, "color" },
    { "magma", "magma based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MAGMA}, 0, 0, FLAGS, "color" },
    { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=LOG}, 0, NB_SCALES-1, FLAGS, "scale" },
    { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
    { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
    { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
    { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
    { "4thrt","4th root", 0, AV_OPT_TYPE_CONST, {.i64=FOURTHRT}, 0, 0, FLAGS, "scale" },
    { "5thrt","5th root", 0, AV_OPT_TYPE_CONST, {.i64=FIFTHRT}, 0, 0, FLAGS, "scale" },
    { "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
    { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = WFUNC_HANNING}, 0, NB_WFUNC-1, FLAGS, "win_func" },
    { "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, FLAGS, "win_func" },
    { "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
    { "hann", "Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
    { "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
    { "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, FLAGS, "win_func" },
    { "blackman", "Blackman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
    { "welch", "Welch", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH}, 0, 0, FLAGS, "win_func" },
    { "flattop", "Flat-top", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP}, 0, 0, FLAGS, "win_func" },
    { "bharris", "Blackman-Harris", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS}, 0, 0, FLAGS, "win_func" },
    { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
    { "bhann", "Bartlett-Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN}, 0, 0, FLAGS, "win_func" },
    { "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, FLAGS, "win_func" },
    { "nuttall", "Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL}, 0, 0, FLAGS, "win_func" },
    { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS}, 0, 0, FLAGS, "win_func" },
    { "gauss", "Gauss", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS}, 0, 0, FLAGS, "win_func" },
    { "tukey", "Tukey", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY}, 0, 0, FLAGS, "win_func" },
    { "dolph", "Dolph-Chebyshev", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_DOLPH}, 0, 0, FLAGS, "win_func" },
    { "cauchy", "Cauchy", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY}, 0, 0, FLAGS, "win_func" },
    { "parzen", "Parzen", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN}, 0, 0, FLAGS, "win_func" },
    { "poisson", "Poisson", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON}, 0, 0, FLAGS, "win_func" },
    { "orientation", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=VERTICAL}, 0, NB_ORIENTATIONS-1, FLAGS, "orientation" },
    { "vertical", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VERTICAL}, 0, 0, FLAGS, "orientation" },
    { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=HORIZONTAL}, 0, 0, FLAGS, "orientation" },
    { "gain", "set scale gain", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl = 1}, 0, 128, FLAGS },
    { "legend", "draw legend", OFFSET(legend), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
    { "rotation", "color rotation", OFFSET(rotation), AV_OPT_TYPE_FLOAT, {.dbl = 0}, -1, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showspectrumpic);

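/* Example usage (a sketch, assuming the filter is enabled in the build):
 *   ffmpeg -i in.flac -lavfi showspectrumpic=s=1024x512:legend=1 spectrum.png
 * writes a single spectrum picture with the time/frequency legend and the
 * color scale drawn around it. */
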
static void drawtext(AVFrame *pic, int x, int y, const char *txt, int o)
{
    const uint8_t *font;
    int font_height;
    int i;

    font = avpriv_cga_font, font_height = 8;
    for (i = 0; txt[i]; i++) {
        int char_y, mask;

        if (o) {
            for (char_y = font_height - 1; char_y >= 0; char_y--) {
                uint8_t *p = pic->data[0] + (y + i * 10) * pic->linesize[0] + x;
                for (mask = 0x80; mask; mask >>= 1) {
                    if (font[txt[i] * font_height + font_height - 1 - char_y] & mask)
                        p[char_y] = ~p[char_y];
                    p += pic->linesize[0];
                }
            }
        } else {
            uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8);
            for (char_y = 0; char_y < font_height; char_y++) {
                for (mask = 0x80; mask; mask >>= 1) {
                    if (font[txt[i] * font_height + char_y] & mask)
                        *p = ~(*p);
                    p++;
                }
                p += pic->linesize[0] - 8;
            }
        }
    }
}

static int showspectrumpic_request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ShowSpectrumContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret, samples;

    ret = ff_request_frame(inlink);
    samples = av_audio_fifo_size(s->fifo);
    if (ret == AVERROR_EOF && s->outpicref && samples > 0) {
        int consumed = 0;
        int y, x = 0, sz = s->orientation == VERTICAL ? s->w : s->h;
        int ch, spf, spb;
        AVFrame *fin;

        spf = s->win_size * (samples / ((s->win_size * sz) * ceil(samples / (float)(s->win_size * sz))));
        spf = FFMAX(1, spf);

        spb = (samples / (spf * sz)) * spf;

        fin = ff_get_audio_buffer(inlink, s->win_size);
        if (!fin)
            return AVERROR(ENOMEM);

        while (x < sz) {
            ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data, s->win_size);
            if (ret < 0) {
                av_frame_free(&fin);
                return ret;
            }

            av_audio_fifo_drain(s->fifo, spf);

            if (ret < s->win_size) {
                for (ch = 0; ch < s->nb_display_channels; ch++) {
                    memset(fin->extended_data[ch] + ret * sizeof(float), 0,
                           (s->win_size - ret) * sizeof(float));
                }
            }

            ctx->internal->execute(ctx, run_channel_fft, fin, NULL, s->nb_display_channels);
            acalc_magnitudes(s);

            consumed += spf;
            if (consumed >= spb) {
                int h = s->orientation == VERTICAL ? s->h : s->w;

                scale_magnitudes(s, 1. / (consumed / spf));
                plot_spectrum_column(inlink, fin);
                consumed = 0;
                x++;
                for (ch = 0; ch < s->nb_display_channels; ch++)
                    memset(s->magnitudes[ch], 0, h * sizeof(float));
            }
        }

        av_frame_free(&fin);

        s->outpicref->pts = 0;

        if (s->legend) {
            int multi = (s->mode == SEPARATE && s->color_mode == CHANNEL);
            float spp = samples / (float)sz;
            char *text;
            uint8_t *dst;
            char chlayout_str[128];

            av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), inlink->channels,
                                         inlink->channel_layout);
            text = av_asprintf("%d Hz | %s", inlink->sample_rate, chlayout_str);

            drawtext(s->outpicref, 2, outlink->h - 10, "CREATED BY LIBAVFILTER", 0);
            drawtext(s->outpicref, outlink->w - 2 - strlen(text) * 10, outlink->h - 10, text, 0);
            av_freep(&text);

            dst = s->outpicref->data[0] + (s->start_y - 1) * s->outpicref->linesize[0] + s->start_x - 1;
            for (x = 0; x < s->w + 1; x++)
                dst[x] = 200;
            dst = s->outpicref->data[0] + (s->start_y + s->h) * s->outpicref->linesize[0] + s->start_x - 1;
            for (x = 0; x < s->w + 1; x++)
                dst[x] = 200;
            for (y = 0; y < s->h + 2; y++) {
                dst = s->outpicref->data[0] + (y + s->start_y - 1) * s->outpicref->linesize[0];
                dst[s->start_x - 1] = 200;
                dst[s->start_x + s->w] = 200;
            }
            if (s->orientation == VERTICAL) {
                int h = s->mode == SEPARATE ? s->h / s->nb_display_channels : s->h;
                int hh = s->mode == SEPARATE ? -(s->h % s->nb_display_channels) + 1 : 1;
                for (ch = 0; ch < (s->mode == SEPARATE ? s->nb_display_channels : 1); ch++) {
                    for (y = 0; y < h; y += 20) {
                        dst = s->outpicref->data[0] + (s->start_y + h * (ch + 1) - y - hh) * s->outpicref->linesize[0];
                        dst[s->start_x - 2] = 200;
                        dst[s->start_x + s->w + 1] = 200;
                    }
                    for (y = 0; y < h; y += 40) {
                        dst = s->outpicref->data[0] + (s->start_y + h * (ch + 1) - y - hh) * s->outpicref->linesize[0];
                        dst[s->start_x - 3] = 200;
                        dst[s->start_x + s->w + 2] = 200;
                    }
                    dst = s->outpicref->data[0] + (s->start_y - 2) * s->outpicref->linesize[0] + s->start_x;
                    for (x = 0; x < s->w; x+=40)
                        dst[x] = 200;
                    dst = s->outpicref->data[0] + (s->start_y - 3) * s->outpicref->linesize[0] + s->start_x;
                    for (x = 0; x < s->w; x+=80)
                        dst[x] = 200;
                    dst = s->outpicref->data[0] + (s->h + s->start_y + 1) * s->outpicref->linesize[0] + s->start_x;
                    for (x = 0; x < s->w; x+=40) {
                        dst[x] = 200;
                    }
                    dst = s->outpicref->data[0] + (s->h + s->start_y + 2) * s->outpicref->linesize[0] + s->start_x;
                    for (x = 0; x < s->w; x+=80) {
                        dst[x] = 200;
                    }
                    for (y = 0; y < h; y += 40) {
                        float hertz = y * (inlink->sample_rate / 2) / (float)(1 << (int)ceil(log2(h)));
                        char *units;

                        if (hertz == 0)
                            units = av_asprintf("DC");
                        else
                            units = av_asprintf("%.2f", hertz);
                        if (!units)
                            return AVERROR(ENOMEM);

                        drawtext(s->outpicref, s->start_x - 8 * strlen(units) - 4, h * (ch + 1) + s->start_y - y - 4 - hh, units, 0);
                        av_free(units);
                    }
                }

                for (x = 0; x < s->w; x+=80) {
                    float seconds = x * spp / inlink->sample_rate;
                    char *units;

                    if (x == 0)
                        units = av_asprintf("0");
                    else if (log10(seconds) > 6)
                        units = av_asprintf("%.2fh", seconds / (60 * 60));
                    else if (log10(seconds) > 3)
                        units = av_asprintf("%.2fm", seconds / 60);
                    else
                        units = av_asprintf("%.2fs", seconds);
                    if (!units)
                        return AVERROR(ENOMEM);

                    drawtext(s->outpicref, s->start_x + x - 4 * strlen(units), s->h + s->start_y + 6, units, 0);
                    drawtext(s->outpicref, s->start_x + x - 4 * strlen(units), s->start_y - 12, units, 0);
                    av_free(units);
                }

                drawtext(s->outpicref, outlink->w / 2 - 4 * 4, outlink->h - s->start_y / 2, "TIME", 0);
                drawtext(s->outpicref, s->start_x / 7, outlink->h / 2 - 14 * 4, "FREQUENCY (Hz)", 1);
            } else {
                int w = s->mode == SEPARATE ? s->w / s->nb_display_channels : s->w;
                for (y = 0; y < s->h; y += 20) {
                    dst = s->outpicref->data[0] + (s->start_y + y) * s->outpicref->linesize[0];
                    dst[s->start_x - 2] = 200;
                    dst[s->start_x + s->w + 1] = 200;
                }
                for (y = 0; y < s->h; y += 40) {
                    dst = s->outpicref->data[0] + (s->start_y + y) * s->outpicref->linesize[0];
                    dst[s->start_x - 3] = 200;
                    dst[s->start_x + s->w + 2] = 200;
                }
                for (ch = 0; ch < (s->mode == SEPARATE ? s->nb_display_channels : 1); ch++) {
                    dst = s->outpicref->data[0] + (s->start_y - 2) * s->outpicref->linesize[0] + s->start_x + w * ch;
                    for (x = 0; x < w; x+=40)
                        dst[x] = 200;
                    dst = s->outpicref->data[0] + (s->start_y - 3) * s->outpicref->linesize[0] + s->start_x + w * ch;
                    for (x = 0; x < w; x+=80)
                        dst[x] = 200;
                    dst = s->outpicref->data[0] + (s->h + s->start_y + 1) * s->outpicref->linesize[0] + s->start_x + w * ch;
                    for (x = 0; x < w; x+=40) {
                        dst[x] = 200;
                    }
                    dst = s->outpicref->data[0] + (s->h + s->start_y + 2) * s->outpicref->linesize[0] + s->start_x + w * ch;
                    for (x = 0; x < w; x+=80) {
                        dst[x] = 200;
                    }
                    for (x = 0; x < w - 79; x += 80) {
                        float hertz = x * (inlink->sample_rate / 2) / (float)(1 << (int)ceil(log2(w)));
                        char *units;

                        if (hertz == 0)
                            units = av_asprintf("DC");
                        else
                            units = av_asprintf("%.2f", hertz);
                        if (!units)
                            return AVERROR(ENOMEM);

                        drawtext(s->outpicref, s->start_x - 4 * strlen(units) + x + w * ch, s->start_y - 12, units, 0);
                        drawtext(s->outpicref, s->start_x - 4 * strlen(units) + x + w * ch, s->h + s->start_y + 6, units, 0);
                        av_free(units);
                    }
                }
                for (y = 0; y < s->h; y+=40) {
                    float seconds = y * spp / inlink->sample_rate;
                    char *units;

                    if (x == 0)
                        units = av_asprintf("0");
                    else if (log10(seconds) > 6)
                        units = av_asprintf("%.2fh", seconds / (60 * 60));
                    else if (log10(seconds) > 3)
                        units = av_asprintf("%.2fm", seconds / 60);
                    else
                        units = av_asprintf("%.2fs", seconds);
                    if (!units)
                        return AVERROR(ENOMEM);

                    drawtext(s->outpicref, s->start_x - 8 * strlen(units) - 4, s->start_y + y - 4, units, 0);
                    av_free(units);
                }
                drawtext(s->outpicref, s->start_x / 7, outlink->h / 2 - 4 * 4, "TIME", 1);
                drawtext(s->outpicref, outlink->w / 2 - 14 * 4, outlink->h - s->start_y / 2, "FREQUENCY (Hz)", 0);
            }

            for (ch = 0; ch < (multi ? s->nb_display_channels : 1); ch++) {
                int h = multi ? s->h / s->nb_display_channels : s->h;

                for (y = 0; y < h; y++) {
                    float out[3] = { 0., 127.5, 127.5};
                    int chn;

                    for (chn = 0; chn < (s->mode == SEPARATE ? 1 : s->nb_display_channels); chn++) {
                        float yf, uf, vf;
                        int channel = (multi) ? s->nb_display_channels - ch - 1 : chn;
                        float lout[3];

                        color_range(s, channel, &yf, &uf, &vf);
                        pick_color(s, yf, uf, vf, y / (float)h, lout);
                        out[0] += lout[0];
                        out[1] += lout[1];
                        out[2] += lout[2];
                    }
                    memset(s->outpicref->data[0]+(s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[0] + s->w + s->start_x + 20, av_clip_uint8(out[0]), 10);
                    memset(s->outpicref->data[1]+(s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[1] + s->w + s->start_x + 20, av_clip_uint8(out[1]), 10);
                    memset(s->outpicref->data[2]+(s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[2] + s->w + s->start_x + 20, av_clip_uint8(out[2]), 10);
                }
                for (y = 0; ch == 0 && y < h; y += h / 10) {
                    float value = 120.0 * log10(1. - y / (float)h);
                    char *text;

                    if (value < -120)
                        break;
                    text = av_asprintf("%.0f dB", value);
                    if (!text)
                        continue;
                    drawtext(s->outpicref, s->w + s->start_x + 35, s->start_y + y - 5, text, 0);
                    av_free(text);
                }
            }
        }

        ret = ff_filter_frame(outlink, s->outpicref);
        s->outpicref = NULL;
    }

    return ret;
}

static int showspectrumpic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    ShowSpectrumContext *s = ctx->priv;
    int ret;

    ret = av_audio_fifo_write(s->fifo, (void **)insamples->extended_data, insamples->nb_samples);
    av_frame_free(&insamples);
    return ret;
}

static const AVFilterPad showspectrumpic_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = showspectrumpic_filter_frame,
    },
    { NULL }
};

static const AVFilterPad showspectrumpic_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = showspectrumpic_request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showspectrumpic = {
    .name          = "showspectrumpic",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output single picture."),
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowSpectrumContext),
    .inputs        = showspectrumpic_inputs,
    .outputs       = showspectrumpic_outputs,
    .priv_class    = &showspectrumpic_class,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
};
#endif // CONFIG_SHOWSPECTRUMPIC_FILTER