/*
 * Copyright (c) 2012-2013 Clément Bœsch
 * Copyright (c) 2013 Rudolf Polzer <divverent@xonotic.org>
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * audio to spectrum (video) transmedia filter, based on ffplay rdft showmode
 * (by Michael Niedermayer) and lavfi/avf_showwaves (by Stefano Sabatini).
 */

#include <math.h>

#include "libavcodec/avfft.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/xga_font_data.h"
#include "audio.h"
#include "video.h"
#include "avfilter.h"
#include "internal.h"
#include "window_func.h"

enum DisplayMode  { COMBINED, SEPARATE, NB_MODES };
enum DisplayScale { LINEAR, SQRT, CBRT, LOG, FOURTHRT, FIFTHRT, NB_SCALES };
enum ColorMode    { CHANNEL, INTENSITY, RAINBOW, MORELAND, NEBULAE, FIRE, FIERY, FRUIT, COOL, NB_CLMODES };
enum SlideMode    { REPLACE, SCROLL, FULLFRAME, RSCROLL, NB_SLIDES };
enum Orientation  { VERTICAL, HORIZONTAL, NB_ORIENTATIONS };

typedef struct {
    const AVClass *class;
    int w, h;
    AVFrame *outpicref;
    int nb_display_channels;
    int orientation;
    int channel_width;
    int channel_height;
    int sliding;                ///< sliding mode (see enum SlideMode)
    int mode;                   ///< channel display mode
    int color_mode;             ///< display color scheme
    int scale;
    float saturation;           ///< color saturation multiplier
    int xpos;                   ///< x position (current column)
    FFTContext *fft;            ///< Fast Fourier Transform context
    int fft_bits;               ///< number of bits (FFT window size = 1<<fft_bits)
    FFTComplex **fft_data;      ///< bins holder for each (displayed) channels
    float *window_func_lut;     ///< Window function LUT
    float **magnitudes;
    int win_func;
    int win_size;
    double win_scale;
    float overlap;
    float gain;
    int skip_samples;
    float *combine_buffer;      ///< color combining buffer (3 * h items)
    AVAudioFifo *fifo;
    int64_t pts;
    int single_pic;
    int legend;
    int start_x, start_y;
} ShowSpectrumContext;

#define OFFSET(x) offsetof(ShowSpectrumContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption showspectrum_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
    { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
    { "slide", "set sliding mode", OFFSET(sliding), AV_OPT_TYPE_INT, {.i64 = 0}, 0, NB_SLIDES-1, FLAGS, "slide" },
    { "replace", "replace old columns with new", 0, AV_OPT_TYPE_CONST, {.i64=REPLACE}, 0, 0, FLAGS, "slide" },
    { "scroll", "scroll from right to left", 0, AV_OPT_TYPE_CONST, {.i64=SCROLL}, 0, 0, FLAGS, "slide" },
    { "rscroll", "scroll from left to right", 0, AV_OPT_TYPE_CONST, {.i64=RSCROLL}, 0, 0, FLAGS, "slide" },
    { "fullframe", "return full frames", 0, AV_OPT_TYPE_CONST, {.i64=FULLFRAME}, 0, 0, FLAGS, "slide" },
    { "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, COMBINED, NB_MODES-1, FLAGS, "mode" },
    { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
    { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
    { "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=CHANNEL}, CHANNEL, NB_CLMODES-1, FLAGS, "color" },
    { "channel", "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL}, 0, 0, FLAGS, "color" },
    { "intensity", "intensity based coloring", 0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
    { "rainbow", "rainbow based coloring", 0, AV_OPT_TYPE_CONST, {.i64=RAINBOW}, 0, 0, FLAGS, "color" },
    { "moreland", "moreland based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MORELAND}, 0, 0, FLAGS, "color" },
    { "nebulae", "nebulae based coloring", 0, AV_OPT_TYPE_CONST, {.i64=NEBULAE}, 0, 0, FLAGS, "color" },
    { "fire", "fire based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIRE}, 0, 0, FLAGS, "color" },
    { "fiery", "fiery based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIERY}, 0, 0, FLAGS, "color" },
    { "fruit", "fruit based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FRUIT}, 0, 0, FLAGS, "color" },
    { "cool", "cool based coloring", 0, AV_OPT_TYPE_CONST, {.i64=COOL}, 0, 0, FLAGS, "color" },
    { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=SQRT}, LINEAR, NB_SCALES-1, FLAGS, "scale" },
    { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
    { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
    { "4thrt", "4th root", 0, AV_OPT_TYPE_CONST, {.i64=FOURTHRT}, 0, 0, FLAGS, "scale" },
    { "5thrt", "5th root", 0, AV_OPT_TYPE_CONST, {.i64=FIFTHRT}, 0, 0, FLAGS, "scale" },
    { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
    { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
    { "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
    { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = WFUNC_HANNING}, 0, NB_WFUNC-1, FLAGS, "win_func" },
    { "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, FLAGS, "win_func" },
    { "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
    { "hann", "Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
    { "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
    { "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, FLAGS, "win_func" },
    { "blackman", "Blackman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
    { "welch", "Welch", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH}, 0, 0, FLAGS, "win_func" },
    { "flattop", "Flat-top", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP}, 0, 0, FLAGS, "win_func" },
    { "bharris", "Blackman-Harris", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS}, 0, 0, FLAGS, "win_func" },
    { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
    { "bhann", "Bartlett-Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN}, 0, 0, FLAGS, "win_func" },
    { "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, FLAGS, "win_func" },
    { "nuttall", "Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL}, 0, 0, FLAGS, "win_func" },
    { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS}, 0, 0, FLAGS, "win_func" },
    { "gauss", "Gauss", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS}, 0, 0, FLAGS, "win_func" },
    { "tukey", "Tukey", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY}, 0, 0, FLAGS, "win_func" },
    { "orientation", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=VERTICAL}, 0, NB_ORIENTATIONS-1, FLAGS, "orientation" },
    { "vertical", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VERTICAL}, 0, 0, FLAGS, "orientation" },
    { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=HORIZONTAL}, 0, 0, FLAGS, "orientation" },
    { "overlap", "set window overlap", OFFSET(overlap), AV_OPT_TYPE_FLOAT, {.dbl = 0}, 0, 1, FLAGS },
    { "gain", "set scale gain", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl = 1}, 0, 128, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showspectrum);
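
/* gradients for the non-"channel" color modes: each stop maps a normalized
 * intensity a in [0,1] to a YUV triple, and pick_color() interpolates
 * linearly between neighbouring stops */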
static const struct ColorTable {
    float a, y, u, v;
} color_table[][8] = {
    [INTENSITY] = {
        { 0, 0, 0, 0 },
        { 0.13, .03587126228984074, .1573300977624594, -.02548747583751842 },
        { 0.30, .18572281794568020, .1772436246393981, .17475554840414750 },
        { 0.60, .28184980583656130, -.1593064119945782, .47132074554608920 },
        { 0.73, .65830621175547810, -.3716070802232764, .24352759331252930 },
        { 0.78, .76318535758242900, -.4307467689263783, .16866496622310430 },
        { 0.91, .95336363636363640, -.2045454545454546, .03313636363636363 },
        { 1, 1, 0, 0 }},
    [RAINBOW] = {
        { 0, 0, 0, 0 },
        { 0.13, 44/256., (189-128)/256., (138-128)/256. },
        { 0.25, 29/256., (186-128)/256., (119-128)/256. },
        { 0.38, 119/256., (194-128)/256., (53-128)/256. },
        { 0.60, 111/256., (73-128)/256., (59-128)/256. },
        { 0.73, 205/256., (19-128)/256., (149-128)/256. },
        { 0.86, 135/256., (83-128)/256., (200-128)/256. },
        { 1, 73/256., (95-128)/256., (225-128)/256. }},
    [MORELAND] = {
        { 0, 44/256., (181-128)/256., (112-128)/256. },
        { 0.13, 126/256., (177-128)/256., (106-128)/256. },
        { 0.25, 164/256., (163-128)/256., (109-128)/256. },
        { 0.38, 200/256., (140-128)/256., (120-128)/256. },
        { 0.60, 201/256., (117-128)/256., (141-128)/256. },
        { 0.73, 177/256., (103-128)/256., (165-128)/256. },
        { 0.86, 136/256., (100-128)/256., (183-128)/256. },
        { 1, 68/256., (117-128)/256., (203-128)/256. }},
    [NEBULAE] = {
        { 0, 10/256., (134-128)/256., (132-128)/256. },
        { 0.23, 21/256., (137-128)/256., (130-128)/256. },
        { 0.45, 35/256., (134-128)/256., (134-128)/256. },
        { 0.57, 51/256., (130-128)/256., (139-128)/256. },
        { 0.67, 104/256., (116-128)/256., (162-128)/256. },
        { 0.77, 120/256., (105-128)/256., (188-128)/256. },
        { 0.87, 140/256., (105-128)/256., (188-128)/256. },
        { 1, 1, 0, 0 }},
    [FIRE] = {
        { 0, 0, 0, 0 },
        { 0.23, 44/256., (132-128)/256., (127-128)/256. },
        { 0.45, 62/256., (116-128)/256., (140-128)/256. },
        { 0.57, 75/256., (105-128)/256., (152-128)/256. },
        { 0.67, 95/256., (91-128)/256., (166-128)/256. },
        { 0.77, 126/256., (74-128)/256., (172-128)/256. },
        { 0.87, 164/256., (73-128)/256., (162-128)/256. },
        { 1, 1, 0, 0 }},
    [FIERY] = {
        { 0, 0, 0, 0 },
        { 0.23, 36/256., (116-128)/256., (163-128)/256. },
        { 0.45, 52/256., (102-128)/256., (200-128)/256. },
        { 0.57, 116/256., (84-128)/256., (196-128)/256. },
        { 0.67, 157/256., (67-128)/256., (181-128)/256. },
        { 0.77, 193/256., (40-128)/256., (155-128)/256. },
        { 0.87, 221/256., (101-128)/256., (134-128)/256. },
        { 1, 1, 0, 0 }},
    [FRUIT] = {
        { 0, 0, 0, 0 },
        { 0.20, 29/256., (136-128)/256., (119-128)/256. },
        { 0.30, 60/256., (119-128)/256., (90-128)/256. },
        { 0.40, 85/256., (91-128)/256., (85-128)/256. },
        { 0.50, 116/256., (70-128)/256., (105-128)/256. },
        { 0.60, 151/256., (50-128)/256., (146-128)/256. },
        { 0.70, 191/256., (63-128)/256., (178-128)/256. },
        { 1, 98/256., (80-128)/256., (221-128)/256. }},
    [COOL] = {
        { 0, 0, 0, 0 },
        { .15, 0, .5, -.5 },
        { 1, 1, -.5, .5 }},
};
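
/* free the FFT context and all per-channel data, LUT, FIFO and picture buffers */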
static av_cold void uninit(AVFilterContext *ctx)
{
    ShowSpectrumContext *s = ctx->priv;
    int i;

    av_freep(&s->combine_buffer);
    av_fft_end(s->fft);
    if (s->fft_data) {
        for (i = 0; i < s->nb_display_channels; i++)
            av_freep(&s->fft_data[i]);
    }
    av_freep(&s->fft_data);
    av_freep(&s->window_func_lut);
    if (s->magnitudes) {
        for (i = 0; i < s->nb_display_channels; i++)
            av_freep(&s->magnitudes[i]);
    }
    av_freep(&s->magnitudes);
    av_frame_free(&s->outpicref);
    av_audio_fifo_free(s->fifo);
}
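
/* negotiate planar float audio on the input and 8-bit YUV444 video on the output */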
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_NONE };
    int ret;

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
        return ret;

    layouts = ff_all_channel_layouts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
        return ret;

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
        return ret;

    return 0;
}
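
/* configure the output video link: derive the FFT window size from the
 * requested frame size, (re)allocate the FFT, magnitude, window LUT and
 * output picture buffers, and set the output frame rate */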
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowSpectrumContext *s = ctx->priv;
    int i, fft_bits, h, w;
    float overlap;

    if (!strcmp(ctx->filter->name, "showspectrumpic"))
        s->single_pic = 1;

    outlink->w = s->w;
    outlink->h = s->h;

    if (s->legend) {
        s->start_x = log10(inlink->sample_rate) * 25;
        s->start_y = 64;
        outlink->w += s->start_x * 2;
        outlink->h += s->start_y * 2;
    }

    h = (s->mode == COMBINED || s->orientation == HORIZONTAL) ? s->h : s->h / inlink->channels;
    w = (s->mode == COMBINED || s->orientation == VERTICAL) ? s->w : s->w / inlink->channels;
    s->channel_height = h;
    s->channel_width = w;

    if (s->orientation == VERTICAL) {
        /* FFT window size (precision) according to the requested output frame height */
        for (fft_bits = 1; 1 << fft_bits < 2 * h; fft_bits++);
    } else {
        /* FFT window size (precision) according to the requested output frame width */
        for (fft_bits = 1; 1 << fft_bits < 2 * w; fft_bits++);
    }
    s->win_size = 1 << fft_bits;

    /* (re-)configuration if the video output changed (or first init) */
    if (fft_bits != s->fft_bits) {
        AVFrame *outpicref;

        av_fft_end(s->fft);
        s->fft = av_fft_init(fft_bits, 0);
        if (!s->fft) {
            av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
                   "The window size might be too high.\n");
            return AVERROR(EINVAL);
        }
        s->fft_bits = fft_bits;

        /* FFT buffers: x2 for each (display) channel buffer.
         * Note: we use free and malloc instead of a realloc-like function to
         * make sure the buffer is aligned in memory for the FFT functions. */
        for (i = 0; i < s->nb_display_channels; i++)
            av_freep(&s->fft_data[i]);
        av_freep(&s->fft_data);
        s->nb_display_channels = inlink->channels;

        s->magnitudes = av_calloc(s->nb_display_channels, sizeof(*s->magnitudes));
        if (!s->magnitudes)
            return AVERROR(ENOMEM);
        for (i = 0; i < s->nb_display_channels; i++) {
            s->magnitudes[i] = av_calloc(s->orientation == VERTICAL ? s->h : s->w, sizeof(**s->magnitudes));
            if (!s->magnitudes[i])
                return AVERROR(ENOMEM);
        }

        s->fft_data = av_calloc(s->nb_display_channels, sizeof(*s->fft_data));
        if (!s->fft_data)
            return AVERROR(ENOMEM);
        for (i = 0; i < s->nb_display_channels; i++) {
            s->fft_data[i] = av_calloc(s->win_size, sizeof(**s->fft_data));
            if (!s->fft_data[i])
                return AVERROR(ENOMEM);
        }

        /* pre-calc windowing function */
        s->window_func_lut =
            av_realloc_f(s->window_func_lut, s->win_size,
                         sizeof(*s->window_func_lut));
        if (!s->window_func_lut)
            return AVERROR(ENOMEM);
        ff_generate_window_func(s->window_func_lut, s->win_size, s->win_func, &overlap);
        if (s->overlap == 1)
            s->overlap = overlap;
        s->skip_samples = (1. - s->overlap) * s->win_size;
        if (s->skip_samples < 1) {
            av_log(ctx, AV_LOG_ERROR, "overlap %f too big\n", s->overlap);
            return AVERROR(EINVAL);
        }

        for (s->win_scale = 0, i = 0; i < s->win_size; i++) {
            s->win_scale += s->window_func_lut[i] * s->window_func_lut[i];
        }
        s->win_scale = 1. / sqrt(s->win_scale);

        /* prepare the initial picref buffer (black frame) */
        av_frame_free(&s->outpicref);
        s->outpicref = outpicref =
            ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!outpicref)
            return AVERROR(ENOMEM);
        outlink->sample_aspect_ratio = (AVRational){1,1};
        for (i = 0; i < outlink->h; i++) {
            memset(outpicref->data[0] + i * outpicref->linesize[0], 0, outlink->w);
            memset(outpicref->data[1] + i * outpicref->linesize[1], 128, outlink->w);
            memset(outpicref->data[2] + i * outpicref->linesize[2], 128, outlink->w);
        }
        av_frame_set_color_range(outpicref, AVCOL_RANGE_JPEG);
    }

    if ((s->orientation == VERTICAL && s->xpos >= s->w) ||
        (s->orientation == HORIZONTAL && s->xpos >= s->h))
        s->xpos = 0;

    outlink->frame_rate = av_make_q(inlink->sample_rate, s->win_size * (1.-s->overlap));
    if (s->orientation == VERTICAL && s->sliding == FULLFRAME)
        outlink->frame_rate.den *= s->w;
    if (s->orientation == HORIZONTAL && s->sliding == FULLFRAME)
        outlink->frame_rate.den *= s->h;

    if (s->orientation == VERTICAL) {
        s->combine_buffer =
            av_realloc_f(s->combine_buffer, s->h * 3,
                         sizeof(*s->combine_buffer));
    } else {
        s->combine_buffer =
            av_realloc_f(s->combine_buffer, s->w * 3,
                         sizeof(*s->combine_buffer));
    }

    av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d FFT window size:%d\n",
           s->w, s->h, s->win_size);

    av_audio_fifo_free(s->fifo);
    s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->win_size);
    if (!s->fifo)
        return AVERROR(ENOMEM);
    return 0;
}
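
/* multiply one window of input samples by the window function and run the
 * in-place complex FFT on each displayed channel */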
static void run_fft(ShowSpectrumContext *s, AVFrame *fin)
{
    int ch, n;

    /* fill FFT input with the number of samples available */
    for (ch = 0; ch < s->nb_display_channels; ch++) {
        const float *p = (float *)fin->extended_data[ch];

        for (n = 0; n < s->win_size; n++) {
            s->fft_data[ch][n].re = p[n] * s->window_func_lut[n];
            s->fft_data[ch][n].im = 0;
        }
    }

    /* run FFT on each samples set */
    for (ch = 0; ch < s->nb_display_channels; ch++) {
        av_fft_permute(s->fft, s->fft_data[ch]);
        av_fft_calc(s->fft, s->fft_data[ch]);
    }
}
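
/* RE/IM/MAGNITUDE access the FFT output bins; calc_magnitudes() stores |X[y]|
 * per displayed bin, acalc_magnitudes() accumulates it over several windows,
 * and scale_magnitudes() rescales the accumulated values */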
#define RE(y, ch) s->fft_data[ch][y].re
#define IM(y, ch) s->fft_data[ch][y].im
#define MAGNITUDE(y, ch) hypot(RE(y, ch), IM(y, ch))

static void calc_magnitudes(ShowSpectrumContext *s)
{
    int ch, y, h = s->orientation == VERTICAL ? s->h : s->w;

    for (ch = 0; ch < s->nb_display_channels; ch++) {
        float *magnitudes = s->magnitudes[ch];

        for (y = 0; y < h; y++)
            magnitudes[y] = MAGNITUDE(y, ch);
    }
}

static void acalc_magnitudes(ShowSpectrumContext *s)
{
    int ch, y, h = s->orientation == VERTICAL ? s->h : s->w;

    for (ch = 0; ch < s->nb_display_channels; ch++) {
        float *magnitudes = s->magnitudes[ch];

        for (y = 0; y < h; y++)
            magnitudes[y] += MAGNITUDE(y, ch);
    }
}

static void scale_magnitudes(ShowSpectrumContext *s, float scale)
{
    int ch, y, h = s->orientation == VERTICAL ? s->h : s->w;

    for (ch = 0; ch < s->nb_display_channels; ch++) {
        float *magnitudes = s->magnitudes[ch];

        for (y = 0; y < h; y++)
            magnitudes[y] *= scale;
    }
}
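
/* compute the Y/U/V scale factors for one channel from the display mode
 * (shared vs. full range), the color mode and the saturation option */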
static void color_range(ShowSpectrumContext *s, int ch,
                        float *yf, float *uf, float *vf)
{
    switch (s->mode) {
    case COMBINED:
        // reduce range by channel count
        *yf = 256.0f / s->nb_display_channels;
        switch (s->color_mode) {
        case RAINBOW:
        case MORELAND:
        case NEBULAE:
        case FIRE:
        case FIERY:
        case FRUIT:
        case COOL:
        case INTENSITY:
            *uf = *yf;
            *vf = *yf;
            break;
        case CHANNEL:
            /* adjust saturation for mixed UV coloring */
            /* this factor is correct for infinite channels, an approximation otherwise */
            *uf = *yf * M_PI;
            *vf = *yf * M_PI;
            break;
        default:
            av_assert0(0);
        }
        break;
    case SEPARATE:
        // full range
        *yf = 256.0f;
        *uf = 256.0f;
        *vf = 256.0f;
        break;
    default:
        av_assert0(0);
    }

    if (s->color_mode == CHANNEL) {
        if (s->nb_display_channels > 1) {
            *uf *= 0.5 * sin((2 * M_PI * ch) / s->nb_display_channels);
            *vf *= 0.5 * cos((2 * M_PI * ch) / s->nb_display_channels);
        } else {
            *uf = 0.0f;
            *vf = 0.0f;
        }
    }
    *uf *= s->saturation;
    *vf *= s->saturation;
}
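
/* map a normalized magnitude a to a YUV contribution and accumulate it into
 * out[]: gradient modes interpolate between color_table stops, while CHANNEL
 * coloring scales the channel's base color linearly with a */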
static void pick_color(ShowSpectrumContext *s,
                       float yf, float uf, float vf,
                       float a, float *out)
{
    if (s->color_mode > CHANNEL) {
        const int cm = s->color_mode;
        float y, u, v;
        int i;

        for (i = 1; i < FF_ARRAY_ELEMS(color_table[cm]) - 1; i++)
            if (color_table[cm][i].a >= a)
                break;
        // i now is the first item >= the color
        // now we know to interpolate between item i - 1 and i
        if (a <= color_table[cm][i - 1].a) {
            y = color_table[cm][i - 1].y;
            u = color_table[cm][i - 1].u;
            v = color_table[cm][i - 1].v;
        } else if (a >= color_table[cm][i].a) {
            y = color_table[cm][i].y;
            u = color_table[cm][i].u;
            v = color_table[cm][i].v;
        } else {
            float start = color_table[cm][i - 1].a;
            float end = color_table[cm][i].a;
            float lerpfrac = (a - start) / (end - start);
            y = color_table[cm][i - 1].y * (1.0f - lerpfrac)
              + color_table[cm][i].y * lerpfrac;
            u = color_table[cm][i - 1].u * (1.0f - lerpfrac)
              + color_table[cm][i].u * lerpfrac;
            v = color_table[cm][i - 1].v * (1.0f - lerpfrac)
              + color_table[cm][i].v * lerpfrac;
        }

        out[0] += y * yf;
        out[1] += u * uf;
        out[2] += v * vf;
    } else {
        out[0] += a * yf;
        out[1] += a * uf;
        out[2] += a * vf;
    }
}
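
/* reset the float combine buffer to black (Y = 0, U = V = neutral 127.5) */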
static void clear_combine_buffer(ShowSpectrumContext *s, int size)
{
    int y;

    for (y = 0; y < size; y++) {
        s->combine_buffer[3 * y    ] = 0;
        s->combine_buffer[3 * y + 1] = 127.5;
        s->combine_buffer[3 * y + 2] = 127.5;
    }
}
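
/* turn the per-channel magnitudes into one column (row, when horizontal) of
 * the output picture, honouring the selected scale, color mode and sliding
 * mode; in streaming mode a video frame is pushed downstream from here */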
static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples)
{
    int ret;
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowSpectrumContext *s = ctx->priv;
    AVFrame *outpicref = s->outpicref;
    const double w = s->win_scale;
    const float g = s->gain;
    int h = s->orientation == VERTICAL ? s->channel_height : s->channel_width;
    int ch, plane, x, y;

    /* fill a new spectrum column */
    /* initialize buffer for combining to black */
    clear_combine_buffer(s, s->orientation == VERTICAL ? s->h : s->w);

    for (ch = 0; ch < s->nb_display_channels; ch++) {
        float *magnitudes = s->magnitudes[ch];
        float yf, uf, vf;

        /* decide color range */
        color_range(s, ch, &yf, &uf, &vf);

        /* draw the channel */
        for (y = 0; y < h; y++) {
            int row = (s->mode == COMBINED) ? y : ch * h + y;
            float *out = &s->combine_buffer[3 * row];

            /* get magnitude */
            float a = g * w * magnitudes[y];

            /* apply scale */
            switch (s->scale) {
            case LINEAR:
                a = av_clipf(a, 0, 1);
                break;
            case SQRT:
                a = av_clipf(sqrt(a), 0, 1);
                break;
            case CBRT:
                a = av_clipf(cbrt(a), 0, 1);
                break;
            case FOURTHRT:
                a = av_clipf(sqrt(sqrt(a)), 0, 1);
                break;
            case FIFTHRT:
                a = av_clipf(pow(a, 0.20), 0, 1);
                break;
            case LOG:
                a = 1 + log10(av_clipd(a * w, 1e-6, 1)) / 6; // zero = -120dBFS
                break;
            default:
                av_assert0(0);
            }

            pick_color(s, yf, uf, vf, a, out);
        }
    }

    av_frame_make_writable(s->outpicref);
    /* copy to output */
    if (s->orientation == VERTICAL) {
        if (s->sliding == SCROLL) {
            for (plane = 0; plane < 3; plane++) {
                for (y = 0; y < s->h; y++) {
                    uint8_t *p = outpicref->data[plane] +
                                 y * outpicref->linesize[plane];
                    memmove(p, p + 1, s->w - 1);
                }
            }
            s->xpos = s->w - 1;
        } else if (s->sliding == RSCROLL) {
            for (plane = 0; plane < 3; plane++) {
                for (y = 0; y < s->h; y++) {
                    uint8_t *p = outpicref->data[plane] +
                                 y * outpicref->linesize[plane];
                    memmove(p + 1, p, s->w - 1);
                }
            }
            s->xpos = 0;
        }
        for (plane = 0; plane < 3; plane++) {
            uint8_t *p = outpicref->data[plane] + s->start_x +
                         (outlink->h - 1 - s->start_y) * outpicref->linesize[plane] +
                         s->xpos;
            for (y = 0; y < s->h; y++) {
                *p = lrintf(av_clipf(s->combine_buffer[3 * y + plane], 0, 255));
                p -= outpicref->linesize[plane];
            }
        }
    } else {
        if (s->sliding == SCROLL) {
            for (plane = 0; plane < 3; plane++) {
                for (y = 1; y < s->h; y++) {
                    memmove(outpicref->data[plane] + (y-1) * outpicref->linesize[plane],
                            outpicref->data[plane] + (y  ) * outpicref->linesize[plane],
                            s->w);
                }
            }
            s->xpos = s->h - 1;
        } else if (s->sliding == RSCROLL) {
            for (plane = 0; plane < 3; plane++) {
                for (y = s->h - 1; y >= 1; y--) {
                    memmove(outpicref->data[plane] + (y  ) * outpicref->linesize[plane],
                            outpicref->data[plane] + (y-1) * outpicref->linesize[plane],
                            s->w);
                }
            }
            s->xpos = 0;
        }
        for (plane = 0; plane < 3; plane++) {
            uint8_t *p = outpicref->data[plane] + s->start_x +
                         (s->xpos + s->start_y) * outpicref->linesize[plane];
            for (x = 0; x < s->w; x++) {
                *p = lrintf(av_clipf(s->combine_buffer[3 * x + plane], 0, 255));
                p++;
            }
        }
    }

    if (s->sliding != FULLFRAME || s->xpos == 0)
        outpicref->pts = insamples->pts;

    s->xpos++;
    if (s->orientation == VERTICAL && s->xpos >= s->w)
        s->xpos = 0;
    if (s->orientation == HORIZONTAL && s->xpos >= s->h)
        s->xpos = 0;
    if (!s->single_pic && (s->sliding != FULLFRAME || s->xpos == 0)) {
        ret = ff_filter_frame(outlink, av_frame_clone(s->outpicref));
        if (ret < 0)
            return ret;
    }

    return s->win_size;
}

#if CONFIG_SHOWSPECTRUM_FILTER
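
/* at EOF in fullframe sliding mode, blank the not-yet-drawn part of the
 * current picture and push it downstream */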
static int request_frame(AVFilterLink *outlink)
{
    ShowSpectrumContext *s = outlink->src->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    unsigned i;
    int ret;

    ret = ff_request_frame(inlink);
    if (ret == AVERROR_EOF && s->sliding == FULLFRAME && s->xpos > 0 &&
        s->outpicref) {
        if (s->orientation == VERTICAL) {
            for (i = 0; i < outlink->h; i++) {
                memset(s->outpicref->data[0] + i * s->outpicref->linesize[0] + s->xpos, 0, outlink->w - s->xpos);
                memset(s->outpicref->data[1] + i * s->outpicref->linesize[1] + s->xpos, 128, outlink->w - s->xpos);
                memset(s->outpicref->data[2] + i * s->outpicref->linesize[2] + s->xpos, 128, outlink->w - s->xpos);
            }
        } else {
            for (i = s->xpos; i < outlink->h; i++) {
                memset(s->outpicref->data[0] + i * s->outpicref->linesize[0], 0, outlink->w);
                memset(s->outpicref->data[1] + i * s->outpicref->linesize[1], 128, outlink->w);
                memset(s->outpicref->data[2] + i * s->outpicref->linesize[2], 128, outlink->w);
            }
        }
        ret = ff_filter_frame(outlink, s->outpicref);
        s->outpicref = NULL;
    }

    return ret;
}
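
/* queue incoming audio in the FIFO; for every full window, run the FFT,
 * compute the magnitudes and plot one spectrum column, then advance by
 * skip_samples (i.e. by win_size minus the configured overlap) */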
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    ShowSpectrumContext *s = ctx->priv;
    AVFrame *fin = NULL;
    int ret = 0;

    av_audio_fifo_write(s->fifo, (void **)insamples->extended_data, insamples->nb_samples);
    av_frame_free(&insamples);
    while (av_audio_fifo_size(s->fifo) >= s->win_size) {
        fin = ff_get_audio_buffer(inlink, s->win_size);
        if (!fin) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        fin->pts = s->pts;
        s->pts += s->skip_samples;
        ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data, s->win_size);
        if (ret < 0)
            goto fail;

        av_assert0(fin->nb_samples == s->win_size);

        run_fft(s, fin);
        calc_magnitudes(s);

        ret = plot_spectrum_column(inlink, fin);
        av_frame_free(&fin);
        av_audio_fifo_drain(s->fifo, s->skip_samples);
        if (ret < 0)
            goto fail;
    }

fail:
    av_frame_free(&fin);
    return ret;
}

static const AVFilterPad showspectrum_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad showspectrum_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showspectrum = {
    .name          = "showspectrum",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output."),
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowSpectrumContext),
    .inputs        = showspectrum_inputs,
    .outputs       = showspectrum_outputs,
    .priv_class    = &showspectrum_class,
};

#endif // CONFIG_SHOWSPECTRUM_FILTER

#if CONFIG_SHOWSPECTRUMPIC_FILTER

static const AVOption showspectrumpic_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "4096x2048"}, 0, 0, FLAGS },
    { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "4096x2048"}, 0, 0, FLAGS },
    { "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, 0, NB_MODES-1, FLAGS, "mode" },
    { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
    { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
    { "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=INTENSITY}, 0, NB_CLMODES-1, FLAGS, "color" },
    { "channel", "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL}, 0, 0, FLAGS, "color" },
    { "intensity", "intensity based coloring", 0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
    { "rainbow", "rainbow based coloring", 0, AV_OPT_TYPE_CONST, {.i64=RAINBOW}, 0, 0, FLAGS, "color" },
    { "moreland", "moreland based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MORELAND}, 0, 0, FLAGS, "color" },
    { "nebulae", "nebulae based coloring", 0, AV_OPT_TYPE_CONST, {.i64=NEBULAE}, 0, 0, FLAGS, "color" },
    { "fire", "fire based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIRE}, 0, 0, FLAGS, "color" },
    { "fiery", "fiery based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIERY}, 0, 0, FLAGS, "color" },
    { "fruit", "fruit based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FRUIT}, 0, 0, FLAGS, "color" },
    { "cool", "cool based coloring", 0, AV_OPT_TYPE_CONST, {.i64=COOL}, 0, 0, FLAGS, "color" },
    { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=LOG}, 0, NB_SCALES-1, FLAGS, "scale" },
    { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
    { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
    { "4thrt", "4th root", 0, AV_OPT_TYPE_CONST, {.i64=FOURTHRT}, 0, 0, FLAGS, "scale" },
    { "5thrt", "5th root", 0, AV_OPT_TYPE_CONST, {.i64=FIFTHRT}, 0, 0, FLAGS, "scale" },
    { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
    { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
    { "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
    { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = WFUNC_HANNING}, 0, NB_WFUNC-1, FLAGS, "win_func" },
    { "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, FLAGS, "win_func" },
    { "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
    { "hann", "Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
    { "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
    { "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, FLAGS, "win_func" },
    { "blackman", "Blackman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
    { "welch", "Welch", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH}, 0, 0, FLAGS, "win_func" },
    { "flattop", "Flat-top", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP}, 0, 0, FLAGS, "win_func" },
    { "bharris", "Blackman-Harris", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS}, 0, 0, FLAGS, "win_func" },
    { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
    { "bhann", "Bartlett-Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN}, 0, 0, FLAGS, "win_func" },
    { "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, FLAGS, "win_func" },
    { "nuttall", "Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL}, 0, 0, FLAGS, "win_func" },
    { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS}, 0, 0, FLAGS, "win_func" },
    { "gauss", "Gauss", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS}, 0, 0, FLAGS, "win_func" },
    { "tukey", "Tukey", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY}, 0, 0, FLAGS, "win_func" },
    { "orientation", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=VERTICAL}, 0, NB_ORIENTATIONS-1, FLAGS, "orientation" },
    { "vertical", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VERTICAL}, 0, 0, FLAGS, "orientation" },
    { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=HORIZONTAL}, 0, 0, FLAGS, "orientation" },
    { "gain", "set scale gain", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl = 1}, 0, 128, FLAGS },
    { "legend", "draw legend", OFFSET(legend), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showspectrumpic);
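
/* draw txt with the 8x8 CGA bitmap font by inverting luma pixels where a font
 * bit is set; a non-zero o renders the string rotated (used for the vertical
 * axis label) */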
static void drawtext(AVFrame *pic, int x, int y, const char *txt, int o)
{
    const uint8_t *font;
    int font_height;
    int i;

    font = avpriv_cga_font, font_height = 8;

    for (i = 0; txt[i]; i++) {
        int char_y, mask;

        if (o) {
            for (char_y = font_height - 1; char_y >= 0; char_y--) {
                uint8_t *p = pic->data[0] + (y + i * 10) * pic->linesize[0] + x;
                for (mask = 0x80; mask; mask >>= 1) {
                    if (font[txt[i] * font_height + font_height - 1 - char_y] & mask)
                        p[char_y] = ~p[char_y];
                    p += pic->linesize[0];
                }
            }
        } else {
            uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8);
            for (char_y = 0; char_y < font_height; char_y++) {
                for (mask = 0x80; mask; mask >>= 1) {
                    if (font[txt[i] * font_height + char_y] & mask)
                        *p = ~(*p);
                    p++;
                }
                p += pic->linesize[0] - 8;
            }
        }
    }
}
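
/* single-picture variant: at EOF, drain the FIFO and average the magnitudes
 * of several FFT windows per output column so the whole stream fits into one
 * frame, optionally draw the legend (border, ticks, axis labels and color
 * scale), then emit the picture */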
static int showspectrumpic_request_frame(AVFilterLink *outlink)
{
    ShowSpectrumContext *s = outlink->src->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    int ret;

    ret = ff_request_frame(inlink);
    if (ret == AVERROR_EOF && s->outpicref) {
        int samples = av_audio_fifo_size(s->fifo);
        int consumed = 0;
        int y, x = 0, sz = s->orientation == VERTICAL ? s->w : s->h;
        int ch, spf, spb;
        AVFrame *fin;

        spf = s->win_size * (samples / ((s->win_size * sz) * ceil(samples / (float)(s->win_size * sz))));
        spb = (samples / (spf * sz)) * spf;

        fin = ff_get_audio_buffer(inlink, s->win_size);
        if (!fin)
            return AVERROR(ENOMEM);

        while (x < sz) {
            ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data, s->win_size);
            if (ret < 0) {
                av_frame_free(&fin);
                return ret;
            }

            av_audio_fifo_drain(s->fifo, spf);

            if (ret < s->win_size) {
                for (ch = 0; ch < s->nb_display_channels; ch++) {
                    memset(fin->extended_data[ch] + ret * sizeof(float), 0,
                           (s->win_size - ret) * sizeof(float));
                }
            }

            run_fft(s, fin);
            acalc_magnitudes(s);

            consumed += spf;
            if (consumed >= spb) {
                int h = s->orientation == VERTICAL ? s->h : s->w;

                scale_magnitudes(s, 1. / (consumed / spf));
                plot_spectrum_column(inlink, fin);
                consumed = 0;
                x++;
                for (ch = 0; ch < s->nb_display_channels; ch++)
                    memset(s->magnitudes[ch], 0, h * sizeof(float));
            }
        }

        av_frame_free(&fin);

        s->outpicref->pts = 0;

        if (s->legend) {
            int multi = (s->mode == SEPARATE && s->color_mode == CHANNEL);
            float spp = samples / (float)sz;
            uint8_t *dst;

            drawtext(s->outpicref, 2, outlink->h - 10, "CREATED BY LIBAVFILTER", 0);

            dst = s->outpicref->data[0] + (s->start_y - 1) * s->outpicref->linesize[0] + s->start_x - 1;
            for (x = 0; x < s->w + 1; x++)
                dst[x] = 200;
            dst = s->outpicref->data[0] + (s->start_y + s->h) * s->outpicref->linesize[0] + s->start_x - 1;
            for (x = 0; x < s->w + 1; x++)
                dst[x] = 200;
            for (y = 0; y < s->h + 2; y++) {
                dst = s->outpicref->data[0] + (y + s->start_y - 1) * s->outpicref->linesize[0];
                dst[s->start_x - 1] = 200;
                dst[s->start_x + s->w] = 200;
            }
            if (s->orientation == VERTICAL) {
                int h = s->mode == SEPARATE ? s->h / s->nb_display_channels : s->h;
                for (ch = 0; ch < (s->mode == SEPARATE ? s->nb_display_channels : 1); ch++) {
                    for (y = 0; y < h; y += 20) {
                        dst = s->outpicref->data[0] + (s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[0];
                        dst[s->start_x - 2] = 200;
                        dst[s->start_x + s->w + 1] = 200;
                    }
                    for (y = 0; y < h; y += 40) {
                        dst = s->outpicref->data[0] + (s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[0];
                        dst[s->start_x - 3] = 200;
                        dst[s->start_x + s->w + 2] = 200;
                    }
                    dst = s->outpicref->data[0] + (s->start_y - 2) * s->outpicref->linesize[0] + s->start_x;
                    for (x = 0; x < s->w; x+=40)
                        dst[x] = 200;
                    dst = s->outpicref->data[0] + (s->start_y - 3) * s->outpicref->linesize[0] + s->start_x;
                    for (x = 0; x < s->w; x+=80)
                        dst[x] = 200;
                    dst = s->outpicref->data[0] + (s->h + s->start_y + 1) * s->outpicref->linesize[0] + s->start_x;
                    for (x = 0; x < s->w; x+=40) {
                        dst[x] = 200;
                    }
                    dst = s->outpicref->data[0] + (s->h + s->start_y + 2) * s->outpicref->linesize[0] + s->start_x;
                    for (x = 0; x < s->w; x+=80) {
                        dst[x] = 200;
                    }
                    for (y = 0; y < h; y += 40) {
                        float hz = y * (inlink->sample_rate / 2) / (float)(1 << (int)ceil(log2(h)));
                        char *units;

                        if (hz == 0)
                            units = av_asprintf("DC");
                        else
                            units = av_asprintf("%.2f", hz);
                        if (!units)
                            return AVERROR(ENOMEM);

                        drawtext(s->outpicref, s->start_x - 8 * strlen(units) - 4, h * (ch + 1) + s->start_y - y - 4, units, 0);
                        av_free(units);
                    }
                }

                for (x = 0; x < s->w; x+=80) {
                    float seconds = x * spp / inlink->sample_rate;
                    char *units;

                    if (x == 0)
                        units = av_asprintf("0");
                    else if (log10(seconds) > 6)
                        units = av_asprintf("%.2fh", seconds / (60 * 60));
                    else if (log10(seconds) > 3)
                        units = av_asprintf("%.2fm", seconds / 60);
                    else
                        units = av_asprintf("%.2fs", seconds);
                    if (!units)
                        return AVERROR(ENOMEM);

                    drawtext(s->outpicref, s->start_x + x - 4 * strlen(units), s->h + s->start_y + 6, units, 0);
                    drawtext(s->outpicref, s->start_x + x - 4 * strlen(units), s->start_y - 12, units, 0);
                    av_free(units);
                }

                drawtext(s->outpicref, outlink->w / 2 - 4 * 4, outlink->h - s->start_y / 2, "TIME", 0);
                drawtext(s->outpicref, s->start_x / 7, outlink->h / 2 - 14 * 4, "FREQUENCY (Hz)", 1);
            } else {
                int w = s->mode == SEPARATE ? s->w / s->nb_display_channels : s->w;
                for (y = 0; y < s->h; y += 20) {
                    dst = s->outpicref->data[0] + (s->start_y + y) * s->outpicref->linesize[0];
                    dst[s->start_x - 2] = 200;
                    dst[s->start_x + s->w + 1] = 200;
                }
                for (y = 0; y < s->h; y += 40) {
                    dst = s->outpicref->data[0] + (s->start_y + y) * s->outpicref->linesize[0];
                    dst[s->start_x - 3] = 200;
                    dst[s->start_x + s->w + 2] = 200;
                }
                for (ch = 0; ch < (s->mode == SEPARATE ? s->nb_display_channels : 1); ch++) {
                    dst = s->outpicref->data[0] + (s->start_y - 2) * s->outpicref->linesize[0] + s->start_x + w * ch;
                    for (x = 0; x < w; x+=40)
                        dst[x] = 200;
                    dst = s->outpicref->data[0] + (s->start_y - 3) * s->outpicref->linesize[0] + s->start_x + w * ch;
                    for (x = 0; x < w; x+=80)
                        dst[x] = 200;
                    dst = s->outpicref->data[0] + (s->h + s->start_y + 1) * s->outpicref->linesize[0] + s->start_x + w * ch;
                    for (x = 0; x < w; x+=40) {
                        dst[x] = 200;
                    }
                    dst = s->outpicref->data[0] + (s->h + s->start_y + 2) * s->outpicref->linesize[0] + s->start_x + w * ch;
                    for (x = 0; x < w; x+=80) {
                        dst[x] = 200;
                    }
                    for (x = 0; x < w; x += 80) {
                        float hz = x * (inlink->sample_rate / 2) / (float)(1 << (int)ceil(log2(w)));
                        char *units;

                        if (hz == 0)
                            units = av_asprintf("DC");
                        else
                            units = av_asprintf("%.2f", hz);
                        if (!units)
                            return AVERROR(ENOMEM);

                        drawtext(s->outpicref, s->start_x - 4 * strlen(units) + x + w * ch, s->start_y - 12, units, 0);
                        drawtext(s->outpicref, s->start_x - 4 * strlen(units) + x + w * ch, s->h + s->start_y + 6, units, 0);
                        av_free(units);
                    }
                }
                for (y = 0; y < s->h; y += 40) {
                    float seconds = y * spp / inlink->sample_rate;
                    char *units;

                    if (y == 0)
                        units = av_asprintf("0");
                    else if (log10(seconds) > 6)
                        units = av_asprintf("%.2fh", seconds / (60 * 60));
                    else if (log10(seconds) > 3)
                        units = av_asprintf("%.2fm", seconds / 60);
                    else
                        units = av_asprintf("%.2fs", seconds);
                    if (!units)
                        return AVERROR(ENOMEM);

                    drawtext(s->outpicref, s->start_x - 8 * strlen(units) - 4, s->start_y + y - 4, units, 0);
                    av_free(units);
                }
                drawtext(s->outpicref, s->start_x / 7, outlink->h / 2 - 4 * 4, "TIME", 1);
                drawtext(s->outpicref, outlink->w / 2 - 14 * 4, outlink->h - s->start_y / 2, "FREQUENCY (Hz)", 0);
            }

            for (ch = 0; ch < (multi ? s->nb_display_channels : 1); ch++) {
                int h = multi ? s->h / s->nb_display_channels : s->h;

                for (y = 0; y < h; y++) {
                    float out[3] = { 0., 127.5, 127.5};
                    int chn;

                    for (chn = 0; chn < (s->mode == SEPARATE ? 1 : s->nb_display_channels); chn++) {
                        float yf, uf, vf;
                        int channel = (multi) ? s->nb_display_channels - ch - 1 : chn;

                        color_range(s, channel, &yf, &uf, &vf);
                        pick_color(s, yf, uf, vf, y / (float)h, out);
                    }
                    memset(s->outpicref->data[0]+(s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[0] + s->w + s->start_x + 20, av_clip_uint8(out[0]), 10);
                    memset(s->outpicref->data[1]+(s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[1] + s->w + s->start_x + 20, av_clip_uint8(out[1]), 10);
                    memset(s->outpicref->data[2]+(s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[2] + s->w + s->start_x + 20, av_clip_uint8(out[2]), 10);
                }
            }
        }

        ret = ff_filter_frame(outlink, s->outpicref);
        s->outpicref = NULL;
    }

    return ret;
}
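
/* showspectrumpic only buffers audio here; all processing and drawing happens
 * at EOF in showspectrumpic_request_frame() */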
static int showspectrumpic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    ShowSpectrumContext *s = ctx->priv;
    int ret;

    ret = av_audio_fifo_write(s->fifo, (void **)insamples->extended_data, insamples->nb_samples);
    av_frame_free(&insamples);

    return ret;
}

static const AVFilterPad showspectrumpic_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = showspectrumpic_filter_frame,
    },
    { NULL }
};

static const AVFilterPad showspectrumpic_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = showspectrumpic_request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showspectrumpic = {
    .name          = "showspectrumpic",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output single picture."),
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowSpectrumContext),
    .inputs        = showspectrumpic_inputs,
    .outputs       = showspectrumpic_outputs,
    .priv_class    = &showspectrumpic_class,
};

#endif // CONFIG_SHOWSPECTRUMPIC_FILTER