You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

680 lines
28KB

  1. /*
  2. * Copyright (c) 2012-2013 Clément Bœsch
  3. * Copyright (c) 2013 Rudolf Polzer <divverent@xonotic.org>
  4. * Copyright (c) 2015 Paul B Mahol
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * audio to spectrum (video) transmedia filter, based on ffplay rdft showmode
  25. * (by Michael Niedermayer) and lavfi/avf_showwaves (by Stefano Sabatini).
  26. */
  27. #include <math.h>
  28. #include "libavcodec/avfft.h"
  29. #include "libavutil/audio_fifo.h"
  30. #include "libavutil/avassert.h"
  31. #include "libavutil/channel_layout.h"
  32. #include "libavutil/opt.h"
  33. #include "audio.h"
  34. #include "video.h"
  35. #include "avfilter.h"
  36. #include "internal.h"
  37. #include "window_func.h"
/* How multiple channels share the output frame: overlaid or stacked. */
enum DisplayMode  { COMBINED, SEPARATE, NB_MODES };
/* Amplitude-to-intensity mapping applied before colorization. */
enum DisplayScale { LINEAR, SQRT, CBRT, LOG, NB_SCALES };
/* Palette used to turn magnitudes into YUV colors (see color_table below). */
enum ColorMode    { CHANNEL, INTENSITY, RAINBOW, MORELAND, NEBULAE, FIRE, NB_CLMODES };
/* How new spectrum columns are placed on/through the output frame. */
enum SlideMode    { REPLACE, SCROLL, FULLFRAME, RSCROLL, NB_SLIDES };
/* Direction of the frequency axis in the output video. */
enum Orientation  { VERTICAL, HORIZONTAL, NB_ORIENTATIONS };
typedef struct {
    const AVClass *class;
    int w, h;                   ///< output video width/height (from the "size" option)
    AVFrame *outpicref;         ///< cached output frame, updated one column/row at a time
    int nb_display_channels;    ///< number of channels actually drawn
    int orientation;            ///< enum Orientation: frequency axis direction
    int channel_width;          ///< per-channel width (w, or w/channels when separate+horizontal)
    int channel_height;         ///< per-channel height (h, or h/channels when separate+vertical)
    int sliding;                ///< sliding mode (enum SlideMode)
    int mode;                   ///< channel display mode
    int color_mode;             ///< display color scheme
    int scale;                  ///< amplitude scale (enum DisplayScale)
    float saturation;           ///< color saturation multiplier
    int xpos;                   ///< x position (current column)
    RDFTContext *rdft;          ///< Real Discrete Fourier Transform context
    int rdft_bits;              ///< number of bits (RDFT window size = 1<<rdft_bits)
    FFTSample **rdft_data;      ///< bins holder for each (displayed) channels
    float *window_func_lut;     ///< Window function LUT
    int win_func;               ///< window function id (WFUNC_* from window_func.h)
    double win_scale;           ///< magnitude normalization: 1/(sqrt(sum lut[i]^2) * 32768)
    float overlap;              ///< window overlap fraction in [0,1]
    int skip_samples;           ///< samples to advance between windows: (1-overlap)*win_size
    float *combine_buffer;      ///< color combining buffer (3 * h items)
    AVAudioFifo *fifo;          ///< queue of input samples awaiting analysis
    int64_t pts;                ///< pts (in samples) assigned to the next analysis window
} ShowSpectrumContext;
#define OFFSET(x) offsetof(ShowSpectrumContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* User-visible options.  Each AV_OPT_TYPE_CONST entry that follows an
 * AV_OPT_TYPE_INT entry is a named value accepted by that option (keyed by
 * the shared unit string in the last column). */
static const AVOption showspectrum_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
    { "slide", "set sliding mode", OFFSET(sliding), AV_OPT_TYPE_INT, {.i64 = 0}, 0, NB_SLIDES-1, FLAGS, "slide" },
        { "replace", "replace old columns with new", 0, AV_OPT_TYPE_CONST, {.i64=REPLACE}, 0, 0, FLAGS, "slide" },
        { "scroll", "scroll from right to left", 0, AV_OPT_TYPE_CONST, {.i64=SCROLL}, 0, 0, FLAGS, "slide" },
        { "rscroll", "scroll from left to right", 0, AV_OPT_TYPE_CONST, {.i64=RSCROLL}, 0, 0, FLAGS, "slide" },
        { "fullframe", "return full frames", 0, AV_OPT_TYPE_CONST, {.i64=FULLFRAME}, 0, 0, FLAGS, "slide" },
    { "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, COMBINED, NB_MODES-1, FLAGS, "mode" },
        { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
        { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
    { "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=CHANNEL}, CHANNEL, NB_CLMODES-1, FLAGS, "color" },
        { "channel", "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL}, 0, 0, FLAGS, "color" },
        { "intensity", "intensity based coloring", 0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
        { "rainbow", "rainbow based coloring", 0, AV_OPT_TYPE_CONST, {.i64=RAINBOW}, 0, 0, FLAGS, "color" },
        { "moreland", "moreland based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MORELAND}, 0, 0, FLAGS, "color" },
        { "nebulae", "nebulae based coloring", 0, AV_OPT_TYPE_CONST, {.i64=NEBULAE}, 0, 0, FLAGS, "color" },
        { "fire", "fire based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIRE}, 0, 0, FLAGS, "color" },
    { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=SQRT}, LINEAR, NB_SCALES-1, FLAGS, "scale" },
        { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
        { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
        { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
        { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
    { "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
    { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = WFUNC_HANNING}, 0, NB_WFUNC-1, FLAGS, "win_func" },
        { "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, FLAGS, "win_func" },
        { "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
        { "hann", "Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
        { "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
        { "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, FLAGS, "win_func" },
        { "blackman", "Blackman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
        { "welch", "Welch", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH}, 0, 0, FLAGS, "win_func" },
        { "flattop", "Flat-top", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP}, 0, 0, FLAGS, "win_func" },
        { "bharris", "Blackman-Harris", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS}, 0, 0, FLAGS, "win_func" },
        { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
        { "bhann", "Bartlett-Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN}, 0, 0, FLAGS, "win_func" },
        { "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, FLAGS, "win_func" },
        { "nuttall", "Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL}, 0, 0, FLAGS, "win_func" },
        { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS}, 0, 0, FLAGS, "win_func" },
        { "gauss", "Gauss", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS}, 0, 0, FLAGS, "win_func" },
    { "orientation", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=VERTICAL}, 0, NB_ORIENTATIONS-1, FLAGS, "orientation" },
        { "vertical", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VERTICAL}, 0, 0, FLAGS, "orientation" },
        { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=HORIZONTAL}, 0, 0, FLAGS, "orientation" },
    { "overlap", "set window overlap", OFFSET(overlap), AV_OPT_TYPE_FLOAT, {.dbl = 0}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showspectrum);
/* Color gradients for the non-CHANNEL color modes.  Each row maps a
 * normalized amplitude threshold `a` (0..1, ascending) to a YUV color;
 * the drawing code interpolates linearly between adjacent rows.
 * y is luma in [0,1]; u and v are chroma offsets around neutral (128),
 * expressed in the [-0.5, 0.5] range (hence the (x-128)/256. entries). */
static const struct ColorTable {
    float a, y, u, v;
} color_table[][8] = {
    [INTENSITY] = {
    {    0,                  0,                  0,                   0 },
    { 0.13, .03587126228984074,  .1573300977624594, -.02548747583751842 },
    { 0.30, .18572281794568020,  .1772436246393981,  .17475554840414750 },
    { 0.60, .28184980583656130, -.1593064119945782,  .47132074554608920 },
    { 0.73, .65830621175547810, -.3716070802232764,  .24352759331252930 },
    { 0.78, .76318535758242900, -.4307467689263783,  .16866496622310430 },
    { 0.91, .95336363636363640, -.2045454545454546,  .03313636363636363 },
    {    1,                  1,                  0,                   0 }},
    [RAINBOW] = {
    {    0,                  0,                  0,                   0 },
    { 0.13,           44/256.,     (189-128)/256.,      (138-128)/256. },
    { 0.25,           29/256.,     (186-128)/256.,      (119-128)/256. },
    { 0.38,          119/256.,     (194-128)/256.,       (53-128)/256. },
    { 0.60,          111/256.,      (73-128)/256.,       (59-128)/256. },
    { 0.73,          205/256.,      (19-128)/256.,      (149-128)/256. },
    { 0.86,          135/256.,      (83-128)/256.,      (200-128)/256. },
    {    1,           73/256.,      (95-128)/256.,      (225-128)/256. }},
    [MORELAND] = {
    {    0,           44/256.,     (181-128)/256.,      (112-128)/256. },
    { 0.13,          126/256.,     (177-128)/256.,      (106-128)/256. },
    { 0.25,          164/256.,     (163-128)/256.,      (109-128)/256. },
    { 0.38,          200/256.,     (140-128)/256.,      (120-128)/256. },
    { 0.60,          201/256.,     (117-128)/256.,      (141-128)/256. },
    { 0.73,          177/256.,     (103-128)/256.,      (165-128)/256. },
    { 0.86,          136/256.,     (100-128)/256.,      (183-128)/256. },
    {    1,           68/256.,     (117-128)/256.,      (203-128)/256. }},
    [NEBULAE] = {
    {    0,           10/256.,     (134-128)/256.,      (132-128)/256. },
    { 0.23,           21/256.,     (137-128)/256.,      (130-128)/256. },
    { 0.45,           35/256.,     (134-128)/256.,      (134-128)/256. },
    { 0.57,           51/256.,     (130-128)/256.,      (139-128)/256. },
    { 0.67,          104/256.,     (116-128)/256.,      (162-128)/256. },
    { 0.77,          120/256.,     (105-128)/256.,      (188-128)/256. },
    { 0.87,          140/256.,     (105-128)/256.,      (188-128)/256. },
    {    1,                  1,                  0,                   0 }},
    [FIRE] = {
    {    0,                  0,                  0,                   0 },
    { 0.23,           44/256.,     (132-128)/256.,      (127-128)/256. },
    { 0.45,           62/256.,     (116-128)/256.,      (140-128)/256. },
    { 0.57,           75/256.,     (105-128)/256.,      (152-128)/256. },
    { 0.67,           95/256.,      (91-128)/256.,      (166-128)/256. },
    { 0.77,          126/256.,      (74-128)/256.,      (172-128)/256. },
    { 0.87,          164/256.,      (73-128)/256.,      (162-128)/256. },
    {    1,                  1,                  0,                   0 }},
};
  167. static av_cold void uninit(AVFilterContext *ctx)
  168. {
  169. ShowSpectrumContext *s = ctx->priv;
  170. int i;
  171. av_freep(&s->combine_buffer);
  172. av_rdft_end(s->rdft);
  173. for (i = 0; i < s->nb_display_channels; i++)
  174. av_freep(&s->rdft_data[i]);
  175. av_freep(&s->rdft_data);
  176. av_freep(&s->window_func_lut);
  177. av_frame_free(&s->outpicref);
  178. av_audio_fifo_free(s->fifo);
  179. }
  180. static int query_formats(AVFilterContext *ctx)
  181. {
  182. AVFilterFormats *formats = NULL;
  183. AVFilterChannelLayouts *layouts = NULL;
  184. AVFilterLink *inlink = ctx->inputs[0];
  185. AVFilterLink *outlink = ctx->outputs[0];
  186. static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_NONE };
  187. static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_NONE };
  188. int ret;
  189. /* set input audio formats */
  190. formats = ff_make_format_list(sample_fmts);
  191. if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
  192. return ret;
  193. layouts = ff_all_channel_layouts();
  194. if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
  195. return ret;
  196. formats = ff_all_samplerates();
  197. if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
  198. return ret;
  199. /* set output video format */
  200. formats = ff_make_format_list(pix_fmts);
  201. if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
  202. return ret;
  203. return 0;
  204. }
/* Configure the video output link: derive the RDFT window size from the
 * requested frame geometry, (re)allocate per-channel FFT buffers, the window
 * LUT, the black initial output frame and the sample FIFO.  May be called
 * again on reconfiguration; allocations are redone only when rdft_bits
 * changes. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowSpectrumContext *s = ctx->priv;
    int i, rdft_bits, win_size, h, w;
    float overlap;

    outlink->w = s->w;
    outlink->h = s->h;

    /* In SEPARATE mode the axis perpendicular to time is split between
     * channels; in COMBINED mode every channel uses the full extent. */
    h = (s->mode == COMBINED || s->orientation == HORIZONTAL) ? outlink->h : outlink->h / inlink->channels;
    w = (s->mode == COMBINED || s->orientation == VERTICAL)   ? outlink->w : outlink->w / inlink->channels;
    s->channel_height = h;
    s->channel_width  = w;

    if (s->orientation == VERTICAL) {
        /* RDFT window size (precision) according to the requested output frame height */
        for (rdft_bits = 1; 1 << rdft_bits < 2 * h; rdft_bits++);
    } else {
        /* RDFT window size (precision) according to the requested output frame width */
        for (rdft_bits = 1; 1 << rdft_bits < 2 * w; rdft_bits++);
    }
    win_size = 1 << rdft_bits;

    /* (re-)configuration if the video output changed (or first init) */
    if (rdft_bits != s->rdft_bits) {
        AVFrame *outpicref;

        av_rdft_end(s->rdft);
        s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
        if (!s->rdft) {
            av_log(ctx, AV_LOG_ERROR, "Unable to create RDFT context. "
                   "The window size might be too high.\n");
            return AVERROR(EINVAL);
        }
        s->rdft_bits = rdft_bits;

        /* RDFT buffers: x2 for each (display) channel buffer.
         * Note: we use free and malloc instead of a realloc-like function to
         * make sure the buffer is aligned in memory for the FFT functions. */
        for (i = 0; i < s->nb_display_channels; i++)
            av_freep(&s->rdft_data[i]);
        av_freep(&s->rdft_data);
        s->nb_display_channels = inlink->channels;
        s->rdft_data = av_calloc(s->nb_display_channels, sizeof(*s->rdft_data));
        if (!s->rdft_data)
            return AVERROR(ENOMEM);
        for (i = 0; i < s->nb_display_channels; i++) {
            s->rdft_data[i] = av_calloc(win_size, sizeof(**s->rdft_data));
            if (!s->rdft_data[i])
                return AVERROR(ENOMEM);
        }

        /* pre-calc windowing function */
        s->window_func_lut =
            av_realloc_f(s->window_func_lut, win_size,
                         sizeof(*s->window_func_lut));
        if (!s->window_func_lut)
            return AVERROR(ENOMEM);
        ff_generate_window_func(s->window_func_lut, win_size, s->win_func, &overlap);
        /* overlap == 1 means "use the window function's recommended overlap" */
        if (s->overlap == 1)
            s->overlap = overlap;
        s->skip_samples = (1. - s->overlap) * win_size;
        if (s->skip_samples < 1) {
            av_log(ctx, AV_LOG_ERROR, "overlap %f too big\n", s->overlap);
            return AVERROR(EINVAL);
        }
        /* normalize magnitudes by window energy and s16 full scale */
        for (s->win_scale = 0, i = 0; i < win_size; i++) {
            s->win_scale += s->window_func_lut[i] * s->window_func_lut[i];
        }
        s->win_scale = 1. / (sqrt(s->win_scale) * 32768.);

        /* prepare the initial picref buffer (black frame) */
        av_frame_free(&s->outpicref);
        s->outpicref = outpicref =
            ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!outpicref)
            return AVERROR(ENOMEM);
        outlink->sample_aspect_ratio = (AVRational){1,1};
        for (i = 0; i < outlink->h; i++) {
            /* luma 0, chroma 128 == black in YUV */
            memset(outpicref->data[0] + i * outpicref->linesize[0],   0, outlink->w);
            memset(outpicref->data[1] + i * outpicref->linesize[1], 128, outlink->w);
            memset(outpicref->data[2] + i * outpicref->linesize[2], 128, outlink->w);
        }
    }

    /* reset the drawing cursor if it fell outside the (new) frame */
    if ((s->orientation == VERTICAL   && s->xpos >= outlink->w) ||
        (s->orientation == HORIZONTAL && s->xpos >= outlink->h))
        s->xpos = 0;

    /* one output frame per analysis hop; in fullframe mode a frame is only
     * emitted once the whole width (or height) has been filled */
    outlink->frame_rate = av_make_q(inlink->sample_rate, win_size * (1.-s->overlap));
    if (s->orientation == VERTICAL && s->sliding == FULLFRAME)
        outlink->frame_rate.den *= outlink->w;
    if (s->orientation == HORIZONTAL && s->sliding == FULLFRAME)
        outlink->frame_rate.den *= outlink->h;

    /* 3 floats (Y, U, V accumulators) per pixel along the frequency axis */
    if (s->orientation == VERTICAL) {
        s->combine_buffer =
            av_realloc_f(s->combine_buffer, outlink->h * 3,
                         sizeof(*s->combine_buffer));
    } else {
        s->combine_buffer =
            av_realloc_f(s->combine_buffer, outlink->w * 3,
                         sizeof(*s->combine_buffer));
    }

    av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d RDFT window size:%d\n",
           s->w, s->h, win_size);

    av_audio_fifo_free(s->fifo);
    s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, win_size);
    if (!s->fifo)
        return AVERROR(ENOMEM);
    return 0;
}
/* Pull more audio from upstream.  On EOF in fullframe mode, if the last
 * frame is partially drawn (xpos > 0), blank its unfilled region and push
 * it downstream so the tail of the input is not lost. */
static int request_frame(AVFilterLink *outlink)
{
    ShowSpectrumContext *s = outlink->src->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    unsigned i;
    int ret;

    ret = ff_request_frame(inlink);
    if (ret == AVERROR_EOF && s->sliding == FULLFRAME && s->xpos > 0 &&
        s->outpicref) {
        if (s->orientation == VERTICAL) {
            /* blank the not-yet-drawn columns at and right of xpos */
            for (i = 0; i < outlink->h; i++) {
                memset(s->outpicref->data[0] + i * s->outpicref->linesize[0] + s->xpos,   0, outlink->w - s->xpos);
                memset(s->outpicref->data[1] + i * s->outpicref->linesize[1] + s->xpos, 128, outlink->w - s->xpos);
                memset(s->outpicref->data[2] + i * s->outpicref->linesize[2] + s->xpos, 128, outlink->w - s->xpos);
            }
        } else {
            /* horizontal: blank the not-yet-drawn rows at and below xpos */
            for (i = s->xpos; i < outlink->h; i++) {
                memset(s->outpicref->data[0] + i * s->outpicref->linesize[0],   0, outlink->w);
                memset(s->outpicref->data[1] + i * s->outpicref->linesize[1], 128, outlink->w);
                memset(s->outpicref->data[2] + i * s->outpicref->linesize[2], 128, outlink->w);
            }
        }
        ret = ff_filter_frame(outlink, s->outpicref);
        /* ownership of the frame has passed downstream */
        s->outpicref = NULL;
    }

    return ret;
}
  335. static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples)
  336. {
  337. int ret;
  338. AVFilterContext *ctx = inlink->dst;
  339. AVFilterLink *outlink = ctx->outputs[0];
  340. ShowSpectrumContext *s = ctx->priv;
  341. AVFrame *outpicref = s->outpicref;
  342. /* nb_freq contains the power of two superior or equal to the output image
  343. * height (or half the RDFT window size) */
  344. const int nb_freq = 1 << (s->rdft_bits - 1);
  345. const int win_size = nb_freq << 1;
  346. const double w = s->win_scale;
  347. int h = s->orientation == VERTICAL ? s->channel_height : s->channel_width;
  348. int ch, plane, n, x, y;
  349. av_assert0(insamples->nb_samples == win_size);
  350. /* fill RDFT input with the number of samples available */
  351. for (ch = 0; ch < s->nb_display_channels; ch++) {
  352. const int16_t *p = (int16_t *)insamples->extended_data[ch];
  353. for (n = 0; n < win_size; n++)
  354. s->rdft_data[ch][n] = p[n] * s->window_func_lut[n];
  355. }
  356. /* run RDFT on each samples set */
  357. for (ch = 0; ch < s->nb_display_channels; ch++)
  358. av_rdft_calc(s->rdft, s->rdft_data[ch]);
  359. /* fill a new spectrum column */
  360. #define RE(y, ch) s->rdft_data[ch][2 * (y) + 0]
  361. #define IM(y, ch) s->rdft_data[ch][2 * (y) + 1]
  362. #define MAGNITUDE(y, ch) hypot(RE(y, ch), IM(y, ch))
  363. /* initialize buffer for combining to black */
  364. if (s->orientation == VERTICAL) {
  365. for (y = 0; y < outlink->h; y++) {
  366. s->combine_buffer[3 * y ] = 0;
  367. s->combine_buffer[3 * y + 1] = 127.5;
  368. s->combine_buffer[3 * y + 2] = 127.5;
  369. }
  370. } else {
  371. for (y = 0; y < outlink->w; y++) {
  372. s->combine_buffer[3 * y ] = 0;
  373. s->combine_buffer[3 * y + 1] = 127.5;
  374. s->combine_buffer[3 * y + 2] = 127.5;
  375. }
  376. }
  377. for (ch = 0; ch < s->nb_display_channels; ch++) {
  378. float yf, uf, vf;
  379. /* decide color range */
  380. switch (s->mode) {
  381. case COMBINED:
  382. // reduce range by channel count
  383. yf = 256.0f / s->nb_display_channels;
  384. switch (s->color_mode) {
  385. case RAINBOW:
  386. case MORELAND:
  387. case NEBULAE:
  388. case FIRE:
  389. case INTENSITY:
  390. uf = yf;
  391. vf = yf;
  392. break;
  393. case CHANNEL:
  394. /* adjust saturation for mixed UV coloring */
  395. /* this factor is correct for infinite channels, an approximation otherwise */
  396. uf = yf * M_PI;
  397. vf = yf * M_PI;
  398. break;
  399. default:
  400. av_assert0(0);
  401. }
  402. break;
  403. case SEPARATE:
  404. // full range
  405. yf = 256.0f;
  406. uf = 256.0f;
  407. vf = 256.0f;
  408. break;
  409. default:
  410. av_assert0(0);
  411. }
  412. if (s->color_mode == CHANNEL) {
  413. if (s->nb_display_channels > 1) {
  414. uf *= 0.5 * sin((2 * M_PI * ch) / s->nb_display_channels);
  415. vf *= 0.5 * cos((2 * M_PI * ch) / s->nb_display_channels);
  416. } else {
  417. uf = 0.0f;
  418. vf = 0.0f;
  419. }
  420. }
  421. uf *= s->saturation;
  422. vf *= s->saturation;
  423. /* draw the channel */
  424. for (y = 0; y < h; y++) {
  425. int row = (s->mode == COMBINED) ? y : ch * h + y;
  426. float *out = &s->combine_buffer[3 * row];
  427. /* get magnitude */
  428. float a = w * MAGNITUDE(y, ch);
  429. /* apply scale */
  430. switch (s->scale) {
  431. case LINEAR:
  432. break;
  433. case SQRT:
  434. a = sqrt(a);
  435. break;
  436. case CBRT:
  437. a = cbrt(a);
  438. break;
  439. case LOG:
  440. a = 1 + log10(FFMAX(FFMIN(1, a), 1e-6)) / 5; // zero = -120dBFS
  441. break;
  442. default:
  443. av_assert0(0);
  444. }
  445. if (s->color_mode > CHANNEL) {
  446. const int cm = s->color_mode;
  447. float y, u, v;
  448. int i;
  449. for (i = 1; i < FF_ARRAY_ELEMS(color_table[cm]) - 1; i++)
  450. if (color_table[cm][i].a >= a)
  451. break;
  452. // i now is the first item >= the color
  453. // now we know to interpolate between item i - 1 and i
  454. if (a <= color_table[cm][i - 1].a) {
  455. y = color_table[cm][i - 1].y;
  456. u = color_table[cm][i - 1].u;
  457. v = color_table[cm][i - 1].v;
  458. } else if (a >= color_table[cm][i].a) {
  459. y = color_table[cm][i].y;
  460. u = color_table[cm][i].u;
  461. v = color_table[cm][i].v;
  462. } else {
  463. float start = color_table[cm][i - 1].a;
  464. float end = color_table[cm][i].a;
  465. float lerpfrac = (a - start) / (end - start);
  466. y = color_table[cm][i - 1].y * (1.0f - lerpfrac)
  467. + color_table[cm][i].y * lerpfrac;
  468. u = color_table[cm][i - 1].u * (1.0f - lerpfrac)
  469. + color_table[cm][i].u * lerpfrac;
  470. v = color_table[cm][i - 1].v * (1.0f - lerpfrac)
  471. + color_table[cm][i].v * lerpfrac;
  472. }
  473. out[0] += y * yf;
  474. out[1] += u * uf;
  475. out[2] += v * vf;
  476. } else {
  477. out[0] += a * yf;
  478. out[1] += a * uf;
  479. out[2] += a * vf;
  480. }
  481. }
  482. }
  483. av_frame_make_writable(s->outpicref);
  484. /* copy to output */
  485. if (s->orientation == VERTICAL) {
  486. if (s->sliding == SCROLL) {
  487. for (plane = 0; plane < 3; plane++) {
  488. for (y = 0; y < outlink->h; y++) {
  489. uint8_t *p = outpicref->data[plane] +
  490. y * outpicref->linesize[plane];
  491. memmove(p, p + 1, outlink->w - 1);
  492. }
  493. }
  494. s->xpos = outlink->w - 1;
  495. } else if (s->sliding == RSCROLL) {
  496. for (plane = 0; plane < 3; plane++) {
  497. for (y = 0; y < outlink->h; y++) {
  498. uint8_t *p = outpicref->data[plane] +
  499. y * outpicref->linesize[plane];
  500. memmove(p + 1, p, outlink->w - 1);
  501. }
  502. }
  503. s->xpos = 0;
  504. }
  505. for (plane = 0; plane < 3; plane++) {
  506. uint8_t *p = outpicref->data[plane] +
  507. (outlink->h - 1) * outpicref->linesize[plane] +
  508. s->xpos;
  509. for (y = 0; y < outlink->h; y++) {
  510. *p = lrint(FFMAX(0, FFMIN(s->combine_buffer[3 * y + plane], 255)));
  511. p -= outpicref->linesize[plane];
  512. }
  513. }
  514. } else {
  515. if (s->sliding == SCROLL) {
  516. for (plane = 0; plane < 3; plane++) {
  517. for (y = 1; y < outlink->h; y++) {
  518. memmove(outpicref->data[plane] + (y-1) * outpicref->linesize[plane],
  519. outpicref->data[plane] + (y ) * outpicref->linesize[plane],
  520. outlink->w);
  521. }
  522. }
  523. s->xpos = outlink->h - 1;
  524. } else if (s->sliding == RSCROLL) {
  525. for (plane = 0; plane < 3; plane++) {
  526. for (y = outlink->h - 1; y >= 1; y--) {
  527. memmove(outpicref->data[plane] + (y ) * outpicref->linesize[plane],
  528. outpicref->data[plane] + (y-1) * outpicref->linesize[plane],
  529. outlink->w);
  530. }
  531. }
  532. s->xpos = 0;
  533. }
  534. for (plane = 0; plane < 3; plane++) {
  535. uint8_t *p = outpicref->data[plane] +
  536. s->xpos * outpicref->linesize[plane];
  537. for (x = 0; x < outlink->w; x++) {
  538. *p = lrint(FFMAX(0, FFMIN(s->combine_buffer[3 * x + plane], 255)));
  539. p++;
  540. }
  541. }
  542. }
  543. if (s->sliding != FULLFRAME || s->xpos == 0)
  544. outpicref->pts = insamples->pts;
  545. s->xpos++;
  546. if (s->orientation == VERTICAL && s->xpos >= outlink->w)
  547. s->xpos = 0;
  548. if (s->orientation == HORIZONTAL && s->xpos >= outlink->h)
  549. s->xpos = 0;
  550. if (s->sliding != FULLFRAME || s->xpos == 0) {
  551. ret = ff_filter_frame(outlink, av_frame_clone(s->outpicref));
  552. if (ret < 0)
  553. return ret;
  554. }
  555. return win_size;
  556. }
  557. static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
  558. {
  559. AVFilterContext *ctx = inlink->dst;
  560. ShowSpectrumContext *s = ctx->priv;
  561. unsigned win_size = 1 << s->rdft_bits;
  562. AVFrame *fin = NULL;
  563. int ret = 0;
  564. av_audio_fifo_write(s->fifo, (void **)insamples->extended_data, insamples->nb_samples);
  565. av_frame_free(&insamples);
  566. while (av_audio_fifo_size(s->fifo) >= win_size) {
  567. fin = ff_get_audio_buffer(inlink, win_size);
  568. if (!fin) {
  569. ret = AVERROR(ENOMEM);
  570. goto fail;
  571. }
  572. fin->pts = s->pts;
  573. s->pts += s->skip_samples;
  574. ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data, win_size);
  575. if (ret < 0)
  576. goto fail;
  577. ret = plot_spectrum_column(inlink, fin);
  578. av_frame_free(&fin);
  579. av_audio_fifo_drain(s->fifo, s->skip_samples);
  580. if (ret < 0)
  581. goto fail;
  582. }
  583. fail:
  584. av_frame_free(&fin);
  585. return ret;
  586. }
/* Single audio input; samples are consumed via filter_frame(). */
static const AVFilterPad showspectrum_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

/* Single video output; geometry/FFT setup in config_output(), upstream
 * pulls (and EOF flushing) in request_frame(). */
static const AVFilterPad showspectrum_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

/* Filter definition.  No .init callback: all setup happens lazily in
 * config_output(); cleanup is handled by uninit(). */
AVFilter ff_avf_showspectrum = {
    .name          = "showspectrum",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output."),
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowSpectrumContext),
    .inputs        = showspectrum_inputs,
    .outputs       = showspectrum_outputs,
    .priv_class    = &showspectrum_class,
};