/*
 * Copyright (c) 2012-2013 Clément Bœsch
 * Copyright (c) 2013 Rudolf Polzer <divverent@xonotic.org>
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * audio to spectrum (video) transmedia filter, based on ffplay rdft showmode
 * (by Michael Niedermayer) and lavfi/avf_showwaves (by Stefano Sabatini).
 */

#include <math.h>

#include "libavcodec/avfft.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/xga_font_data.h"
#include "audio.h"
#include "video.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"
#include "window_func.h"

enum DisplayMode    { COMBINED, SEPARATE, NB_MODES };
enum DataMode       { D_MAGNITUDE, D_PHASE, NB_DMODES };
enum FrequencyScale { F_LINEAR, F_LOG, NB_FSCALES };
enum DisplayScale   { LINEAR, SQRT, CBRT, LOG, FOURTHRT, FIFTHRT, NB_SCALES };
enum ColorMode      { CHANNEL, INTENSITY, RAINBOW, MORELAND, NEBULAE, FIRE, FIERY, FRUIT, COOL, MAGMA, GREEN, VIRIDIS, PLASMA, CIVIDIS, TERRAIN, NB_CLMODES };
enum SlideMode      { REPLACE, SCROLL, FULLFRAME, RSCROLL, NB_SLIDES };
enum Orientation    { VERTICAL, HORIZONTAL, NB_ORIENTATIONS };

typedef struct ShowSpectrumContext {
    const AVClass *class;
    int w, h;
    char *rate_str;
    AVRational auto_frame_rate;
    AVRational frame_rate;
    AVFrame *outpicref;
    int nb_display_channels;
    int orientation;
    int channel_width;
    int channel_height;
    int sliding;                ///< 1 if sliding mode, 0 otherwise
    int mode;                   ///< channel display mode
    int color_mode;             ///< display color scheme
    int scale;
    int fscale;
    float saturation;           ///< color saturation multiplier
    float rotation;             ///< color rotation
    int start, stop;            ///< zoom mode
    int data;
    int xpos;                   ///< x position (current column)
    FFTContext **fft;           ///< Fast Fourier Transform context
    FFTContext **ifft;          ///< Inverse Fast Fourier Transform context
    int fft_bits;               ///< number of bits (FFT window size = 1<<fft_bits)
    FFTComplex **fft_data;      ///< bins holder for each (displayed) channels
    FFTComplex **fft_scratch;   ///< scratch buffers
    float *window_func_lut;     ///< Window function LUT
    float **magnitudes;
    float **phases;
    int win_func;
    int win_size;
    int buf_size;
    double win_scale;
    float overlap;
    float gain;
    int consumed;
    int hop_size;
    float *combine_buffer;      ///< color combining buffer (3 * h items)
    float **color_buffer;       ///< color buffer (3 * h * ch items)
    AVAudioFifo *fifo;
    int64_t pts;
    int64_t old_pts;
    int old_len;
    int single_pic;
    int legend;
    int start_x, start_y;
    int (*plot_channel)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} ShowSpectrumContext;

#define OFFSET(x) offsetof(ShowSpectrumContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption showspectrum_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
    { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
    { "slide", "set sliding mode", OFFSET(sliding), AV_OPT_TYPE_INT, {.i64 = 0}, 0, NB_SLIDES-1, FLAGS, "slide" },
    { "replace", "replace old columns with new", 0, AV_OPT_TYPE_CONST, {.i64=REPLACE}, 0, 0, FLAGS, "slide" },
    { "scroll", "scroll from right to left", 0, AV_OPT_TYPE_CONST, {.i64=SCROLL}, 0, 0, FLAGS, "slide" },
    { "fullframe", "return full frames", 0, AV_OPT_TYPE_CONST, {.i64=FULLFRAME}, 0, 0, FLAGS, "slide" },
    { "rscroll", "scroll from left to right", 0, AV_OPT_TYPE_CONST, {.i64=RSCROLL}, 0, 0, FLAGS, "slide" },
    { "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, COMBINED, NB_MODES-1, FLAGS, "mode" },
    { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
    { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
    { "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=CHANNEL}, CHANNEL, NB_CLMODES-1, FLAGS, "color" },
    { "channel", "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL}, 0, 0, FLAGS, "color" },
    { "intensity", "intensity based coloring", 0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
    { "rainbow", "rainbow based coloring", 0, AV_OPT_TYPE_CONST, {.i64=RAINBOW}, 0, 0, FLAGS, "color" },
    { "moreland", "moreland based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MORELAND}, 0, 0, FLAGS, "color" },
    { "nebulae", "nebulae based coloring", 0, AV_OPT_TYPE_CONST, {.i64=NEBULAE}, 0, 0, FLAGS, "color" },
    { "fire", "fire based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIRE}, 0, 0, FLAGS, "color" },
    { "fiery", "fiery based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIERY}, 0, 0, FLAGS, "color" },
    { "fruit", "fruit based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FRUIT}, 0, 0, FLAGS, "color" },
    { "cool", "cool based coloring", 0, AV_OPT_TYPE_CONST, {.i64=COOL}, 0, 0, FLAGS, "color" },
    { "magma", "magma based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MAGMA}, 0, 0, FLAGS, "color" },
    { "green", "green based coloring", 0, AV_OPT_TYPE_CONST, {.i64=GREEN}, 0, 0, FLAGS, "color" },
    { "viridis", "viridis based coloring", 0, AV_OPT_TYPE_CONST, {.i64=VIRIDIS}, 0, 0, FLAGS, "color" },
    { "plasma", "plasma based coloring", 0, AV_OPT_TYPE_CONST, {.i64=PLASMA}, 0, 0, FLAGS, "color" },
    { "cividis", "cividis based coloring", 0, AV_OPT_TYPE_CONST, {.i64=CIVIDIS}, 0, 0, FLAGS, "color" },
    { "terrain", "terrain based coloring", 0, AV_OPT_TYPE_CONST, {.i64=TERRAIN}, 0, 0, FLAGS, "color" },
    { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=SQRT}, LINEAR, NB_SCALES-1, FLAGS, "scale" },
    { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
    { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
    { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
    { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
    { "4thrt", "4th root", 0, AV_OPT_TYPE_CONST, {.i64=FOURTHRT}, 0, 0, FLAGS, "scale" },
    { "5thrt", "5th root", 0, AV_OPT_TYPE_CONST, {.i64=FIFTHRT}, 0, 0, FLAGS, "scale" },
    { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=F_LINEAR}, 0, NB_FSCALES-1, FLAGS, "fscale" },
    { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=F_LINEAR}, 0, 0, FLAGS, "fscale" },
    { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=F_LOG}, 0, 0, FLAGS, "fscale" },
    { "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
    { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = WFUNC_HANNING}, 0, NB_WFUNC-1, FLAGS, "win_func" },
    { "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, FLAGS, "win_func" },
    { "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
    { "hann", "Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
    { "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
    { "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, FLAGS, "win_func" },
    { "blackman", "Blackman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
    { "welch", "Welch", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH}, 0, 0, FLAGS, "win_func" },
    { "flattop", "Flat-top", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP}, 0, 0, FLAGS, "win_func" },
    { "bharris", "Blackman-Harris", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS}, 0, 0, FLAGS, "win_func" },
    { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
    { "bhann", "Bartlett-Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN}, 0, 0, FLAGS, "win_func" },
    { "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, FLAGS, "win_func" },
    { "nuttall", "Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL}, 0, 0, FLAGS, "win_func" },
    { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS}, 0, 0, FLAGS, "win_func" },
    { "gauss", "Gauss", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS}, 0, 0, FLAGS, "win_func" },
    { "tukey", "Tukey", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY}, 0, 0, FLAGS, "win_func" },
    { "dolph", "Dolph-Chebyshev", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_DOLPH}, 0, 0, FLAGS, "win_func" },
    { "cauchy", "Cauchy", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY}, 0, 0, FLAGS, "win_func" },
    { "parzen", "Parzen", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN}, 0, 0, FLAGS, "win_func" },
    { "poisson", "Poisson", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON}, 0, 0, FLAGS, "win_func" },
    { "bohman", "Bohman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BOHMAN}, 0, 0, FLAGS, "win_func" },
    { "orientation", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=VERTICAL}, 0, NB_ORIENTATIONS-1, FLAGS, "orientation" },
    { "vertical", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VERTICAL}, 0, 0, FLAGS, "orientation" },
    { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=HORIZONTAL}, 0, 0, FLAGS, "orientation" },
    { "overlap", "set window overlap", OFFSET(overlap), AV_OPT_TYPE_FLOAT, {.dbl = 0}, 0, 1, FLAGS },
    { "gain", "set scale gain", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl = 1}, 0, 128, FLAGS },
    { "data", "set data mode", OFFSET(data), AV_OPT_TYPE_INT, {.i64 = 0}, 0, NB_DMODES-1, FLAGS, "data" },
    { "magnitude", NULL, 0, AV_OPT_TYPE_CONST, {.i64=D_MAGNITUDE}, 0, 0, FLAGS, "data" },
    { "phase", NULL, 0, AV_OPT_TYPE_CONST, {.i64=D_PHASE}, 0, 0, FLAGS, "data" },
    { "rotation", "color rotation", OFFSET(rotation), AV_OPT_TYPE_FLOAT, {.dbl = 0}, -1, 1, FLAGS },
    { "start", "start frequency", OFFSET(start), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT32_MAX, FLAGS },
    { "stop", "stop frequency", OFFSET(stop), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT32_MAX, FLAGS },
    { "fps", "set video rate", OFFSET(rate_str), AV_OPT_TYPE_STRING, {.str = "auto"}, 0, 0, FLAGS },
    { "legend", "draw legend", OFFSET(legend), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showspectrum);

static const struct ColorTable {
    float a, y, u, v;
} color_table[][8] = {
    [INTENSITY] = {
        { 0, 0, 0, 0 },
        { 0.13, .03587126228984074, .1573300977624594, -.02548747583751842 },
        { 0.30, .18572281794568020, .1772436246393981, .17475554840414750 },
        { 0.60, .28184980583656130, -.1593064119945782, .47132074554608920 },
        { 0.73, .65830621175547810, -.3716070802232764, .24352759331252930 },
        { 0.78, .76318535758242900, -.4307467689263783, .16866496622310430 },
        { 0.91, .95336363636363640, -.2045454545454546, .03313636363636363 },
        { 1, 1, 0, 0 }},
    [RAINBOW] = {
        { 0, 0, 0, 0 },
        { 0.13, 44/256., (189-128)/256., (138-128)/256. },
        { 0.25, 29/256., (186-128)/256., (119-128)/256. },
        { 0.38, 119/256., (194-128)/256., (53-128)/256. },
        { 0.60, 111/256., (73-128)/256., (59-128)/256. },
        { 0.73, 205/256., (19-128)/256., (149-128)/256. },
        { 0.86, 135/256., (83-128)/256., (200-128)/256. },
        { 1, 73/256., (95-128)/256., (225-128)/256. }},
    [MORELAND] = {
        { 0, 44/256., (181-128)/256., (112-128)/256. },
        { 0.13, 126/256., (177-128)/256., (106-128)/256. },
        { 0.25, 164/256., (163-128)/256., (109-128)/256. },
        { 0.38, 200/256., (140-128)/256., (120-128)/256. },
        { 0.60, 201/256., (117-128)/256., (141-128)/256. },
        { 0.73, 177/256., (103-128)/256., (165-128)/256. },
        { 0.86, 136/256., (100-128)/256., (183-128)/256. },
        { 1, 68/256., (117-128)/256., (203-128)/256. }},
    [NEBULAE] = {
        { 0, 10/256., (134-128)/256., (132-128)/256. },
        { 0.23, 21/256., (137-128)/256., (130-128)/256. },
        { 0.45, 35/256., (134-128)/256., (134-128)/256. },
        { 0.57, 51/256., (130-128)/256., (139-128)/256. },
        { 0.67, 104/256., (116-128)/256., (162-128)/256. },
        { 0.77, 120/256., (105-128)/256., (188-128)/256. },
        { 0.87, 140/256., (105-128)/256., (188-128)/256. },
        { 1, 1, 0, 0 }},
    [FIRE] = {
        { 0, 0, 0, 0 },
        { 0.23, 44/256., (132-128)/256., (127-128)/256. },
        { 0.45, 62/256., (116-128)/256., (140-128)/256. },
        { 0.57, 75/256., (105-128)/256., (152-128)/256. },
        { 0.67, 95/256., (91-128)/256., (166-128)/256. },
        { 0.77, 126/256., (74-128)/256., (172-128)/256. },
        { 0.87, 164/256., (73-128)/256., (162-128)/256. },
        { 1, 1, 0, 0 }},
    [FIERY] = {
        { 0, 0, 0, 0 },
        { 0.23, 36/256., (116-128)/256., (163-128)/256. },
        { 0.45, 52/256., (102-128)/256., (200-128)/256. },
        { 0.57, 116/256., (84-128)/256., (196-128)/256. },
        { 0.67, 157/256., (67-128)/256., (181-128)/256. },
        { 0.77, 193/256., (40-128)/256., (155-128)/256. },
        { 0.87, 221/256., (101-128)/256., (134-128)/256. },
        { 1, 1, 0, 0 }},
    [FRUIT] = {
        { 0, 0, 0, 0 },
        { 0.20, 29/256., (136-128)/256., (119-128)/256. },
        { 0.30, 60/256., (119-128)/256., (90-128)/256. },
        { 0.40, 85/256., (91-128)/256., (85-128)/256. },
        { 0.50, 116/256., (70-128)/256., (105-128)/256. },
        { 0.60, 151/256., (50-128)/256., (146-128)/256. },
        { 0.70, 191/256., (63-128)/256., (178-128)/256. },
        { 1, 98/256., (80-128)/256., (221-128)/256. }},
    [COOL] = {
        { 0, 0, 0, 0 },
        { .15, 0, .5, -.5 },
        { 1, 1, -.5, .5 }},
    [MAGMA] = {
        { 0, 0, 0, 0 },
        { 0.10, 23/256., (175-128)/256., (120-128)/256. },
        { 0.23, 43/256., (158-128)/256., (144-128)/256. },
        { 0.35, 85/256., (138-128)/256., (179-128)/256. },
        { 0.48, 96/256., (128-128)/256., (189-128)/256. },
        { 0.64, 128/256., (103-128)/256., (214-128)/256. },
        { 0.92, 205/256., (80-128)/256., (152-128)/256. },
        { 1, 1, 0, 0 }},
    [GREEN] = {
        { 0, 0, 0, 0 },
        { .75, .5, 0, -.5 },
        { 1, 1, 0, 0 }},
    [VIRIDIS] = {
        { 0, 0, 0, 0 },
        { 0.10, 0x39/255., (0x9D -128)/255., (0x8F -128)/255. },
        { 0.23, 0x5C/255., (0x9A -128)/255., (0x68 -128)/255. },
        { 0.35, 0x69/255., (0x93 -128)/255., (0x57 -128)/255. },
        { 0.48, 0x76/255., (0x88 -128)/255., (0x4B -128)/255. },
        { 0.64, 0x8A/255., (0x72 -128)/255., (0x4F -128)/255. },
        { 0.80, 0xA3/255., (0x50 -128)/255., (0x66 -128)/255. },
        { 1, 0xCC/255., (0x2F -128)/255., (0x87 -128)/255. }},
    [PLASMA] = {
        { 0, 0, 0, 0 },
        { 0.10, 0x27/255., (0xC2 -128)/255., (0x82 -128)/255. },
        { 0.58, 0x5B/255., (0x9A -128)/255., (0xAE -128)/255. },
        { 0.70, 0x89/255., (0x44 -128)/255., (0xAB -128)/255. },
        { 0.80, 0xB4/255., (0x2B -128)/255., (0x9E -128)/255. },
        { 0.91, 0xD2/255., (0x38 -128)/255., (0x92 -128)/255. },
        { 1, 1, 0, 0. }},
    [CIVIDIS] = {
        { 0, 0, 0, 0 },
        { 0.20, 0x28/255., (0x98 -128)/255., (0x6F -128)/255. },
        { 0.50, 0x48/255., (0x95 -128)/255., (0x74 -128)/255. },
        { 0.63, 0x69/255., (0x84 -128)/255., (0x7F -128)/255. },
        { 0.76, 0x89/255., (0x75 -128)/255., (0x84 -128)/255. },
        { 0.90, 0xCE/255., (0x35 -128)/255., (0x95 -128)/255. },
        { 1, 1, 0, 0. }},
    [TERRAIN] = {
        { 0, 0, 0, 0 },
        { 0.15, 0, .5, 0 },
        { 0.60, 1, -.5, -.5 },
        { 0.85, 1, -.5, .5 },
        { 1, 1, 0, 0 }},
};

static av_cold void uninit(AVFilterContext *ctx)
{
    ShowSpectrumContext *s = ctx->priv;
    int i;

    av_freep(&s->combine_buffer);
    if (s->fft) {
        for (i = 0; i < s->nb_display_channels; i++)
            av_fft_end(s->fft[i]);
    }
    av_freep(&s->fft);
    if (s->ifft) {
        for (i = 0; i < s->nb_display_channels; i++)
            av_fft_end(s->ifft[i]);
    }
    av_freep(&s->ifft);
    if (s->fft_data) {
        for (i = 0; i < s->nb_display_channels; i++)
            av_freep(&s->fft_data[i]);
    }
    av_freep(&s->fft_data);
    if (s->fft_scratch) {
        for (i = 0; i < s->nb_display_channels; i++)
            av_freep(&s->fft_scratch[i]);
    }
    av_freep(&s->fft_scratch);
    if (s->color_buffer) {
        for (i = 0; i < s->nb_display_channels; i++)
            av_freep(&s->color_buffer[i]);
    }
    av_freep(&s->color_buffer);
    av_freep(&s->window_func_lut);
    if (s->magnitudes) {
        for (i = 0; i < s->nb_display_channels; i++)
            av_freep(&s->magnitudes[i]);
    }
    av_freep(&s->magnitudes);
    av_frame_free(&s->outpicref);
    av_audio_fifo_free(s->fifo);
    if (s->phases) {
        for (i = 0; i < s->nb_display_channels; i++)
            av_freep(&s->phases[i]);
    }
    av_freep(&s->phases);
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_NONE };
    int ret;

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
        return ret;

    layouts = ff_all_channel_layouts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
        return ret;

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
        return ret;

    return 0;
}

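/* When a zoom band is requested (stop > 0), the branch below appears to
 * evaluate the spectrum only over the start..stop range using a chirp-Z
 * (Bluestein-style) transform: the windowed input is pre-multiplied by a
 * chirp, convolved with a chirp kernel via two forward FFTs and one inverse
 * FFT, then post-multiplied by the chirp again. Without a zoom band, a plain
 * in-place FFT of the windowed samples is run per channel. */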
static int run_channel_fft(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ShowSpectrumContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const float *window_func_lut = s->window_func_lut;
    AVFrame *fin = arg;
    const int ch = jobnr;
    int n;

    /* fill FFT input with the number of samples available */
    const float *p = (float *)fin->extended_data[ch];

    for (n = 0; n < s->win_size; n++) {
        s->fft_data[ch][n].re = p[n] * window_func_lut[n];
        s->fft_data[ch][n].im = 0;
    }

    if (s->stop) {
        float theta, phi, psi, a, b, S, c;
        FFTComplex *g = s->fft_data[ch];
        FFTComplex *h = s->fft_scratch[ch];
        int L = s->buf_size;
        int N = s->win_size;
        int M = s->win_size / 2;

        phi = 2.f * M_PI * (s->stop - s->start) / (float)inlink->sample_rate / (M - 1);
        theta = 2.f * M_PI * s->start / (float)inlink->sample_rate;

        for (int n = 0; n < M; n++) {
            h[n].re = cosf(n * n / 2.f * phi);
            h[n].im = sinf(n * n / 2.f * phi);
        }

        for (int n = M; n < L; n++) {
            h[n].re = 0.f;
            h[n].im = 0.f;
        }

        for (int n = L - N; n < L; n++) {
            h[n].re = cosf((L - n) * (L - n) / 2.f * phi);
            h[n].im = sinf((L - n) * (L - n) / 2.f * phi);
        }

        for (int n = 0; n < N; n++) {
            g[n].re = s->fft_data[ch][n].re;
            g[n].im = s->fft_data[ch][n].im;
        }

        for (int n = N; n < L; n++) {
            g[n].re = 0.f;
            g[n].im = 0.f;
        }

        for (int n = 0; n < N; n++) {
            psi = n * theta + n * n / 2.f * phi;
            c = cosf(psi);
            S = -sinf(psi);
            a = c * g[n].re - S * g[n].im;
            b = S * g[n].re + c * g[n].im;
            g[n].re = a;
            g[n].im = b;
        }

        av_fft_permute(s->fft[ch], h);
        av_fft_calc(s->fft[ch], h);

        av_fft_permute(s->fft[ch], g);
        av_fft_calc(s->fft[ch], g);

        for (int n = 0; n < L; n++) {
            c = g[n].re;
            S = g[n].im;
            a = c * h[n].re - S * h[n].im;
            b = S * h[n].re + c * h[n].im;
            g[n].re = a / L;
            g[n].im = b / L;
        }

        av_fft_permute(s->ifft[ch], g);
        av_fft_calc(s->ifft[ch], g);

        for (int k = 0; k < M; k++) {
            psi = k * k / 2.f * phi;
            c = cosf(psi);
            S = -sinf(psi);
            a = c * g[k].re - S * g[k].im;
            b = S * g[k].re + c * g[k].im;
            s->fft_data[ch][k].re = a;
            s->fft_data[ch][k].im = b;
        }
    } else {
        /* run FFT on each samples set */
        av_fft_permute(s->fft[ch], s->fft_data[ch]);
        av_fft_calc(s->fft[ch], s->fft_data[ch]);
    }

    return 0;
}

static void drawtext(AVFrame *pic, int x, int y, const char *txt, int o)
{
    const uint8_t *font;
    int font_height;
    int i;

    font = avpriv_cga_font, font_height = 8;

    for (i = 0; txt[i]; i++) {
        int char_y, mask;

        if (o) {
            for (char_y = font_height - 1; char_y >= 0; char_y--) {
                uint8_t *p = pic->data[0] + (y + i * 10) * pic->linesize[0] + x;
                for (mask = 0x80; mask; mask >>= 1) {
                    if (font[txt[i] * font_height + font_height - 1 - char_y] & mask)
                        p[char_y] = ~p[char_y];
                    p += pic->linesize[0];
                }
            }
        } else {
            uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8);
            for (char_y = 0; char_y < font_height; char_y++) {
                for (mask = 0x80; mask; mask >>= 1) {
                    if (font[txt[i] * font_height + char_y] & mask)
                        *p = ~(*p);
                    p++;
                }
                p += pic->linesize[0] - 8;
            }
        }
    }
}

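/* color_range() essentially decides how much of the 8-bit Y/U/V range one
 * channel may use: in COMBINED mode the luma budget is split as 256 / number
 * of channels so the summed channels stay in range, while SEPARATE mode gives
 * each channel the full range. For CHANNEL coloring the chroma gain is then
 * rotated around the UV plane per channel (plus the user rotation), and
 * finally scaled by the saturation option. */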
static void color_range(ShowSpectrumContext *s, int ch,
                        float *yf, float *uf, float *vf)
{
    switch (s->mode) {
    case COMBINED:
        // reduce range by channel count
        *yf = 256.0f / s->nb_display_channels;
        switch (s->color_mode) {
        case RAINBOW:
        case MORELAND:
        case NEBULAE:
        case FIRE:
        case FIERY:
        case FRUIT:
        case COOL:
        case GREEN:
        case VIRIDIS:
        case PLASMA:
        case CIVIDIS:
        case TERRAIN:
        case MAGMA:
        case INTENSITY:
            *uf = *yf;
            *vf = *yf;
            break;
        case CHANNEL:
            /* adjust saturation for mixed UV coloring */
            /* this factor is correct for infinite channels, an approximation otherwise */
            *uf = *yf * M_PI;
            *vf = *yf * M_PI;
            break;
        default:
            av_assert0(0);
        }
        break;
    case SEPARATE:
        // full range
        *yf = 256.0f;
        *uf = 256.0f;
        *vf = 256.0f;
        break;
    default:
        av_assert0(0);
    }

    if (s->color_mode == CHANNEL) {
        if (s->nb_display_channels > 1) {
            *uf *= 0.5f * sinf((2 * M_PI * ch) / s->nb_display_channels + M_PI * s->rotation);
            *vf *= 0.5f * cosf((2 * M_PI * ch) / s->nb_display_channels + M_PI * s->rotation);
        } else {
            *uf *= 0.5f * sinf(M_PI * s->rotation);
            *vf *= 0.5f * cosf(M_PI * s->rotation + M_PI_2);
        }
    } else {
        *uf += *uf * sinf(M_PI * s->rotation);
        *vf += *vf * cosf(M_PI * s->rotation + M_PI_2);
    }

    *uf *= s->saturation;
    *vf *= s->saturation;
}

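/* pick_color() maps an intensity a in [0,1] to Y/U/V: for the gradient color
 * modes it finds the two color_table[] entries whose .a values bracket a and
 * linearly interpolates between them, then applies the per-channel range
 * factors from color_range(); for CHANNEL coloring the output simply scales
 * linearly with a. */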
static void pick_color(ShowSpectrumContext *s,
                       float yf, float uf, float vf,
                       float a, float *out)
{
    if (s->color_mode > CHANNEL) {
        const int cm = s->color_mode;
        float y, u, v;
        int i;

        for (i = 1; i < FF_ARRAY_ELEMS(color_table[cm]) - 1; i++)
            if (color_table[cm][i].a >= a)
                break;
        // i now is the first item >= the color
        // now we know to interpolate between item i - 1 and i
        if (a <= color_table[cm][i - 1].a) {
            y = color_table[cm][i - 1].y;
            u = color_table[cm][i - 1].u;
            v = color_table[cm][i - 1].v;
        } else if (a >= color_table[cm][i].a) {
            y = color_table[cm][i].y;
            u = color_table[cm][i].u;
            v = color_table[cm][i].v;
        } else {
            float start = color_table[cm][i - 1].a;
            float end = color_table[cm][i].a;
            float lerpfrac = (a - start) / (end - start);
            y = color_table[cm][i - 1].y * (1.0f - lerpfrac)
              + color_table[cm][i].y * lerpfrac;
            u = color_table[cm][i - 1].u * (1.0f - lerpfrac)
              + color_table[cm][i].u * lerpfrac;
            v = color_table[cm][i - 1].v * (1.0f - lerpfrac)
              + color_table[cm][i].v * lerpfrac;
        }

        out[0] = y * yf;
        out[1] = u * uf;
        out[2] = v * vf;
    } else {
        out[0] = a * yf;
        out[1] = a * uf;
        out[2] = a * vf;
    }
}

static char *get_time(AVFilterContext *ctx, float seconds, int x)
{
    char *units;

    if (x == 0)
        units = av_asprintf("0");
    else if (log10(seconds) > 6)
        units = av_asprintf("%.2fh", seconds / (60 * 60));
    else if (log10(seconds) > 3)
        units = av_asprintf("%.2fm", seconds / 60);
    else
        units = av_asprintf("%.2fs", seconds);
    return units;
}

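/* The helpers below convert between linear and logarithmic frequency axes:
 * log_scale() maps value in [min,max] onto a * exp(b * value) with a and b
 * chosen so both endpoints are fixed (f(min) = min, f(max) = max), and
 * inv_log_scale() is its inverse. get_log_hz() and bin_pos() use them (with
 * min pinned to 21 Hz and a +1/-1 offset to avoid log of zero) to remap bin
 * indices for the log frequency scale. */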
static float log_scale(const float value, const float min, const float max)
{
    if (value < min)
        return min;
    if (value > max)
        return max;

    {
        const float b = logf(max / min) / (max - min);
        const float a = max / expf(max * b);

        return expf(value * b) * a;
    }
}

static float get_log_hz(const int bin, const int num_bins, const float sample_rate)
{
    const float max_freq = sample_rate / 2;
    const float hz_per_bin = max_freq / num_bins;
    const float freq = hz_per_bin * bin;
    const float scaled_freq = log_scale(freq + 1, 21, max_freq) - 1;

    return num_bins * scaled_freq / max_freq;
}

static float inv_log_scale(const float value, const float min, const float max)
{
    if (value < min)
        return min;
    if (value > max)
        return max;

    {
        const float b = logf(max / min) / (max - min);
        const float a = max / expf(max * b);

        return logf(value / a) / b;
    }
}

static float bin_pos(const int bin, const int num_bins, const float sample_rate)
{
    const float max_freq = sample_rate / 2;
    const float hz_per_bin = max_freq / num_bins;
    const float freq = hz_per_bin * bin;
    const float scaled_freq = inv_log_scale(freq + 1, 21, max_freq) - 1;

    return num_bins * scaled_freq / max_freq;
}

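/* draw_legend() draws the static decorations around the spectrum area:
 * a 200-luma border, frequency tick marks every 20/40 pixels with periodic
 * frequency labels, time labels for single-picture output, axis titles, and
 * a color ramp on the right annotated in dB (down to -120 dB). */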
static int draw_legend(AVFilterContext *ctx, int samples)
{
    ShowSpectrumContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    int ch, y, x = 0, sz = s->orientation == VERTICAL ? s->w : s->h;
    int multi = (s->mode == SEPARATE && s->color_mode == CHANNEL);
    float spp = samples / (float)sz;
    char *text;
    uint8_t *dst;
    char chlayout_str[128];

    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), inlink->channels,
                                 inlink->channel_layout);

    text = av_asprintf("%d Hz | %s", inlink->sample_rate, chlayout_str);
    if (!text)
        return AVERROR(ENOMEM);

    drawtext(s->outpicref, 2, outlink->h - 10, "CREATED BY LIBAVFILTER", 0);
    drawtext(s->outpicref, outlink->w - 2 - strlen(text) * 10, outlink->h - 10, text, 0);
    av_freep(&text);
    if (s->stop) {
        text = av_asprintf("Zoom: %d Hz - %d Hz", s->start, s->stop);
        if (!text)
            return AVERROR(ENOMEM);
        drawtext(s->outpicref, outlink->w - 2 - strlen(text) * 10, 3, text, 0);
        av_freep(&text);
    }

    dst = s->outpicref->data[0] + (s->start_y - 1) * s->outpicref->linesize[0] + s->start_x - 1;
    for (x = 0; x < s->w + 1; x++)
        dst[x] = 200;
    dst = s->outpicref->data[0] + (s->start_y + s->h) * s->outpicref->linesize[0] + s->start_x - 1;
    for (x = 0; x < s->w + 1; x++)
        dst[x] = 200;
    for (y = 0; y < s->h + 2; y++) {
        dst = s->outpicref->data[0] + (y + s->start_y - 1) * s->outpicref->linesize[0];
        dst[s->start_x - 1] = 200;
        dst[s->start_x + s->w] = 200;
    }
    if (s->orientation == VERTICAL) {
        int h = s->mode == SEPARATE ? s->h / s->nb_display_channels : s->h;
        int hh = s->mode == SEPARATE ? -(s->h % s->nb_display_channels) + 1 : 1;
        for (ch = 0; ch < (s->mode == SEPARATE ? s->nb_display_channels : 1); ch++) {
            for (y = 0; y < h; y += 20) {
                dst = s->outpicref->data[0] + (s->start_y + h * (ch + 1) - y - hh) * s->outpicref->linesize[0];
                dst[s->start_x - 2] = 200;
                dst[s->start_x + s->w + 1] = 200;
            }
            for (y = 0; y < h; y += 40) {
                dst = s->outpicref->data[0] + (s->start_y + h * (ch + 1) - y - hh) * s->outpicref->linesize[0];
                dst[s->start_x - 3] = 200;
                dst[s->start_x + s->w + 2] = 200;
            }
            dst = s->outpicref->data[0] + (s->start_y - 2) * s->outpicref->linesize[0] + s->start_x;
            for (x = 0; x < s->w; x += 40)
                dst[x] = 200;
            dst = s->outpicref->data[0] + (s->start_y - 3) * s->outpicref->linesize[0] + s->start_x;
            for (x = 0; x < s->w; x += 80)
                dst[x] = 200;
            dst = s->outpicref->data[0] + (s->h + s->start_y + 1) * s->outpicref->linesize[0] + s->start_x;
            for (x = 0; x < s->w; x += 40) {
                dst[x] = 200;
            }
            dst = s->outpicref->data[0] + (s->h + s->start_y + 2) * s->outpicref->linesize[0] + s->start_x;
            for (x = 0; x < s->w; x += 80) {
                dst[x] = 200;
            }
            for (y = 0; y < h; y += 40) {
                float range = s->stop ? s->stop - s->start : inlink->sample_rate / 2;
                float bin = s->fscale == F_LINEAR ? y : get_log_hz(y, h, inlink->sample_rate);
                float hertz = s->start + bin * range / (float)(1 << (int)ceil(log2(h)));
                char *units;

                if (hertz == 0)
                    units = av_asprintf("DC");
                else
                    units = av_asprintf("%.2f", hertz);
                if (!units)
                    return AVERROR(ENOMEM);

                drawtext(s->outpicref, s->start_x - 8 * strlen(units) - 4, h * (ch + 1) + s->start_y - y - 4 - hh, units, 0);
                av_free(units);
            }
        }

        for (x = 0; x < s->w && s->single_pic; x += 80) {
            float seconds = x * spp / inlink->sample_rate;
            char *units = get_time(ctx, seconds, x);
            if (!units)
                return AVERROR(ENOMEM);

            drawtext(s->outpicref, s->start_x + x - 4 * strlen(units), s->h + s->start_y + 6, units, 0);
            drawtext(s->outpicref, s->start_x + x - 4 * strlen(units), s->start_y - 12, units, 0);
            av_free(units);
        }

        drawtext(s->outpicref, outlink->w / 2 - 4 * 4, outlink->h - s->start_y / 2, "TIME", 0);
        drawtext(s->outpicref, s->start_x / 7, outlink->h / 2 - 14 * 4, "FREQUENCY (Hz)", 1);
    } else {
        int w = s->mode == SEPARATE ? s->w / s->nb_display_channels : s->w;
        for (y = 0; y < s->h; y += 20) {
            dst = s->outpicref->data[0] + (s->start_y + y) * s->outpicref->linesize[0];
            dst[s->start_x - 2] = 200;
            dst[s->start_x + s->w + 1] = 200;
        }
        for (y = 0; y < s->h; y += 40) {
            dst = s->outpicref->data[0] + (s->start_y + y) * s->outpicref->linesize[0];
            dst[s->start_x - 3] = 200;
            dst[s->start_x + s->w + 2] = 200;
        }
        for (ch = 0; ch < (s->mode == SEPARATE ? s->nb_display_channels : 1); ch++) {
            dst = s->outpicref->data[0] + (s->start_y - 2) * s->outpicref->linesize[0] + s->start_x + w * ch;
            for (x = 0; x < w; x += 40)
                dst[x] = 200;
            dst = s->outpicref->data[0] + (s->start_y - 3) * s->outpicref->linesize[0] + s->start_x + w * ch;
            for (x = 0; x < w; x += 80)
                dst[x] = 200;
            dst = s->outpicref->data[0] + (s->h + s->start_y + 1) * s->outpicref->linesize[0] + s->start_x + w * ch;
            for (x = 0; x < w; x += 40) {
                dst[x] = 200;
            }
            dst = s->outpicref->data[0] + (s->h + s->start_y + 2) * s->outpicref->linesize[0] + s->start_x + w * ch;
            for (x = 0; x < w; x += 80) {
                dst[x] = 200;
            }
            for (x = 0; x < w - 79; x += 80) {
                float range = s->stop ? s->stop - s->start : inlink->sample_rate / 2;
                float bin = s->fscale == F_LINEAR ? x : get_log_hz(x, w, inlink->sample_rate);
                float hertz = s->start + bin * range / (float)(1 << (int)ceil(log2(w)));
                char *units;

                if (hertz == 0)
                    units = av_asprintf("DC");
                else
                    units = av_asprintf("%.2f", hertz);
                if (!units)
                    return AVERROR(ENOMEM);

                drawtext(s->outpicref, s->start_x - 4 * strlen(units) + x + w * ch, s->start_y - 12, units, 0);
                drawtext(s->outpicref, s->start_x - 4 * strlen(units) + x + w * ch, s->h + s->start_y + 6, units, 0);
                av_free(units);
            }
        }

        for (y = 0; y < s->h && s->single_pic; y += 40) {
            float seconds = y * spp / inlink->sample_rate;
            char *units = get_time(ctx, seconds, x);
            if (!units)
                return AVERROR(ENOMEM);

            drawtext(s->outpicref, s->start_x - 8 * strlen(units) - 4, s->start_y + y - 4, units, 0);
            av_free(units);
        }

        drawtext(s->outpicref, s->start_x / 7, outlink->h / 2 - 4 * 4, "TIME", 1);
        drawtext(s->outpicref, outlink->w / 2 - 14 * 4, outlink->h - s->start_y / 2, "FREQUENCY (Hz)", 0);
    }

    for (ch = 0; ch < (multi ? s->nb_display_channels : 1); ch++) {
        int h = multi ? s->h / s->nb_display_channels : s->h;

        for (y = 0; y < h; y++) {
            float out[3] = { 0., 127.5, 127.5};
            int chn;

            for (chn = 0; chn < (s->mode == SEPARATE ? 1 : s->nb_display_channels); chn++) {
                float yf, uf, vf;
                int channel = (multi) ? s->nb_display_channels - ch - 1 : chn;
                float lout[3];

                color_range(s, channel, &yf, &uf, &vf);
                pick_color(s, yf, uf, vf, y / (float)h, lout);
                out[0] += lout[0];
                out[1] += lout[1];
                out[2] += lout[2];
            }
            memset(s->outpicref->data[0]+(s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[0] + s->w + s->start_x + 20, av_clip_uint8(out[0]), 10);
            memset(s->outpicref->data[1]+(s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[1] + s->w + s->start_x + 20, av_clip_uint8(out[1]), 10);
            memset(s->outpicref->data[2]+(s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[2] + s->w + s->start_x + 20, av_clip_uint8(out[2]), 10);
        }

        for (y = 0; ch == 0 && y < h; y += h / 10) {
            float value = 120.f * log10f(1.f - y / (float)h);
            char *text;

            if (value < -120)
                break;
            text = av_asprintf("%.0f dB", value);
            if (!text)
                continue;
            drawtext(s->outpicref, s->w + s->start_x + 35, s->start_y + y - 5, text, 0);
            av_free(text);
        }
    }

    return 0;
}

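/* get_value() reads the precomputed magnitude or phase for one bin and
 * applies the selected amplitude scale; in the LOG case
 * a = 1 + log10(clip(a, 1e-6, 1)) / 6 maps -120 dBFS..0 dBFS onto 0..1. */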
static float get_value(AVFilterContext *ctx, int ch, int y)
{
    ShowSpectrumContext *s = ctx->priv;
    float *magnitudes = s->magnitudes[ch];
    float *phases = s->phases[ch];
    float a;

    switch (s->data) {
    case D_MAGNITUDE:
        /* get magnitude */
        a = magnitudes[y];
        break;
    case D_PHASE:
        /* get phase */
        a = phases[y];
        break;
    default:
        av_assert0(0);
    }

    /* apply scale */
    switch (s->scale) {
    case LINEAR:
        a = av_clipf(a, 0, 1);
        break;
    case SQRT:
        a = av_clipf(sqrtf(a), 0, 1);
        break;
    case CBRT:
        a = av_clipf(cbrtf(a), 0, 1);
        break;
    case FOURTHRT:
        a = av_clipf(sqrtf(sqrtf(a)), 0, 1);
        break;
    case FIFTHRT:
        a = av_clipf(powf(a, 0.20), 0, 1);
        break;
    case LOG:
        a = 1.f + log10f(av_clipf(a, 1e-6, 1)) / 6.f; // zero = -120dBFS
        break;
    default:
        av_assert0(0);
    }

    return a;
}

static int plot_channel_lin(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ShowSpectrumContext *s = ctx->priv;
    const int h = s->orientation == VERTICAL ? s->channel_height : s->channel_width;
    const int ch = jobnr;
    float yf, uf, vf;
    int y;

    /* decide color range */
    color_range(s, ch, &yf, &uf, &vf);

    /* draw the channel */
    for (y = 0; y < h; y++) {
        int row = (s->mode == COMBINED) ? y : ch * h + y;
        float *out = &s->color_buffer[ch][3 * row];
        float a = get_value(ctx, ch, y);

        pick_color(s, yf, uf, vf, a, out);
    }

    return 0;
}

static int plot_channel_log(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ShowSpectrumContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const int h = s->orientation == VERTICAL ? s->channel_height : s->channel_width;
    const int ch = jobnr;
    float y, yf, uf, vf;
    int yy = 0;

    /* decide color range */
    color_range(s, ch, &yf, &uf, &vf);

    /* draw the channel */
    for (y = 0; y < h && yy < h; yy++) {
        float pos0 = bin_pos(yy + 0, h, inlink->sample_rate);
        float pos1 = bin_pos(yy + 1, h, inlink->sample_rate);
        float delta = pos1 - pos0;
        float a0, a1;

        a0 = get_value(ctx, ch, yy + 0);
        a1 = get_value(ctx, ch, FFMIN(yy + 1, h - 1));
        for (float j = pos0; j < pos1 && y + j - pos0 < h; j++) {
            float row = (s->mode == COMBINED) ? y + j - pos0 : ch * h + y + j - pos0;
            float *out = &s->color_buffer[ch][3 * FFMIN(lrintf(row), h - 1)];
            float lerpfrac = (j - pos0) / delta;

            pick_color(s, yf, uf, vf, lerpfrac * a1 + (1.f - lerpfrac) * a0, out);
        }
        y += delta;
    }

    return 0;
}

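/* config_output() picks fft_bits as the smallest power of two with
 * 1 << fft_bits >= 2 * h (or 2 * w for horizontal orientation), so that after
 * discarding the mirrored half of the spectrum there is roughly one bin per
 * output pixel. When a zoom band is set, the FFT contexts get one extra bit
 * and buf_size is doubled, since the chirp transform above works on
 * zero-padded buffers of twice the window size. */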
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowSpectrumContext *s = ctx->priv;
    int i, fft_bits, h, w;
    float overlap;

    switch (s->fscale) {
    case F_LINEAR: s->plot_channel = plot_channel_lin; break;
    case F_LOG:    s->plot_channel = plot_channel_log; break;
    default: return AVERROR_BUG;
    }

    s->stop = FFMIN(s->stop, inlink->sample_rate / 2);
    if (s->stop && s->stop <= s->start) {
        av_log(ctx, AV_LOG_ERROR, "Stop frequency should be greater than start.\n");
        return AVERROR(EINVAL);
    }

    if (!strcmp(ctx->filter->name, "showspectrumpic"))
        s->single_pic = 1;

    outlink->w = s->w;
    outlink->h = s->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};

    if (s->legend) {
        s->start_x = (log10(inlink->sample_rate) + 1) * 25;
        s->start_y = 64;
        outlink->w += s->start_x * 2;
        outlink->h += s->start_y * 2;
    }

    h = (s->mode == COMBINED || s->orientation == HORIZONTAL) ? s->h : s->h / inlink->channels;
    w = (s->mode == COMBINED || s->orientation == VERTICAL)   ? s->w : s->w / inlink->channels;
    s->channel_height = h;
    s->channel_width  = w;

    if (s->orientation == VERTICAL) {
        /* FFT window size (precision) according to the requested output frame height */
        for (fft_bits = 1; 1 << fft_bits < 2 * h; fft_bits++);
    } else {
        /* FFT window size (precision) according to the requested output frame width */
        for (fft_bits = 1; 1 << fft_bits < 2 * w; fft_bits++);
    }

    s->win_size = 1 << fft_bits;
    s->buf_size = s->win_size << !!s->stop;

    if (!s->fft) {
        s->fft = av_calloc(inlink->channels, sizeof(*s->fft));
        if (!s->fft)
            return AVERROR(ENOMEM);
    }

    if (s->stop) {
        if (!s->ifft) {
            s->ifft = av_calloc(inlink->channels, sizeof(*s->ifft));
            if (!s->ifft)
                return AVERROR(ENOMEM);
        }
    }

    /* (re-)configuration if the video output changed (or first init) */
    if (fft_bits != s->fft_bits) {
        AVFrame *outpicref;

        s->fft_bits = fft_bits;

        /* FFT buffers: x2 for each (display) channel buffer.
         * Note: we use free and malloc instead of a realloc-like function to
         * make sure the buffer is aligned in memory for the FFT functions. */
        for (i = 0; i < s->nb_display_channels; i++) {
            if (s->stop) {
                av_fft_end(s->ifft[i]);
                av_freep(&s->fft_scratch[i]);
            }
            av_fft_end(s->fft[i]);
            av_freep(&s->fft_data[i]);
        }
        av_freep(&s->fft_data);

        s->nb_display_channels = inlink->channels;
        for (i = 0; i < s->nb_display_channels; i++) {
            s->fft[i] = av_fft_init(fft_bits + !!s->stop, 0);
            if (s->stop) {
                s->ifft[i] = av_fft_init(fft_bits + !!s->stop, 1);
                if (!s->ifft[i]) {
                    av_log(ctx, AV_LOG_ERROR, "Unable to create Inverse FFT context. "
                           "The window size might be too high.\n");
                    return AVERROR(EINVAL);
                }
            }
            if (!s->fft[i]) {
                av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
                       "The window size might be too high.\n");
                return AVERROR(EINVAL);
            }
        }

        s->magnitudes = av_calloc(s->nb_display_channels, sizeof(*s->magnitudes));
        if (!s->magnitudes)
            return AVERROR(ENOMEM);
        for (i = 0; i < s->nb_display_channels; i++) {
            s->magnitudes[i] = av_calloc(s->orientation == VERTICAL ? s->h : s->w, sizeof(**s->magnitudes));
            if (!s->magnitudes[i])
                return AVERROR(ENOMEM);
        }

        s->phases = av_calloc(s->nb_display_channels, sizeof(*s->phases));
        if (!s->phases)
            return AVERROR(ENOMEM);
        for (i = 0; i < s->nb_display_channels; i++) {
            s->phases[i] = av_calloc(s->orientation == VERTICAL ? s->h : s->w, sizeof(**s->phases));
            if (!s->phases[i])
                return AVERROR(ENOMEM);
        }

        av_freep(&s->color_buffer);
        s->color_buffer = av_calloc(s->nb_display_channels, sizeof(*s->color_buffer));
        if (!s->color_buffer)
            return AVERROR(ENOMEM);
        for (i = 0; i < s->nb_display_channels; i++) {
            s->color_buffer[i] = av_calloc(s->orientation == VERTICAL ? s->h * 3 : s->w * 3, sizeof(**s->color_buffer));
            if (!s->color_buffer[i])
                return AVERROR(ENOMEM);
        }

        s->fft_data = av_calloc(s->nb_display_channels, sizeof(*s->fft_data));
        if (!s->fft_data)
            return AVERROR(ENOMEM);
        s->fft_scratch = av_calloc(s->nb_display_channels, sizeof(*s->fft_scratch));
        if (!s->fft_scratch)
            return AVERROR(ENOMEM);
        for (i = 0; i < s->nb_display_channels; i++) {
            s->fft_data[i] = av_calloc(s->buf_size, sizeof(**s->fft_data));
            if (!s->fft_data[i])
                return AVERROR(ENOMEM);

            s->fft_scratch[i] = av_calloc(s->buf_size, sizeof(**s->fft_scratch));
            if (!s->fft_scratch[i])
                return AVERROR(ENOMEM);
        }

        /* pre-calc windowing function */
        s->window_func_lut =
            av_realloc_f(s->window_func_lut, s->win_size,
                         sizeof(*s->window_func_lut));
        if (!s->window_func_lut)
            return AVERROR(ENOMEM);
        generate_window_func(s->window_func_lut, s->win_size, s->win_func, &overlap);
        if (s->overlap == 1)
            s->overlap = overlap;
        s->hop_size = (1.f - s->overlap) * s->win_size;
        if (s->hop_size < 1) {
            av_log(ctx, AV_LOG_ERROR, "overlap %f too big\n", s->overlap);
            return AVERROR(EINVAL);
        }

        for (s->win_scale = 0, i = 0; i < s->win_size; i++) {
            s->win_scale += s->window_func_lut[i] * s->window_func_lut[i];
        }
        s->win_scale = 1.f / sqrtf(s->win_scale);

        /* prepare the initial picref buffer (black frame) */
        av_frame_free(&s->outpicref);
        s->outpicref = outpicref =
            ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!outpicref)
            return AVERROR(ENOMEM);
        outpicref->sample_aspect_ratio = (AVRational){1,1};
        for (i = 0; i < outlink->h; i++) {
            memset(outpicref->data[0] + i * outpicref->linesize[0],   0, outlink->w);
            memset(outpicref->data[1] + i * outpicref->linesize[1], 128, outlink->w);
            memset(outpicref->data[2] + i * outpicref->linesize[2], 128, outlink->w);
        }
        outpicref->color_range = AVCOL_RANGE_JPEG;

        if (!s->single_pic && s->legend)
            draw_legend(ctx, 0);
    }

    if ((s->orientation == VERTICAL   && s->xpos >= s->w) ||
        (s->orientation == HORIZONTAL && s->xpos >= s->h))
        s->xpos = 0;

    s->auto_frame_rate = av_make_q(inlink->sample_rate, s->hop_size);
    if (s->orientation == VERTICAL && s->sliding == FULLFRAME)
        s->auto_frame_rate.den *= s->w;
    if (s->orientation == HORIZONTAL && s->sliding == FULLFRAME)
        s->auto_frame_rate.den *= s->h;
    if (!s->single_pic && strcmp(s->rate_str, "auto")) {
        int ret = av_parse_video_rate(&s->frame_rate, s->rate_str);
        if (ret < 0)
            return ret;
    } else {
        s->frame_rate = s->auto_frame_rate;
    }
    outlink->frame_rate = s->frame_rate;
    outlink->time_base = av_inv_q(outlink->frame_rate);

    if (s->orientation == VERTICAL) {
        s->combine_buffer =
            av_realloc_f(s->combine_buffer, s->h * 3,
                         sizeof(*s->combine_buffer));
    } else {
        s->combine_buffer =
            av_realloc_f(s->combine_buffer, s->w * 3,
                         sizeof(*s->combine_buffer));
    }

    av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d FFT window size:%d\n",
           s->w, s->h, s->win_size);

    av_audio_fifo_free(s->fifo);
    s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->win_size);
    if (!s->fifo)
        return AVERROR(ENOMEM);
    return 0;
}

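/* Each bin's magnitude is hypot(re, im) scaled by gain * win_scale, where
 * win_scale = 1 / sqrt(sum of squared window coefficients) was computed in
 * config_output() (and is applied once more for the LOG amplitude scale);
 * the phase is atan2(im, re) remapped from -pi..pi to 0..1. */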
#define RE(y, ch) s->fft_data[ch][y].re
#define IM(y, ch) s->fft_data[ch][y].im
#define MAGNITUDE(y, ch) hypotf(RE(y, ch), IM(y, ch))
#define PHASE(y, ch) atan2f(IM(y, ch), RE(y, ch))

static int calc_channel_magnitudes(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ShowSpectrumContext *s = ctx->priv;
    const double w = s->win_scale * (s->scale == LOG ? s->win_scale : 1);
    int y, h = s->orientation == VERTICAL ? s->h : s->w;
    const float f = s->gain * w;
    const int ch = jobnr;
    float *magnitudes = s->magnitudes[ch];

    for (y = 0; y < h; y++)
        magnitudes[y] = MAGNITUDE(y, ch) * f;

    return 0;
}

static int calc_channel_phases(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ShowSpectrumContext *s = ctx->priv;
    const int h = s->orientation == VERTICAL ? s->h : s->w;
    const int ch = jobnr;
    float *phases = s->phases[ch];
    int y;

    for (y = 0; y < h; y++)
        phases[y] = (PHASE(y, ch) / M_PI + 1) / 2;

    return 0;
}

static void acalc_magnitudes(ShowSpectrumContext *s)
{
    const double w = s->win_scale * (s->scale == LOG ? s->win_scale : 1);
    int ch, y, h = s->orientation == VERTICAL ? s->h : s->w;
    const float f = s->gain * w;

    for (ch = 0; ch < s->nb_display_channels; ch++) {
        float *magnitudes = s->magnitudes[ch];

        for (y = 0; y < h; y++)
            magnitudes[y] += MAGNITUDE(y, ch) * f;
    }
}

static void scale_magnitudes(ShowSpectrumContext *s, float scale)
{
    int ch, y, h = s->orientation == VERTICAL ? s->h : s->w;

    for (ch = 0; ch < s->nb_display_channels; ch++) {
        float *magnitudes = s->magnitudes[ch];

        for (y = 0; y < h; y++)
            magnitudes[y] *= scale;
    }
}

static void clear_combine_buffer(ShowSpectrumContext *s, int size)
{
    int y;

    for (y = 0; y < size; y++) {
        s->combine_buffer[3 * y    ] = 0;
        s->combine_buffer[3 * y + 1] = 127.5;
        s->combine_buffer[3 * y + 2] = 127.5;
    }
}

static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowSpectrumContext *s = ctx->priv;
    AVFrame *outpicref = s->outpicref;
    int ret, plane, x, y, z = s->orientation == VERTICAL ? s->h : s->w;

    /* fill a new spectrum column */
    /* initialize buffer for combining to black */
    clear_combine_buffer(s, z);

    ctx->internal->execute(ctx, s->plot_channel, NULL, NULL, s->nb_display_channels);

    for (y = 0; y < z * 3; y++) {
        for (x = 0; x < s->nb_display_channels; x++) {
            s->combine_buffer[y] += s->color_buffer[x][y];
        }
    }

    av_frame_make_writable(s->outpicref);
    /* copy to output */
    if (s->orientation == VERTICAL) {
        if (s->sliding == SCROLL) {
            for (plane = 0; plane < 3; plane++) {
                for (y = 0; y < s->h; y++) {
                    uint8_t *p = outpicref->data[plane] + s->start_x +
                                 (y + s->start_y) * outpicref->linesize[plane];
                    memmove(p, p + 1, s->w - 1);
                }
            }
            s->xpos = s->w - 1;
        } else if (s->sliding == RSCROLL) {
            for (plane = 0; plane < 3; plane++) {
                for (y = 0; y < s->h; y++) {
                    uint8_t *p = outpicref->data[plane] + s->start_x +
                                 (y + s->start_y) * outpicref->linesize[plane];
                    memmove(p + 1, p, s->w - 1);
                }
            }
            s->xpos = 0;
        }
        for (plane = 0; plane < 3; plane++) {
            uint8_t *p = outpicref->data[plane] + s->start_x +
                         (outlink->h - 1 - s->start_y) * outpicref->linesize[plane] +
                         s->xpos;
            for (y = 0; y < s->h; y++) {
                *p = lrintf(av_clipf(s->combine_buffer[3 * y + plane], 0, 255));
                p -= outpicref->linesize[plane];
            }
        }
    } else {
        if (s->sliding == SCROLL) {
            for (plane = 0; plane < 3; plane++) {
                for (y = 1; y < s->h; y++) {
                    memmove(outpicref->data[plane] + (y-1 + s->start_y) * outpicref->linesize[plane] + s->start_x,
                            outpicref->data[plane] + (y   + s->start_y) * outpicref->linesize[plane] + s->start_x,
                            s->w);
                }
            }
            s->xpos = s->h - 1;
        } else if (s->sliding == RSCROLL) {
            for (plane = 0; plane < 3; plane++) {
                for (y = s->h - 1; y >= 1; y--) {
                    memmove(outpicref->data[plane] + (y   + s->start_y) * outpicref->linesize[plane] + s->start_x,
                            outpicref->data[plane] + (y-1 + s->start_y) * outpicref->linesize[plane] + s->start_x,
                            s->w);
                }
            }
            s->xpos = 0;
        }
        for (plane = 0; plane < 3; plane++) {
            uint8_t *p = outpicref->data[plane] + s->start_x +
                         (s->xpos + s->start_y) * outpicref->linesize[plane];
            for (x = 0; x < s->w; x++) {
                *p = lrintf(av_clipf(s->combine_buffer[3 * x + plane], 0, 255));
                p++;
            }
        }
    }

    if (s->sliding != FULLFRAME || s->xpos == 0)
        outpicref->pts = av_rescale_q(insamples->pts, inlink->time_base, outlink->time_base);

    s->xpos++;
    if (s->orientation == VERTICAL && s->xpos >= s->w)
        s->xpos = 0;
    if (s->orientation == HORIZONTAL && s->xpos >= s->h)
        s->xpos = 0;
    if (!s->single_pic && (s->sliding != FULLFRAME || s->xpos == 0)) {
        if (s->old_pts < outpicref->pts) {
            AVFrame *clone;

            if (s->legend) {
                char *units = get_time(ctx, insamples->pts /(float)inlink->sample_rate, x);
                if (!units)
                    return AVERROR(ENOMEM);

                if (s->orientation == VERTICAL) {
                    for (y = 0; y < 10; y++) {
                        memset(s->outpicref->data[0] + outlink->w / 2 - 4 * s->old_len +
                               (outlink->h - s->start_y / 2 - 20 + y) * s->outpicref->linesize[0], 0, 10 * s->old_len);
                    }
                    drawtext(s->outpicref,
                             outlink->w / 2 - 4 * strlen(units),
                             outlink->h - s->start_y / 2 - 20,
                             units, 0);
                } else {
                    for (y = 0; y < 10 * s->old_len; y++) {
                        memset(s->outpicref->data[0] + s->start_x / 7 + 20 +
                               (outlink->h / 2 - 4 * s->old_len + y) * s->outpicref->linesize[0], 0, 10);
                    }
                    drawtext(s->outpicref,
                             s->start_x / 7 + 20,
                             outlink->h / 2 - 4 * strlen(units),
                             units, 1);
                }
                s->old_len = strlen(units);
                av_free(units);
            }
            s->old_pts = outpicref->pts;
            clone = av_frame_clone(s->outpicref);
            if (!clone)
                return AVERROR(ENOMEM);
            ret = ff_filter_frame(outlink, clone);
            if (ret < 0)
                return ret;
            return 0;
        }
    }

    return 1;
}

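/* The activate() callback below buffers incoming audio in an AVAudioFifo;
 * once win_size samples are available it peeks a full window, runs the
 * per-channel FFTs and magnitude/phase jobs, plots one spectrum column, then
 * drains hop_size samples, i.e. consecutive columns overlap by
 * overlap * win_size samples. At EOF in FULLFRAME mode the not-yet-drawn part
 * of the frame is cleared to black and the last frame is pushed downstream. */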
  1264. #if CONFIG_SHOWSPECTRUM_FILTER
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    ShowSpectrumContext *s = ctx->priv;
    int ret;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* Refill the FIFO until a full analysis window is buffered. */
    if (av_audio_fifo_size(s->fifo) < s->win_size) {
        AVFrame *frame = NULL;

        ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            s->pts = frame->pts;
            s->consumed = 0;

            av_audio_fifo_write(s->fifo, (void **)frame->extended_data, frame->nb_samples);
            av_frame_free(&frame);
        }
    }

    if (s->outpicref && av_audio_fifo_size(s->fifo) >= s->win_size) {
        AVFrame *fin = ff_get_audio_buffer(inlink, s->win_size);
        if (!fin)
            return AVERROR(ENOMEM);

        fin->pts = s->pts + s->consumed;
        s->consumed += s->hop_size;
        ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data,
                                 FFMIN(s->win_size, av_audio_fifo_size(s->fifo)));
        if (ret < 0) {
            av_frame_free(&fin);
            return ret;
        }

        av_assert0(fin->nb_samples == s->win_size);

        ctx->internal->execute(ctx, run_channel_fft, fin, NULL, s->nb_display_channels);

        if (s->data == D_MAGNITUDE)
            ctx->internal->execute(ctx, calc_channel_magnitudes, NULL, NULL, s->nb_display_channels);

        if (s->data == D_PHASE)
            ctx->internal->execute(ctx, calc_channel_phases, NULL, NULL, s->nb_display_channels);

        ret = plot_spectrum_column(inlink, fin);
        av_frame_free(&fin);
        av_audio_fifo_drain(s->fifo, s->hop_size);
        if (ret <= 0)
            return ret;
    }

    /* On EOF in fullframe mode, clear the columns that were never drawn and
     * push the last, partially filled frame downstream. */
    if (ff_outlink_get_status(inlink) == AVERROR_EOF &&
        s->sliding == FULLFRAME &&
        s->xpos > 0 && s->outpicref) {
        int64_t pts;

        if (s->orientation == VERTICAL) {
            for (int i = 0; i < outlink->h; i++) {
                memset(s->outpicref->data[0] + i * s->outpicref->linesize[0] + s->xpos, 0, outlink->w - s->xpos);
                memset(s->outpicref->data[1] + i * s->outpicref->linesize[1] + s->xpos, 128, outlink->w - s->xpos);
                memset(s->outpicref->data[2] + i * s->outpicref->linesize[2] + s->xpos, 128, outlink->w - s->xpos);
            }
        } else {
            for (int i = s->xpos; i < outlink->h; i++) {
                memset(s->outpicref->data[0] + i * s->outpicref->linesize[0], 0, outlink->w);
                memset(s->outpicref->data[1] + i * s->outpicref->linesize[1], 128, outlink->w);
                memset(s->outpicref->data[2] + i * s->outpicref->linesize[2], 128, outlink->w);
            }
        }
        s->outpicref->pts += s->consumed;
        pts = s->outpicref->pts;
        ret = ff_filter_frame(outlink, s->outpicref);
        s->outpicref = NULL;
        ff_outlink_set_status(outlink, AVERROR_EOF, pts);
        return 0;
    }

    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    if (ff_outlink_frame_wanted(outlink) && av_audio_fifo_size(s->fifo) < s->win_size) {
        ff_inlink_request_frame(inlink);
        return 0;
    }

    if (av_audio_fifo_size(s->fifo) >= s->win_size) {
        ff_filter_set_ready(ctx, 10);
        return 0;
    }
    return FFERROR_NOT_READY;
}
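/* Illustrative command-line usage only (file names and parameter values are
 * arbitrary examples, not defaults): render a scrolling 1280x720 spectrum of
 * an audio file as H.264 video:
 *
 *     ffmpeg -i input.wav -lavfi showspectrum=s=1280x720:slide=scroll -c:v libx264 output.mp4
 */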
static const AVFilterPad showspectrum_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

static const AVFilterPad showspectrum_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_avf_showspectrum = {
    .name          = "showspectrum",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output."),
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowSpectrumContext),
    .inputs        = showspectrum_inputs,
    .outputs       = showspectrum_outputs,
    .activate      = activate,
    .priv_class    = &showspectrum_class,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
};
#endif // CONFIG_SHOWSPECTRUM_FILTER

#if CONFIG_SHOWSPECTRUMPIC_FILTER
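/* "showspectrumpic" is the single-picture variant of the filter: its
 * filter_frame callback only queues the incoming audio into the FIFO, and the
 * whole spectrogram is rendered into one output frame once the input reaches
 * EOF (see showspectrumpic_request_frame() below). */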
static const AVOption showspectrumpic_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "4096x2048"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "4096x2048"}, 0, 0, FLAGS },
    { "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, 0, NB_MODES-1, FLAGS, "mode" },
    { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
    { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
    { "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=INTENSITY}, 0, NB_CLMODES-1, FLAGS, "color" },
    { "channel",   "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL},   0, 0, FLAGS, "color" },
    { "intensity", "intensity based coloring",        0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
    { "rainbow",   "rainbow based coloring",          0, AV_OPT_TYPE_CONST, {.i64=RAINBOW},   0, 0, FLAGS, "color" },
    { "moreland",  "moreland based coloring",         0, AV_OPT_TYPE_CONST, {.i64=MORELAND},  0, 0, FLAGS, "color" },
    { "nebulae",   "nebulae based coloring",          0, AV_OPT_TYPE_CONST, {.i64=NEBULAE},   0, 0, FLAGS, "color" },
    { "fire",      "fire based coloring",             0, AV_OPT_TYPE_CONST, {.i64=FIRE},      0, 0, FLAGS, "color" },
    { "fiery",     "fiery based coloring",            0, AV_OPT_TYPE_CONST, {.i64=FIERY},     0, 0, FLAGS, "color" },
    { "fruit",     "fruit based coloring",            0, AV_OPT_TYPE_CONST, {.i64=FRUIT},     0, 0, FLAGS, "color" },
    { "cool",      "cool based coloring",             0, AV_OPT_TYPE_CONST, {.i64=COOL},      0, 0, FLAGS, "color" },
    { "magma",     "magma based coloring",            0, AV_OPT_TYPE_CONST, {.i64=MAGMA},     0, 0, FLAGS, "color" },
    { "green",     "green based coloring",            0, AV_OPT_TYPE_CONST, {.i64=GREEN},     0, 0, FLAGS, "color" },
    { "viridis",   "viridis based coloring",          0, AV_OPT_TYPE_CONST, {.i64=VIRIDIS},   0, 0, FLAGS, "color" },
    { "plasma",    "plasma based coloring",           0, AV_OPT_TYPE_CONST, {.i64=PLASMA},    0, 0, FLAGS, "color" },
    { "cividis",   "cividis based coloring",          0, AV_OPT_TYPE_CONST, {.i64=CIVIDIS},   0, 0, FLAGS, "color" },
    { "terrain",   "terrain based coloring",          0, AV_OPT_TYPE_CONST, {.i64=TERRAIN},   0, 0, FLAGS, "color" },
    { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=LOG}, 0, NB_SCALES-1, FLAGS, "scale" },
    { "lin",   "linear",      0, AV_OPT_TYPE_CONST, {.i64=LINEAR},   0, 0, FLAGS, "scale" },
    { "sqrt",  "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT},     0, 0, FLAGS, "scale" },
    { "cbrt",  "cubic root",  0, AV_OPT_TYPE_CONST, {.i64=CBRT},     0, 0, FLAGS, "scale" },
    { "log",   "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG},      0, 0, FLAGS, "scale" },
    { "4thrt", "4th root",    0, AV_OPT_TYPE_CONST, {.i64=FOURTHRT}, 0, 0, FLAGS, "scale" },
    { "5thrt", "5th root",    0, AV_OPT_TYPE_CONST, {.i64=FIFTHRT},  0, 0, FLAGS, "scale" },
    { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=F_LINEAR}, 0, NB_FSCALES-1, FLAGS, "fscale" },
    { "lin", "linear",      0, AV_OPT_TYPE_CONST, {.i64=F_LINEAR}, 0, 0, FLAGS, "fscale" },
    { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=F_LOG},    0, 0, FLAGS, "fscale" },
    { "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
    { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = WFUNC_HANNING}, 0, NB_WFUNC-1, FLAGS, "win_func" },
    { "rect",     "Rectangular",      0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT},     0, 0, FLAGS, "win_func" },
    { "bartlett", "Bartlett",         0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
    { "hann",     "Hann",             0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING},  0, 0, FLAGS, "win_func" },
    { "hanning",  "Hanning",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING},  0, 0, FLAGS, "win_func" },
    { "hamming",  "Hamming",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING},  0, 0, FLAGS, "win_func" },
    { "blackman", "Blackman",         0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
    { "welch",    "Welch",            0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH},    0, 0, FLAGS, "win_func" },
    { "flattop",  "Flat-top",         0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP},  0, 0, FLAGS, "win_func" },
    { "bharris",  "Blackman-Harris",  0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS},  0, 0, FLAGS, "win_func" },
    { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
    { "bhann",    "Bartlett-Hann",    0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN},    0, 0, FLAGS, "win_func" },
    { "sine",     "Sine",             0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE},     0, 0, FLAGS, "win_func" },
    { "nuttall",  "Nuttall",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL},  0, 0, FLAGS, "win_func" },
    { "lanczos",  "Lanczos",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS},  0, 0, FLAGS, "win_func" },
    { "gauss",    "Gauss",            0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS},    0, 0, FLAGS, "win_func" },
    { "tukey",    "Tukey",            0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY},    0, 0, FLAGS, "win_func" },
    { "dolph",    "Dolph-Chebyshev",  0, AV_OPT_TYPE_CONST, {.i64=WFUNC_DOLPH},    0, 0, FLAGS, "win_func" },
    { "cauchy",   "Cauchy",           0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY},   0, 0, FLAGS, "win_func" },
    { "parzen",   "Parzen",           0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN},   0, 0, FLAGS, "win_func" },
    { "poisson",  "Poisson",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON},  0, 0, FLAGS, "win_func" },
    { "bohman",   "Bohman",           0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BOHMAN},   0, 0, FLAGS, "win_func" },
    { "orientation", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=VERTICAL}, 0, NB_ORIENTATIONS-1, FLAGS, "orientation" },
    { "vertical",   NULL, 0, AV_OPT_TYPE_CONST, {.i64=VERTICAL},   0, 0, FLAGS, "orientation" },
    { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=HORIZONTAL}, 0, 0, FLAGS, "orientation" },
    { "gain", "set scale gain", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl = 1}, 0, 128, FLAGS },
    { "legend", "draw legend", OFFSET(legend), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
    { "rotation", "color rotation", OFFSET(rotation), AV_OPT_TYPE_FLOAT, {.dbl = 0}, -1, 1, FLAGS },
    { "start", "start frequency", OFFSET(start), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT32_MAX, FLAGS },
    { "stop",  "stop frequency",  OFFSET(stop),  AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT32_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showspectrumpic);
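/* At EOF, showspectrumpic_request_frame() below drains the buffered audio and
 * maps it onto sz output columns (the picture width for vertical orientation,
 * the height for horizontal). spf is the hop between successive FFT windows,
 * a multiple of win_size clamped to at least 1, and spb is the number of
 * samples that contribute to one column; the per-window magnitudes are
 * accumulated by acalc_magnitudes() and averaged via scale_magnitudes()
 * before each column is plotted. */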
static int showspectrumpic_request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ShowSpectrumContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret, samples;

    ret = ff_request_frame(inlink);
    samples = av_audio_fifo_size(s->fifo);
    if (ret == AVERROR_EOF && s->outpicref && samples > 0) {
        int consumed = 0;
        int x = 0, sz = s->orientation == VERTICAL ? s->w : s->h;
        int ch, spf, spb;
        AVFrame *fin;

        spf = s->win_size * (samples / ((s->win_size * sz) * ceil(samples / (float)(s->win_size * sz))));
        spf = FFMAX(1, spf);

        spb = (samples / (spf * sz)) * spf;

        fin = ff_get_audio_buffer(inlink, s->win_size);
        if (!fin)
            return AVERROR(ENOMEM);

        while (x < sz) {
            ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data, s->win_size);
            if (ret < 0) {
                av_frame_free(&fin);
                return ret;
            }

            av_audio_fifo_drain(s->fifo, spf);

            if (ret < s->win_size) {
                for (ch = 0; ch < s->nb_display_channels; ch++) {
                    memset(fin->extended_data[ch] + ret * sizeof(float), 0,
                           (s->win_size - ret) * sizeof(float));
                }
            }

            ctx->internal->execute(ctx, run_channel_fft, fin, NULL, s->nb_display_channels);
            acalc_magnitudes(s);

            consumed += spf;
            if (consumed >= spb) {
                int h = s->orientation == VERTICAL ? s->h : s->w;

                scale_magnitudes(s, 1.f / (consumed / spf));
                plot_spectrum_column(inlink, fin);
                consumed = 0;
                x++;
                for (ch = 0; ch < s->nb_display_channels; ch++)
                    memset(s->magnitudes[ch], 0, h * sizeof(float));
            }
        }

        av_frame_free(&fin);

        s->outpicref->pts = 0;

        if (s->legend)
            draw_legend(ctx, samples);

        ret = ff_filter_frame(outlink, s->outpicref);
        s->outpicref = NULL;
    }

    return ret;
}
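/* Illustrative command-line usage only (file names are arbitrary examples):
 * write the spectrogram of a complete file into a single image:
 *
 *     ffmpeg -i input.flac -lavfi showspectrumpic=s=1024x1024 spectrogram.png
 */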
static int showspectrumpic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    ShowSpectrumContext *s = ctx->priv;
    int ret;

    ret = av_audio_fifo_write(s->fifo, (void **)insamples->extended_data, insamples->nb_samples);
    av_frame_free(&insamples);
    return ret;
}

static const AVFilterPad showspectrumpic_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = showspectrumpic_filter_frame,
    },
    { NULL }
};

static const AVFilterPad showspectrumpic_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = showspectrumpic_request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showspectrumpic = {
    .name          = "showspectrumpic",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output single picture."),
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowSpectrumContext),
    .inputs        = showspectrumpic_inputs,
    .outputs       = showspectrumpic_outputs,
    .priv_class    = &showspectrumpic_class,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
};
#endif // CONFIG_SHOWSPECTRUMPIC_FILTER