You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

841 lines
31KB

  1. /*
  2. * Copyright (c) 2014 Muhammad Faiz <mfcc64@gmail.com>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include "config.h"
  21. #include "libavcodec/avfft.h"
  22. #include "libavutil/avassert.h"
  23. #include "libavutil/channel_layout.h"
  24. #include "libavutil/opt.h"
  25. #include "libavutil/xga_font_data.h"
  26. #include "libavutil/qsort.h"
  27. #include "libavutil/time.h"
  28. #include "libavutil/eval.h"
  29. #include "avfilter.h"
  30. #include "internal.h"
  31. #include <math.h>
  32. #include <stdlib.h>
  33. #if CONFIG_LIBFREETYPE
  34. #include <ft2build.h>
  35. #include FT_FREETYPE_H
  36. #endif
/* this filter is designed to do 16 bins/semitones constant Q transform with Brown-Puckette algorithm
 * start from E0 to D#10 (10 octaves)
 * so there are 16 bins/semitones * 12 semitones/octaves * 10 octaves = 1920 bins
 * match with full HD resolution */

/* output geometry at full HD; halved everywhere when the "fullhd" option is 0 */
#define VIDEO_WIDTH 1920
#define VIDEO_HEIGHT 1080
/* height of the note-name strip between the bargraph and the sonogram */
#define FONT_HEIGHT 32
/* scrolling sonogram occupies the bottom half minus the font strip */
#define SPECTOGRAM_HEIGHT ((VIDEO_HEIGHT-FONT_HEIGHT)/2)
#define SPECTOGRAM_START (VIDEO_HEIGHT-SPECTOGRAM_HEIGHT)
/* frequency of the leftmost CQT bin; bin k is BASE_FREQ * 2^(k/192) (see config_output) */
#define BASE_FREQ 20.051392800492
/* fraction of total kernel energy that may be discarded when sparsifying coefficients */
#define COEFF_CLAMP 1.0e-4
/* floor for the evaluated tlength expression, in seconds */
#define TLENGTH_MIN 0.001
#define TLENGTH_DEFAULT "384/f*tc/(384/f+tc)"
/* clamp range for the evaluated volume expression */
#define VOLUME_MIN 1e-10
#define VOLUME_MAX 100.0
/* default fontcolor expression: a red-to-blue ramp keyed on the MIDI note of f */
#define FONTCOLOR_DEFAULT "st(0, (midi(f)-59.5)/12);" \
"st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));" \
"r(1-ld(1)) + b(ld(1))"
/* one nonzero entry of a sparse spectral-kernel row: the kernel value
 * and the FFT bin index it multiplies (see config_output / plot_cqt) */
typedef struct {
    FFTSample value; /* kernel coefficient */
    int index;       /* FFT bin this coefficient applies to */
} SparseCoeff;
/* private context of the showcqt filter */
typedef struct {
    const AVClass *class;
    AVFrame *outpicref;              /* reusable output video frame */
    FFTContext *fft_context;
    FFTComplex *fft_data;            /* input ring: left samples in .re, right in .im */
    FFTComplex *fft_result_left;     /* FFT of left channel (after untangling) */
    FFTComplex *fft_result_right;    /* FFT of right channel (after untangling) */
    uint8_t *spectogram;             /* sonogram ring buffer, spectogram_height rows */
    SparseCoeff *coeff_sort;         /* scratch buffer for sorting kernel coefficients */
    SparseCoeff *coeffs[VIDEO_WIDTH];/* per-bin sparse spectral kernel */
    uint8_t *font_alpha;             /* freetype-rendered note-name alpha map, or NULL */
    char *fontfile; /* using freetype */
    int coeffs_len[VIDEO_WIDTH];     /* number of entries in coeffs[k] */
    uint8_t fontcolor_value[VIDEO_WIDTH*3]; /* result of fontcolor option */
    int64_t frame_count;             /* output frame counter, also used as pts */
    int spectogram_count;            /* transforms done since last emitted frame */
    int spectogram_index;            /* current row of the sonogram ring buffer */
    int fft_bits;                    /* log2 of the FFT length */
    int req_fullfilled;              /* set when request_frame produced a frame */
    int remaining_fill;              /* samples still needed to fill fft_data */
    char *tlength;                   /* expression: transform length per frequency */
    char *volume;                    /* expression: volume per frequency */
    char *fontcolor;                 /* expression: font color per frequency */
    double timeclamp; /* lower timeclamp, time-accurate, higher timeclamp, freq-accurate (at low freq)*/
    float coeffclamp; /* lower coeffclamp, more precise, higher coeffclamp, faster */
    int fullhd; /* if true, output video is at full HD resolution, otherwise it will be halved */
    float gamma; /* lower gamma, more contrast, higher gamma, more range */
    float gamma2; /* gamma of bargraph */
    int fps; /* the required fps is so strict, so it's enough to be int, but 24000/1001 etc cannot be encoded */
    int count; /* fps * count = transform rate */
} ShowCQTContext;
#define OFFSET(x) offsetof(ShowCQTContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* user options; string options are expressions evaluated per frequency bin
 * with variables timeclamp/tc and frequency/freq/f (see config_output) */
static const AVOption showcqt_options[] = {
    { "volume", "set volume", OFFSET(volume), AV_OPT_TYPE_STRING, { .str = "16" }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "tlength", "set transform length", OFFSET(tlength), AV_OPT_TYPE_STRING, { .str = TLENGTH_DEFAULT }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "timeclamp", "set timeclamp", OFFSET(timeclamp), AV_OPT_TYPE_DOUBLE, { .dbl = 0.17 }, 0.1, 1.0, FLAGS },
    { "coeffclamp", "set coeffclamp", OFFSET(coeffclamp), AV_OPT_TYPE_FLOAT, { .dbl = 1 }, 0.1, 10, FLAGS },
    { "gamma", "set gamma", OFFSET(gamma), AV_OPT_TYPE_FLOAT, { .dbl = 3 }, 1, 7, FLAGS },
    { "gamma2", "set gamma of bargraph", OFFSET(gamma2), AV_OPT_TYPE_FLOAT, { .dbl = 1 }, 1, 7, FLAGS },
    { "fullhd", "set full HD resolution", OFFSET(fullhd), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, FLAGS },
    { "fps", "set video fps", OFFSET(fps), AV_OPT_TYPE_INT, { .i64 = 25 }, 10, 100, FLAGS },
    { "count", "set number of transform per frame", OFFSET(count), AV_OPT_TYPE_INT, { .i64 = 6 }, 1, 30, FLAGS },
    { "fontfile", "set font file", OFFSET(fontfile), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "fontcolor", "set font color", OFFSET(fontcolor), AV_OPT_TYPE_STRING, { .str = FONTCOLOR_DEFAULT }, CHAR_MIN, CHAR_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showcqt);
  107. static av_cold void uninit(AVFilterContext *ctx)
  108. {
  109. int k;
  110. ShowCQTContext *s = ctx->priv;
  111. av_fft_end(s->fft_context);
  112. s->fft_context = NULL;
  113. for (k = 0; k < VIDEO_WIDTH; k++)
  114. av_freep(&s->coeffs[k]);
  115. av_freep(&s->fft_data);
  116. av_freep(&s->fft_result_left);
  117. av_freep(&s->fft_result_right);
  118. av_freep(&s->coeff_sort);
  119. av_freep(&s->spectogram);
  120. av_freep(&s->font_alpha);
  121. av_frame_free(&s->outpicref);
  122. }
  123. static int query_formats(AVFilterContext *ctx)
  124. {
  125. AVFilterFormats *formats = NULL;
  126. AVFilterChannelLayouts *layouts = NULL;
  127. AVFilterLink *inlink = ctx->inputs[0];
  128. AVFilterLink *outlink = ctx->outputs[0];
  129. static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
  130. static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE };
  131. static const int64_t channel_layouts[] = { AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_STEREO_DOWNMIX, -1 };
  132. static const int samplerates[] = { 44100, 48000, -1 };
  133. /* set input audio formats */
  134. formats = ff_make_format_list(sample_fmts);
  135. if (!formats)
  136. return AVERROR(ENOMEM);
  137. ff_formats_ref(formats, &inlink->out_formats);
  138. layouts = avfilter_make_format64_list(channel_layouts);
  139. if (!layouts)
  140. return AVERROR(ENOMEM);
  141. ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
  142. formats = ff_make_format_list(samplerates);
  143. if (!formats)
  144. return AVERROR(ENOMEM);
  145. ff_formats_ref(formats, &inlink->out_samplerates);
  146. /* set output video format */
  147. formats = ff_make_format_list(pix_fmts);
  148. if (!formats)
  149. return AVERROR(ENOMEM);
  150. ff_formats_ref(formats, &outlink->in_formats);
  151. return 0;
  152. }
#if CONFIG_LIBFREETYPE
/* Render the note-name row ("EF G A BC D ", repeated across 10 octaves)
 * with the user-supplied font into s->font_alpha, an 8-bit alpha map of
 * size font_height x video_width. On any failure s->font_alpha stays NULL
 * and the built-in VGA font is used instead (see plot_cqt). */
static void load_freetype_font(AVFilterContext *ctx)
{
    static const char str[] = "EF G A BC D "; /* one octave starting at E */
    ShowCQTContext *s = ctx->priv;
    FT_Library lib = NULL;
    FT_Face face = NULL;
    int video_scale = s->fullhd ? 2 : 1;
    int video_width = (VIDEO_WIDTH/2) * video_scale;
    int font_height = (FONT_HEIGHT/2) * video_scale;
    int font_width = 8 * video_scale;           /* one glyph cell */
    int font_repeat = font_width * 12;          /* one octave (12 cells) */
    int linear_hori_advance = font_width * 65536; /* desired advance, 16.16 fixed point */
    int non_monospace_warning = 0;
    int x;

    s->font_alpha = NULL;
    if (!s->fontfile)
        return;

    if (FT_Init_FreeType(&lib))
        goto fail;

    if (FT_New_Face(lib, s->fontfile, 0, &face))
        goto fail;

    if (FT_Set_Char_Size(face, 16*64, 0, 0, 0))
        goto fail;

    /* probe 'A' to measure the advance, then rescale the face so a glyph
     * advance matches the 8*video_scale pixel cell width */
    if (FT_Load_Char(face, 'A', FT_LOAD_RENDER))
        goto fail;

    if (FT_Set_Char_Size(face, 16*64 * linear_hori_advance / face->glyph->linearHoriAdvance, 0, 0, 0))
        goto fail;

    s->font_alpha = av_malloc_array(font_height, video_width);
    if (!s->font_alpha)
        goto fail;

    memset(s->font_alpha, 0, font_height * video_width);

    for (x = 0; x < 12; x++) {
        int sx, sy, rx, bx, by, dx, dy;
        if (str[x] == ' ')
            continue;

        if (FT_Load_Char(face, str[x], FT_LOAD_RENDER))
            goto fail;

        if (face->glyph->advance.x != font_width*64 && !non_monospace_warning) {
            av_log(ctx, AV_LOG_WARNING, "Font is not monospace\n");
            non_monospace_warning = 1;
        }

        /* baseline placement: 4*video_scale pixels above the strip bottom */
        sy = font_height - 4*video_scale - face->glyph->bitmap_top;
        for (rx = 0; rx < 10; rx++) { /* repeat the glyph once per octave */
            sx = rx * font_repeat + x * font_width + face->glyph->bitmap_left;
            for (by = 0; by < face->glyph->bitmap.rows; by++) {
                dy = by + sy;
                if (dy < 0)
                    continue; /* clip above the strip */
                if (dy >= font_height)
                    break;    /* clip below the strip */

                for (bx = 0; bx < face->glyph->bitmap.width; bx++) {
                    dx = bx + sx;
                    if (dx < 0)
                        continue; /* clip left */
                    if (dx >= video_width)
                        break;    /* clip right */
                    s->font_alpha[dy*video_width+dx] = face->glyph->bitmap.buffer[by*face->glyph->bitmap.width+bx];
                }
            }
        }
    }

    FT_Done_Face(face);
    FT_Done_FreeType(lib);
    return;

fail:
    av_log(ctx, AV_LOG_WARNING, "Error while loading freetype font, using default font instead\n");
    FT_Done_Face(face);
    FT_Done_FreeType(lib);
    av_freep(&s->font_alpha);
    return;
}
#endif
  226. static double a_weighting(void *p, double f)
  227. {
  228. double ret = 12200.0*12200.0 * (f*f*f*f);
  229. ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) *
  230. sqrt((f*f + 107.7*107.7) * (f*f + 737.9*737.9));
  231. return ret;
  232. }
  233. static double b_weighting(void *p, double f)
  234. {
  235. double ret = 12200.0*12200.0 * (f*f*f);
  236. ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) * sqrt(f*f + 158.5*158.5);
  237. return ret;
  238. }
  239. static double c_weighting(void *p, double f)
  240. {
  241. double ret = 12200.0*12200.0 * (f*f);
  242. ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0);
  243. return ret;
  244. }
  245. static double midi(void *p, double f)
  246. {
  247. return log2(f/440.0) * 12.0 + 69.0;
  248. }
  249. static double r_func(void *p, double x)
  250. {
  251. x = av_clipd(x, 0.0, 1.0);
  252. return (int)(x*255.0+0.5) << 16;
  253. }
  254. static double g_func(void *p, double x)
  255. {
  256. x = av_clipd(x, 0.0, 1.0);
  257. return (int)(x*255.0+0.5) << 8;
  258. }
  259. static double b_func(void *p, double x)
  260. {
  261. x = av_clipd(x, 0.0, 1.0);
  262. return (int)(x*255.0+0.5);
  263. }
  264. static inline int qsort_sparsecoeff(const SparseCoeff *a, const SparseCoeff *b)
  265. {
  266. if (fabsf(a->value) >= fabsf(b->value))
  267. return 1;
  268. else
  269. return -1;
  270. }
/* Configure the output link and precompute the sparse CQT spectral kernel.
 * For each of the 1920 bins, a windowed complex exponential of the bin's
 * frequency is synthesized in the time domain, transformed with the FFT,
 * and the resulting spectrum is sparsified: only the largest coefficients
 * (keeping all but a COEFF_CLAMP*coeffclamp fraction of the total energy)
 * are stored in s->coeffs[k]. Returns 0 or a negative AVERROR code. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowCQTContext *s = ctx->priv;
    AVExpr *tlength_expr = NULL, *volume_expr = NULL, *fontcolor_expr = NULL;
    uint8_t *fontcolor_value = s->fontcolor_value;
    /* variables/functions visible to the user-supplied expressions */
    static const char * const expr_vars[] = { "timeclamp", "tc", "frequency", "freq", "f", NULL };
    static const char * const expr_func_names[] = { "a_weighting", "b_weighting", "c_weighting", NULL };
    static const char * const expr_fontcolor_func_names[] = { "midi", "r", "g", "b", NULL };
    static double (* const expr_funcs[])(void *, double) = { a_weighting, b_weighting, c_weighting, NULL };
    static double (* const expr_fontcolor_funcs[])(void *, double) = { midi, r_func, g_func, b_func, NULL };
    int fft_len, k, x, y, ret;
    int num_coeffs = 0;
    int rate = inlink->sample_rate;
    double max_len = rate * (double) s->timeclamp;
    int64_t start_time, end_time;
    int video_scale = s->fullhd ? 2 : 1;
    int video_width = (VIDEO_WIDTH/2) * video_scale;
    int video_height = (VIDEO_HEIGHT/2) * video_scale;
    int spectogram_height = (SPECTOGRAM_HEIGHT/2) * video_scale;

    /* FFT length: smallest power of two covering timeclamp seconds */
    s->fft_bits = ceil(log2(max_len));
    fft_len = 1 << s->fft_bits;

    /* hop size (rate / (fps*count)) must be an integer number of samples */
    if (rate % (s->fps * s->count)) {
        av_log(ctx, AV_LOG_ERROR, "Rate (%u) is not divisible by fps*count (%u*%u)\n", rate, s->fps, s->count);
        return AVERROR(EINVAL);
    }

    s->fft_data = av_malloc_array(fft_len, sizeof(*s->fft_data));
    s->coeff_sort = av_malloc_array(fft_len, sizeof(*s->coeff_sort));
    s->fft_result_left = av_malloc_array(fft_len, sizeof(*s->fft_result_left));
    s->fft_result_right = av_malloc_array(fft_len, sizeof(*s->fft_result_right));
    s->fft_context = av_fft_init(s->fft_bits, 0);

    if (!s->fft_data || !s->coeff_sort || !s->fft_result_left || !s->fft_result_right || !s->fft_context)
        return AVERROR(ENOMEM);

#if CONFIG_LIBFREETYPE
    load_freetype_font(ctx);
#else
    if (s->fontfile)
        av_log(ctx, AV_LOG_WARNING, "Freetype is not available, ignoring fontfile option\n");
    s->font_alpha = NULL;
#endif

    av_log(ctx, AV_LOG_INFO, "Calculating spectral kernel, please wait\n");
    start_time = av_gettime_relative();
    ret = av_expr_parse(&tlength_expr, s->tlength, expr_vars, NULL, NULL, NULL, NULL, 0, ctx);
    if (ret < 0)
        goto eval_error;

    ret = av_expr_parse(&volume_expr, s->volume, expr_vars, expr_func_names,
                        expr_funcs, NULL, NULL, 0, ctx);
    if (ret < 0)
        goto eval_error;

    ret = av_expr_parse(&fontcolor_expr, s->fontcolor, expr_vars, expr_fontcolor_func_names,
                        expr_fontcolor_funcs, NULL, NULL, 0, ctx);
    if (ret < 0)
        goto eval_error;

    for (k = 0; k < VIDEO_WIDTH; k++) {
        int hlen = fft_len >> 1;
        float total = 0;
        float partial = 0;
        /* 16 bins per semitone: bin k is BASE_FREQ * 2^(k/192) */
        double freq = BASE_FREQ * exp2(k * (1.0/192.0));
        double tlen, tlength, volume;
        double expr_vars_val[] = { s->timeclamp, s->timeclamp, freq, freq, freq, 0 };
        /* a window function from Albert H. Nuttall,
         * "Some Windows with Very Good Sidelobe Behavior"
         * -93.32 dB peak sidelobe and 18 dB/octave asymptotic decay
         * coefficient normalized to a0 = 1 */
        double a0 = 0.355768;
        double a1 = 0.487396/a0;
        double a2 = 0.144232/a0;
        double a3 = 0.012604/a0;
        double sv_step, cv_step, sv, cv;
        double sw_step, cw_step, sw, cw, w;

        /* evaluate and sanitize per-bin transform length */
        tlength = av_expr_eval(tlength_expr, expr_vars_val, NULL);
        if (isnan(tlength)) {
            av_log(ctx, AV_LOG_WARNING, "at freq %g: tlength is nan, setting it to %g\n", freq, s->timeclamp);
            tlength = s->timeclamp;
        } else if (tlength < TLENGTH_MIN) {
            av_log(ctx, AV_LOG_WARNING, "at freq %g: tlength is %g, setting it to %g\n", freq, tlength, TLENGTH_MIN);
            tlength = TLENGTH_MIN;
        } else if (tlength > s->timeclamp) {
            av_log(ctx, AV_LOG_WARNING, "at freq %g: tlength is %g, setting it to %g\n", freq, tlength, s->timeclamp);
            tlength = s->timeclamp;
        }

        /* evaluate and clamp per-bin volume */
        volume = FFABS(av_expr_eval(volume_expr, expr_vars_val, NULL));
        if (isnan(volume)) {
            av_log(ctx, AV_LOG_WARNING, "at freq %g: volume is nan, setting it to 0\n", freq);
            volume = VOLUME_MIN;
        } else if (volume < VOLUME_MIN) {
            volume = VOLUME_MIN;
        } else if (volume > VOLUME_MAX) {
            av_log(ctx, AV_LOG_WARNING, "at freq %g: volume is %g, setting it to %g\n", freq, volume, VOLUME_MAX);
            volume = VOLUME_MAX;
        }

        /* font color: one RGB triple per output pixel (every other bin
         * when not rendering at full HD) */
        if (s->fullhd || !(k & 1)) {
            int fontcolor = av_expr_eval(fontcolor_expr, expr_vars_val, NULL);
            fontcolor_value[0] = (fontcolor >> 16) & 0xFF;
            fontcolor_value[1] = (fontcolor >> 8) & 0xFF;
            fontcolor_value[2] = fontcolor & 0xFF;
            fontcolor_value += 3;
        }

        /* synthesize the windowed complex exponential, centered at hlen;
         * sin/cos are advanced incrementally by angle-addition instead of
         * calling sin()/cos() per sample */
        tlen = tlength * rate;
        s->fft_data[0].re = 0;
        s->fft_data[0].im = 0;
        s->fft_data[hlen].re = (1.0 + a1 + a2 + a3) * (1.0/tlen) * volume * (1.0/fft_len);
        s->fft_data[hlen].im = 0;
        sv_step = sv = sin(2.0*M_PI*freq*(1.0/rate));
        cv_step = cv = cos(2.0*M_PI*freq*(1.0/rate));
        /* also optimizing window func */
        sw_step = sw = sin(2.0*M_PI*(1.0/tlen));
        cw_step = cw = cos(2.0*M_PI*(1.0/tlen));
        for (x = 1; x < 0.5 * tlen; x++) {
            double cv_tmp, cw_tmp;
            double cw2, cw3, sw2;
            /* cos/sin of 2w and cos of 3w via double/triple-angle products */
            cw2 = cw * cw - sw * sw;
            sw2 = cw * sw + sw * cw;
            cw3 = cw * cw2 - sw * sw2;
            w = (1.0 + a1 * cw + a2 * cw2 + a3 * cw3) * (1.0/tlen) * volume * (1.0/fft_len);
            s->fft_data[hlen + x].re = w * cv;
            s->fft_data[hlen + x].im = w * sv;
            /* mirror with conjugate symmetry around the center */
            s->fft_data[hlen - x].re = s->fft_data[hlen + x].re;
            s->fft_data[hlen - x].im = -s->fft_data[hlen + x].im;

            cv_tmp = cv * cv_step - sv * sv_step;
            sv = sv * cv_step + cv * sv_step;
            cv = cv_tmp;
            cw_tmp = cw * cw_step - sw * sw_step;
            sw = sw * cw_step + cw * sw_step;
            cw = cw_tmp;
        }
        /* zero the remainder outside the window */
        for (; x < hlen; x++) {
            s->fft_data[hlen + x].re = 0;
            s->fft_data[hlen + x].im = 0;
            s->fft_data[hlen - x].re = 0;
            s->fft_data[hlen - x].im = 0;
        }

        av_fft_permute(s->fft_context, s->fft_data);
        av_fft_calc(s->fft_context, s->fft_data);

        /* sparsify: sort spectrum by magnitude and keep the largest tail
         * holding (1 - coeffclamp*COEFF_CLAMP) of the total energy */
        for (x = 0; x < fft_len; x++) {
            s->coeff_sort[x].index = x;
            s->coeff_sort[x].value = s->fft_data[x].re;
        }

        AV_QSORT(s->coeff_sort, fft_len, SparseCoeff, qsort_sparsecoeff);
        for (x = 0; x < fft_len; x++)
            total += fabsf(s->coeff_sort[x].value);

        for (x = 0; x < fft_len; x++) {
            partial += fabsf(s->coeff_sort[x].value);
            if (partial > total * s->coeffclamp * COEFF_CLAMP) {
                s->coeffs_len[k] = fft_len - x;
                num_coeffs += s->coeffs_len[k];
                s->coeffs[k] = av_malloc_array(s->coeffs_len[k], sizeof(*s->coeffs[k]));
                if (!s->coeffs[k]) {
                    ret = AVERROR(ENOMEM);
                    goto eval_error;
                }
                for (y = 0; y < s->coeffs_len[k]; y++)
                    s->coeffs[k][y] = s->coeff_sort[x+y];
                break;
            }
        }
    }
    av_expr_free(fontcolor_expr);
    av_expr_free(volume_expr);
    av_expr_free(tlength_expr);
    end_time = av_gettime_relative();
    av_log(ctx, AV_LOG_INFO, "Elapsed time %.6f s (fft_len=%u, num_coeffs=%u)\n", 1e-6 * (end_time-start_time), fft_len, num_coeffs);

    outlink->w = video_width;
    outlink->h = video_height;

    /* reset streaming state */
    s->req_fullfilled = 0;
    s->spectogram_index = 0;
    s->frame_count = 0;
    s->spectogram_count = 0;
    s->remaining_fill = fft_len >> 1;
    memset(s->fft_data, 0, fft_len * sizeof(*s->fft_data));

    s->outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!s->outpicref)
        return AVERROR(ENOMEM);

    /* sonogram ring buffer, one full video line per row */
    s->spectogram = av_calloc(spectogram_height, s->outpicref->linesize[0]);
    if (!s->spectogram)
        return AVERROR(ENOMEM);

    outlink->sample_aspect_ratio = av_make_q(1, 1);
    outlink->time_base = av_make_q(1, s->fps);
    outlink->frame_rate = av_make_q(s->fps, 1);
    return 0;

eval_error:
    av_expr_free(fontcolor_expr);
    av_expr_free(volume_expr);
    av_expr_free(tlength_expr);
    return ret;
}
/* Run one transform over the current contents of s->fft_data and draw.
 * Both channels are transformed in a single complex FFT (left in .re,
 * right in .im) and then untangled using conjugate symmetry. The CQT
 * power per bin is the dot product with the precomputed sparse kernel.
 * Every s->count calls, one video frame (bargraph + note names + scrolling
 * sonogram) is emitted. Returns ff_filter_frame's result or 0. */
static int plot_cqt(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ShowCQTContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int fft_len = 1 << s->fft_bits;
    FFTSample result[VIDEO_WIDTH][4]; /* per bin: left, mid, right power; bar height */
    int x, y, ret = 0;
    int linesize = s->outpicref->linesize[0];
    int video_scale = s->fullhd ? 2 : 1;
    int video_width = (VIDEO_WIDTH/2) * video_scale;
    int spectogram_height = (SPECTOGRAM_HEIGHT/2) * video_scale;
    int spectogram_start = (SPECTOGRAM_START/2) * video_scale;
    int font_height = (FONT_HEIGHT/2) * video_scale;

    /* real part contains left samples, imaginary part contains right samples */
    memcpy(s->fft_result_left, s->fft_data, fft_len * sizeof(*s->fft_data));
    av_fft_permute(s->fft_context, s->fft_result_left);
    av_fft_calc(s->fft_context, s->fft_result_left);

    /* separate left and right, (and multiply by 2.0) */
    s->fft_result_right[0].re = 2.0f * s->fft_result_left[0].im;
    s->fft_result_right[0].im = 0;
    s->fft_result_left[0].re = 2.0f * s->fft_result_left[0].re;
    s->fft_result_left[0].im = 0;
    for (x = 1; x <= fft_len >> 1; x++) {
        FFTSample tmpy = s->fft_result_left[fft_len-x].im - s->fft_result_left[x].im;

        s->fft_result_right[x].re = s->fft_result_left[x].im + s->fft_result_left[fft_len-x].im;
        s->fft_result_right[x].im = s->fft_result_left[x].re - s->fft_result_left[fft_len-x].re;
        s->fft_result_right[fft_len-x].re = s->fft_result_right[x].re;
        s->fft_result_right[fft_len-x].im = -s->fft_result_right[x].im;

        s->fft_result_left[x].re = s->fft_result_left[x].re + s->fft_result_left[fft_len-x].re;
        s->fft_result_left[x].im = tmpy;
        s->fft_result_left[fft_len-x].re = s->fft_result_left[x].re;
        s->fft_result_left[fft_len-x].im = -s->fft_result_left[x].im;
    }

    /* calculating cqt */
    for (x = 0; x < VIDEO_WIDTH; x++) {
        int u;
        FFTComplex l = {0,0};
        FFTComplex r = {0,0};

        /* sparse dot product of the kernel with both spectra */
        for (u = 0; u < s->coeffs_len[x]; u++) {
            FFTSample value = s->coeffs[x][u].value;
            int index = s->coeffs[x][u].index;
            l.re += value * s->fft_result_left[index].re;
            l.im += value * s->fft_result_left[index].im;
            r.re += value * s->fft_result_right[index].re;
            r.im += value * s->fft_result_right[index].im;
        }
        /* result is power, not amplitude */
        result[x][0] = l.re * l.re + l.im * l.im;
        result[x][2] = r.re * r.re + r.im * r.im;
        result[x][1] = 0.5f * (result[x][0] + result[x][2]);

        /* bargraph height: gamma2 correction, with fast paths for the
         * common integer gammas */
        if (s->gamma2 == 1.0f)
            result[x][3] = result[x][1];
        else if (s->gamma2 == 2.0f)
            result[x][3] = sqrtf(result[x][1]);
        else if (s->gamma2 == 3.0f)
            result[x][3] = cbrtf(result[x][1]);
        else if (s->gamma2 == 4.0f)
            result[x][3] = sqrtf(sqrtf(result[x][1]));
        else
            result[x][3] = expf(logf(result[x][1]) * (1.0f / s->gamma2));

        /* sonogram color: clamp then apply gamma, same fast paths */
        result[x][0] = FFMIN(1.0f, result[x][0]);
        result[x][1] = FFMIN(1.0f, result[x][1]);
        result[x][2] = FFMIN(1.0f, result[x][2]);
        if (s->gamma == 1.0f) {
            result[x][0] = 255.0f * result[x][0];
            result[x][1] = 255.0f * result[x][1];
            result[x][2] = 255.0f * result[x][2];
        } else if (s->gamma == 2.0f) {
            result[x][0] = 255.0f * sqrtf(result[x][0]);
            result[x][1] = 255.0f * sqrtf(result[x][1]);
            result[x][2] = 255.0f * sqrtf(result[x][2]);
        } else if (s->gamma == 3.0f) {
            result[x][0] = 255.0f * cbrtf(result[x][0]);
            result[x][1] = 255.0f * cbrtf(result[x][1]);
            result[x][2] = 255.0f * cbrtf(result[x][2]);
        } else if (s->gamma == 4.0f) {
            result[x][0] = 255.0f * sqrtf(sqrtf(result[x][0]));
            result[x][1] = 255.0f * sqrtf(sqrtf(result[x][1]));
            result[x][2] = 255.0f * sqrtf(sqrtf(result[x][2]));
        } else {
            result[x][0] = 255.0f * expf(logf(result[x][0]) * (1.0f / s->gamma));
            result[x][1] = 255.0f * expf(logf(result[x][1]) * (1.0f / s->gamma));
            result[x][2] = 255.0f * expf(logf(result[x][2]) * (1.0f / s->gamma));
        }
    }

    /* half resolution: average adjacent bin pairs */
    if (!s->fullhd) {
        for (x = 0; x < video_width; x++) {
            result[x][0] = 0.5f * (result[2*x][0] + result[2*x+1][0]);
            result[x][1] = 0.5f * (result[2*x][1] + result[2*x+1][1]);
            result[x][2] = 0.5f * (result[2*x][2] + result[2*x+1][2]);
            result[x][3] = 0.5f * (result[2*x][3] + result[2*x+1][3]);
        }
    }

    /* write the new sonogram line into the ring buffer */
    for (x = 0; x < video_width; x++) {
        s->spectogram[s->spectogram_index*linesize + 3*x] = result[x][0] + 0.5f;
        s->spectogram[s->spectogram_index*linesize + 3*x + 1] = result[x][1] + 0.5f;
        s->spectogram[s->spectogram_index*linesize + 3*x + 2] = result[x][2] + 0.5f;
    }

    /* drawing */
    if (!s->spectogram_count) {
        uint8_t *data = (uint8_t*) s->outpicref->data[0];
        float rcp_result[VIDEO_WIDTH];
        int total_length = linesize * spectogram_height;
        int back_length = linesize * s->spectogram_index;

        for (x = 0; x < video_width; x++)
            rcp_result[x] = 1.0f / (result[x][3]+0.0001f); /* avoid div by zero */

        /* drawing bar */
        for (y = 0; y < spectogram_height; y++) {
            float height = (spectogram_height - y) * (1.0f/spectogram_height);
            uint8_t *lineptr = data + y * linesize;
            for (x = 0; x < video_width; x++) {
                float mul;
                if (result[x][3] <= height) {
                    *lineptr++ = 0;
                    *lineptr++ = 0;
                    *lineptr++ = 0;
                } else {
                    /* fade the bar color toward the top of the bar */
                    mul = (result[x][3] - height) * rcp_result[x];
                    *lineptr++ = mul * result[x][0] + 0.5f;
                    *lineptr++ = mul * result[x][1] + 0.5f;
                    *lineptr++ = mul * result[x][2] + 0.5f;
                }
            }
        }

        /* drawing font */
        if (s->font_alpha) {
            /* freetype path: alpha-blend the note names over the newest
             * sonogram line, using the per-column fontcolor */
            for (y = 0; y < font_height; y++) {
                uint8_t *lineptr = data + (spectogram_height + y) * linesize;
                uint8_t *spectogram_src = s->spectogram + s->spectogram_index * linesize;
                uint8_t *fontcolor_value = s->fontcolor_value;
                for (x = 0; x < video_width; x++) {
                    uint8_t alpha = s->font_alpha[y*video_width+x];
                    lineptr[3*x] = (spectogram_src[3*x] * (255-alpha) + fontcolor_value[0] * alpha + 255) >> 8;
                    lineptr[3*x+1] = (spectogram_src[3*x+1] * (255-alpha) + fontcolor_value[1] * alpha + 255) >> 8;
                    lineptr[3*x+2] = (spectogram_src[3*x+2] * (255-alpha) + fontcolor_value[2] * alpha + 255) >> 8;
                    fontcolor_value += 3;
                }
            }
        } else {
            /* fallback path: replicate the newest sonogram line, then stamp
             * the built-in 8x16 VGA font on top of it */
            for (y = 0; y < font_height; y++) {
                uint8_t *lineptr = data + (spectogram_height + y) * linesize;
                memcpy(lineptr, s->spectogram + s->spectogram_index * linesize, video_width*3);
            }
            for (x = 0; x < video_width; x += video_width/10) {
                int u;
                static const char str[] = "EF G A BC D ";
                uint8_t *startptr = data + spectogram_height * linesize + x * 3;
                for (u = 0; str[u]; u++) {
                    int v;
                    for (v = 0; v < 16; v++) {
                        uint8_t *p = startptr + v * linesize * video_scale + 8 * 3 * u * video_scale;
                        int ux = x + 8 * u * video_scale;
                        int mask;
                        for (mask = 0x80; mask; mask >>= 1) {
                            if (mask & avpriv_vga16_font[str[u] * 16 + v]) {
                                p[0] = s->fontcolor_value[3*ux];
                                p[1] = s->fontcolor_value[3*ux+1];
                                p[2] = s->fontcolor_value[3*ux+2];
                                if (video_scale == 2) {
                                    /* duplicate the pixel in a 2x2 block */
                                    p[linesize] = p[0];
                                    p[linesize+1] = p[1];
                                    p[linesize+2] = p[2];
                                    p[3] = p[linesize+3] = s->fontcolor_value[3*ux+3];
                                    p[4] = p[linesize+4] = s->fontcolor_value[3*ux+4];
                                    p[5] = p[linesize+5] = s->fontcolor_value[3*ux+5];
                                }
                            }
                            p += 3 * video_scale;
                            ux += video_scale;
                        }
                    }
                }
            }
        }

        /* drawing spectogram/sonogram: unroll the ring buffer so the newest
         * line appears at the top of the sonogram area */
        data += spectogram_start * linesize;
        memcpy(data, s->spectogram + s->spectogram_index*linesize, total_length - back_length);

        data += total_length - back_length;
        if (back_length)
            memcpy(data, s->spectogram, back_length);

        s->outpicref->pts = s->frame_count;
        ret = ff_filter_frame(outlink, av_frame_clone(s->outpicref));
        s->req_fullfilled = 1;
        s->frame_count++;
    }
    s->spectogram_count = (s->spectogram_count + 1) % s->count;
    s->spectogram_index = (s->spectogram_index + spectogram_height - 1) % spectogram_height;
    return ret;
}
/* Consume input audio (or flush on insamples == NULL). fft_data is a
 * sliding window: once filled, plot_cqt() runs and the window is shifted
 * left by one hop ("step" samples). On flush, the tail is zero-padded and
 * remaining transforms are emitted until half the window is consumed,
 * then AVERROR_EOF is returned. */
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    ShowCQTContext *s = ctx->priv;
    /* hop size in samples; config_output guarantees this divides evenly */
    int step = inlink->sample_rate / (s->fps * s->count);
    int fft_len = 1 << s->fft_bits;
    int remaining;
    float *audio_data;

    if (!insamples) {
        /* flush: zero-pad and keep transforming until half the window
         * (the initial padding) has been consumed */
        while (s->remaining_fill < (fft_len >> 1)) {
            int ret, x;
            memset(&s->fft_data[fft_len - s->remaining_fill], 0, sizeof(*s->fft_data) * s->remaining_fill);
            ret = plot_cqt(inlink);
            if (ret < 0)
                return ret;
            for (x = 0; x < (fft_len-step); x++)
                s->fft_data[x] = s->fft_data[x+step];
            s->remaining_fill += step;
        }
        return AVERROR_EOF;
    }

    remaining = insamples->nb_samples;
    audio_data = (float*) insamples->data[0]; /* interleaved stereo floats */

    while (remaining) {
        if (remaining >= s->remaining_fill) {
            /* enough input to complete the window: fill, transform, shift */
            int i = insamples->nb_samples - remaining;
            int j = fft_len - s->remaining_fill;
            int m, ret;

            /* pack left into .re, right into .im (see plot_cqt) */
            for (m = 0; m < s->remaining_fill; m++) {
                s->fft_data[j+m].re = audio_data[2*(i+m)];
                s->fft_data[j+m].im = audio_data[2*(i+m)+1];
            }
            ret = plot_cqt(inlink);
            if (ret < 0) {
                av_frame_free(&insamples);
                return ret;
            }
            remaining -= s->remaining_fill;
            /* slide the window left by one hop */
            for (m = 0; m < fft_len-step; m++)
                s->fft_data[m] = s->fft_data[m+step];
            s->remaining_fill = step;
        } else {
            /* not enough to fill the window: stash what we have */
            int i = insamples->nb_samples - remaining;
            int j = fft_len - s->remaining_fill;
            int m;

            for (m = 0; m < remaining; m++) {
                s->fft_data[m+j].re = audio_data[2*(i+m)];
                s->fft_data[m+j].im = audio_data[2*(i+m)+1];
            }
            s->remaining_fill -= remaining;
            remaining = 0;
        }
    }
    av_frame_free(&insamples);
    return 0;
}
  704. static int request_frame(AVFilterLink *outlink)
  705. {
  706. ShowCQTContext *s = outlink->src->priv;
  707. AVFilterLink *inlink = outlink->src->inputs[0];
  708. int ret;
  709. s->req_fullfilled = 0;
  710. do {
  711. ret = ff_request_frame(inlink);
  712. } while (!s->req_fullfilled && ret >= 0);
  713. if (ret == AVERROR_EOF && s->outpicref)
  714. filter_frame(inlink, NULL);
  715. return ret;
  716. }
/* single audio input pad */
static const AVFilterPad showcqt_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* single video output pad */
static const AVFilterPad showcqt_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
/* filter definition registered with libavfilter */
AVFilter ff_avf_showcqt = {
    .name = "showcqt",
    .description = NULL_IF_CONFIG_SMALL("Convert input audio to a CQT (Constant Q Transform) spectrum video output."),
    .uninit = uninit,
    .query_formats = query_formats,
    .priv_size = sizeof(ShowCQTContext),
    .inputs = showcqt_inputs,
    .outputs = showcqt_outputs,
    .priv_class = &showcqt_class,
};