/*
 * Copyright (c) 2014 Muhammad Faiz <mfcc64@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "libavcodec/avfft.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/xga_font_data.h"
#include "libavutil/eval.h"
#include "avfilter.h"
#include "internal.h"

#include <math.h>
#include <stdlib.h>

#if CONFIG_LIBFREETYPE
#include <ft2build.h>
#include FT_FREETYPE_H
#endif

/* This filter computes a constant Q transform with 16 bins per semitone using
 * the Brown-Puckette algorithm, starting from E0 up to D#10 (10 octaves),
 * so there are 16 bins/semitone * 12 semitones/octave * 10 octaves = 1920 bins,
 * matching the full HD width. */
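
/* Note: bin k (0 <= k < 1920) is placed at freq = BASE_FREQ * 2^(k/192), i.e.
 * 16*12 = 192 bins per octave; BASE_FREQ appears to be chosen so that the
 * centre of the first 16-bin semitone group (k = 7.5) lands on E0 (MIDI 16). */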
#define VIDEO_WIDTH 1920
#define VIDEO_HEIGHT 1080
#define FONT_HEIGHT 32
#define SPECTOGRAM_HEIGHT ((VIDEO_HEIGHT-FONT_HEIGHT)/2)
#define SPECTOGRAM_START (VIDEO_HEIGHT-SPECTOGRAM_HEIGHT)
#define BASE_FREQ 20.051392800492
#define TLENGTH_MIN 0.001
#define TLENGTH_DEFAULT "384/f*tc/(384/f+tc)"
#define VOLUME_MIN 1e-10
#define VOLUME_MAX 100.0
#define FONTCOLOR_DEFAULT "st(0, (midi(f)-59.5)/12);" \
    "st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));" \
    "r(1-ld(1)) + b(ld(1))"
typedef struct {
    FFTSample *values;
    int start, len;
} Coeffs;

typedef struct {
    const AVClass *class;
    AVFrame *outpicref;
    FFTContext *fft_context;
    FFTComplex *fft_data;
    FFTComplex *fft_result;
    uint8_t *spectogram;
    Coeffs coeffs[VIDEO_WIDTH];
    uint8_t *font_alpha;
    char *fontfile;     /* using freetype */
    uint8_t fontcolor_value[VIDEO_WIDTH*3];  /* result of fontcolor option */
    int64_t frame_count;
    int spectogram_count;
    int spectogram_index;
    int fft_bits;
    int remaining_fill;
    char *tlength;
    char *volume;
    char *fontcolor;
    double timeclamp;   /* lower timeclamp gives better time accuracy, higher gives better frequency accuracy (at low frequencies) */
    float coeffclamp;   /* lower coeffclamp is more precise, higher is faster */
    int fullhd;         /* if true, output video is at full HD resolution, otherwise it is halved */
    float gamma;        /* lower gamma gives more contrast, higher gamma more range */
    float gamma2;       /* gamma of bargraph */
    int fps;            /* the fps must divide the sample rate exactly, so an int is enough, but rates like 24000/1001 cannot be expressed */
    int count;          /* fps * count = transform rate */
    int draw_text;
} ShowCQTContext;

#define OFFSET(x) offsetof(ShowCQTContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption showcqt_options[] = {
    { "volume", "set volume", OFFSET(volume), AV_OPT_TYPE_STRING, { .str = "16" }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "tlength", "set transform length", OFFSET(tlength), AV_OPT_TYPE_STRING, { .str = TLENGTH_DEFAULT }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "timeclamp", "set timeclamp", OFFSET(timeclamp), AV_OPT_TYPE_DOUBLE, { .dbl = 0.17 }, 0.1, 1.0, FLAGS },
    { "coeffclamp", "set coeffclamp", OFFSET(coeffclamp), AV_OPT_TYPE_FLOAT, { .dbl = 1 }, 0.1, 10, FLAGS },
    { "gamma", "set gamma", OFFSET(gamma), AV_OPT_TYPE_FLOAT, { .dbl = 3 }, 1, 7, FLAGS },
    { "gamma2", "set gamma of bargraph", OFFSET(gamma2), AV_OPT_TYPE_FLOAT, { .dbl = 1 }, 1, 7, FLAGS },
    { "fullhd", "set full HD resolution", OFFSET(fullhd), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
    { "fps", "set video fps", OFFSET(fps), AV_OPT_TYPE_INT, { .i64 = 25 }, 10, 100, FLAGS },
    { "count", "set number of transforms per frame", OFFSET(count), AV_OPT_TYPE_INT, { .i64 = 6 }, 1, 30, FLAGS },
    { "fontfile", "set font file", OFFSET(fontfile), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "fontcolor", "set font color", OFFSET(fontcolor), AV_OPT_TYPE_STRING, { .str = FONTCOLOR_DEFAULT }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "text", "draw text", OFFSET(draw_text), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showcqt);
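
/* Example usage (illustrative command line, not part of this file):
 *   ffmpeg -i input.flac -filter_complex "showcqt=fps=30:count=5:fullhd=0" out.mkv
 * The input sample rate must be divisible by fps*count (checked in
 * config_output), e.g. 44100 = 30 * 5 * 294. */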
static av_cold void uninit(AVFilterContext *ctx)
{
    int k;
    ShowCQTContext *s = ctx->priv;
    av_fft_end(s->fft_context);
    s->fft_context = NULL;
    for (k = 0; k < VIDEO_WIDTH; k++)
        av_freep(&s->coeffs[k].values);
    av_freep(&s->fft_data);
    av_freep(&s->fft_result);
    av_freep(&s->spectogram);
    av_freep(&s->font_alpha);
    av_frame_free(&s->outpicref);
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE };
    static const int64_t channel_layouts[] = { AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_STEREO_DOWNMIX, -1 };
    static const int samplerates[] = { 44100, 48000, -1 };

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &inlink->out_formats);

    layouts = avfilter_make_format64_list(channel_layouts);
    if (!layouts)
        return AVERROR(ENOMEM);
    ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);

    formats = ff_make_format_list(samplerates);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &inlink->out_samplerates);

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &outlink->in_formats);

    return 0;
}

#if CONFIG_LIBFREETYPE
static void load_freetype_font(AVFilterContext *ctx)
{
    static const char str[] = "EF G A BC D ";
    ShowCQTContext *s = ctx->priv;
    FT_Library lib = NULL;
    FT_Face face = NULL;
    int video_scale = s->fullhd ? 2 : 1;
    int video_width = (VIDEO_WIDTH/2) * video_scale;
    int font_height = (FONT_HEIGHT/2) * video_scale;
    int font_width = 8 * video_scale;
    int font_repeat = font_width * 12;
    int linear_hori_advance = font_width * 65536;
    int non_monospace_warning = 0;
    int x;

    s->font_alpha = NULL;
    if (!s->fontfile)
        return;

    if (FT_Init_FreeType(&lib))
        goto fail;

    if (FT_New_Face(lib, s->fontfile, 0, &face))
        goto fail;

    if (FT_Set_Char_Size(face, 16*64, 0, 0, 0))
        goto fail;

    if (FT_Load_Char(face, 'A', FT_LOAD_RENDER))
        goto fail;

    if (FT_Set_Char_Size(face, 16*64 * linear_hori_advance / face->glyph->linearHoriAdvance, 0, 0, 0))
        goto fail;

    s->font_alpha = av_malloc_array(font_height, video_width);
    if (!s->font_alpha)
        goto fail;

    memset(s->font_alpha, 0, font_height * video_width);
    for (x = 0; x < 12; x++) {
        int sx, sy, rx, bx, by, dx, dy;
        if (str[x] == ' ')
            continue;

        if (FT_Load_Char(face, str[x], FT_LOAD_RENDER))
            goto fail;

        if (face->glyph->advance.x != font_width*64 && !non_monospace_warning) {
            av_log(ctx, AV_LOG_WARNING, "Font is not monospace\n");
            non_monospace_warning = 1;
        }

        sy = font_height - 4*video_scale - face->glyph->bitmap_top;
        for (rx = 0; rx < 10; rx++) {
            sx = rx * font_repeat + x * font_width + face->glyph->bitmap_left;
            for (by = 0; by < face->glyph->bitmap.rows; by++) {
                dy = by + sy;
                if (dy < 0)
                    continue;
                if (dy >= font_height)
                    break;

                for (bx = 0; bx < face->glyph->bitmap.width; bx++) {
                    dx = bx + sx;
                    if (dx < 0)
                        continue;
                    if (dx >= video_width)
                        break;
                    s->font_alpha[dy*video_width+dx] = face->glyph->bitmap.buffer[by*face->glyph->bitmap.width+bx];
                }
            }
        }
    }

    FT_Done_Face(face);
    FT_Done_FreeType(lib);
    return;

fail:
    av_log(ctx, AV_LOG_WARNING, "Error while loading freetype font, using default font instead\n");
    FT_Done_Face(face);
    FT_Done_FreeType(lib);
    av_freep(&s->font_alpha);
    return;
}
#endif
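
/* Note on load_freetype_font(): the note-name strip "EF G A BC D " has one
 * glyph slot per semitone (blanks for the sharps), starting at E because bin 0
 * is E0. Each octave is font_repeat = 12 * font_width pixels wide and the
 * pattern is repeated 10 times (rx < 10), so the strip spans the whole
 * spectrum width; only an alpha mask is stored here, coloured later from
 * fontcolor_value. */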
static double a_weighting(void *p, double f)
{
    double ret = 12200.0*12200.0 * (f*f*f*f);
    ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) *
           sqrt((f*f + 107.7*107.7) * (f*f + 737.9*737.9));
    return ret;
}

static double b_weighting(void *p, double f)
{
    double ret = 12200.0*12200.0 * (f*f*f);
    ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) * sqrt(f*f + 158.5*158.5);
    return ret;
}

static double c_weighting(void *p, double f)
{
    double ret = 12200.0*12200.0 * (f*f);
    ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0);
    return ret;
}
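
/* Note: the three functions above are the (un-normalized) magnitude responses
 * of the standard A/B/C frequency-weighting curves, with 12200 Hz used as the
 * upper pole; they are exposed to the "volume" expression, e.g.
 * volume=16*a_weighting(f). */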
static double midi(void *p, double f)
{
    return log2(f/440.0) * 12.0 + 69.0;
}

static double r_func(void *p, double x)
{
    x = av_clipd(x, 0.0, 1.0);
    return (int)(x*255.0+0.5) << 16;
}

static double g_func(void *p, double x)
{
    x = av_clipd(x, 0.0, 1.0);
    return (int)(x*255.0+0.5) << 8;
}

static double b_func(void *p, double x)
{
    x = av_clipd(x, 0.0, 1.0);
    return (int)(x*255.0+0.5);
}
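
/* Note: these helpers serve the "fontcolor" expression. midi() converts a
 * frequency to a MIDI note number (A4 = 440 Hz -> 69); r()/g()/b() clip a
 * 0..1 channel value and pack it into the red/green/blue byte of a 0xRRGGBB
 * integer, so the three channels can simply be summed in the expression. */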
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowCQTContext *s = ctx->priv;
    AVExpr *tlength_expr = NULL, *volume_expr = NULL, *fontcolor_expr = NULL;
    uint8_t *fontcolor_value = s->fontcolor_value;
    static const char * const expr_vars[] = { "timeclamp", "tc", "frequency", "freq", "f", NULL };
    static const char * const expr_func_names[] = { "a_weighting", "b_weighting", "c_weighting", NULL };
    static const char * const expr_fontcolor_func_names[] = { "midi", "r", "g", "b", NULL };
    static double (* const expr_funcs[])(void *, double) = { a_weighting, b_weighting, c_weighting, NULL };
    static double (* const expr_fontcolor_funcs[])(void *, double) = { midi, r_func, g_func, b_func, NULL };
    int fft_len, k, x, ret;
    int num_coeffs = 0;
    int rate = inlink->sample_rate;
    double max_len = rate * (double) s->timeclamp;
    int video_scale = s->fullhd ? 2 : 1;
    int video_width = (VIDEO_WIDTH/2) * video_scale;
    int video_height = (VIDEO_HEIGHT/2) * video_scale;
    int spectogram_height = (SPECTOGRAM_HEIGHT/2) * video_scale;

    s->fft_bits = ceil(log2(max_len));
    fft_len = 1 << s->fft_bits;

    if (rate % (s->fps * s->count)) {
        av_log(ctx, AV_LOG_ERROR, "Rate (%u) is not divisible by fps*count (%u*%u)\n", rate, s->fps, s->count);
        return AVERROR(EINVAL);
    }

    s->fft_data = av_malloc_array(fft_len, sizeof(*s->fft_data));
    s->fft_result = av_malloc_array(fft_len + 1, sizeof(*s->fft_result));
    s->fft_context = av_fft_init(s->fft_bits, 0);

    if (!s->fft_data || !s->fft_result || !s->fft_context)
        return AVERROR(ENOMEM);

#if CONFIG_LIBFREETYPE
    load_freetype_font(ctx);
#else
    if (s->fontfile)
        av_log(ctx, AV_LOG_WARNING, "Freetype is not available, ignoring fontfile option\n");
    s->font_alpha = NULL;
#endif

    ret = av_expr_parse(&tlength_expr, s->tlength, expr_vars, NULL, NULL, NULL, NULL, 0, ctx);
    if (ret < 0)
        goto eval_error;

    ret = av_expr_parse(&volume_expr, s->volume, expr_vars, expr_func_names,
                        expr_funcs, NULL, NULL, 0, ctx);
    if (ret < 0)
        goto eval_error;

    ret = av_expr_parse(&fontcolor_expr, s->fontcolor, expr_vars, expr_fontcolor_func_names,
                        expr_fontcolor_funcs, NULL, NULL, 0, ctx);
    if (ret < 0)
        goto eval_error;

    for (k = 0; k < VIDEO_WIDTH; k++) {
        double freq = BASE_FREQ * exp2(k * (1.0/192.0));
        double flen, center, tlength, volume;
        int start, end;
        double expr_vars_val[] = { s->timeclamp, s->timeclamp, freq, freq, freq, 0 };

        tlength = av_expr_eval(tlength_expr, expr_vars_val, NULL);
        if (isnan(tlength)) {
            av_log(ctx, AV_LOG_WARNING, "at freq %g: tlength is nan, setting it to %g\n", freq, s->timeclamp);
            tlength = s->timeclamp;
        } else if (tlength < TLENGTH_MIN) {
            av_log(ctx, AV_LOG_WARNING, "at freq %g: tlength is %g, setting it to %g\n", freq, tlength, TLENGTH_MIN);
            tlength = TLENGTH_MIN;
        } else if (tlength > s->timeclamp) {
            av_log(ctx, AV_LOG_WARNING, "at freq %g: tlength is %g, setting it to %g\n", freq, tlength, s->timeclamp);
            tlength = s->timeclamp;
        }

        volume = FFABS(av_expr_eval(volume_expr, expr_vars_val, NULL));
        if (isnan(volume)) {
            av_log(ctx, AV_LOG_WARNING, "at freq %g: volume is nan, setting it to 0\n", freq);
            volume = VOLUME_MIN;
        } else if (volume < VOLUME_MIN) {
            volume = VOLUME_MIN;
        } else if (volume > VOLUME_MAX) {
            av_log(ctx, AV_LOG_WARNING, "at freq %g: volume is %g, setting it to %g\n", freq, volume, VOLUME_MAX);
            volume = VOLUME_MAX;
        }

        if (s->fullhd || !(k & 1)) {
            int fontcolor = av_expr_eval(fontcolor_expr, expr_vars_val, NULL);
            fontcolor_value[0] = (fontcolor >> 16) & 0xFF;
            fontcolor_value[1] = (fontcolor >> 8) & 0xFF;
            fontcolor_value[2] = fontcolor & 0xFF;
            fontcolor_value += 3;
        }

        /* direct frequency domain windowing */
        flen = 8.0 * fft_len / (tlength * rate);
        center = freq * fft_len / rate;
        start = FFMAX(0, ceil(center - 0.5 * flen));
        end = FFMIN(fft_len, floor(center + 0.5 * flen));
        s->coeffs[k].len = end - start + 1;
        s->coeffs[k].start = start;
        num_coeffs += s->coeffs[k].len;
        s->coeffs[k].values = av_malloc_array(s->coeffs[k].len, sizeof(*s->coeffs[k].values));
        if (!s->coeffs[k].values) {
            ret = AVERROR(ENOMEM);
            goto eval_error;
        }
        for (x = start; x <= end; x++) {
            int sign = (x & 1) ? (-1) : 1;
            double u = 2.0 * M_PI * (x - center) * (1.0/flen);
            /* nuttall window */
            double w = 0.355768 + 0.487396 * cos(u) + 0.144232 * cos(2*u) + 0.012604 * cos(3*u);
            s->coeffs[k].values[x-start] = sign * volume * (1.0/fft_len) * w;
        }
    }
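
    /* Note on the loop above (Brown-Puckette CQT): instead of correlating with
     * a windowed sinusoid in the time domain, each output bin keeps a short
     * kernel of `len` FFT-bin weights centred on `center`. The Nuttall window
     * is applied directly in the frequency domain over a support of `flen`
     * bins (inversely proportional to tlength), and the alternating `sign`
     * factor (-1)^x shifts the equivalent time-domain window to the middle of
     * the FFT frame. */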
    av_expr_free(fontcolor_expr);
    av_expr_free(volume_expr);
    av_expr_free(tlength_expr);
    av_log(ctx, AV_LOG_INFO, "fft_len=%u, num_coeffs=%u\n", fft_len, num_coeffs);

    outlink->w = video_width;
    outlink->h = video_height;

    s->spectogram_index = 0;
    s->frame_count = 0;
    s->spectogram_count = 0;
    s->remaining_fill = fft_len >> 1;
    memset(s->fft_data, 0, fft_len * sizeof(*s->fft_data));

    s->outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!s->outpicref)
        return AVERROR(ENOMEM);

    s->spectogram = av_calloc(spectogram_height, s->outpicref->linesize[0]);
    if (!s->spectogram)
        return AVERROR(ENOMEM);

    outlink->sample_aspect_ratio = av_make_q(1, 1);
    outlink->time_base = av_make_q(1, s->fps);
    outlink->frame_rate = av_make_q(s->fps, 1);
    return 0;

eval_error:
    av_expr_free(fontcolor_expr);
    av_expr_free(volume_expr);
    av_expr_free(tlength_expr);
    return ret;
}
static int plot_cqt(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ShowCQTContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int fft_len = 1 << s->fft_bits;
    FFTSample result[VIDEO_WIDTH][4];
    int x, y, ret = 0;
    int linesize = s->outpicref->linesize[0];
    int video_scale = s->fullhd ? 2 : 1;
    int video_width = (VIDEO_WIDTH/2) * video_scale;
    int spectogram_height = (SPECTOGRAM_HEIGHT/2) * video_scale;
    int spectogram_start = (SPECTOGRAM_START/2) * video_scale;
    int font_height = (FONT_HEIGHT/2) * video_scale;

    /* real part contains left samples, imaginary part contains right samples */
    memcpy(s->fft_result, s->fft_data, fft_len * sizeof(*s->fft_data));
    av_fft_permute(s->fft_context, s->fft_result);
    av_fft_calc(s->fft_context, s->fft_result);
    s->fft_result[fft_len] = s->fft_result[0];

    /* calculating cqt */
    for (x = 0; x < VIDEO_WIDTH; x++) {
        int u;
        FFTComplex v = {0,0};
        FFTComplex w = {0,0};
        FFTComplex l, r;

        for (u = 0; u < s->coeffs[x].len; u++) {
            FFTSample value = s->coeffs[x].values[u];
            int index = s->coeffs[x].start + u;
            v.re += value * s->fft_result[index].re;
            v.im += value * s->fft_result[index].im;
            w.re += value * s->fft_result[fft_len - index].re;
            w.im += value * s->fft_result[fft_len - index].im;
        }

        /* separate left and right, (and multiply by 2.0) */
        l.re = v.re + w.re;
        l.im = v.im - w.im;
        r.re = w.im + v.im;
        r.im = w.re - v.re;

        /* result is power, not amplitude */
        result[x][0] = l.re * l.re + l.im * l.im;
        result[x][2] = r.re * r.re + r.im * r.im;
        result[x][1] = 0.5f * (result[x][0] + result[x][2]);

        if (s->gamma2 == 1.0f)
            result[x][3] = result[x][1];
        else if (s->gamma2 == 2.0f)
            result[x][3] = sqrtf(result[x][1]);
        else if (s->gamma2 == 3.0f)
            result[x][3] = cbrtf(result[x][1]);
        else if (s->gamma2 == 4.0f)
            result[x][3] = sqrtf(sqrtf(result[x][1]));
        else
            result[x][3] = expf(logf(result[x][1]) * (1.0f / s->gamma2));

        result[x][0] = FFMIN(1.0f, result[x][0]);
        result[x][1] = FFMIN(1.0f, result[x][1]);
        result[x][2] = FFMIN(1.0f, result[x][2]);
        if (s->gamma == 1.0f) {
            result[x][0] = 255.0f * result[x][0];
            result[x][1] = 255.0f * result[x][1];
            result[x][2] = 255.0f * result[x][2];
        } else if (s->gamma == 2.0f) {
            result[x][0] = 255.0f * sqrtf(result[x][0]);
            result[x][1] = 255.0f * sqrtf(result[x][1]);
            result[x][2] = 255.0f * sqrtf(result[x][2]);
        } else if (s->gamma == 3.0f) {
            result[x][0] = 255.0f * cbrtf(result[x][0]);
            result[x][1] = 255.0f * cbrtf(result[x][1]);
            result[x][2] = 255.0f * cbrtf(result[x][2]);
        } else if (s->gamma == 4.0f) {
            result[x][0] = 255.0f * sqrtf(sqrtf(result[x][0]));
            result[x][1] = 255.0f * sqrtf(sqrtf(result[x][1]));
            result[x][2] = 255.0f * sqrtf(sqrtf(result[x][2]));
        } else {
            result[x][0] = 255.0f * expf(logf(result[x][0]) * (1.0f / s->gamma));
            result[x][1] = 255.0f * expf(logf(result[x][1]) * (1.0f / s->gamma));
            result[x][2] = 255.0f * expf(logf(result[x][2]) * (1.0f / s->gamma));
        }
    }
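
    /* Note on the loop above: the two real channels share one complex FFT
     * (left in re, right in im), so for each kernel the left spectrum is
     * recovered as X[k] + conj(X[N-k]) and the right one as
     * (X[k] - conj(X[N-k])) / i; v and w accumulate the weighted X[k] and
     * X[N-k] terms. result[x][0..2] hold the left / left-right average /
     * right power mapped through 1/gamma to 0..255, while result[x][3] is the
     * average power through 1/gamma2 and drives the bargraph height; the
     * special cases merely avoid expf/logf for common gamma values. */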
    if (!s->fullhd) {
        for (x = 0; x < video_width; x++) {
            result[x][0] = 0.5f * (result[2*x][0] + result[2*x+1][0]);
            result[x][1] = 0.5f * (result[2*x][1] + result[2*x+1][1]);
            result[x][2] = 0.5f * (result[2*x][2] + result[2*x+1][2]);
            result[x][3] = 0.5f * (result[2*x][3] + result[2*x+1][3]);
        }
    }

    for (x = 0; x < video_width; x++) {
        s->spectogram[s->spectogram_index*linesize + 3*x] = result[x][0] + 0.5f;
        s->spectogram[s->spectogram_index*linesize + 3*x + 1] = result[x][1] + 0.5f;
        s->spectogram[s->spectogram_index*linesize + 3*x + 2] = result[x][2] + 0.5f;
    }

    /* drawing */
    if (!s->spectogram_count) {
        uint8_t *data = (uint8_t*) s->outpicref->data[0];
        float rcp_result[VIDEO_WIDTH];
        int total_length = linesize * spectogram_height;
        int back_length = linesize * s->spectogram_index;

        for (x = 0; x < video_width; x++)
            rcp_result[x] = 1.0f / (result[x][3]+0.0001f);

        /* drawing bar */
        for (y = 0; y < spectogram_height; y++) {
            float height = (spectogram_height - y) * (1.0f/spectogram_height);
            uint8_t *lineptr = data + y * linesize;
            for (x = 0; x < video_width; x++) {
                float mul;
                if (result[x][3] <= height) {
                    *lineptr++ = 0;
                    *lineptr++ = 0;
                    *lineptr++ = 0;
                } else {
                    mul = (result[x][3] - height) * rcp_result[x];
                    *lineptr++ = mul * result[x][0] + 0.5f;
                    *lineptr++ = mul * result[x][1] + 0.5f;
                    *lineptr++ = mul * result[x][2] + 0.5f;
                }
            }
        }

        /* drawing font */
        if (s->font_alpha && s->draw_text) {
            for (y = 0; y < font_height; y++) {
                uint8_t *lineptr = data + (spectogram_height + y) * linesize;
                uint8_t *spectogram_src = s->spectogram + s->spectogram_index * linesize;
                uint8_t *fontcolor_value = s->fontcolor_value;
                for (x = 0; x < video_width; x++) {
                    uint8_t alpha = s->font_alpha[y*video_width+x];
                    lineptr[3*x] = (spectogram_src[3*x] * (255-alpha) + fontcolor_value[0] * alpha + 255) >> 8;
                    lineptr[3*x+1] = (spectogram_src[3*x+1] * (255-alpha) + fontcolor_value[1] * alpha + 255) >> 8;
                    lineptr[3*x+2] = (spectogram_src[3*x+2] * (255-alpha) + fontcolor_value[2] * alpha + 255) >> 8;
                    fontcolor_value += 3;
                }
            }
        } else if (s->draw_text) {
            for (y = 0; y < font_height; y++) {
                uint8_t *lineptr = data + (spectogram_height + y) * linesize;
                memcpy(lineptr, s->spectogram + s->spectogram_index * linesize, video_width*3);
            }
            for (x = 0; x < video_width; x += video_width/10) {
                int u;
                static const char str[] = "EF G A BC D ";
                uint8_t *startptr = data + spectogram_height * linesize + x * 3;
                for (u = 0; str[u]; u++) {
                    int v;
                    for (v = 0; v < 16; v++) {
                        uint8_t *p = startptr + v * linesize * video_scale + 8 * 3 * u * video_scale;
                        int ux = x + 8 * u * video_scale;
                        int mask;
                        for (mask = 0x80; mask; mask >>= 1) {
                            if (mask & avpriv_vga16_font[str[u] * 16 + v]) {
                                p[0] = s->fontcolor_value[3*ux];
                                p[1] = s->fontcolor_value[3*ux+1];
                                p[2] = s->fontcolor_value[3*ux+2];
                                if (video_scale == 2) {
                                    p[linesize] = p[0];
                                    p[linesize+1] = p[1];
                                    p[linesize+2] = p[2];
                                    p[3] = p[linesize+3] = s->fontcolor_value[3*ux+3];
                                    p[4] = p[linesize+4] = s->fontcolor_value[3*ux+4];
                                    p[5] = p[linesize+5] = s->fontcolor_value[3*ux+5];
                                }
                            }
                            p += 3 * video_scale;
                            ux += video_scale;
                        }
                    }
                }
            }
        } else {
            for (y = 0; y < font_height; y++) {
                uint8_t *lineptr = data + (spectogram_height + y) * linesize;
                uint8_t *spectogram_src = s->spectogram + s->spectogram_index * linesize;
                for (x = 0; x < video_width; x++) {
                    lineptr[3*x] = spectogram_src[3*x];
                    lineptr[3*x+1] = spectogram_src[3*x+1];
                    lineptr[3*x+2] = spectogram_src[3*x+2];
                }
            }
        }

        /* drawing spectogram/sonogram */
        data += spectogram_start * linesize;
        memcpy(data, s->spectogram + s->spectogram_index*linesize, total_length - back_length);

        data += total_length - back_length;
        if (back_length)
            memcpy(data, s->spectogram, back_length);

        s->outpicref->pts = s->frame_count;
        ret = ff_filter_frame(outlink, av_frame_clone(s->outpicref));
        s->frame_count++;
    }
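
    /* The spectogram is a circular buffer of spectogram_height rows;
     * spectogram_index steps backwards one row per transform, so the sonogram
     * region above is assembled from the newest row down to the end of the
     * buffer and then wraps around (back_length) to the oldest rows. */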
    s->spectogram_count = (s->spectogram_count + 1) % s->count;
    s->spectogram_index = (s->spectogram_index + spectogram_height - 1) % spectogram_height;
    return ret;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    ShowCQTContext *s = ctx->priv;
    int step = inlink->sample_rate / (s->fps * s->count);
    int fft_len = 1 << s->fft_bits;
    int remaining;
    float *audio_data;

    if (!insamples) {
        while (s->remaining_fill < (fft_len >> 1)) {
            int ret, x;
            memset(&s->fft_data[fft_len - s->remaining_fill], 0, sizeof(*s->fft_data) * s->remaining_fill);
            ret = plot_cqt(inlink);
            if (ret < 0)
                return ret;
            for (x = 0; x < (fft_len-step); x++)
                s->fft_data[x] = s->fft_data[x+step];
            s->remaining_fill += step;
        }
        return AVERROR_EOF;
    }
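
    /* Consume the input: s->fft_data is a sliding window of fft_len stereo
     * samples (left in re, right in im) and remaining_fill counts how many
     * samples are still needed to fill it. Each time it is satisfied a
     * transform is plotted, the window is shifted left by
     * step = sample_rate / (fps * count) samples and remaining_fill is reset
     * to step, so consecutive transforms overlap by fft_len - step samples.
     * The EOF branch above zero-pads the tail to flush the remaining frames. */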
    remaining = insamples->nb_samples;
    audio_data = (float*) insamples->data[0];

    while (remaining) {
        if (remaining >= s->remaining_fill) {
            int i = insamples->nb_samples - remaining;
            int j = fft_len - s->remaining_fill;
            int m, ret;

            for (m = 0; m < s->remaining_fill; m++) {
                s->fft_data[j+m].re = audio_data[2*(i+m)];
                s->fft_data[j+m].im = audio_data[2*(i+m)+1];
            }
            ret = plot_cqt(inlink);
            if (ret < 0) {
                av_frame_free(&insamples);
                return ret;
            }
            remaining -= s->remaining_fill;
            for (m = 0; m < fft_len-step; m++)
                s->fft_data[m] = s->fft_data[m+step];
            s->remaining_fill = step;
        } else {
            int i = insamples->nb_samples - remaining;
            int j = fft_len - s->remaining_fill;
            int m;

            for (m = 0; m < remaining; m++) {
                s->fft_data[m+j].re = audio_data[2*(i+m)];
                s->fft_data[m+j].im = audio_data[2*(i+m)+1];
            }
            s->remaining_fill -= remaining;
            remaining = 0;
        }
    }
    av_frame_free(&insamples);
    return 0;
}
static int request_frame(AVFilterLink *outlink)
{
    ShowCQTContext *s = outlink->src->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    int ret;

    ret = ff_request_frame(inlink);
    if (ret == AVERROR_EOF && s->outpicref)
        filter_frame(inlink, NULL);
    return ret;
}

static const AVFilterPad showcqt_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad showcqt_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showcqt = {
    .name           = "showcqt",
    .description    = NULL_IF_CONFIG_SMALL("Convert input audio to a CQT (Constant Q Transform) spectrum video output."),
    .uninit         = uninit,
    .query_formats  = query_formats,
    .priv_size      = sizeof(ShowCQTContext),
    .inputs         = showcqt_inputs,
    .outputs        = showcqt_outputs,
    .priv_class     = &showcqt_class,
};