You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

697 lines
25KB

  1. /*
  2. * Copyright (c) 2014 Muhammad Faiz <mfcc64@gmail.com>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include "config.h"
  21. #include "libavcodec/avfft.h"
  22. #include "libavutil/avassert.h"
  23. #include "libavutil/channel_layout.h"
  24. #include "libavutil/opt.h"
  25. #include "libavutil/xga_font_data.h"
  26. #include "libavutil/qsort.h"
  27. #include "libavutil/time.h"
  28. #include "avfilter.h"
  29. #include "internal.h"
  30. #include <math.h>
  31. #include <stdlib.h>
  32. #if CONFIG_LIBFREETYPE
  33. #include <ft2build.h>
  34. #include FT_FREETYPE_H
  35. #endif
  36. /* this filter is designed to do 16 bins/semitones constant Q transform with Brown-Puckette algorithm
  37. * start from E0 to D#10 (10 octaves)
  38. * so there are 16 bins/semitones * 12 semitones/octaves * 10 octaves = 1920 bins
  39. * match with full HD resolution */
/* Output geometry at full HD; halved when the "fullhd" option is 0. */
#define VIDEO_WIDTH 1920                                 /* 16 bins x 12 semitones x 10 octaves (see comment above) */
#define VIDEO_HEIGHT 1080
#define FONT_HEIGHT 32                                   /* height of the note-name strip between bars and sonogram */
/* NOTE(review): "SPECTOGRAM" is a typo for "SPECTROGRAM" but is kept — it is
 * baked into identifiers throughout this file. */
#define SPECTOGRAM_HEIGHT ((VIDEO_HEIGHT-FONT_HEIGHT)/2) /* scrolling sonogram height (bottom part of the frame) */
#define SPECTOGRAM_START (VIDEO_HEIGHT-SPECTOGRAM_HEIGHT) /* first output row of the sonogram area */
#define BASE_FREQ 20.051392800492                        /* frequency of bin 0; presumably chosen so bin centers hit note pitches starting at E0 — TODO confirm */
#define COEFF_CLAMP 1.0e-4                               /* base fraction of total kernel energy below which coefficients are dropped */
/* One nonzero entry of a sparse spectral-kernel row: the coefficient value
 * and the FFT bin index it multiplies (see the kernel build in config_output
 * and its use in plot_cqt). */
typedef struct {
    FFTSample value; /* kernel coefficient; may be negative */
    int index;       /* FFT bin this coefficient applies to */
} SparseCoeff;
typedef struct {
    const AVClass *class;
    AVFrame *outpicref;              /* reusable output video frame; clones of it are sent downstream */
    FFTContext *fft_context;         /* shared FFT, size 1 << fft_bits */
    FFTComplex *fft_data;            /* sliding audio window: re = left, im = right samples */
    FFTComplex *fft_result_left;     /* FFT of left channel after separation */
    FFTComplex *fft_result_right;    /* FFT of right channel after separation */
    uint8_t *spectogram;             /* ring buffer of rendered sonogram rows (RGB24) */
    SparseCoeff *coeff_sort;         /* scratch array for sorting one kernel row */
    SparseCoeff *coeffs[VIDEO_WIDTH]; /* per-bin sparse spectral kernel */
    uint8_t *font_alpha;             /* freetype-rendered note-name alpha map, or NULL for builtin font */
    char *fontfile;                  /* font file path for freetype rendering (option) */
    int coeffs_len[VIDEO_WIDTH];     /* number of entries in each coeffs[] row */
    uint8_t font_color[VIDEO_WIDTH]; /* per-column color ramp used to tint the note names */
    int64_t frame_count;             /* pts of the next output frame (in 1/fps units) */
    int spectogram_count;            /* transforms done since last emitted frame, modulo count */
    int spectogram_index;            /* current write row in the spectogram ring buffer */
    int fft_bits;                    /* log2 of the FFT length */
    int req_fullfilled;              /* set by plot_cqt when a frame was emitted; polled by request_frame */
    int remaining_fill;              /* samples still needed to fill the second half of fft_data */
    double volume;                   /* amplitude scale applied inside the kernel */
    double timeclamp;                /* lower timeclamp: time-accurate; higher: freq-accurate (at low freq) */
    float coeffclamp;                /* lower coeffclamp: more precise; higher: faster */
    int fullhd;                      /* if true, output at full HD resolution, otherwise halved */
    float gamma;                     /* lower gamma: more contrast; higher: more range */
    int fps;                         /* output frame rate; integer only, so 24000/1001 etc. cannot be expressed */
    int count;                       /* fps * count = transform rate */
} ShowCQTContext;
#define OFFSET(x) offsetof(ShowCQTContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* User-visible options; ranges are enforced by the AVOption system. */
static const AVOption showcqt_options[] = {
    { "volume",     "set volume",                        OFFSET(volume),     AV_OPT_TYPE_DOUBLE, { .dbl = 16 },    0.1, 100, FLAGS },
    { "timeclamp",  "set timeclamp",                     OFFSET(timeclamp),  AV_OPT_TYPE_DOUBLE, { .dbl = 0.17 },  0.1, 1.0, FLAGS },
    { "coeffclamp", "set coeffclamp",                    OFFSET(coeffclamp), AV_OPT_TYPE_FLOAT,  { .dbl = 1 },     0.1, 10,  FLAGS },
    { "gamma",      "set gamma",                         OFFSET(gamma),      AV_OPT_TYPE_FLOAT,  { .dbl = 3 },     1,   7,   FLAGS },
    { "fullhd",     "set full HD resolution",            OFFSET(fullhd),     AV_OPT_TYPE_INT,    { .i64 = 1 },     0,   1,   FLAGS },
    { "fps",        "set video fps",                     OFFSET(fps),        AV_OPT_TYPE_INT,    { .i64 = 25 },    10,  100, FLAGS },
    { "count",      "set number of transform per frame", OFFSET(count),      AV_OPT_TYPE_INT,    { .i64 = 6 },     1,   30,  FLAGS },
    { "fontfile",   "set font file",                     OFFSET(fontfile),   AV_OPT_TYPE_STRING, { .str = NULL },  CHAR_MIN, CHAR_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showcqt);
  93. static av_cold void uninit(AVFilterContext *ctx)
  94. {
  95. int k;
  96. ShowCQTContext *s = ctx->priv;
  97. av_fft_end(s->fft_context);
  98. s->fft_context = NULL;
  99. for (k = 0; k < VIDEO_WIDTH; k++)
  100. av_freep(&s->coeffs[k]);
  101. av_freep(&s->fft_data);
  102. av_freep(&s->fft_result_left);
  103. av_freep(&s->fft_result_right);
  104. av_freep(&s->coeff_sort);
  105. av_freep(&s->spectogram);
  106. av_freep(&s->font_alpha);
  107. av_frame_free(&s->outpicref);
  108. }
/* Negotiate formats: stereo float audio at 44100/48000 Hz in, RGB24 video out. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE };
    /* stereo only: plot_cqt packs left into re and right into im of one FFT */
    static const int64_t channel_layouts[] = { AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_STEREO_DOWNMIX, -1 };
    /* fixed rates keep the rate % (fps*count) check in config_output satisfiable */
    static const int samplerates[] = { 44100, 48000, -1 };

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &inlink->out_formats);

    layouts = avfilter_make_format64_list(channel_layouts);
    if (!layouts)
        return AVERROR(ENOMEM);
    ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);

    formats = ff_make_format_list(samplerates);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &inlink->out_samplerates);

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &outlink->in_formats);

    return 0;
}
  139. #if CONFIG_LIBFREETYPE
  140. static void load_freetype_font(AVFilterContext *ctx)
  141. {
  142. static const char str[] = "EF G A BC D ";
  143. ShowCQTContext *s = ctx->priv;
  144. FT_Library lib = NULL;
  145. FT_Face face = NULL;
  146. int video_scale = s->fullhd ? 2 : 1;
  147. int video_width = (VIDEO_WIDTH/2) * video_scale;
  148. int font_height = (FONT_HEIGHT/2) * video_scale;
  149. int font_width = 8 * video_scale;
  150. int font_repeat = font_width * 12;
  151. int linear_hori_advance = font_width * 65536;
  152. int non_monospace_warning = 0;
  153. int x;
  154. s->font_alpha = NULL;
  155. if (!s->fontfile)
  156. return;
  157. if (FT_Init_FreeType(&lib))
  158. goto fail;
  159. if (FT_New_Face(lib, s->fontfile, 0, &face))
  160. goto fail;
  161. if (FT_Set_Char_Size(face, 16*64, 0, 0, 0))
  162. goto fail;
  163. if (FT_Load_Char(face, 'A', FT_LOAD_RENDER))
  164. goto fail;
  165. if (FT_Set_Char_Size(face, 16*64 * linear_hori_advance / face->glyph->linearHoriAdvance, 0, 0, 0))
  166. goto fail;
  167. s->font_alpha = av_malloc(font_height * video_width);
  168. if (!s->font_alpha)
  169. goto fail;
  170. memset(s->font_alpha, 0, font_height * video_width);
  171. for (x = 0; x < 12; x++) {
  172. int sx, sy, rx, bx, by, dx, dy;
  173. if (str[x] == ' ')
  174. continue;
  175. if (FT_Load_Char(face, str[x], FT_LOAD_RENDER))
  176. goto fail;
  177. if (face->glyph->advance.x != font_width*64 && !non_monospace_warning) {
  178. av_log(ctx, AV_LOG_WARNING, "Font is not monospace\n");
  179. non_monospace_warning = 1;
  180. }
  181. sy = font_height - 4*video_scale - face->glyph->bitmap_top;
  182. for (rx = 0; rx < 10; rx++) {
  183. sx = rx * font_repeat + x * font_width + face->glyph->bitmap_left;
  184. for (by = 0; by < face->glyph->bitmap.rows; by++) {
  185. dy = by + sy;
  186. if (dy < 0)
  187. continue;
  188. if (dy >= font_height)
  189. break;
  190. for (bx = 0; bx < face->glyph->bitmap.width; bx++) {
  191. dx = bx + sx;
  192. if (dx < 0)
  193. continue;
  194. if (dx >= video_width)
  195. break;
  196. s->font_alpha[dy*video_width+dx] = face->glyph->bitmap.buffer[by*face->glyph->bitmap.width+bx];
  197. }
  198. }
  199. }
  200. }
  201. FT_Done_Face(face);
  202. FT_Done_FreeType(lib);
  203. return;
  204. fail:
  205. av_log(ctx, AV_LOG_WARNING, "Error while loading freetype font, using default font instead\n");
  206. FT_Done_Face(face);
  207. FT_Done_FreeType(lib);
  208. av_freep(&s->font_alpha);
  209. return;
  210. }
  211. #endif
  212. static inline int qsort_sparsecoeff(const SparseCoeff *a, const SparseCoeff *b)
  213. {
  214. if (fabsf(a->value) >= fabsf(b->value))
  215. return 1;
  216. else
  217. return -1;
  218. }
/* Configure the output link and precompute the sparse spectral kernel.
 * For each of the 1920 frequency bins, a windowed complex exponential is
 * built in the time domain, transformed with the shared FFT, and the
 * resulting frequency-domain row is thresholded to keep only the largest
 * coefficients (Brown-Puckette style kernel).  Returns 0 or an AVERROR. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowCQTContext *s = ctx->priv;
    int fft_len, k, x, y;
    int num_coeffs = 0;
    int rate = inlink->sample_rate;
    /* longest usable window in samples, bounded by the timeclamp option */
    double max_len = rate * (double) s->timeclamp;
    int64_t start_time, end_time;
    int video_scale = s->fullhd ? 2 : 1;
    int video_width = (VIDEO_WIDTH/2) * video_scale;
    int video_height = (VIDEO_HEIGHT/2) * video_scale;
    int spectogram_height = (SPECTOGRAM_HEIGHT/2) * video_scale;

    /* FFT must hold the longest window */
    s->fft_bits = ceil(log2(max_len));
    fft_len = 1 << s->fft_bits;

    /* filter_frame slides the window by rate/(fps*count) samples, so this
     * must divide evenly */
    if (rate % (s->fps * s->count)) {
        av_log(ctx, AV_LOG_ERROR, "Rate (%u) is not divisible by fps*count (%u*%u)\n", rate, s->fps, s->count);
        return AVERROR(EINVAL);
    }

    s->fft_data = av_malloc_array(fft_len, sizeof(*s->fft_data));
    s->coeff_sort = av_malloc_array(fft_len, sizeof(*s->coeff_sort));
    s->fft_result_left = av_malloc_array(fft_len, sizeof(*s->fft_result_left));
    s->fft_result_right = av_malloc_array(fft_len, sizeof(*s->fft_result_right));
    s->fft_context = av_fft_init(s->fft_bits, 0);

    /* uninit() frees whatever was allocated before a failure here */
    if (!s->fft_data || !s->coeff_sort || !s->fft_result_left || !s->fft_result_right || !s->fft_context)
        return AVERROR(ENOMEM);

    /* initializing font: a sin^2 color ramp over one octave-group of columns */
    for (x = 0; x < video_width; x++) {
        if (x >= (12*3+8)*8*video_scale && x < (12*4+8)*8*video_scale) {
            float fx = (x-(12*3+8)*8*video_scale) * (2.0f/(192.0f*video_scale));
            float sv = sinf(M_PI*fx);
            s->font_color[x] = sv*sv*255.0f + 0.5f;
        } else {
            s->font_color[x] = 0;
        }
    }

#if CONFIG_LIBFREETYPE
    load_freetype_font(ctx);
#else
    if (s->fontfile)
        av_log(ctx, AV_LOG_WARNING, "Freetype is not available, ignoring fontfile option\n");
    s->font_alpha = NULL;
#endif

    av_log(ctx, AV_LOG_INFO, "Calculating spectral kernel, please wait\n");
    start_time = av_gettime_relative();
    for (k = 0; k < VIDEO_WIDTH; k++) {
        int hlen = fft_len >> 1;
        float total = 0;
        float partial = 0;
        /* 192 bins per octave: 16 bins/semitone * 12 semitones */
        double freq = BASE_FREQ * exp2(k * (1.0/192.0));
        /* nominal window length: 384 cycles at this frequency */
        double tlen = rate * (24.0 * 16.0) /freq;
        /* a window function from Albert H. Nuttall,
         * "Some Windows with Very Good Sidelobe Behavior"
         * -93.32 dB peak sidelobe and 18 dB/octave asymptotic decay
         * coefficient normalized to a0 = 1 */
        double a0 = 0.355768;
        double a1 = 0.487396/a0;
        double a2 = 0.144232/a0;
        double a3 = 0.012604/a0;
        double sv_step, cv_step, sv, cv;
        double sw_step, cw_step, sw, cw, w;

        /* soft-clamp the window length against max_len (harmonic blend) */
        tlen = tlen * max_len / (tlen + max_len);
        s->fft_data[0].re = 0;
        s->fft_data[0].im = 0;
        /* window center sits at hlen; value is the window peak, normalized */
        s->fft_data[hlen].re = (1.0 + a1 + a2 + a3) * (1.0/tlen) * s->volume * (1.0/fft_len);
        s->fft_data[hlen].im = 0;
        /* complex oscillator advanced by recurrence instead of calling
         * sin/cos per sample */
        sv_step = sv = sin(2.0*M_PI*freq*(1.0/rate));
        cv_step = cv = cos(2.0*M_PI*freq*(1.0/rate));
        /* also optimizing window func */
        sw_step = sw = sin(2.0*M_PI*(1.0/tlen));
        cw_step = cw = cos(2.0*M_PI*(1.0/tlen));
        for (x = 1; x < 0.5 * tlen; x++) {
            double cv_tmp, cw_tmp;
            double cw2, cw3, sw2;
            /* cos/sin of 2w and 3w via angle-addition identities */
            cw2 = cw * cw - sw * sw;
            sw2 = cw * sw + sw * cw;
            cw3 = cw * cw2 - sw * sw2;
            w = (1.0 + a1 * cw + a2 * cw2 + a3 * cw3) * (1.0/tlen) * s->volume * (1.0/fft_len);
            /* window is symmetric around hlen; exponential is conjugated */
            s->fft_data[hlen + x].re = w * cv;
            s->fft_data[hlen + x].im = w * sv;
            s->fft_data[hlen - x].re = s->fft_data[hlen + x].re;
            s->fft_data[hlen - x].im = -s->fft_data[hlen + x].im;
            cv_tmp = cv * cv_step - sv * sv_step;
            sv = sv * cv_step + cv * sv_step;
            cv = cv_tmp;
            cw_tmp = cw * cw_step - sw * sw_step;
            sw = sw * cw_step + cw * sw_step;
            cw = cw_tmp;
        }
        /* zero the remainder outside the window */
        for (; x < hlen; x++) {
            s->fft_data[hlen + x].re = 0;
            s->fft_data[hlen + x].im = 0;
            s->fft_data[hlen - x].re = 0;
            s->fft_data[hlen - x].im = 0;
        }
        av_fft_permute(s->fft_context, s->fft_data);
        av_fft_calc(s->fft_context, s->fft_data);
        /* the time-domain signal is (conjugate-)symmetric, so the transform
         * is real: only .re is kept */
        for (x = 0; x < fft_len; x++) {
            s->coeff_sort[x].index = x;
            s->coeff_sort[x].value = s->fft_data[x].re;
        }
        AV_QSORT(s->coeff_sort, fft_len, SparseCoeff, qsort_sparsecoeff);
        for (x = 0; x < fft_len; x++)
            total += fabsf(s->coeff_sort[x].value);
        /* walk from smallest magnitude up; keep the tail once the discarded
         * mass would exceed the clamp threshold */
        for (x = 0; x < fft_len; x++) {
            partial += fabsf(s->coeff_sort[x].value);
            if (partial > total * s->coeffclamp * COEFF_CLAMP) {
                s->coeffs_len[k] = fft_len - x;
                num_coeffs += s->coeffs_len[k];
                s->coeffs[k] = av_malloc_array(s->coeffs_len[k], sizeof(*s->coeffs[k]));
                if (!s->coeffs[k])
                    return AVERROR(ENOMEM);
                for (y = 0; y < s->coeffs_len[k]; y++)
                    s->coeffs[k][y] = s->coeff_sort[x+y];
                break;
            }
        }
    }
    end_time = av_gettime_relative();
    av_log(ctx, AV_LOG_INFO, "Elapsed time %.6f s (fft_len=%u, num_coeffs=%u)\n", 1e-6 * (end_time-start_time), fft_len, num_coeffs);

    outlink->w = video_width;
    outlink->h = video_height;

    s->req_fullfilled = 0;
    s->spectogram_index = 0;
    s->frame_count = 0;
    s->spectogram_count = 0;
    s->remaining_fill = fft_len >> 1;
    memset(s->fft_data, 0, fft_len * sizeof(*s->fft_data));

    s->outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!s->outpicref)
        return AVERROR(ENOMEM);

    /* sonogram ring buffer, one linesize-wide row per scanline */
    s->spectogram = av_calloc(spectogram_height, s->outpicref->linesize[0]);
    if (!s->spectogram)
        return AVERROR(ENOMEM);

    outlink->sample_aspect_ratio = av_make_q(1, 1);
    outlink->time_base = av_make_q(1, s->fps);
    outlink->frame_rate = av_make_q(s->fps, 1);
    return 0;
}
  359. static int plot_cqt(AVFilterLink *inlink)
  360. {
  361. AVFilterContext *ctx = inlink->dst;
  362. ShowCQTContext *s = ctx->priv;
  363. AVFilterLink *outlink = ctx->outputs[0];
  364. int fft_len = 1 << s->fft_bits;
  365. FFTSample result[VIDEO_WIDTH][4];
  366. int x, y, ret = 0;
  367. int linesize = s->outpicref->linesize[0];
  368. int video_scale = s->fullhd ? 2 : 1;
  369. int video_width = (VIDEO_WIDTH/2) * video_scale;
  370. int spectogram_height = (SPECTOGRAM_HEIGHT/2) * video_scale;
  371. int spectogram_start = (SPECTOGRAM_START/2) * video_scale;
  372. int font_height = (FONT_HEIGHT/2) * video_scale;
  373. /* real part contains left samples, imaginary part contains right samples */
  374. memcpy(s->fft_result_left, s->fft_data, fft_len * sizeof(*s->fft_data));
  375. av_fft_permute(s->fft_context, s->fft_result_left);
  376. av_fft_calc(s->fft_context, s->fft_result_left);
  377. /* separate left and right, (and multiply by 2.0) */
  378. s->fft_result_right[0].re = 2.0f * s->fft_result_left[0].im;
  379. s->fft_result_right[0].im = 0;
  380. s->fft_result_left[0].re = 2.0f * s->fft_result_left[0].re;
  381. s->fft_result_left[0].im = 0;
  382. for (x = 1; x <= fft_len >> 1; x++) {
  383. FFTSample tmpy = s->fft_result_left[fft_len-x].im - s->fft_result_left[x].im;
  384. s->fft_result_right[x].re = s->fft_result_left[x].im + s->fft_result_left[fft_len-x].im;
  385. s->fft_result_right[x].im = s->fft_result_left[x].re - s->fft_result_left[fft_len-x].re;
  386. s->fft_result_right[fft_len-x].re = s->fft_result_right[x].re;
  387. s->fft_result_right[fft_len-x].im = -s->fft_result_right[x].im;
  388. s->fft_result_left[x].re = s->fft_result_left[x].re + s->fft_result_left[fft_len-x].re;
  389. s->fft_result_left[x].im = tmpy;
  390. s->fft_result_left[fft_len-x].re = s->fft_result_left[x].re;
  391. s->fft_result_left[fft_len-x].im = -s->fft_result_left[x].im;
  392. }
  393. /* calculating cqt */
  394. for (x = 0; x < VIDEO_WIDTH; x++) {
  395. int u;
  396. float g = 1.0f / s->gamma;
  397. FFTComplex l = {0,0};
  398. FFTComplex r = {0,0};
  399. for (u = 0; u < s->coeffs_len[x]; u++) {
  400. FFTSample value = s->coeffs[x][u].value;
  401. int index = s->coeffs[x][u].index;
  402. l.re += value * s->fft_result_left[index].re;
  403. l.im += value * s->fft_result_left[index].im;
  404. r.re += value * s->fft_result_right[index].re;
  405. r.im += value * s->fft_result_right[index].im;
  406. }
  407. /* result is power, not amplitude */
  408. result[x][0] = l.re * l.re + l.im * l.im;
  409. result[x][2] = r.re * r.re + r.im * r.im;
  410. result[x][1] = 0.5f * (result[x][0] + result[x][2]);
  411. result[x][3] = result[x][1];
  412. result[x][0] = 255.0f * powf(FFMIN(1.0f,result[x][0]), g);
  413. result[x][1] = 255.0f * powf(FFMIN(1.0f,result[x][1]), g);
  414. result[x][2] = 255.0f * powf(FFMIN(1.0f,result[x][2]), g);
  415. }
  416. if (!s->fullhd) {
  417. for (x = 0; x < video_width; x++) {
  418. result[x][0] = 0.5f * (result[2*x][0] + result[2*x+1][0]);
  419. result[x][1] = 0.5f * (result[2*x][1] + result[2*x+1][1]);
  420. result[x][2] = 0.5f * (result[2*x][2] + result[2*x+1][2]);
  421. result[x][3] = 0.5f * (result[2*x][3] + result[2*x+1][3]);
  422. }
  423. }
  424. for (x = 0; x < video_width; x++) {
  425. s->spectogram[s->spectogram_index*linesize + 3*x] = result[x][0] + 0.5f;
  426. s->spectogram[s->spectogram_index*linesize + 3*x + 1] = result[x][1] + 0.5f;
  427. s->spectogram[s->spectogram_index*linesize + 3*x + 2] = result[x][2] + 0.5f;
  428. }
  429. /* drawing */
  430. if (!s->spectogram_count) {
  431. uint8_t *data = (uint8_t*) s->outpicref->data[0];
  432. float rcp_result[VIDEO_WIDTH];
  433. int total_length = linesize * spectogram_height;
  434. int back_length = linesize * s->spectogram_index;
  435. for (x = 0; x < video_width; x++)
  436. rcp_result[x] = 1.0f / (result[x][3]+0.0001f);
  437. /* drawing bar */
  438. for (y = 0; y < spectogram_height; y++) {
  439. float height = (spectogram_height - y) * (1.0f/spectogram_height);
  440. uint8_t *lineptr = data + y * linesize;
  441. for (x = 0; x < video_width; x++) {
  442. float mul;
  443. if (result[x][3] <= height) {
  444. *lineptr++ = 0;
  445. *lineptr++ = 0;
  446. *lineptr++ = 0;
  447. } else {
  448. mul = (result[x][3] - height) * rcp_result[x];
  449. *lineptr++ = mul * result[x][0] + 0.5f;
  450. *lineptr++ = mul * result[x][1] + 0.5f;
  451. *lineptr++ = mul * result[x][2] + 0.5f;
  452. }
  453. }
  454. }
  455. /* drawing font */
  456. if (s->font_alpha) {
  457. for (y = 0; y < font_height; y++) {
  458. uint8_t *lineptr = data + (spectogram_height + y) * linesize;
  459. uint8_t *spectogram_src = s->spectogram + s->spectogram_index * linesize;
  460. for (x = 0; x < video_width; x++) {
  461. uint8_t alpha = s->font_alpha[y*video_width+x];
  462. uint8_t color = s->font_color[x];
  463. lineptr[3*x] = (spectogram_src[3*x] * (255-alpha) + (255-color) * alpha + 255) >> 8;
  464. lineptr[3*x+1] = (spectogram_src[3*x+1] * (255-alpha) + 255) >> 8;
  465. lineptr[3*x+2] = (spectogram_src[3*x+2] * (255-alpha) + color * alpha + 255) >> 8;
  466. }
  467. }
  468. } else {
  469. for (y = 0; y < font_height; y++) {
  470. uint8_t *lineptr = data + (spectogram_height + y) * linesize;
  471. memcpy(lineptr, s->spectogram + s->spectogram_index * linesize, video_width*3);
  472. }
  473. for (x = 0; x < video_width; x += video_width/10) {
  474. int u;
  475. static const char str[] = "EF G A BC D ";
  476. uint8_t *startptr = data + spectogram_height * linesize + x * 3;
  477. for (u = 0; str[u]; u++) {
  478. int v;
  479. for (v = 0; v < 16; v++) {
  480. uint8_t *p = startptr + v * linesize * video_scale + 8 * 3 * u * video_scale;
  481. int ux = x + 8 * u * video_scale;
  482. int mask;
  483. for (mask = 0x80; mask; mask >>= 1) {
  484. if (mask & avpriv_vga16_font[str[u] * 16 + v]) {
  485. p[0] = 255 - s->font_color[ux];
  486. p[1] = 0;
  487. p[2] = s->font_color[ux];
  488. if (video_scale == 2) {
  489. p[linesize] = p[0];
  490. p[linesize+1] = p[1];
  491. p[linesize+2] = p[2];
  492. p[3] = p[linesize+3] = 255 - s->font_color[ux+1];
  493. p[4] = p[linesize+4] = 0;
  494. p[5] = p[linesize+5] = s->font_color[ux+1];
  495. }
  496. }
  497. p += 3 * video_scale;
  498. ux += video_scale;
  499. }
  500. }
  501. }
  502. }
  503. }
  504. /* drawing spectogram/sonogram */
  505. data += spectogram_start * linesize;
  506. memcpy(data, s->spectogram + s->spectogram_index*linesize, total_length - back_length);
  507. data += total_length - back_length;
  508. if (back_length)
  509. memcpy(data, s->spectogram, back_length);
  510. s->outpicref->pts = s->frame_count;
  511. ret = ff_filter_frame(outlink, av_frame_clone(s->outpicref));
  512. s->req_fullfilled = 1;
  513. s->frame_count++;
  514. }
  515. s->spectogram_count = (s->spectogram_count + 1) % s->count;
  516. s->spectogram_index = (s->spectogram_index + spectogram_height - 1) % spectogram_height;
  517. return ret;
  518. }
  519. static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
  520. {
  521. AVFilterContext *ctx = inlink->dst;
  522. ShowCQTContext *s = ctx->priv;
  523. int step = inlink->sample_rate / (s->fps * s->count);
  524. int fft_len = 1 << s->fft_bits;
  525. int remaining;
  526. float *audio_data;
  527. if (!insamples) {
  528. while (s->remaining_fill < (fft_len >> 1)) {
  529. int ret, x;
  530. memset(&s->fft_data[fft_len - s->remaining_fill], 0, sizeof(*s->fft_data) * s->remaining_fill);
  531. ret = plot_cqt(inlink);
  532. if (ret < 0)
  533. return ret;
  534. for (x = 0; x < (fft_len-step); x++)
  535. s->fft_data[x] = s->fft_data[x+step];
  536. s->remaining_fill += step;
  537. }
  538. return AVERROR(EOF);
  539. }
  540. remaining = insamples->nb_samples;
  541. audio_data = (float*) insamples->data[0];
  542. while (remaining) {
  543. if (remaining >= s->remaining_fill) {
  544. int i = insamples->nb_samples - remaining;
  545. int j = fft_len - s->remaining_fill;
  546. int m, ret;
  547. for (m = 0; m < s->remaining_fill; m++) {
  548. s->fft_data[j+m].re = audio_data[2*(i+m)];
  549. s->fft_data[j+m].im = audio_data[2*(i+m)+1];
  550. }
  551. ret = plot_cqt(inlink);
  552. if (ret < 0) {
  553. av_frame_free(&insamples);
  554. return ret;
  555. }
  556. remaining -= s->remaining_fill;
  557. for (m = 0; m < fft_len-step; m++)
  558. s->fft_data[m] = s->fft_data[m+step];
  559. s->remaining_fill = step;
  560. } else {
  561. int i = insamples->nb_samples - remaining;
  562. int j = fft_len - s->remaining_fill;
  563. int m;
  564. for (m = 0; m < remaining; m++) {
  565. s->fft_data[m+j].re = audio_data[2*(i+m)];
  566. s->fft_data[m+j].im = audio_data[2*(i+m)+1];
  567. }
  568. s->remaining_fill -= remaining;
  569. remaining = 0;
  570. }
  571. }
  572. av_frame_free(&insamples);
  573. return 0;
  574. }
  575. static int request_frame(AVFilterLink *outlink)
  576. {
  577. ShowCQTContext *s = outlink->src->priv;
  578. AVFilterLink *inlink = outlink->src->inputs[0];
  579. int ret;
  580. s->req_fullfilled = 0;
  581. do {
  582. ret = ff_request_frame(inlink);
  583. } while (!s->req_fullfilled && ret >= 0);
  584. if (ret == AVERROR_EOF && s->outpicref)
  585. filter_frame(inlink, NULL);
  586. return ret;
  587. }
/* Single audio input pad; samples are accumulated by filter_frame(). */
static const AVFilterPad showcqt_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single video output pad; geometry/kernel setup happens in config_output(). */
static const AVFilterPad showcqt_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
/* Filter registration; no init callback — options are applied by the
 * AVOption system and all real setup is deferred to config_output(). */
AVFilter ff_avf_showcqt = {
    .name = "showcqt",
    .description = NULL_IF_CONFIG_SMALL("Convert input audio to a CQT (Constant Q Transform) spectrum video output."),
    .uninit = uninit,
    .query_formats = query_formats,
    .priv_size = sizeof(ShowCQTContext),
    .inputs = showcqt_inputs,
    .outputs = showcqt_outputs,
    .priv_class = &showcqt_class,
};