/*
 * Copyright (c) 2014 Muhammad Faiz <mfcc64@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavcodec/avfft.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/xga_font_data.h"
#include "libavutil/qsort.h"
#include "libavutil/time.h"
#include "avfilter.h"
#include "internal.h"
#include <math.h>
#include <stdlib.h>
/* This filter computes a constant Q transform with the Brown-Puckette algorithm,
 * using 16 bins per semitone and spanning E0 to D#10 (10 octaves),
 * so there are 16 bins/semitone * 12 semitones/octave * 10 octaves = 1920 bins,
 * matching the full HD width. */
#define VIDEO_WIDTH 1920
#define VIDEO_HEIGHT 1080
#define FONT_HEIGHT 32
#define SPECTOGRAM_HEIGHT ((VIDEO_HEIGHT-FONT_HEIGHT)/2)
#define SPECTOGRAM_START (VIDEO_HEIGHT-SPECTOGRAM_HEIGHT)
#define BASE_FREQ 20.051392800492
#define COEFF_CLAMP 1.0e-4
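
/* A note on the column-to-frequency mapping (a sketch, inferred from the kernel
 * setup below, not part of the original comments): video column k corresponds to
 *     freq(k) = BASE_FREQ * 2^(k / 192.0)
 * With 16 bins per semitone, the 16 bins of E0 are k = 0..15, centered at k = 7.5,
 * and BASE_FREQ * 2^(7.5/192) is roughly 20.60 Hz, i.e. E0 with A4 = 440 Hz. */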
typedef struct {
    FFTSample value;
    int index;
} SparseCoeff;

/* comparator for AV_QSORT: sorts coefficients by ascending magnitude */
static inline int qsort_sparsecoeff(const SparseCoeff *a, const SparseCoeff *b)
{
    if (fabsf(a->value) >= fabsf(b->value))
        return 1;
    else
        return -1;
}
typedef struct {
    const AVClass *class;
    AVFrame *outpicref;
    FFTContext *fft_context;
    FFTComplex *fft_data;
    FFTComplex *fft_result_left;
    FFTComplex *fft_result_right;
    SparseCoeff *coeff_sort;
    SparseCoeff *coeffs[VIDEO_WIDTH];
    int coeffs_len[VIDEO_WIDTH];
    uint8_t font_color[VIDEO_WIDTH];
    uint8_t spectogram[SPECTOGRAM_HEIGHT][VIDEO_WIDTH][3];
    int64_t frame_count;
    int spectogram_count;
    int spectogram_index;
    int fft_bits;
    int req_fullfilled;
    int remaining_fill;
    double volume;
    double timeclamp;   /* lower timeclamp is more time-accurate, higher timeclamp is more frequency-accurate (at low frequencies) */
    float coeffclamp;   /* lower coeffclamp is more precise, higher coeffclamp is faster */
    float gamma;        /* lower gamma gives more contrast, higher gamma gives more range */
    int fps;            /* an integer is enough for the required fps, but rates such as 24000/1001 cannot be expressed */
    int count;          /* fps * count = transform rate */
} ShowCQTContext;
#define OFFSET(x) offsetof(ShowCQTContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption showcqt_options[] = {
    { "volume",     "set volume",                         OFFSET(volume),     AV_OPT_TYPE_DOUBLE, { .dbl = 16 },   0.1, 100, FLAGS },
    { "timeclamp",  "set timeclamp",                      OFFSET(timeclamp),  AV_OPT_TYPE_DOUBLE, { .dbl = 0.17 }, 0.1, 1.0, FLAGS },
    { "coeffclamp", "set coeffclamp",                     OFFSET(coeffclamp), AV_OPT_TYPE_FLOAT,  { .dbl = 1 },    0.1, 10,  FLAGS },
    { "gamma",      "set gamma",                          OFFSET(gamma),      AV_OPT_TYPE_FLOAT,  { .dbl = 3 },    1,   7,   FLAGS },
    { "fps",        "set video fps",                      OFFSET(fps),        AV_OPT_TYPE_INT,    { .i64 = 25 },   10,  100, FLAGS },
    { "count",      "set number of transforms per frame", OFFSET(count),      AV_OPT_TYPE_INT,    { .i64 = 6 },    1,   30,  FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showcqt);
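
/* Example usage (a sketch, not part of the original source): once this filter is
 * built into ffmpeg, it can be driven from the command line with the options
 * declared above, e.g.
 *     ffmpeg -i input.wav -filter_complex "showcqt=timeclamp=0.17:gamma=3:fps=25:count=6" output.mkv
 * Note that the input sample rate must be divisible by fps*count (checked in
 * config_output below). */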
static av_cold void uninit(AVFilterContext *ctx)
{
    int k;
    ShowCQTContext *s = ctx->priv;
    av_fft_end(s->fft_context);
    s->fft_context = NULL;
    for (k = 0; k < VIDEO_WIDTH; k++)
        av_freep(&s->coeffs[k]);
    av_freep(&s->fft_data);
    av_freep(&s->fft_result_left);
    av_freep(&s->fft_result_right);
    av_freep(&s->coeff_sort);
    av_frame_free(&s->outpicref);
}
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE };
    static const int64_t channel_layouts[] = { AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_STEREO_DOWNMIX, -1 };
    static const int samplerates[] = { 44100, 48000, -1 };

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &inlink->out_formats);

    layouts = avfilter_make_format64_list(channel_layouts);
    if (!layouts)
        return AVERROR(ENOMEM);
    ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);

    formats = ff_make_format_list(samplerates);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &inlink->out_samplerates);

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &outlink->in_formats);

    return 0;
}
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowCQTContext *s = ctx->priv;
    int fft_len, k, x, y;
    int num_coeffs = 0;
    int rate = inlink->sample_rate;
    double max_len = rate * (double) s->timeclamp;
    int64_t start_time, end_time;

    s->fft_bits = ceil(log2(max_len));
    fft_len = 1 << s->fft_bits;

    if (rate % (s->fps * s->count))
    {
        av_log(ctx, AV_LOG_ERROR, "Rate (%u) is not divisible by fps*count (%u*%u)\n", rate, s->fps, s->count);
        return AVERROR(EINVAL);
    }

    s->fft_data         = av_malloc_array(fft_len, sizeof(*s->fft_data));
    s->coeff_sort       = av_malloc_array(fft_len, sizeof(*s->coeff_sort));
    s->fft_result_left  = av_malloc_array(fft_len, sizeof(*s->fft_result_left));
    s->fft_result_right = av_malloc_array(fft_len, sizeof(*s->fft_result_right));
    s->fft_context      = av_fft_init(s->fft_bits, 0);
    if (!s->fft_data || !s->coeff_sort || !s->fft_result_left || !s->fft_result_right || !s->fft_context)
        return AVERROR(ENOMEM);
    /* initializing font */
    for (x = 0; x < VIDEO_WIDTH; x++)
    {
        if (x >= (12*3+8)*16 && x < (12*4+8)*16)
        {
            float fx = (x-(12*3+8)*16) * (1.0f/192.0f);
            float sv = sinf(M_PI*fx);
            s->font_color[x] = sv*sv*255.0f + 0.5f;
        }
        else
            s->font_color[x] = 0;
    }
    av_log(ctx, AV_LOG_INFO, "Calculating spectral kernel, please wait\n");
    start_time = av_gettime_relative();
    for (k = 0; k < VIDEO_WIDTH; k++)
    {
        int hlen = fft_len >> 1;
        float total = 0;
        float partial = 0;
        double freq = BASE_FREQ * exp2(k * (1.0/192.0));
        double tlen = rate * (24.0 * 16.0) / freq;
        /* a window function from Albert H. Nuttall,
         * "Some Windows with Very Good Sidelobe Behavior"
         * -93.32 dB peak sidelobe and 18 dB/octave asymptotic decay
         * coefficients normalized to a0 = 1 */
        double a0 = 0.355768;
        double a1 = 0.487396/a0;
        double a2 = 0.144232/a0;
        double a3 = 0.012604/a0;
        double sv_step, cv_step, sv, cv;
        double sw_step, cw_step, sw, cw, w;

        tlen = tlen * max_len / (tlen + max_len);
        s->fft_data[0].re = 0;
        s->fft_data[0].im = 0;
        s->fft_data[hlen].re = (1.0 + a1 + a2 + a3) * (1.0/tlen) * s->volume * (1.0/fft_len);
        s->fft_data[hlen].im = 0;
        sv_step = sv = sin(2.0*M_PI*freq*(1.0/rate));
        cv_step = cv = cos(2.0*M_PI*freq*(1.0/rate));
        /* the window function is evaluated with the same recurrence */
        sw_step = sw = sin(2.0*M_PI*(1.0/tlen));
        cw_step = cw = cos(2.0*M_PI*(1.0/tlen));
        for (x = 1; x < 0.5 * tlen; x++)
        {
            double cv_tmp, cw_tmp;
            double cw2, cw3, sw2;
            cw2 = cw * cw - sw * sw;
            sw2 = cw * sw + sw * cw;
            cw3 = cw * cw2 - sw * sw2;
            w = (1.0 + a1 * cw + a2 * cw2 + a3 * cw3) * (1.0/tlen) * s->volume * (1.0/fft_len);
            s->fft_data[hlen + x].re = w * cv;
            s->fft_data[hlen + x].im = w * sv;
            s->fft_data[hlen - x].re =  s->fft_data[hlen + x].re;
            s->fft_data[hlen - x].im = -s->fft_data[hlen + x].im;
            cv_tmp = cv * cv_step - sv * sv_step;
            sv = sv * cv_step + cv * sv_step;
            cv = cv_tmp;
            cw_tmp = cw * cw_step - sw * sw_step;
            sw = sw * cw_step + cw * sw_step;
            cw = cw_tmp;
        }
        for (; x < hlen; x++)
        {
            s->fft_data[hlen + x].re = 0;
            s->fft_data[hlen + x].im = 0;
            s->fft_data[hlen - x].re = 0;
            s->fft_data[hlen - x].im = 0;
        }
        av_fft_permute(s->fft_context, s->fft_data);
        av_fft_calc(s->fft_context, s->fft_data);

        for (x = 0; x < fft_len; x++)
        {
            s->coeff_sort[x].index = x;
            s->coeff_sort[x].value = s->fft_data[x].re;
        }
        AV_QSORT(s->coeff_sort, fft_len, SparseCoeff, qsort_sparsecoeff);
        for (x = 0; x < fft_len; x++)
            total += fabsf(s->coeff_sort[x].value);
        for (x = 0; x < fft_len; x++)
        {
            partial += fabsf(s->coeff_sort[x].value);
            if (partial > (total * s->coeffclamp * COEFF_CLAMP))
            {
                s->coeffs_len[k] = fft_len - x;
                num_coeffs += s->coeffs_len[k];
                s->coeffs[k] = av_malloc_array(s->coeffs_len[k], sizeof(*s->coeffs[k]));
                if (!s->coeffs[k])
                    return AVERROR(ENOMEM);
                for (y = 0; y < s->coeffs_len[k]; y++)
                    s->coeffs[k][y] = s->coeff_sort[x+y];
                break;
            }
        }
    }
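
    /* A note on the truncation above (a sketch, not from the original comments):
     * the kernel coefficients are sorted by ascending magnitude and summed until
     * the running sum exceeds total * coeffclamp * COEFF_CLAMP; the remaining
     * fft_len - x largest coefficients are kept in s->coeffs[k]. The dropped tail
     * therefore accounts for at most a coeffclamp * 1e-4 fraction of the total
     * absolute coefficient mass, which keeps the per-column kernels sparse. */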
    end_time = av_gettime_relative();
    av_log(ctx, AV_LOG_INFO, "Elapsed time %.6f s (fft_len=%u, num_coeffs=%u)\n", 1e-6 * (end_time-start_time), fft_len, num_coeffs);

    outlink->w = VIDEO_WIDTH;
    outlink->h = VIDEO_HEIGHT;

    s->req_fullfilled = 0;
    s->spectogram_index = 0;
    s->frame_count = 0;
    s->spectogram_count = 0;
    s->remaining_fill = fft_len >> 1;
    memset(s->spectogram, 0, VIDEO_WIDTH * SPECTOGRAM_HEIGHT * 3);
    memset(s->fft_data, 0, fft_len * sizeof(*s->fft_data));

    s->outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!s->outpicref)
        return AVERROR(ENOMEM);

    outlink->sample_aspect_ratio = av_make_q(1, 1);
    outlink->time_base = av_make_q(1, s->fps);
    outlink->frame_rate = av_make_q(s->fps, 1);
    return 0;
}
static int plot_cqt(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ShowCQTContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int fft_len = 1 << s->fft_bits;
    FFTSample result[VIDEO_WIDTH][4];
    int x, y, ret = 0;

    /* real part contains left samples, imaginary part contains right samples */
    memcpy(s->fft_result_left, s->fft_data, fft_len * sizeof(*s->fft_data));
    av_fft_permute(s->fft_context, s->fft_result_left);
    av_fft_calc(s->fft_context, s->fft_result_left);

    /* separate left and right (and multiply by 2.0) */
    s->fft_result_right[0].re = 2.0f * s->fft_result_left[0].im;
    s->fft_result_right[0].im = 0;
    s->fft_result_left[0].re  = 2.0f * s->fft_result_left[0].re;
    s->fft_result_left[0].im  = 0;
    for (x = 1; x <= (fft_len >> 1); x++)
    {
        FFTSample tmpy = s->fft_result_left[fft_len-x].im - s->fft_result_left[x].im;
        s->fft_result_right[x].re = s->fft_result_left[x].im + s->fft_result_left[fft_len-x].im;
        s->fft_result_right[x].im = s->fft_result_left[x].re - s->fft_result_left[fft_len-x].re;
        s->fft_result_right[fft_len-x].re =  s->fft_result_right[x].re;
        s->fft_result_right[fft_len-x].im = -s->fft_result_right[x].im;
        s->fft_result_left[x].re = s->fft_result_left[x].re + s->fft_result_left[fft_len-x].re;
        s->fft_result_left[x].im = tmpy;
        s->fft_result_left[fft_len-x].re =  s->fft_result_left[x].re;
        s->fft_result_left[fft_len-x].im = -s->fft_result_left[x].im;
    }
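
    /* Background on the separation above (a sketch, not from the original
     * comments): packing left samples into the real part and right samples into
     * the imaginary part lets a single complex FFT transform both channels at
     * once. For real inputs the combined transform Z satisfies
     *     Z[x] = L[x] + i*R[x]   and   conj(Z[N-x]) = L[x] - i*R[x],
     * so sums and differences of Z[x] and Z[N-x] recover the two channels (here
     * scaled by 2, and up to a conjugation that does not affect the power
     * computed below). */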
    /* calculating cqt */
    for (x = 0; x < VIDEO_WIDTH; x++)
    {
        int u;
        float g = 1.0f / s->gamma;
        FFTComplex l = {0,0};
        FFTComplex r = {0,0};

        for (u = 0; u < s->coeffs_len[x]; u++)
        {
            FFTSample value = s->coeffs[x][u].value;
            int index = s->coeffs[x][u].index;
            l.re += value * s->fft_result_left[index].re;
            l.im += value * s->fft_result_left[index].im;
            r.re += value * s->fft_result_right[index].re;
            r.im += value * s->fft_result_right[index].im;
        }
        /* result is power, not amplitude */
        result[x][0] = l.re * l.re + l.im * l.im;
        result[x][2] = r.re * r.re + r.im * r.im;
        result[x][1] = 0.5f * (result[x][0] + result[x][2]);
        result[x][3] = result[x][1];
        result[x][0] = 255.0f * powf(FFMIN(1.0f, result[x][0]), g);
        result[x][1] = 255.0f * powf(FFMIN(1.0f, result[x][1]), g);
        result[x][2] = 255.0f * powf(FFMIN(1.0f, result[x][2]), g);
    }

    for (x = 0; x < VIDEO_WIDTH; x++)
    {
        s->spectogram[s->spectogram_index][x][0] = result[x][0] + 0.5f;
        s->spectogram[s->spectogram_index][x][1] = result[x][1] + 0.5f;
        s->spectogram[s->spectogram_index][x][2] = result[x][2] + 0.5f;
    }
    /* drawing */
    if (!s->spectogram_count)
    {
        uint8_t *data = (uint8_t*) s->outpicref->data[0];
        int linesize = s->outpicref->linesize[0];
        float rcp_result[VIDEO_WIDTH];

        for (x = 0; x < VIDEO_WIDTH; x++)
            rcp_result[x] = 1.0f / (result[x][3]+0.0001f);

        /* drawing bar */
        for (y = 0; y < SPECTOGRAM_HEIGHT; y++)
        {
            float height = (SPECTOGRAM_HEIGHT - y) * (1.0f/SPECTOGRAM_HEIGHT);
            uint8_t *lineptr = data + y * linesize;
            for (x = 0; x < VIDEO_WIDTH; x++)
            {
                float mul;
                if (result[x][3] <= height)
                {
                    *lineptr++ = 0;
                    *lineptr++ = 0;
                    *lineptr++ = 0;
                }
                else
                {
                    mul = (result[x][3] - height) * rcp_result[x];
                    *lineptr++ = mul * result[x][0] + 0.5f;
                    *lineptr++ = mul * result[x][1] + 0.5f;
                    *lineptr++ = mul * result[x][2] + 0.5f;
                }
            }
        }
        /* drawing font */
        for (y = 0; y < FONT_HEIGHT; y++)
        {
            uint8_t *lineptr = data + (SPECTOGRAM_HEIGHT + y) * linesize;
            memcpy(lineptr, s->spectogram[s->spectogram_index], VIDEO_WIDTH*3);
        }
        for (x = 0; x < VIDEO_WIDTH; x += VIDEO_WIDTH/10)
        {
            int u;
            static const char str[] = "EF G A BC D ";
            uint8_t *startptr = data + SPECTOGRAM_HEIGHT * linesize + x * 3;
            for (u = 0; str[u]; u++)
            {
                int v;
                for (v = 0; v < 16; v++)
                {
                    uint8_t *p = startptr + 2 * v * linesize + 16 * 3 * u;
                    int ux = x + 16 * u;
                    int mask;
                    for (mask = 0x80; mask; mask >>= 1)
                    {
                        if (mask & avpriv_vga16_font[str[u] * 16 + v])
                        {
                            p[0] = p[linesize]   = 255 - s->font_color[ux];
                            p[1] = p[linesize+1] = 0;
                            p[2] = p[linesize+2] = s->font_color[ux];
                            p[3] = p[linesize+3] = 255 - s->font_color[ux+1];
                            p[4] = p[linesize+4] = 0;
                            p[5] = p[linesize+5] = s->font_color[ux+1];
                        }
                        p += 6;
                        ux += 2;
                    }
                }
            }
        }
        /* drawing spectogram/sonogram */
        if (linesize == VIDEO_WIDTH * 3)
        {
            int total_length = VIDEO_WIDTH * SPECTOGRAM_HEIGHT * 3;
            int back_length = VIDEO_WIDTH * s->spectogram_index * 3;
            data += SPECTOGRAM_START * VIDEO_WIDTH * 3;
            memcpy(data, s->spectogram[s->spectogram_index], total_length - back_length);
            data += total_length - back_length;
            if (back_length)
                memcpy(data, s->spectogram[0], back_length);
        }
        else
        {
            for (y = 0; y < SPECTOGRAM_HEIGHT; y++)
                memcpy(data + (SPECTOGRAM_START + y) * linesize, s->spectogram[(s->spectogram_index + y) % SPECTOGRAM_HEIGHT], VIDEO_WIDTH * 3);
        }

        s->outpicref->pts = s->frame_count;
        ret = ff_filter_frame(outlink, av_frame_clone(s->outpicref));
        s->req_fullfilled = 1;
        s->frame_count++;
    }
    s->spectogram_count = (s->spectogram_count + 1) % s->count;
    s->spectogram_index = (s->spectogram_index + SPECTOGRAM_HEIGHT - 1) % SPECTOGRAM_HEIGHT;
    return ret;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    ShowCQTContext *s = ctx->priv;
    int step = inlink->sample_rate / (s->fps * s->count);
    int fft_len = 1 << s->fft_bits;
    int remaining;
    float *audio_data;

    if (!insamples)
    {
        /* flush at EOF: zero-pad the unfilled tail and keep plotting until drained */
        while (s->remaining_fill < (fft_len >> 1))
        {
            int ret, x;
            memset(&s->fft_data[fft_len - s->remaining_fill], 0, sizeof(*s->fft_data) * s->remaining_fill);
            ret = plot_cqt(inlink);
            if (ret < 0)
                return ret;
            for (x = 0; x < (fft_len-step); x++)
                s->fft_data[x] = s->fft_data[x+step];
            s->remaining_fill += step;
        }
        return AVERROR(EOF);
    }
    remaining = insamples->nb_samples;
    audio_data = (float*) insamples->data[0];

    while (remaining)
    {
        if (remaining >= s->remaining_fill)
        {
            int i = insamples->nb_samples - remaining;
            int j = fft_len - s->remaining_fill;
            int m, ret;

            /* interleaved stereo float input: left goes to the real part, right to the imaginary part */
            for (m = 0; m < s->remaining_fill; m++)
            {
                s->fft_data[j+m].re = audio_data[2*(i+m)];
                s->fft_data[j+m].im = audio_data[2*(i+m)+1];
            }
            ret = plot_cqt(inlink);
            if (ret < 0)
            {
                av_frame_free(&insamples);
                return ret;
            }
            remaining -= s->remaining_fill;
            for (m = 0; m < fft_len-step; m++)
                s->fft_data[m] = s->fft_data[m+step];
            s->remaining_fill = step;
        }
        else
        {
            int i = insamples->nb_samples - remaining;
            int j = fft_len - s->remaining_fill;
            int m;
            for (m = 0; m < remaining; m++)
            {
                s->fft_data[m+j].re = audio_data[2*(i+m)];
                s->fft_data[m+j].im = audio_data[2*(i+m)+1];
            }
            s->remaining_fill -= remaining;
            remaining = 0;
        }
    }
    av_frame_free(&insamples);
    return 0;
}
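
/* A note on the buffering above (a sketch, not from the original comments):
 * step = sample_rate / (fps * count) is the hop size between transforms, so
 * count transforms are produced per output frame. The buffer of fft_len complex
 * samples slides by step each time: plot_cqt() runs whenever the pending
 * remaining_fill samples have arrived, then the oldest step samples are dropped
 * and remaining_fill is reset to step, giving heavily overlapped analysis
 * windows of up to roughly timeclamp seconds. */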
static int request_frame(AVFilterLink *outlink)
{
    ShowCQTContext *s = outlink->src->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    int ret;

    s->req_fullfilled = 0;
    do {
        ret = ff_request_frame(inlink);
    } while (!s->req_fullfilled && ret >= 0);

    if (ret == AVERROR_EOF && s->outpicref)
        filter_frame(inlink, NULL);
    return ret;
}
static const AVFilterPad showcqt_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad showcqt_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
AVFilter ff_avf_showcqt = {
    .name          = "showcqt",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a CQT (Constant Q Transform) spectrum video output."),
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowCQTContext),
    .inputs        = showcqt_inputs,
    .outputs       = showcqt_outputs,
    .priv_class    = &showcqt_class,
};