/*
 * Copyright (c) 2016 Muhammad Faiz <mfcc64@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/opt.h"
#include "libavutil/eval.h"
#include "libavutil/avassert.h"
#include "libavcodec/avfft.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"

#define RDFT_BITS_MIN 4
#define RDFT_BITS_MAX 16
enum WindowFunc {
    WFUNC_RECTANGULAR,
    WFUNC_HANN,
    WFUNC_HAMMING,
    WFUNC_BLACKMAN,
    WFUNC_NUTTALL3,
    WFUNC_MNUTTALL3,
    WFUNC_NUTTALL,
    WFUNC_BNUTTALL,
    WFUNC_BHARRIS,
    WFUNC_TUKEY,
    NB_WFUNC
};

enum Scale {
    SCALE_LINLIN,
    SCALE_LINLOG,
    SCALE_LOGLIN,
    SCALE_LOGLOG,
    NB_SCALE
};
#define NB_GAIN_ENTRY_MAX 4096

typedef struct GainEntry {
    double freq;
    double gain;
} GainEntry;

typedef struct OverlapIndex {
    int buf_idx;
    int overlap_idx;
} OverlapIndex;

typedef struct FIREqualizerContext {
    const AVClass *class;

    RDFTContext *analysis_rdft;
    RDFTContext *analysis_irdft;
    RDFTContext *rdft;
    RDFTContext *irdft;
    FFTContext *fft_ctx;
    int analysis_rdft_len;
    int rdft_len;

    float *analysis_buf;
    float *dump_buf;
    float *kernel_tmp_buf;
    float *kernel_buf;
    float *conv_buf;
    OverlapIndex *conv_idx;
    int fir_len;
    int nsamples_max;
    int64_t next_pts;
    int frame_nsamples_max;
    int remaining;

    char *gain_cmd;
    char *gain_entry_cmd;
    const char *gain;
    const char *gain_entry;
    double delay;
    double accuracy;
    int wfunc;
    int fixed;
    int multi;
    int zero_phase;
    int scale;
    char *dumpfile;
    int dumpscale;
    int fft2;

    int nb_gain_entry;
    int gain_entry_err;
    GainEntry gain_entry_tbl[NB_GAIN_ENTRY_MAX];
} FIREqualizerContext;
#define OFFSET(x) offsetof(FIREqualizerContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption firequalizer_options[] = {
    { "gain", "set gain curve", OFFSET(gain), AV_OPT_TYPE_STRING, { .str = "gain_interpolate(f)" }, 0, 0, FLAGS },
    { "gain_entry", "set gain entry", OFFSET(gain_entry), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "delay", "set delay", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.0, 1e10, FLAGS },
    { "accuracy", "set accuracy", OFFSET(accuracy), AV_OPT_TYPE_DOUBLE, { .dbl = 5.0 }, 0.0, 1e10, FLAGS },
    { "wfunc", "set window function", OFFSET(wfunc), AV_OPT_TYPE_INT, { .i64 = WFUNC_HANN }, 0, NB_WFUNC-1, FLAGS, "wfunc" },
    { "rectangular", "rectangular window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_RECTANGULAR }, 0, 0, FLAGS, "wfunc" },
    { "hann", "hann window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_HANN }, 0, 0, FLAGS, "wfunc" },
    { "hamming", "hamming window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_HAMMING }, 0, 0, FLAGS, "wfunc" },
    { "blackman", "blackman window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BLACKMAN }, 0, 0, FLAGS, "wfunc" },
    { "nuttall3", "3-term nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_NUTTALL3 }, 0, 0, FLAGS, "wfunc" },
    { "mnuttall3", "minimum 3-term nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_MNUTTALL3 }, 0, 0, FLAGS, "wfunc" },
    { "nuttall", "nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_NUTTALL }, 0, 0, FLAGS, "wfunc" },
    { "bnuttall", "blackman-nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BNUTTALL }, 0, 0, FLAGS, "wfunc" },
    { "bharris", "blackman-harris window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BHARRIS }, 0, 0, FLAGS, "wfunc" },
    { "tukey", "tukey window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_TUKEY }, 0, 0, FLAGS, "wfunc" },
    { "fixed", "set fixed frame samples", OFFSET(fixed), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "multi", "set multi channels mode", OFFSET(multi), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "zero_phase", "set zero phase mode", OFFSET(zero_phase), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "scale", "set gain scale", OFFSET(scale), AV_OPT_TYPE_INT, { .i64 = SCALE_LINLOG }, 0, NB_SCALE-1, FLAGS, "scale" },
    { "linlin", "linear-freq linear-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LINLIN }, 0, 0, FLAGS, "scale" },
    { "linlog", "linear-freq logarithmic-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LINLOG }, 0, 0, FLAGS, "scale" },
    { "loglin", "logarithmic-freq linear-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LOGLIN }, 0, 0, FLAGS, "scale" },
    { "loglog", "logarithmic-freq logarithmic-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LOGLOG }, 0, 0, FLAGS, "scale" },
    { "dumpfile", "set dump file", OFFSET(dumpfile), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "dumpscale", "set dump scale", OFFSET(dumpscale), AV_OPT_TYPE_INT, { .i64 = SCALE_LINLOG }, 0, NB_SCALE-1, FLAGS, "scale" },
    { "fft2", "set 2-channels fft", OFFSET(fft2), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { NULL }
};
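/*
 * Usage sketch (the gain values below are arbitrary; see the FFmpeg filter
 * documentation for the authoritative option syntax):
 *
 *   ffmpeg -i in.wav -af "firequalizer=gain_entry='entry(100,0);entry(1000,-6);entry(10000,0)'" out.wav
 *
 * "gain" is an expression evaluated per frequency (variable "f"), while
 * "gain_entry" builds a table of (frequency, gain) points that the default
 * gain expression, gain_interpolate(f), interpolates between.
 */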
AVFILTER_DEFINE_CLASS(firequalizer);

static void common_uninit(FIREqualizerContext *s)
{
    av_rdft_end(s->analysis_rdft);
    av_rdft_end(s->analysis_irdft);
    av_rdft_end(s->rdft);
    av_rdft_end(s->irdft);
    av_fft_end(s->fft_ctx);
    s->analysis_rdft = s->analysis_irdft = s->rdft = s->irdft = NULL;
    s->fft_ctx = NULL;

    av_freep(&s->analysis_buf);
    av_freep(&s->dump_buf);
    av_freep(&s->kernel_tmp_buf);
    av_freep(&s->kernel_buf);
    av_freep(&s->conv_buf);
    av_freep(&s->conv_idx);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    FIREqualizerContext *s = ctx->priv;

    common_uninit(s);
    av_freep(&s->gain_cmd);
    av_freep(&s->gain_entry_cmd);
}
static int query_formats(AVFilterContext *ctx)
{
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *formats;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
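/*
 * Overlap-add fast convolution of one channel with the precomputed kernel
 * spectrum: the input block is placed after fir_len/2 leading zeros in a
 * zero-padded buffer of rdft_len samples, transformed with the real FFT,
 * multiplied bin by bin with the purely real kernel_buf (buf[0] and buf[1]
 * hold the packed DC and Nyquist bins), transformed back, and the tail of
 * the previous block is added from the other half of conv_buf. Blocks longer
 * than nsamples_max are split recursively.
 */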
static void fast_convolute(FIREqualizerContext *av_restrict s, const float *av_restrict kernel_buf, float *av_restrict conv_buf,
                           OverlapIndex *av_restrict idx, float *av_restrict data, int nsamples)
{
    if (nsamples <= s->nsamples_max) {
        float *buf = conv_buf + idx->buf_idx * s->rdft_len;
        float *obuf = conv_buf + !idx->buf_idx * s->rdft_len + idx->overlap_idx;
        int center = s->fir_len/2;
        int k;

        memset(buf, 0, center * sizeof(*data));
        memcpy(buf + center, data, nsamples * sizeof(*data));
        memset(buf + center + nsamples, 0, (s->rdft_len - nsamples - center) * sizeof(*data));
        av_rdft_calc(s->rdft, buf);

        buf[0] *= kernel_buf[0];
        buf[1] *= kernel_buf[s->rdft_len/2];
        for (k = 1; k < s->rdft_len/2; k++) {
            buf[2*k] *= kernel_buf[k];
            buf[2*k+1] *= kernel_buf[k];
        }

        av_rdft_calc(s->irdft, buf);
        for (k = 0; k < s->rdft_len - idx->overlap_idx; k++)
            buf[k] += obuf[k];
        memcpy(data, buf, nsamples * sizeof(*data));
        idx->buf_idx = !idx->buf_idx;
        idx->overlap_idx = nsamples;
    } else {
        while (nsamples > s->nsamples_max * 2) {
            fast_convolute(s, kernel_buf, conv_buf, idx, data, s->nsamples_max);
            data += s->nsamples_max;
            nsamples -= s->nsamples_max;
        }
        fast_convolute(s, kernel_buf, conv_buf, idx, data, nsamples/2);
        fast_convolute(s, kernel_buf, conv_buf, idx, data + nsamples/2, nsamples - nsamples/2);
    }
}
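/*
 * Variant of fast_convolute() that filters two channels at once with a
 * single complex FFT: channel 0 is packed into the real parts and channel 1
 * into the imaginary parts. Because the kernel spectrum is real, both bins k
 * and rdft_len - k can be scaled by kernel_buf[k] directly. The inverse
 * transform is obtained from the same forward fft_ctx by swapping re <-> im
 * before and after the call, with a 0.5f factor folded in so the scaling
 * matches the RDFT path used by fast_convolute().
 */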
static void fast_convolute2(FIREqualizerContext *av_restrict s, const float *av_restrict kernel_buf, FFTComplex *av_restrict conv_buf,
                            OverlapIndex *av_restrict idx, float *av_restrict data0, float *av_restrict data1, int nsamples)
{
    if (nsamples <= s->nsamples_max) {
        FFTComplex *buf = conv_buf + idx->buf_idx * s->rdft_len;
        FFTComplex *obuf = conv_buf + !idx->buf_idx * s->rdft_len + idx->overlap_idx;
        int center = s->fir_len/2;
        int k;
        float tmp;

        memset(buf, 0, center * sizeof(*buf));
        for (k = 0; k < nsamples; k++) {
            buf[center+k].re = data0[k];
            buf[center+k].im = data1[k];
        }
        memset(buf + center + nsamples, 0, (s->rdft_len - nsamples - center) * sizeof(*buf));
        av_fft_permute(s->fft_ctx, buf);
        av_fft_calc(s->fft_ctx, buf);

        /* swap re <-> im, do backward fft using forward fft_ctx */
        /* normalize with 0.5f */
        tmp = buf[0].re;
        buf[0].re = 0.5f * kernel_buf[0] * buf[0].im;
        buf[0].im = 0.5f * kernel_buf[0] * tmp;
        for (k = 1; k < s->rdft_len/2; k++) {
            int m = s->rdft_len - k;
            tmp = buf[k].re;
            buf[k].re = 0.5f * kernel_buf[k] * buf[k].im;
            buf[k].im = 0.5f * kernel_buf[k] * tmp;
            tmp = buf[m].re;
            buf[m].re = 0.5f * kernel_buf[k] * buf[m].im;
            buf[m].im = 0.5f * kernel_buf[k] * tmp;
        }
        tmp = buf[k].re;
        buf[k].re = 0.5f * kernel_buf[k] * buf[k].im;
        buf[k].im = 0.5f * kernel_buf[k] * tmp;

        av_fft_permute(s->fft_ctx, buf);
        av_fft_calc(s->fft_ctx, buf);

        for (k = 0; k < s->rdft_len - idx->overlap_idx; k++) {
            buf[k].re += obuf[k].re;
            buf[k].im += obuf[k].im;
        }

        /* swapped re <-> im */
        for (k = 0; k < nsamples; k++) {
            data0[k] = buf[k].im;
            data1[k] = buf[k].re;
        }
        idx->buf_idx = !idx->buf_idx;
        idx->overlap_idx = nsamples;
    } else {
        while (nsamples > s->nsamples_max * 2) {
            fast_convolute2(s, kernel_buf, conv_buf, idx, data0, data1, s->nsamples_max);
            data0 += s->nsamples_max;
            data1 += s->nsamples_max;
            nsamples -= s->nsamples_max;
        }

        fast_convolute2(s, kernel_buf, conv_buf, idx, data0, data1, nsamples/2);
        fast_convolute2(s, kernel_buf, conv_buf, idx, data0 + nsamples/2, data1 + nsamples/2, nsamples - nsamples/2);
    }
}
static void dump_fir(AVFilterContext *ctx, FILE *fp, int ch)
{
    FIREqualizerContext *s = ctx->priv;
    int rate = ctx->inputs[0]->sample_rate;
    int xlog = s->dumpscale == SCALE_LOGLIN || s->dumpscale == SCALE_LOGLOG;
    int ylog = s->dumpscale == SCALE_LINLOG || s->dumpscale == SCALE_LOGLOG;
    int x;
    int center = s->fir_len / 2;
    double delay = s->zero_phase ? 0.0 : (double) center / rate;
    double vx, ya, yb;

    s->analysis_buf[0] *= s->rdft_len/2;
    for (x = 1; x <= center; x++) {
        s->analysis_buf[x] *= s->rdft_len/2;
        s->analysis_buf[s->analysis_rdft_len - x] *= s->rdft_len/2;
    }

    if (ch)
        fprintf(fp, "\n\n");

    fprintf(fp, "# time[%d] (time amplitude)\n", ch);
    for (x = center; x > 0; x--)
        fprintf(fp, "%15.10f %15.10f\n", delay - (double) x / rate, (double) s->analysis_buf[s->analysis_rdft_len - x]);

    for (x = 0; x <= center; x++)
        fprintf(fp, "%15.10f %15.10f\n", delay + (double)x / rate, (double) s->analysis_buf[x]);

    av_rdft_calc(s->analysis_rdft, s->analysis_buf);

    fprintf(fp, "\n\n# freq[%d] (frequency desired_gain actual_gain)\n", ch);
    for (x = 0; x <= s->analysis_rdft_len/2; x++) {
        int i = (x == s->analysis_rdft_len/2) ? 1 : 2 * x;
        vx = (double)x * rate / s->analysis_rdft_len;
        if (xlog)
            vx = log2(0.05*vx);
        ya = s->dump_buf[i];
        yb = s->analysis_buf[i];
        if (ylog) {
            ya = 20.0 * log10(fabs(ya));
            yb = 20.0 * log10(fabs(yb));
        }
        fprintf(fp, "%17.10f %17.10f %17.10f\n", vx, ya, yb);
    }
}
static double entry_func(void *p, double freq, double gain)
{
    AVFilterContext *ctx = p;
    FIREqualizerContext *s = ctx->priv;

    if (s->nb_gain_entry >= NB_GAIN_ENTRY_MAX) {
        av_log(ctx, AV_LOG_ERROR, "entry table overflow.\n");
        s->gain_entry_err = AVERROR(EINVAL);
        return 0;
    }

    if (isnan(freq)) {
        av_log(ctx, AV_LOG_ERROR, "nan frequency (%g, %g).\n", freq, gain);
        s->gain_entry_err = AVERROR(EINVAL);
        return 0;
    }

    if (s->nb_gain_entry > 0 && freq <= s->gain_entry_tbl[s->nb_gain_entry - 1].freq) {
        av_log(ctx, AV_LOG_ERROR, "unsorted frequency (%g, %g).\n", freq, gain);
        s->gain_entry_err = AVERROR(EINVAL);
        return 0;
    }

    s->gain_entry_tbl[s->nb_gain_entry].freq = freq;
    s->gain_entry_tbl[s->nb_gain_entry].gain = gain;
    s->nb_gain_entry++;
    return 0;
}
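/*
 * bsearch() comparator used by the interpolation functions below: the sorted
 * gain_entry table is searched as nb_gain_entry - 1 consecutive intervals,
 * and a key frequency matches the entry whose [freq, next freq] interval
 * contains it.
 */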
static int gain_entry_compare(const void *key, const void *memb)
{
    const double *freq = key;
    const GainEntry *entry = memb;

    if (*freq < entry[0].freq)
        return -1;
    if (*freq > entry[1].freq)
        return 1;
    return 0;
}
static double gain_interpolate_func(void *p, double freq)
{
    AVFilterContext *ctx = p;
    FIREqualizerContext *s = ctx->priv;
    GainEntry *res;
    double d0, d1, d;

    if (isnan(freq))
        return freq;

    if (!s->nb_gain_entry)
        return 0;

    if (freq <= s->gain_entry_tbl[0].freq)
        return s->gain_entry_tbl[0].gain;

    if (freq >= s->gain_entry_tbl[s->nb_gain_entry-1].freq)
        return s->gain_entry_tbl[s->nb_gain_entry-1].gain;

    res = bsearch(&freq, &s->gain_entry_tbl, s->nb_gain_entry - 1, sizeof(*res), gain_entry_compare);
    av_assert0(res);

    d  = res[1].freq - res[0].freq;
    d0 = freq - res[0].freq;
    d1 = res[1].freq - freq;

    if (d0 && d1)
        return (d0 * res[1].gain + d1 * res[0].gain) / d;

    if (d0)
        return res[1].gain;

    return res[0].gain;
}
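/*
 * Smooth alternative to gain_interpolate(), exposed to the gain expression as
 * cubic_interpolate(f): a cubic Hermite segment between the two neighboring
 * table entries. Each endpoint slope is a blend of the two adjacent secant
 * slopes, weighted so that a zero secant on either side forces the slope to
 * zero, which tends to limit overshoot near flat regions of the table.
 */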
static double cubic_interpolate_func(void *p, double freq)
{
    AVFilterContext *ctx = p;
    FIREqualizerContext *s = ctx->priv;
    GainEntry *res;
    double x, x2, x3;
    double a, b, c, d;
    double m0, m1, m2, msum, unit;

    if (!s->nb_gain_entry)
        return 0;

    if (freq <= s->gain_entry_tbl[0].freq)
        return s->gain_entry_tbl[0].gain;

    if (freq >= s->gain_entry_tbl[s->nb_gain_entry-1].freq)
        return s->gain_entry_tbl[s->nb_gain_entry-1].gain;

    res = bsearch(&freq, &s->gain_entry_tbl, s->nb_gain_entry - 1, sizeof(*res), gain_entry_compare);
    av_assert0(res);

    unit = res[1].freq - res[0].freq;
    m0 = res != s->gain_entry_tbl ?
         unit * (res[0].gain - res[-1].gain) / (res[0].freq - res[-1].freq) : 0;
    m1 = res[1].gain - res[0].gain;
    m2 = res != s->gain_entry_tbl + s->nb_gain_entry - 2 ?
         unit * (res[2].gain - res[1].gain) / (res[2].freq - res[1].freq) : 0;

    msum = fabs(m0) + fabs(m1);
    m0 = msum > 0 ? (fabs(m0) * m1 + fabs(m1) * m0) / msum : 0;
    msum = fabs(m1) + fabs(m2);
    m1 = msum > 0 ? (fabs(m1) * m2 + fabs(m2) * m1) / msum : 0;

    d = res[0].gain;
    c = m0;
    b = 3 * res[1].gain - m1 - 2 * c - 3 * d;
    a = res[1].gain - b - c - d;

    x = (freq - res[0].freq) / unit;
    x2 = x * x;
    x3 = x2 * x;

    return a * x3 + b * x2 + c * x + d;
}
static const char *const var_names[] = {
    "f",
    "sr",
    "ch",
    "chid",
    "chs",
    "chlayout",
    NULL
};

enum VarOffset {
    VAR_F,
    VAR_SR,
    VAR_CH,
    VAR_CHID,
    VAR_CHS,
    VAR_CHLAYOUT,
    VAR_NB
};
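/*
 * Build the FIR kernel(s) from the gain expression:
 *   1. Evaluate the gain expression over analysis_rdft_len/2 + 1 frequency
 *      points (variables: f = frequency, sr = sample rate, ch = channel
 *      index, chid = channel id, chs = number of channels, chlayout =
 *      channel layout), converting dB to linear when a logarithmic gain
 *      scale is selected.
 *   2. Inverse RDFT to get the impulse response and apply the selected
 *      window over fir_len taps.
 *   3. Forward RDFT of the windowed, truncated response and repacking of the
 *      spectrum so that kernel_buf[k] is the real gain of bin k used by
 *      fast_convolute()/fast_convolute2().
 * With "multi" enabled this is done once per channel, otherwise a single
 * kernel is shared by all channels.
 */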
static int generate_kernel(AVFilterContext *ctx, const char *gain, const char *gain_entry)
{
    FIREqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const char *gain_entry_func_names[] = { "entry", NULL };
    const char *gain_func_names[] = { "gain_interpolate", "cubic_interpolate", NULL };
    double (*gain_entry_funcs[])(void *, double, double) = { entry_func, NULL };
    double (*gain_funcs[])(void *, double) = { gain_interpolate_func, cubic_interpolate_func, NULL };
    double vars[VAR_NB];
    AVExpr *gain_expr;
    int ret, k, center, ch;
    int xlog = s->scale == SCALE_LOGLIN || s->scale == SCALE_LOGLOG;
    int ylog = s->scale == SCALE_LINLOG || s->scale == SCALE_LOGLOG;
    FILE *dump_fp = NULL;

    s->nb_gain_entry = 0;
    s->gain_entry_err = 0;
    if (gain_entry) {
        double result = 0.0;
        ret = av_expr_parse_and_eval(&result, gain_entry, NULL, NULL, NULL, NULL,
                                     gain_entry_func_names, gain_entry_funcs, ctx, 0, ctx);
        if (ret < 0)
            return ret;
        if (s->gain_entry_err < 0)
            return s->gain_entry_err;
    }

    av_log(ctx, AV_LOG_DEBUG, "nb_gain_entry = %d.\n", s->nb_gain_entry);

    ret = av_expr_parse(&gain_expr, gain, var_names,
                        gain_func_names, gain_funcs, NULL, NULL, 0, ctx);
    if (ret < 0)
        return ret;

    if (s->dumpfile && (!s->dump_buf || !s->analysis_rdft || !(dump_fp = fopen(s->dumpfile, "w"))))
        av_log(ctx, AV_LOG_WARNING, "dumping failed.\n");

    vars[VAR_CHS] = inlink->channels;
    vars[VAR_CHLAYOUT] = inlink->channel_layout;
    vars[VAR_SR] = inlink->sample_rate;
    for (ch = 0; ch < inlink->channels; ch++) {
        float *rdft_buf = s->kernel_tmp_buf + ch * s->rdft_len;
        double result;
        vars[VAR_CH] = ch;
        vars[VAR_CHID] = av_channel_layout_extract_channel(inlink->channel_layout, ch);
        vars[VAR_F] = 0.0;
        if (xlog)
            vars[VAR_F] = log2(0.05 * vars[VAR_F]);
        result = av_expr_eval(gain_expr, vars, ctx);
        s->analysis_buf[0] = ylog ? pow(10.0, 0.05 * result) : result;

        vars[VAR_F] = 0.5 * inlink->sample_rate;
        if (xlog)
            vars[VAR_F] = log2(0.05 * vars[VAR_F]);
        result = av_expr_eval(gain_expr, vars, ctx);
        s->analysis_buf[1] = ylog ? pow(10.0, 0.05 * result) : result;

        for (k = 1; k < s->analysis_rdft_len/2; k++) {
            vars[VAR_F] = k * ((double)inlink->sample_rate /(double)s->analysis_rdft_len);
            if (xlog)
                vars[VAR_F] = log2(0.05 * vars[VAR_F]);
            result = av_expr_eval(gain_expr, vars, ctx);
            s->analysis_buf[2*k] = ylog ? pow(10.0, 0.05 * result) : result;
            s->analysis_buf[2*k+1] = 0.0;
        }

        if (s->dump_buf)
            memcpy(s->dump_buf, s->analysis_buf, s->analysis_rdft_len * sizeof(*s->analysis_buf));

        av_rdft_calc(s->analysis_irdft, s->analysis_buf);
        center = s->fir_len / 2;

        for (k = 0; k <= center; k++) {
            double u = k * (M_PI/center);
            double win;
            switch (s->wfunc) {
            case WFUNC_RECTANGULAR:
                win = 1.0;
                break;
            case WFUNC_HANN:
                win = 0.5 + 0.5 * cos(u);
                break;
            case WFUNC_HAMMING:
                win = 0.53836 + 0.46164 * cos(u);
                break;
            case WFUNC_BLACKMAN:
                win = 0.42 + 0.5 * cos(u) + 0.08 * cos(2*u);
                break;
            case WFUNC_NUTTALL3:
                win = 0.40897 + 0.5 * cos(u) + 0.09103 * cos(2*u);
                break;
            case WFUNC_MNUTTALL3:
                win = 0.4243801 + 0.4973406 * cos(u) + 0.0782793 * cos(2*u);
                break;
            case WFUNC_NUTTALL:
                win = 0.355768 + 0.487396 * cos(u) + 0.144232 * cos(2*u) + 0.012604 * cos(3*u);
                break;
            case WFUNC_BNUTTALL:
                win = 0.3635819 + 0.4891775 * cos(u) + 0.1365995 * cos(2*u) + 0.0106411 * cos(3*u);
                break;
            case WFUNC_BHARRIS:
                win = 0.35875 + 0.48829 * cos(u) + 0.14128 * cos(2*u) + 0.01168 * cos(3*u);
                break;
            case WFUNC_TUKEY:
                win = (u <= 0.5 * M_PI) ? 1.0 : (0.5 + 0.5 * cos(2*u - M_PI));
                break;
            default:
                av_assert0(0);
            }
            s->analysis_buf[k] *= (2.0/s->analysis_rdft_len) * (2.0/s->rdft_len) * win;
            if (k)
                s->analysis_buf[s->analysis_rdft_len - k] = s->analysis_buf[k];
        }

        memset(s->analysis_buf + center + 1, 0, (s->analysis_rdft_len - s->fir_len) * sizeof(*s->analysis_buf));
        memcpy(rdft_buf, s->analysis_buf, s->rdft_len/2 * sizeof(*s->analysis_buf));
        memcpy(rdft_buf + s->rdft_len/2, s->analysis_buf + s->analysis_rdft_len - s->rdft_len/2, s->rdft_len/2 * sizeof(*s->analysis_buf));
        av_rdft_calc(s->rdft, rdft_buf);

        for (k = 0; k < s->rdft_len; k++) {
            if (isnan(rdft_buf[k]) || isinf(rdft_buf[k])) {
                av_log(ctx, AV_LOG_ERROR, "filter kernel contains nan or infinity.\n");
                av_expr_free(gain_expr);
                if (dump_fp)
                    fclose(dump_fp);
                return AVERROR(EINVAL);
            }
        }

        rdft_buf[s->rdft_len-1] = rdft_buf[1];
        for (k = 0; k < s->rdft_len/2; k++)
            rdft_buf[k] = rdft_buf[2*k];
        rdft_buf[s->rdft_len/2] = rdft_buf[s->rdft_len-1];

        if (dump_fp)
            dump_fir(ctx, dump_fp, ch);

        if (!s->multi)
            break;
    }

    memcpy(s->kernel_buf, s->kernel_tmp_buf, (s->multi ? inlink->channels : 1) * s->rdft_len * sizeof(*s->kernel_buf));
    av_expr_free(gain_expr);
    if (dump_fp)
        fclose(dump_fp);
    return 0;
}
#define SELECT_GAIN(s) (s->gain_cmd ? s->gain_cmd : s->gain)
#define SELECT_GAIN_ENTRY(s) (s->gain_entry_cmd ? s->gain_entry_cmd : s->gain_entry)
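/*
 * Choose transform sizes for the current input: rdft_len is the smallest
 * power of two for which at least fir_len/2 input samples fit alongside the
 * FIR taps (nsamples_max * 2 >= fir_len), and analysis_rdft_len is the
 * smallest power of two giving a frequency resolution of at most "accuracy"
 * Hz (sample_rate <= accuracy * analysis_rdft_len). fir_len itself is
 * derived from "delay": 2 * sample_rate * delay + 1 taps, with a minimum of 3.
 */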
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    FIREqualizerContext *s = ctx->priv;
    int rdft_bits;

    common_uninit(s);

    s->next_pts = 0;
    s->frame_nsamples_max = 0;

    s->fir_len = FFMAX(2 * (int)(inlink->sample_rate * s->delay) + 1, 3);
    s->remaining = s->fir_len - 1;

    for (rdft_bits = RDFT_BITS_MIN; rdft_bits <= RDFT_BITS_MAX; rdft_bits++) {
        s->rdft_len = 1 << rdft_bits;
        s->nsamples_max = s->rdft_len - s->fir_len + 1;
        if (s->nsamples_max * 2 >= s->fir_len)
            break;
    }

    if (rdft_bits > RDFT_BITS_MAX) {
        av_log(ctx, AV_LOG_ERROR, "too large delay, please decrease it.\n");
        return AVERROR(EINVAL);
    }

    if (!(s->rdft = av_rdft_init(rdft_bits, DFT_R2C)) || !(s->irdft = av_rdft_init(rdft_bits, IDFT_C2R)))
        return AVERROR(ENOMEM);

    if (s->fft2 && !s->multi && inlink->channels > 1 && !(s->fft_ctx = av_fft_init(rdft_bits, 0)))
        return AVERROR(ENOMEM);

    for ( ; rdft_bits <= RDFT_BITS_MAX; rdft_bits++) {
        s->analysis_rdft_len = 1 << rdft_bits;
        if (inlink->sample_rate <= s->accuracy * s->analysis_rdft_len)
            break;
    }

    if (rdft_bits > RDFT_BITS_MAX) {
        av_log(ctx, AV_LOG_ERROR, "too small accuracy, please increase it.\n");
        return AVERROR(EINVAL);
    }

    if (!(s->analysis_irdft = av_rdft_init(rdft_bits, IDFT_C2R)))
        return AVERROR(ENOMEM);

    if (s->dumpfile) {
        s->analysis_rdft = av_rdft_init(rdft_bits, DFT_R2C);
        s->dump_buf = av_malloc_array(s->analysis_rdft_len, sizeof(*s->dump_buf));
    }

    s->analysis_buf = av_malloc_array(s->analysis_rdft_len, sizeof(*s->analysis_buf));
    s->kernel_tmp_buf = av_malloc_array(s->rdft_len * (s->multi ? inlink->channels : 1), sizeof(*s->kernel_tmp_buf));
    s->kernel_buf = av_malloc_array(s->rdft_len * (s->multi ? inlink->channels : 1), sizeof(*s->kernel_buf));
    s->conv_buf = av_calloc(2 * s->rdft_len * inlink->channels, sizeof(*s->conv_buf));
    s->conv_idx = av_calloc(inlink->channels, sizeof(*s->conv_idx));
    if (!s->analysis_buf || !s->kernel_tmp_buf || !s->kernel_buf || !s->conv_buf || !s->conv_idx)
        return AVERROR(ENOMEM);

    av_log(ctx, AV_LOG_DEBUG, "sample_rate = %d, channels = %d, analysis_rdft_len = %d, rdft_len = %d, fir_len = %d, nsamples_max = %d.\n",
           inlink->sample_rate, inlink->channels, s->analysis_rdft_len, s->rdft_len, s->fir_len, s->nsamples_max);

    if (s->fixed)
        inlink->min_samples = inlink->max_samples = inlink->partial_buf_size = s->nsamples_max;

    return generate_kernel(ctx, SELECT_GAIN(s), SELECT_GAIN_ENTRY(s));
}
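/*
 * Per-frame processing: when the 2-channel FFT context is available, channel
 * pairs are filtered together with fast_convolute2(); any remaining (or all)
 * channels go through fast_convolute(). In zero_phase mode the output pts is
 * shifted back by fir_len/2 samples to compensate for the kernel's delay.
 */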
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    FIREqualizerContext *s = ctx->priv;
    int ch;

    for (ch = 0; ch + 1 < inlink->channels && s->fft_ctx; ch += 2) {
        fast_convolute2(s, s->kernel_buf, (FFTComplex *)(s->conv_buf + 2 * ch * s->rdft_len),
                        s->conv_idx + ch, (float *) frame->extended_data[ch],
                        (float *) frame->extended_data[ch+1], frame->nb_samples);
    }

    for ( ; ch < inlink->channels; ch++) {
        fast_convolute(s, s->kernel_buf + (s->multi ? ch * s->rdft_len : 0),
                       s->conv_buf + 2 * ch * s->rdft_len, s->conv_idx + ch,
                       (float *) frame->extended_data[ch], frame->nb_samples);
    }

    s->next_pts = AV_NOPTS_VALUE;
    if (frame->pts != AV_NOPTS_VALUE) {
        s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, av_make_q(1, inlink->sample_rate), inlink->time_base);
        if (s->zero_phase)
            frame->pts -= av_rescale_q(s->fir_len/2, av_make_q(1, inlink->sample_rate), inlink->time_base);
    }
    s->frame_nsamples_max = FFMAX(s->frame_nsamples_max, frame->nb_samples);
    return ff_filter_frame(ctx->outputs[0], frame);
}
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FIREqualizerContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);
    if (ret == AVERROR_EOF && s->remaining > 0 && s->frame_nsamples_max > 0) {
        AVFrame *frame = ff_get_audio_buffer(outlink, FFMIN(s->remaining, s->frame_nsamples_max));

        if (!frame)
            return AVERROR(ENOMEM);

        av_samples_set_silence(frame->extended_data, 0, frame->nb_samples, outlink->channels, frame->format);
        frame->pts = s->next_pts;
        s->remaining -= frame->nb_samples;
        ret = filter_frame(ctx->inputs[0], frame);
    }

    return ret;
}
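/*
 * Runtime command handling: the "gain" and "gain_entry" options can be
 * updated while the filter is running (e.g. via the sendcmd/asendcmd
 * filters). The kernel is rebuilt only when the new string differs from the
 * currently active one; on failure the previous kernel string is kept.
 */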
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    FIREqualizerContext *s = ctx->priv;
    int ret = AVERROR(ENOSYS);

    if (!strcmp(cmd, "gain")) {
        char *gain_cmd;

        if (SELECT_GAIN(s) && !strcmp(SELECT_GAIN(s), args)) {
            av_log(ctx, AV_LOG_DEBUG, "equal gain, do not rebuild.\n");
            return 0;
        }

        gain_cmd = av_strdup(args);
        if (!gain_cmd)
            return AVERROR(ENOMEM);

        ret = generate_kernel(ctx, gain_cmd, SELECT_GAIN_ENTRY(s));
        if (ret >= 0) {
            av_freep(&s->gain_cmd);
            s->gain_cmd = gain_cmd;
        } else {
            av_freep(&gain_cmd);
        }
    } else if (!strcmp(cmd, "gain_entry")) {
        char *gain_entry_cmd;

        if (SELECT_GAIN_ENTRY(s) && !strcmp(SELECT_GAIN_ENTRY(s), args)) {
            av_log(ctx, AV_LOG_DEBUG, "equal gain_entry, do not rebuild.\n");
            return 0;
        }

        gain_entry_cmd = av_strdup(args);
        if (!gain_entry_cmd)
            return AVERROR(ENOMEM);

        ret = generate_kernel(ctx, SELECT_GAIN(s), gain_entry_cmd);
        if (ret >= 0) {
            av_freep(&s->gain_entry_cmd);
            s->gain_entry_cmd = gain_entry_cmd;
        } else {
            av_freep(&gain_entry_cmd);
        }
    }

    return ret;
}
static const AVFilterPad firequalizer_inputs[] = {
    {
        .name           = "default",
        .config_props   = config_input,
        .filter_frame   = filter_frame,
        .type           = AVMEDIA_TYPE_AUDIO,
        .needs_writable = 1,
    },
    { NULL }
};

static const AVFilterPad firequalizer_outputs[] = {
    {
        .name          = "default",
        .request_frame = request_frame,
        .type          = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_firequalizer = {
    .name            = "firequalizer",
    .description     = NULL_IF_CONFIG_SMALL("Finite Impulse Response Equalizer."),
    .uninit          = uninit,
    .query_formats   = query_formats,
    .process_command = process_command,
    .priv_size       = sizeof(FIREqualizerContext),
    .inputs          = firequalizer_inputs,
    .outputs         = firequalizer_outputs,
    .priv_class      = &firequalizer_class,
};