You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

757 lines
26KB

  1. /*
  2. * Copyright (c) 2016 Muhammad Faiz <mfcc64@gmail.com>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include "libavutil/opt.h"
  21. #include "libavutil/eval.h"
  22. #include "libavutil/avassert.h"
  23. #include "libavcodec/avfft.h"
  24. #include "avfilter.h"
  25. #include "internal.h"
  26. #include "audio.h"
  27. #define RDFT_BITS_MIN 4
  28. #define RDFT_BITS_MAX 16
/* Window functions that can be applied to the FIR kernel (wfunc option). */
enum WindowFunc {
    WFUNC_RECTANGULAR,
    WFUNC_HANN,
    WFUNC_HAMMING,
    WFUNC_BLACKMAN,
    WFUNC_NUTTALL3,     /* 3-term Nuttall */
    WFUNC_MNUTTALL3,    /* minimum 3-term Nuttall */
    WFUNC_NUTTALL,
    WFUNC_BNUTTALL,     /* Blackman-Nuttall */
    WFUNC_BHARRIS,      /* Blackman-Harris */
    WFUNC_TUKEY,
    NB_WFUNC            /* number of window functions, keep last */
};
/* Frequency/gain axis scaling used when evaluating and dumping the gain curve. */
enum Scale {
    SCALE_LINLIN,   /* linear frequency,      linear gain      */
    SCALE_LINLOG,   /* linear frequency,      logarithmic gain */
    SCALE_LOGLIN,   /* logarithmic frequency, linear gain      */
    SCALE_LOGLOG,   /* logarithmic frequency, logarithmic gain */
    NB_SCALE        /* number of scale modes, keep last */
};
/* Maximum number of (freq, gain) control points accepted by entry_func(). */
#define NB_GAIN_ENTRY_MAX 4096

/* One control point of the gain curve, appended via the entry() expression. */
typedef struct {
    double freq;    /* frequency in Hz; table is kept sorted ascending */
    double gain;    /* gain value at that frequency */
} GainEntry;
/* Per-channel state of the overlap-add convolution in fast_convolute(). */
typedef struct {
    int buf_idx;        /* which half of conv_buf holds the current block */
    int overlap_idx;    /* number of tail samples carried over from the previous block */
} OverlapIndex;
typedef struct {
    const AVClass *class;

    RDFTContext *analysis_rdft;     /* forward transform for dumping the actual response (allocated only with dumpfile) */
    RDFTContext *analysis_irdft;    /* inverse transform: sampled gain curve -> impulse response */
    RDFTContext *rdft;              /* forward transform for convolution blocks */
    RDFTContext *irdft;             /* inverse transform for convolution blocks */
    int analysis_rdft_len;          /* power of two, chosen from the accuracy option */
    int rdft_len;                   /* power of two, chosen from the delay option */

    float *analysis_buf;            /* analysis_rdft_len floats of kernel-design scratch */
    float *dump_buf;                /* copy of the desired response, for dumping (only with dumpfile) */
    float *kernel_tmp_buf;          /* kernel scratch, rdft_len floats per kernel */
    float *kernel_buf;              /* frequency-domain kernel(s), rdft_len floats per kernel */
    float *conv_buf;                /* overlap-add buffers, 2 * rdft_len floats per channel */
    OverlapIndex *conv_idx;         /* per-channel overlap-add state */
    int fir_len;                    /* number of FIR taps (odd, >= 3) */
    int nsamples_max;               /* max samples per convolution block: rdft_len - fir_len + 1 */
    int64_t next_pts;               /* pts for the silence frames flushed at EOF */
    int frame_nsamples_max;         /* largest frame seen, caps the flush frame size */
    int remaining;                  /* samples of filter tail still to flush (starts at fir_len - 1) */

    /* strings installed via process_command(); when non-NULL they override
     * the corresponding option (see SELECT_GAIN / SELECT_GAIN_ENTRY) */
    char *gain_cmd;
    char *gain_entry_cmd;

    /* options */
    const char *gain;               /* gain curve expression */
    const char *gain_entry;         /* gain entry list expression */
    double delay;                   /* desired filter delay in seconds */
    double accuracy;                /* desired frequency resolution in Hz */
    int wfunc;                      /* enum WindowFunc */
    int fixed;                      /* force fixed frame size (nsamples_max) */
    int multi;                      /* build an independent kernel per channel */
    int zero_phase;                 /* compensate the filter delay via a pts shift */
    int scale;                      /* enum Scale used when evaluating the gain expression */
    char *dumpfile;                 /* optional path to dump the filter response */
    int dumpscale;                  /* enum Scale used when dumping */

    int nb_gain_entry;              /* number of valid entries in gain_entry_tbl */
    int gain_entry_err;             /* sticky error recorded by entry_func() */
    GainEntry gain_entry_tbl[NB_GAIN_ENTRY_MAX];    /* control points, sorted by freq */
} FIREqualizerContext;
#define OFFSET(x) offsetof(FIREqualizerContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* AVOption table; named constants below belong to the "wfunc" and "scale" units. */
static const AVOption firequalizer_options[] = {
    { "gain", "set gain curve", OFFSET(gain), AV_OPT_TYPE_STRING, { .str = "gain_interpolate(f)" }, 0, 0, FLAGS },
    { "gain_entry", "set gain entry", OFFSET(gain_entry), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "delay", "set delay", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.0, 1e10, FLAGS },
    { "accuracy", "set accuracy", OFFSET(accuracy), AV_OPT_TYPE_DOUBLE, { .dbl = 5.0 }, 0.0, 1e10, FLAGS },
    { "wfunc", "set window function", OFFSET(wfunc), AV_OPT_TYPE_INT, { .i64 = WFUNC_HANN }, 0, NB_WFUNC-1, FLAGS, "wfunc" },
    { "rectangular", "rectangular window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_RECTANGULAR }, 0, 0, FLAGS, "wfunc" },
    { "hann", "hann window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_HANN }, 0, 0, FLAGS, "wfunc" },
    { "hamming", "hamming window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_HAMMING }, 0, 0, FLAGS, "wfunc" },
    { "blackman", "blackman window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BLACKMAN }, 0, 0, FLAGS, "wfunc" },
    { "nuttall3", "3-term nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_NUTTALL3 }, 0, 0, FLAGS, "wfunc" },
    { "mnuttall3", "minimum 3-term nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_MNUTTALL3 }, 0, 0, FLAGS, "wfunc" },
    { "nuttall", "nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_NUTTALL }, 0, 0, FLAGS, "wfunc" },
    { "bnuttall", "blackman-nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BNUTTALL }, 0, 0, FLAGS, "wfunc" },
    { "bharris", "blackman-harris window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BHARRIS }, 0, 0, FLAGS, "wfunc" },
    { "tukey", "tukey window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_TUKEY }, 0, 0, FLAGS, "wfunc" },
    { "fixed", "set fixed frame samples", OFFSET(fixed), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "multi", "set multi channels mode", OFFSET(multi), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "zero_phase", "set zero phase mode", OFFSET(zero_phase), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "scale", "set gain scale", OFFSET(scale), AV_OPT_TYPE_INT, { .i64 = SCALE_LINLOG }, 0, NB_SCALE-1, FLAGS, "scale" },
    { "linlin", "linear-freq linear-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LINLIN }, 0, 0, FLAGS, "scale" },
    { "linlog", "linear-freq logarithmic-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LINLOG }, 0, 0, FLAGS, "scale" },
    { "loglin", "logarithmic-freq linear-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LOGLIN }, 0, 0, FLAGS, "scale" },
    { "loglog", "logarithmic-freq logarithmic-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LOGLOG }, 0, 0, FLAGS, "scale" },
    { "dumpfile", "set dump file", OFFSET(dumpfile), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "dumpscale", "set dump scale", OFFSET(dumpscale), AV_OPT_TYPE_INT, { .i64 = SCALE_LINLOG }, 0, NB_SCALE-1, FLAGS, "scale" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(firequalizer);
  125. static void common_uninit(FIREqualizerContext *s)
  126. {
  127. av_rdft_end(s->analysis_rdft);
  128. av_rdft_end(s->analysis_irdft);
  129. av_rdft_end(s->rdft);
  130. av_rdft_end(s->irdft);
  131. s->analysis_rdft = s->analysis_irdft = s->rdft = s->irdft = NULL;
  132. av_freep(&s->analysis_buf);
  133. av_freep(&s->dump_buf);
  134. av_freep(&s->kernel_tmp_buf);
  135. av_freep(&s->kernel_buf);
  136. av_freep(&s->conv_buf);
  137. av_freep(&s->conv_idx);
  138. }
  139. static av_cold void uninit(AVFilterContext *ctx)
  140. {
  141. FIREqualizerContext *s = ctx->priv;
  142. common_uninit(s);
  143. av_freep(&s->gain_cmd);
  144. av_freep(&s->gain_entry_cmd);
  145. }
  146. static int query_formats(AVFilterContext *ctx)
  147. {
  148. AVFilterChannelLayouts *layouts;
  149. AVFilterFormats *formats;
  150. static const enum AVSampleFormat sample_fmts[] = {
  151. AV_SAMPLE_FMT_FLTP,
  152. AV_SAMPLE_FMT_NONE
  153. };
  154. int ret;
  155. layouts = ff_all_channel_counts();
  156. if (!layouts)
  157. return AVERROR(ENOMEM);
  158. ret = ff_set_common_channel_layouts(ctx, layouts);
  159. if (ret < 0)
  160. return ret;
  161. formats = ff_make_format_list(sample_fmts);
  162. if (!formats)
  163. return AVERROR(ENOMEM);
  164. ret = ff_set_common_formats(ctx, formats);
  165. if (ret < 0)
  166. return ret;
  167. formats = ff_all_samplerates();
  168. if (!formats)
  169. return AVERROR(ENOMEM);
  170. return ff_set_common_samplerates(ctx, formats);
  171. }
/**
 * Convolve data (nsamples floats, modified in place) with the
 * frequency-domain kernel kernel_buf using FFT overlap-add.
 * conv_buf holds 2 * rdft_len floats (two ping-pong halves) and idx the
 * per-channel overlap-add state. Inputs larger than nsamples_max are
 * split recursively.
 */
static void fast_convolute(FIREqualizerContext *s, const float *kernel_buf, float *conv_buf,
                           OverlapIndex *idx, float *data, int nsamples)
{
    if (nsamples <= s->nsamples_max) {
        /* current block goes into one half of conv_buf; the tail of the
         * previous block lives at overlap_idx in the other half */
        float *buf = conv_buf + idx->buf_idx * s->rdft_len;
        float *obuf = conv_buf + !idx->buf_idx * s->rdft_len + idx->overlap_idx;
        int k;

        /* zero-pad the input block up to rdft_len and transform it */
        memcpy(buf, data, nsamples * sizeof(*data));
        memset(buf + nsamples, 0, (s->rdft_len - nsamples) * sizeof(*data));
        av_rdft_calc(s->rdft, buf);
        /* pointwise complex multiply with the kernel; indices 0 and 1 hold
         * the real-valued DC and Nyquist bins in FFmpeg rdft packing */
        buf[0] *= kernel_buf[0];
        buf[1] *= kernel_buf[1];
        for (k = 2; k < s->rdft_len; k += 2) {
            float re, im;
            re = buf[k] * kernel_buf[k] - buf[k+1] * kernel_buf[k+1];
            im = buf[k] * kernel_buf[k+1] + buf[k+1] * kernel_buf[k];
            buf[k] = re;
            buf[k+1] = im;
        }
        av_rdft_calc(s->irdft, buf);
        /* overlap-add the tail left over from the previous block */
        for (k = 0; k < s->rdft_len - idx->overlap_idx; k++)
            buf[k] += obuf[k];
        memcpy(data, buf, nsamples * sizeof(*data));
        /* swap halves; the remainder of buf beyond nsamples is next block's tail */
        idx->buf_idx = !idx->buf_idx;
        idx->overlap_idx = nsamples;
    } else {
        /* peel off full-size blocks, then split the rest into two halves so
         * both recursive calls satisfy nsamples <= nsamples_max */
        while (nsamples > s->nsamples_max * 2) {
            fast_convolute(s, kernel_buf, conv_buf, idx, data, s->nsamples_max);
            data += s->nsamples_max;
            nsamples -= s->nsamples_max;
        }
        fast_convolute(s, kernel_buf, conv_buf, idx, data, nsamples/2);
        fast_convolute(s, kernel_buf, conv_buf, idx, data + nsamples/2, nsamples - nsamples/2);
    }
}
/**
 * Write the impulse response and frequency response of channel ch's kernel
 * to fp as gnuplot-style text. Called from generate_kernel() while
 * s->analysis_buf still holds the windowed impulse response; the contents
 * of s->analysis_buf are destroyed in the process.
 */
static void dump_fir(AVFilterContext *ctx, FILE *fp, int ch)
{
    FIREqualizerContext *s = ctx->priv;
    int rate = ctx->inputs[0]->sample_rate;
    int xlog = s->dumpscale == SCALE_LOGLIN || s->dumpscale == SCALE_LOGLOG;
    int ylog = s->dumpscale == SCALE_LINLOG || s->dumpscale == SCALE_LOGLOG;
    int x;
    int center = s->fir_len / 2;
    double delay = s->zero_phase ? 0.0 : (double) center / rate;
    double vx, ya, yb;

    /* undo the (2.0/rdft_len) scaling applied to the taps in generate_kernel() */
    s->analysis_buf[0] *= s->rdft_len/2;
    for (x = 1; x <= center; x++) {
        s->analysis_buf[x] *= s->rdft_len/2;
        s->analysis_buf[s->analysis_rdft_len - x] *= s->rdft_len/2;
    }

    /* blank lines separate per-channel data sets for gnuplot */
    if (ch)
        fprintf(fp, "\n\n");

    fprintf(fp, "# time[%d] (time amplitude)\n", ch);
    /* taps are stored wrapped: negative-time half at the end of analysis_buf */
    for (x = center; x > 0; x--)
        fprintf(fp, "%15.10f %15.10f\n", delay - (double) x / rate, (double) s->analysis_buf[s->analysis_rdft_len - x]);
    for (x = 0; x <= center; x++)
        fprintf(fp, "%15.10f %15.10f\n", delay + (double)x / rate , (double) s->analysis_buf[x]);

    /* transform the (rescaled) impulse response to get the actual gain */
    av_rdft_calc(s->analysis_rdft, s->analysis_buf);

    fprintf(fp, "\n\n# freq[%d] (frequency desired_gain actual_gain)\n", ch);
    for (x = 0; x <= s->analysis_rdft_len/2; x++) {
        /* Nyquist bin is stored at index 1 in rdft packing */
        int i = (x == s->analysis_rdft_len/2) ? 1 : 2 * x;
        vx = (double)x * rate / s->analysis_rdft_len;
        if (xlog)
            vx = log2(0.05*vx);
        ya = s->dump_buf[i];    /* desired response saved before the irdft */
        yb = s->analysis_buf[i];
        if (ylog) {
            /* convert to dB */
            ya = 20.0 * log10(fabs(ya));
            yb = 20.0 * log10(fabs(yb));
        }
        fprintf(fp, "%17.10f %17.10f %17.10f\n", vx, ya, yb);
    }
}
  245. static double entry_func(void *p, double freq, double gain)
  246. {
  247. AVFilterContext *ctx = p;
  248. FIREqualizerContext *s = ctx->priv;
  249. if (s->nb_gain_entry >= NB_GAIN_ENTRY_MAX) {
  250. av_log(ctx, AV_LOG_ERROR, "entry table overflow.\n");
  251. s->gain_entry_err = AVERROR(EINVAL);
  252. return 0;
  253. }
  254. if (isnan(freq)) {
  255. av_log(ctx, AV_LOG_ERROR, "nan frequency (%g, %g).\n", freq, gain);
  256. s->gain_entry_err = AVERROR(EINVAL);
  257. return 0;
  258. }
  259. if (s->nb_gain_entry > 0 && freq <= s->gain_entry_tbl[s->nb_gain_entry - 1].freq) {
  260. av_log(ctx, AV_LOG_ERROR, "unsorted frequency (%g, %g).\n", freq, gain);
  261. s->gain_entry_err = AVERROR(EINVAL);
  262. return 0;
  263. }
  264. s->gain_entry_tbl[s->nb_gain_entry].freq = freq;
  265. s->gain_entry_tbl[s->nb_gain_entry].gain = gain;
  266. s->nb_gain_entry++;
  267. return 0;
  268. }
  269. static int gain_entry_compare(const void *key, const void *memb)
  270. {
  271. const double *freq = key;
  272. const GainEntry *entry = memb;
  273. if (*freq < entry[0].freq)
  274. return -1;
  275. if (*freq > entry[1].freq)
  276. return 1;
  277. return 0;
  278. }
  279. static double gain_interpolate_func(void *p, double freq)
  280. {
  281. AVFilterContext *ctx = p;
  282. FIREqualizerContext *s = ctx->priv;
  283. GainEntry *res;
  284. double d0, d1, d;
  285. if (isnan(freq))
  286. return freq;
  287. if (!s->nb_gain_entry)
  288. return 0;
  289. if (freq <= s->gain_entry_tbl[0].freq)
  290. return s->gain_entry_tbl[0].gain;
  291. if (freq >= s->gain_entry_tbl[s->nb_gain_entry-1].freq)
  292. return s->gain_entry_tbl[s->nb_gain_entry-1].gain;
  293. res = bsearch(&freq, &s->gain_entry_tbl, s->nb_gain_entry - 1, sizeof(*res), gain_entry_compare);
  294. av_assert0(res);
  295. d = res[1].freq - res[0].freq;
  296. d0 = freq - res[0].freq;
  297. d1 = res[1].freq - freq;
  298. if (d0 && d1)
  299. return (d0 * res[1].gain + d1 * res[0].gain) / d;
  300. if (d0)
  301. return res[1].gain;
  302. return res[0].gain;
  303. }
/**
 * Expression function "cubic_interpolate(f)": cubic interpolation of the
 * gain entry table with endpoint slopes blended from the neighboring
 * secant slopes. Frequencies outside the table are clamped to the
 * first/last entry's gain; an empty table yields 0.
 */
static double cubic_interpolate_func(void *p, double freq)
{
    AVFilterContext *ctx = p;
    FIREqualizerContext *s = ctx->priv;
    GainEntry *res;
    double x, x2, x3;
    double a, b, c, d;
    double m0, m1, m2, msum, unit;

    if (!s->nb_gain_entry)
        return 0;
    /* NOTE(review): unlike gain_interpolate_func() there is no isnan(freq)
     * guard here — confirm NaN input cannot reach this function. */
    if (freq <= s->gain_entry_tbl[0].freq)
        return s->gain_entry_tbl[0].gain;
    if (freq >= s->gain_entry_tbl[s->nb_gain_entry-1].freq)
        return s->gain_entry_tbl[s->nb_gain_entry-1].gain;

    /* find the interval [res[0].freq, res[1].freq] containing freq */
    res = bsearch(&freq, &s->gain_entry_tbl, s->nb_gain_entry - 1, sizeof(*res), gain_entry_compare);
    av_assert0(res);

    unit = res[1].freq - res[0].freq;
    /* secant slopes of the previous, current and next intervals, all
     * normalized to the current interval's width; 0 at the table edges */
    m0 = res != s->gain_entry_tbl ?
         unit * (res[0].gain - res[-1].gain) / (res[0].freq - res[-1].freq) : 0;
    m1 = res[1].gain - res[0].gain;
    m2 = res != s->gain_entry_tbl + s->nb_gain_entry - 2 ?
         unit * (res[2].gain - res[1].gain) / (res[2].freq - res[1].freq) : 0;
    /* blend adjacent slopes, weighted by their magnitudes, to get the
     * endpoint derivatives of the cubic */
    msum = fabs(m0) + fabs(m1);
    m0 = msum > 0 ? (fabs(m0) * m1 + fabs(m1) * m0) / msum : 0;
    msum = fabs(m1) + fabs(m2);
    m1 = msum > 0 ? (fabs(m1) * m2 + fabs(m2) * m1) / msum : 0;

    /* cubic a*x^3 + b*x^2 + c*x + d on x in [0,1] matching the endpoint
     * gains and the blended slopes m0 (left) and m1 (right) */
    d = res[0].gain;
    c = m0;
    b = 3 * res[1].gain - m1 - 2 * c - 3 * d;
    a = res[1].gain - b - c - d;

    x = (freq - res[0].freq) / unit;
    x2 = x * x;
    x3 = x2 * x;
    return a * x3 + b * x2 + c * x + d;
}
/* Variable names available in the gain expression; the order must stay in
 * sync with enum VarOffset below. */
static const char *const var_names[] = {
    "f",        /* frequency being evaluated (log2-scaled in log-freq modes) */
    "sr",       /* sample rate */
    "ch",       /* channel index */
    "chid",     /* channel id extracted from the channel layout */
    "chs",      /* number of channels */
    "chlayout", /* channel layout */
    NULL
};

/* Indices into the vars[] array passed to av_expr_eval(); must match var_names. */
enum VarOffset {
    VAR_F,
    VAR_SR,
    VAR_CH,
    VAR_CHID,
    VAR_CHS,
    VAR_CHLAYOUT,
    VAR_NB
};
/**
 * (Re)build the FIR kernel(s) from the gain expression and gain_entry
 * control points, and store their frequency-domain form in s->kernel_buf
 * (one kernel per channel in multi mode, a single shared kernel otherwise).
 * Also dumps the responses to s->dumpfile when requested.
 * Returns 0 on success or a negative AVERROR code.
 */
static int generate_kernel(AVFilterContext *ctx, const char *gain, const char *gain_entry)
{
    FIREqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const char *gain_entry_func_names[] = { "entry", NULL };
    const char *gain_func_names[] = { "gain_interpolate", "cubic_interpolate", NULL };
    double (*gain_entry_funcs[])(void *, double, double) = { entry_func, NULL };
    double (*gain_funcs[])(void *, double) = { gain_interpolate_func, cubic_interpolate_func, NULL };
    double vars[VAR_NB];
    AVExpr *gain_expr;
    int ret, k, center, ch;
    int xlog = s->scale == SCALE_LOGLIN || s->scale == SCALE_LOGLOG;
    int ylog = s->scale == SCALE_LINLOG || s->scale == SCALE_LOGLOG;
    FILE *dump_fp = NULL;

    /* rebuild the control-point table: evaluating the gain_entry expression
     * makes entry_func() append each (freq, gain) pair */
    s->nb_gain_entry = 0;
    s->gain_entry_err = 0;
    if (gain_entry) {
        double result = 0.0;
        ret = av_expr_parse_and_eval(&result, gain_entry, NULL, NULL, NULL, NULL,
                                     gain_entry_func_names, gain_entry_funcs, ctx, 0, ctx);
        if (ret < 0)
            return ret;
        if (s->gain_entry_err < 0)
            return s->gain_entry_err;
    }
    av_log(ctx, AV_LOG_DEBUG, "nb_gain_entry = %d.\n", s->nb_gain_entry);

    ret = av_expr_parse(&gain_expr, gain, var_names,
                        gain_func_names, gain_funcs, NULL, NULL, 0, ctx);
    if (ret < 0)
        return ret;

    /* dumping is best-effort: it needs the optional dump_buf/analysis_rdft
     * from config_input() and a writable dump file */
    if (s->dumpfile && (!s->dump_buf || !s->analysis_rdft || !(dump_fp = fopen(s->dumpfile, "w"))))
        av_log(ctx, AV_LOG_WARNING, "dumping failed.\n");

    vars[VAR_CHS] = inlink->channels;
    vars[VAR_CHLAYOUT] = inlink->channel_layout;
    vars[VAR_SR] = inlink->sample_rate;
    for (ch = 0; ch < inlink->channels; ch++) {
        float *rdft_buf = s->kernel_tmp_buf + ch * s->rdft_len;
        double result;
        vars[VAR_CH] = ch;
        vars[VAR_CHID] = av_channel_layout_extract_channel(inlink->channel_layout, ch);
        /* sample the desired gain across the analysis spectrum; in rdft
         * packing analysis_buf[0] is DC and analysis_buf[1] is Nyquist.
         * NOTE(review): with xlog the DC sample evaluates log2(0) = -inf;
         * the gain expression is expected to cope with that. */
        vars[VAR_F] = 0.0;
        if (xlog)
            vars[VAR_F] = log2(0.05 * vars[VAR_F]);
        result = av_expr_eval(gain_expr, vars, ctx);
        s->analysis_buf[0] = ylog ? pow(10.0, 0.05 * result) : result;
        vars[VAR_F] = 0.5 * inlink->sample_rate;
        if (xlog)
            vars[VAR_F] = log2(0.05 * vars[VAR_F]);
        result = av_expr_eval(gain_expr, vars, ctx);
        s->analysis_buf[1] = ylog ? pow(10.0, 0.05 * result) : result;

        for (k = 1; k < s->analysis_rdft_len/2; k++) {
            vars[VAR_F] = k * ((double)inlink->sample_rate /(double)s->analysis_rdft_len);
            if (xlog)
                vars[VAR_F] = log2(0.05 * vars[VAR_F]);
            result = av_expr_eval(gain_expr, vars, ctx);
            s->analysis_buf[2*k] = ylog ? pow(10.0, 0.05 * result) : result;
            /* zero phase: imaginary parts are zero */
            s->analysis_buf[2*k+1] = 0.0;
        }

        /* keep a copy of the desired response for dump_fir() */
        if (s->dump_buf)
            memcpy(s->dump_buf, s->analysis_buf, s->analysis_rdft_len * sizeof(*s->analysis_buf));

        /* desired response -> impulse response */
        av_rdft_calc(s->analysis_irdft, s->analysis_buf);
        center = s->fir_len / 2;

        /* window and normalize the taps; the response is symmetric so only
         * indices 0..center are computed and mirrored */
        for (k = 0; k <= center; k++) {
            double u = k * (M_PI/center);
            double win;
            switch (s->wfunc) {
            case WFUNC_RECTANGULAR:
                win = 1.0;
                break;
            case WFUNC_HANN:
                win = 0.5 + 0.5 * cos(u);
                break;
            case WFUNC_HAMMING:
                win = 0.53836 + 0.46164 * cos(u);
                break;
            case WFUNC_BLACKMAN:
                win = 0.42 + 0.5 * cos(u) + 0.08 * cos(2*u);
                break;
            case WFUNC_NUTTALL3:
                win = 0.40897 + 0.5 * cos(u) + 0.09103 * cos(2*u);
                break;
            case WFUNC_MNUTTALL3:
                win = 0.4243801 + 0.4973406 * cos(u) + 0.0782793 * cos(2*u);
                break;
            case WFUNC_NUTTALL:
                win = 0.355768 + 0.487396 * cos(u) + 0.144232 * cos(2*u) + 0.012604 * cos(3*u);
                break;
            case WFUNC_BNUTTALL:
                win = 0.3635819 + 0.4891775 * cos(u) + 0.1365995 * cos(2*u) + 0.0106411 * cos(3*u);
                break;
            case WFUNC_BHARRIS:
                win = 0.35875 + 0.48829 * cos(u) + 0.14128 * cos(2*u) + 0.01168 * cos(3*u);
                break;
            case WFUNC_TUKEY:
                win = (u <= 0.5 * M_PI) ? 1.0 : (0.5 + 0.5 * cos(2*u - M_PI));
                break;
            default:
                av_assert0(0);
            }
            /* scale compensates both the analysis irdft and the later
             * rdft/irdft round trip in fast_convolute() */
            s->analysis_buf[k] *= (2.0/s->analysis_rdft_len) * (2.0/s->rdft_len) * win;
            if (k)
                s->analysis_buf[s->analysis_rdft_len - k] = s->analysis_buf[k];
        }

        /* clear everything between the two symmetric halves, then rotate the
         * fir_len taps into rdft_buf so tap 0 sits at index center */
        memset(s->analysis_buf + center + 1, 0, (s->analysis_rdft_len - s->fir_len) * sizeof(*s->analysis_buf));
        memcpy(rdft_buf, s->analysis_buf + s->analysis_rdft_len - center, center * sizeof(*s->analysis_buf));
        memcpy(rdft_buf + center, s->analysis_buf, (s->rdft_len - center) * sizeof(*s->analysis_buf));
        av_rdft_calc(s->rdft, rdft_buf);

        /* reject kernels containing nan/inf (e.g. from a bad expression) */
        for (k = 0; k < s->rdft_len; k++) {
            if (isnan(rdft_buf[k]) || isinf(rdft_buf[k])) {
                av_log(ctx, AV_LOG_ERROR, "filter kernel contains nan or infinity.\n");
                av_expr_free(gain_expr);
                if (dump_fp)
                    fclose(dump_fp);
                return AVERROR(EINVAL);
            }
        }

        if (dump_fp)
            dump_fir(ctx, dump_fp, ch);

        /* single shared kernel unless multi mode is enabled */
        if (!s->multi)
            break;
    }

    /* commit: only replace the live kernel(s) once everything succeeded */
    memcpy(s->kernel_buf, s->kernel_tmp_buf, (s->multi ? inlink->channels : 1) * s->rdft_len * sizeof(*s->kernel_buf));
    av_expr_free(gain_expr);
    if (dump_fp)
        fclose(dump_fp);
    return 0;
}
/* Runtime gain strings: a string installed via process_command() overrides
 * the corresponding option. */
#define SELECT_GAIN(s) (s->gain_cmd ? s->gain_cmd : s->gain)
#define SELECT_GAIN_ENTRY(s) (s->gain_entry_cmd ? s->gain_entry_cmd : s->gain_entry)

/**
 * Input link configuration: derive the FIR length from the delay option,
 * pick the convolution and analysis RDFT sizes, allocate all buffers and
 * build the initial kernel. Returns 0 or a negative AVERROR code.
 */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    FIREqualizerContext *s = ctx->priv;
    int rdft_bits;

    /* re-configuration: drop any previous transforms/buffers first */
    common_uninit(s);

    s->next_pts = 0;
    s->frame_nsamples_max = 0;

    /* odd number of taps spanning ~2*delay seconds, at least 3 */
    s->fir_len = FFMAX(2 * (int)(inlink->sample_rate * s->delay) + 1, 3);
    s->remaining = s->fir_len - 1;

    /* smallest power-of-two rdft_len such that one convolution block
     * (rdft_len - fir_len + 1 samples) covers at least fir_len/2 */
    for (rdft_bits = RDFT_BITS_MIN; rdft_bits <= RDFT_BITS_MAX; rdft_bits++) {
        s->rdft_len = 1 << rdft_bits;
        s->nsamples_max = s->rdft_len - s->fir_len + 1;
        if (s->nsamples_max * 2 >= s->fir_len)
            break;
    }

    if (rdft_bits > RDFT_BITS_MAX) {
        av_log(ctx, AV_LOG_ERROR, "too large delay, please decrease it.\n");
        return AVERROR(EINVAL);
    }

    if (!(s->rdft = av_rdft_init(rdft_bits, DFT_R2C)) || !(s->irdft = av_rdft_init(rdft_bits, IDFT_C2R)))
        return AVERROR(ENOMEM);

    /* grow the analysis transform until its bin spacing
     * (sample_rate / analysis_rdft_len) is within the accuracy option */
    for ( ; rdft_bits <= RDFT_BITS_MAX; rdft_bits++) {
        s->analysis_rdft_len = 1 << rdft_bits;
        if (inlink->sample_rate <= s->accuracy * s->analysis_rdft_len)
            break;
    }

    if (rdft_bits > RDFT_BITS_MAX) {
        av_log(ctx, AV_LOG_ERROR, "too small accuracy, please increase it.\n");
        return AVERROR(EINVAL);
    }

    if (!(s->analysis_irdft = av_rdft_init(rdft_bits, IDFT_C2R)))
        return AVERROR(ENOMEM);

    if (s->dumpfile) {
        /* best-effort: failure here only disables dumping (checked in generate_kernel) */
        s->analysis_rdft = av_rdft_init(rdft_bits, DFT_R2C);
        s->dump_buf = av_malloc_array(s->analysis_rdft_len, sizeof(*s->dump_buf));
    }

    s->analysis_buf = av_malloc_array(s->analysis_rdft_len, sizeof(*s->analysis_buf));
    s->kernel_tmp_buf = av_malloc_array(s->rdft_len * (s->multi ? inlink->channels : 1), sizeof(*s->kernel_tmp_buf));
    s->kernel_buf = av_malloc_array(s->rdft_len * (s->multi ? inlink->channels : 1), sizeof(*s->kernel_buf));
    s->conv_buf = av_calloc(2 * s->rdft_len * inlink->channels, sizeof(*s->conv_buf));
    s->conv_idx = av_calloc(inlink->channels, sizeof(*s->conv_idx));
    if (!s->analysis_buf || !s->kernel_tmp_buf || !s->kernel_buf || !s->conv_buf || !s->conv_idx)
        return AVERROR(ENOMEM);

    av_log(ctx, AV_LOG_DEBUG, "sample_rate = %d, channels = %d, analysis_rdft_len = %d, rdft_len = %d, fir_len = %d, nsamples_max = %d.\n",
           inlink->sample_rate, inlink->channels, s->analysis_rdft_len, s->rdft_len, s->fir_len, s->nsamples_max);

    /* fixed mode: force frames of exactly nsamples_max samples */
    if (s->fixed)
        inlink->min_samples = inlink->max_samples = inlink->partial_buf_size = s->nsamples_max;

    return generate_kernel(ctx, SELECT_GAIN(s), SELECT_GAIN_ENTRY(s));
}
/**
 * Filter one frame in place (the input pad requires writable frames) and
 * pass it downstream. In zero_phase mode the frame pts is shifted back by
 * half the FIR length to compensate the filter delay.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    FIREqualizerContext *s = ctx->priv;
    int ch;

    for (ch = 0; ch < inlink->channels; ch++) {
        /* in multi mode each channel uses its own kernel */
        fast_convolute(s, s->kernel_buf + (s->multi ? ch * s->rdft_len : 0),
                       s->conv_buf + 2 * ch * s->rdft_len, s->conv_idx + ch,
                       (float *) frame->extended_data[ch], frame->nb_samples);
    }

    s->next_pts = AV_NOPTS_VALUE;
    if (frame->pts != AV_NOPTS_VALUE) {
        /* remember where a flush frame at EOF should start */
        s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, av_make_q(1, inlink->sample_rate), inlink->time_base);
        if (s->zero_phase)
            frame->pts -= av_rescale_q(s->fir_len/2, av_make_q(1, inlink->sample_rate), inlink->time_base);
    }
    /* track the largest frame seen: caps the flush frame size in request_frame() */
    s->frame_nsamples_max = FFMAX(s->frame_nsamples_max, frame->nb_samples);
    return ff_filter_frame(ctx->outputs[0], frame);
}
/**
 * Forward the frame request upstream; on EOF, flush the filter tail by
 * feeding up to fir_len - 1 samples of silence through filter_frame().
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FIREqualizerContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);
    if (ret == AVERROR_EOF && s->remaining > 0 && s->frame_nsamples_max > 0) {
        AVFrame *frame = ff_get_audio_buffer(outlink, FFMIN(s->remaining, s->frame_nsamples_max));
        if (!frame)
            return AVERROR(ENOMEM);

        av_samples_set_silence(frame->extended_data, 0, frame->nb_samples, outlink->channels, frame->format);
        /* next_pts was prepared by filter_frame() (may be AV_NOPTS_VALUE) */
        frame->pts = s->next_pts;
        s->remaining -= frame->nb_samples;
        /* push the silence through our own input to drain the convolution tail */
        ret = filter_frame(ctx->inputs[0], frame);
    }
    return ret;
}
  572. static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
  573. char *res, int res_len, int flags)
  574. {
  575. FIREqualizerContext *s = ctx->priv;
  576. int ret = AVERROR(ENOSYS);
  577. if (!strcmp(cmd, "gain")) {
  578. char *gain_cmd;
  579. if (SELECT_GAIN(s) && !strcmp(SELECT_GAIN(s), args)) {
  580. av_log(ctx, AV_LOG_DEBUG, "equal gain, do not rebuild.\n");
  581. return 0;
  582. }
  583. gain_cmd = av_strdup(args);
  584. if (!gain_cmd)
  585. return AVERROR(ENOMEM);
  586. ret = generate_kernel(ctx, gain_cmd, SELECT_GAIN_ENTRY(s));
  587. if (ret >= 0) {
  588. av_freep(&s->gain_cmd);
  589. s->gain_cmd = gain_cmd;
  590. } else {
  591. av_freep(&gain_cmd);
  592. }
  593. } else if (!strcmp(cmd, "gain_entry")) {
  594. char *gain_entry_cmd;
  595. if (SELECT_GAIN_ENTRY(s) && !strcmp(SELECT_GAIN_ENTRY(s), args)) {
  596. av_log(ctx, AV_LOG_DEBUG, "equal gain_entry, do not rebuild.\n");
  597. return 0;
  598. }
  599. gain_entry_cmd = av_strdup(args);
  600. if (!gain_entry_cmd)
  601. return AVERROR(ENOMEM);
  602. ret = generate_kernel(ctx, SELECT_GAIN(s), gain_entry_cmd);
  603. if (ret >= 0) {
  604. av_freep(&s->gain_entry_cmd);
  605. s->gain_entry_cmd = gain_entry_cmd;
  606. } else {
  607. av_freep(&gain_entry_cmd);
  608. }
  609. }
  610. return ret;
  611. }
static const AVFilterPad firequalizer_inputs[] = {
    {
        .name           = "default",
        .config_props   = config_input,
        .filter_frame   = filter_frame,
        .type           = AVMEDIA_TYPE_AUDIO,
        .needs_writable = 1,    /* samples are filtered in place */
    },
    { NULL }
};
static const AVFilterPad firequalizer_outputs[] = {
    {
        .name          = "default",
        .request_frame = request_frame, /* needed to flush the filter tail at EOF */
        .type          = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter definition registered with libavfilter. */
AVFilter ff_af_firequalizer = {
    .name            = "firequalizer",
    .description     = NULL_IF_CONFIG_SMALL("Finite Impulse Response Equalizer."),
    .uninit          = uninit,
    .query_formats   = query_formats,
    .process_command = process_command,
    .priv_size       = sizeof(FIREqualizerContext),
    .inputs          = firequalizer_inputs,
    .outputs         = firequalizer_outputs,
    .priv_class      = &firequalizer_class,
};