/*
 * Copyright (c) 2016 Muhammad Faiz <mfcc64@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/opt.h"
#include "libavutil/eval.h"
#include "libavutil/avassert.h"
#include "libavcodec/avfft.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"

#define RDFT_BITS_MIN 4
#define RDFT_BITS_MAX 16

enum WindowFunc {
    WFUNC_RECTANGULAR,
    WFUNC_HANN,
    WFUNC_HAMMING,
    WFUNC_BLACKMAN,
    WFUNC_NUTTALL3,
    WFUNC_MNUTTALL3,
    WFUNC_NUTTALL,
    WFUNC_BNUTTALL,
    WFUNC_BHARRIS,
    WFUNC_TUKEY,
    NB_WFUNC
};

enum Scale {
    SCALE_LINLIN,
    SCALE_LINLOG,
    SCALE_LOGLIN,
    SCALE_LOGLOG,
    NB_SCALE
};

#define NB_GAIN_ENTRY_MAX 4096

typedef struct {
    double freq;
    double gain;
} GainEntry;

typedef struct {
    int buf_idx;
    int overlap_idx;
} OverlapIndex;

typedef struct {
    const AVClass *class;

    RDFTContext  *analysis_rdft;
    RDFTContext  *analysis_irdft;
    RDFTContext  *rdft;
    RDFTContext  *irdft;
    int           analysis_rdft_len;
    int           rdft_len;

    float        *analysis_buf;
    float        *dump_buf;
    float        *kernel_tmp_buf;
    float        *kernel_buf;
    float        *conv_buf;
    OverlapIndex *conv_idx;
    int           fir_len;
    int           nsamples_max;
    int64_t       next_pts;
    int           frame_nsamples_max;
    int           remaining;

    char         *gain_cmd;
    char         *gain_entry_cmd;
    const char   *gain;
    const char   *gain_entry;
    double        delay;
    double        accuracy;
    int           wfunc;
    int           fixed;
    int           multi;
    int           zero_phase;
    int           scale;
    char         *dumpfile;
    int           dumpscale;

    int           nb_gain_entry;
    int           gain_entry_err;
    GainEntry     gain_entry_tbl[NB_GAIN_ENTRY_MAX];
} FIREqualizerContext;

#define OFFSET(x) offsetof(FIREqualizerContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption firequalizer_options[] = {
    { "gain", "set gain curve", OFFSET(gain), AV_OPT_TYPE_STRING, { .str = "gain_interpolate(f)" }, 0, 0, FLAGS },
    { "gain_entry", "set gain entry", OFFSET(gain_entry), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "delay", "set delay", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.0, 1e10, FLAGS },
    { "accuracy", "set accuracy", OFFSET(accuracy), AV_OPT_TYPE_DOUBLE, { .dbl = 5.0 }, 0.0, 1e10, FLAGS },
    { "wfunc", "set window function", OFFSET(wfunc), AV_OPT_TYPE_INT, { .i64 = WFUNC_HANN }, 0, NB_WFUNC-1, FLAGS, "wfunc" },
    { "rectangular", "rectangular window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_RECTANGULAR }, 0, 0, FLAGS, "wfunc" },
    { "hann", "hann window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_HANN }, 0, 0, FLAGS, "wfunc" },
    { "hamming", "hamming window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_HAMMING }, 0, 0, FLAGS, "wfunc" },
    { "blackman", "blackman window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BLACKMAN }, 0, 0, FLAGS, "wfunc" },
    { "nuttall3", "3-term nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_NUTTALL3 }, 0, 0, FLAGS, "wfunc" },
    { "mnuttall3", "minimum 3-term nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_MNUTTALL3 }, 0, 0, FLAGS, "wfunc" },
    { "nuttall", "nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_NUTTALL }, 0, 0, FLAGS, "wfunc" },
    { "bnuttall", "blackman-nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BNUTTALL }, 0, 0, FLAGS, "wfunc" },
    { "bharris", "blackman-harris window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BHARRIS }, 0, 0, FLAGS, "wfunc" },
    { "tukey", "tukey window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_TUKEY }, 0, 0, FLAGS, "wfunc" },
    { "fixed", "set fixed frame samples", OFFSET(fixed), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "multi", "set multi channels mode", OFFSET(multi), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "zero_phase", "set zero phase mode", OFFSET(zero_phase), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "scale", "set gain scale", OFFSET(scale), AV_OPT_TYPE_INT, { .i64 = SCALE_LINLOG }, 0, NB_SCALE-1, FLAGS, "scale" },
    { "linlin", "linear-freq linear-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LINLIN }, 0, 0, FLAGS, "scale" },
    { "linlog", "linear-freq logarithmic-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LINLOG }, 0, 0, FLAGS, "scale" },
    { "loglin", "logarithmic-freq linear-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LOGLIN }, 0, 0, FLAGS, "scale" },
    { "loglog", "logarithmic-freq logarithmic-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LOGLOG }, 0, 0, FLAGS, "scale" },
    { "dumpfile", "set dump file", OFFSET(dumpfile), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "dumpscale", "set dump scale", OFFSET(dumpscale), AV_OPT_TYPE_INT, { .i64 = SCALE_LINLOG }, 0, NB_SCALE-1, FLAGS, "scale" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(firequalizer);

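/*
 * Illustrative usage (not part of the original source; command lines assume
 * the standard ffmpeg CLI and this filter's documented option names, and the
 * exact quoting/escaping may need adjusting for your shell):
 *
 *   # gain table consumed by the default gain expression gain_interpolate(f)
 *   ffmpeg -i in.wav -af "firequalizer=gain_entry='entry(0,0);entry(1000,0);entry(2000,-6)'" out.wav
 *
 *   # arbitrary gain expression evaluated per frequency f (Hz)
 *   ffmpeg -i in.wav -af "firequalizer=gain='if(lt(f,1000), 0, -INF)'" out.wav
 *
 * gain_entry fills the table used by gain_interpolate()/cubic_interpolate()
 * below; gain is an expression of f (plus sr, ch, chid, chs, chlayout).
 */
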
static void common_uninit(FIREqualizerContext *s)
{
    av_rdft_end(s->analysis_rdft);
    av_rdft_end(s->analysis_irdft);
    av_rdft_end(s->rdft);
    av_rdft_end(s->irdft);
    s->analysis_rdft = s->analysis_irdft = s->rdft = s->irdft = NULL;

    av_freep(&s->analysis_buf);
    av_freep(&s->dump_buf);
    av_freep(&s->kernel_tmp_buf);
    av_freep(&s->kernel_buf);
    av_freep(&s->conv_buf);
    av_freep(&s->conv_idx);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    FIREqualizerContext *s = ctx->priv;

    common_uninit(s);
    av_freep(&s->gain_cmd);
    av_freep(&s->gain_entry_cmd);
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *formats;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}

/* FFT-based convolution of one channel with the frequency-domain kernel,
 * using overlap-add across the two halves of conv_buf. Inputs longer than
 * nsamples_max are split and processed recursively. */
static void fast_convolute(FIREqualizerContext *s, const float *kernel_buf, float *conv_buf,
                           OverlapIndex *idx, float *data, int nsamples)
{
    if (nsamples <= s->nsamples_max) {
        float *buf = conv_buf + idx->buf_idx * s->rdft_len;
        float *obuf = conv_buf + !idx->buf_idx * s->rdft_len + idx->overlap_idx;
        int center = s->fir_len/2;
        int k;

        memset(buf, 0, center * sizeof(*data));
        memcpy(buf + center, data, nsamples * sizeof(*data));
        memset(buf + center + nsamples, 0, (s->rdft_len - nsamples - center) * sizeof(*data));
        av_rdft_calc(s->rdft, buf);

        /* the kernel spectrum is real-valued (packed in kernel_buf[0..rdft_len/2]),
         * so each complex bin is scaled by a single real coefficient */
        buf[0] *= kernel_buf[0];
        buf[1] *= kernel_buf[s->rdft_len/2];
        for (k = 1; k < s->rdft_len/2; k++) {
            buf[2*k] *= kernel_buf[k];
            buf[2*k+1] *= kernel_buf[k];
        }

        av_rdft_calc(s->irdft, buf);
        /* add the tail left over from the previous block (overlap-add) */
        for (k = 0; k < s->rdft_len - idx->overlap_idx; k++)
            buf[k] += obuf[k];
        memcpy(data, buf, nsamples * sizeof(*data));
        idx->buf_idx = !idx->buf_idx;
        idx->overlap_idx = nsamples;
    } else {
        while (nsamples > s->nsamples_max * 2) {
            fast_convolute(s, kernel_buf, conv_buf, idx, data, s->nsamples_max);
            data += s->nsamples_max;
            nsamples -= s->nsamples_max;
        }

        fast_convolute(s, kernel_buf, conv_buf, idx, data, nsamples/2);
        fast_convolute(s, kernel_buf, conv_buf, idx, data + nsamples/2, nsamples - nsamples/2);
    }
}

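/*
 * Partitioning example (illustrative values, not from the source): with
 * nsamples_max = 1166, a 4000-sample frame is processed as
 *   1166 + 1166   (while nsamples > 2 * nsamples_max)
 * and the remaining 1668 samples are then split into 834 + 834, so every
 * recursive call fits within a single rdft_len-sized FFT block.
 */
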
/* dump the impulse response and the desired vs. actual frequency response of
 * channel ch to the dump file */
static void dump_fir(AVFilterContext *ctx, FILE *fp, int ch)
{
    FIREqualizerContext *s = ctx->priv;
    int rate = ctx->inputs[0]->sample_rate;
    int xlog = s->dumpscale == SCALE_LOGLIN || s->dumpscale == SCALE_LOGLOG;
    int ylog = s->dumpscale == SCALE_LINLOG || s->dumpscale == SCALE_LOGLOG;
    int x;
    int center = s->fir_len / 2;
    double delay = s->zero_phase ? 0.0 : (double) center / rate;
    double vx, ya, yb;

    s->analysis_buf[0] *= s->rdft_len/2;
    for (x = 1; x <= center; x++) {
        s->analysis_buf[x] *= s->rdft_len/2;
        s->analysis_buf[s->analysis_rdft_len - x] *= s->rdft_len/2;
    }

    if (ch)
        fprintf(fp, "\n\n");

    fprintf(fp, "# time[%d] (time amplitude)\n", ch);
    for (x = center; x > 0; x--)
        fprintf(fp, "%15.10f %15.10f\n", delay - (double) x / rate, (double) s->analysis_buf[s->analysis_rdft_len - x]);

    for (x = 0; x <= center; x++)
        fprintf(fp, "%15.10f %15.10f\n", delay + (double)x / rate , (double) s->analysis_buf[x]);

    av_rdft_calc(s->analysis_rdft, s->analysis_buf);

    fprintf(fp, "\n\n# freq[%d] (frequency desired_gain actual_gain)\n", ch);
    for (x = 0; x <= s->analysis_rdft_len/2; x++) {
        int i = (x == s->analysis_rdft_len/2) ? 1 : 2 * x;
        vx = (double)x * rate / s->analysis_rdft_len;
        if (xlog)
            vx = log2(0.05*vx);
        ya = s->dump_buf[i];
        yb = s->analysis_buf[i];
        if (ylog) {
            ya = 20.0 * log10(fabs(ya));
            yb = 20.0 * log10(fabs(yb));
        }
        fprintf(fp, "%17.10f %17.10f %17.10f\n", vx, ya, yb);
    }
}

/* expression callback for entry(freq, gain): append one point to the gain
 * table; frequencies must not be NaN and must be strictly increasing */
static double entry_func(void *p, double freq, double gain)
{
    AVFilterContext *ctx = p;
    FIREqualizerContext *s = ctx->priv;

    if (s->nb_gain_entry >= NB_GAIN_ENTRY_MAX) {
        av_log(ctx, AV_LOG_ERROR, "entry table overflow.\n");
        s->gain_entry_err = AVERROR(EINVAL);
        return 0;
    }

    if (isnan(freq)) {
        av_log(ctx, AV_LOG_ERROR, "nan frequency (%g, %g).\n", freq, gain);
        s->gain_entry_err = AVERROR(EINVAL);
        return 0;
    }

    if (s->nb_gain_entry > 0 && freq <= s->gain_entry_tbl[s->nb_gain_entry - 1].freq) {
        av_log(ctx, AV_LOG_ERROR, "unsorted frequency (%g, %g).\n", freq, gain);
        s->gain_entry_err = AVERROR(EINVAL);
        return 0;
    }

    s->gain_entry_tbl[s->nb_gain_entry].freq = freq;
    s->gain_entry_tbl[s->nb_gain_entry].gain = gain;
    s->nb_gain_entry++;
    return 0;
}

/* bsearch comparator: locate the adjacent pair of entries whose frequency
 * interval [entry[0].freq, entry[1].freq] contains *freq */
static int gain_entry_compare(const void *key, const void *memb)
{
    const double *freq = key;
    const GainEntry *entry = memb;

    if (*freq < entry[0].freq)
        return -1;
    if (*freq > entry[1].freq)
        return 1;
    return 0;
}

/* gain_interpolate(f): piecewise-linear interpolation over the gain table */
static double gain_interpolate_func(void *p, double freq)
{
    AVFilterContext *ctx = p;
    FIREqualizerContext *s = ctx->priv;
    GainEntry *res;
    double d0, d1, d;

    if (isnan(freq))
        return freq;

    if (!s->nb_gain_entry)
        return 0;

    if (freq <= s->gain_entry_tbl[0].freq)
        return s->gain_entry_tbl[0].gain;

    if (freq >= s->gain_entry_tbl[s->nb_gain_entry-1].freq)
        return s->gain_entry_tbl[s->nb_gain_entry-1].gain;

    res = bsearch(&freq, &s->gain_entry_tbl, s->nb_gain_entry - 1, sizeof(*res), gain_entry_compare);
    av_assert0(res);

    d  = res[1].freq - res[0].freq;
    d0 = freq - res[0].freq;
    d1 = res[1].freq - freq;

    if (d0 && d1)
        return (d0 * res[1].gain + d1 * res[0].gain) / d;

    if (d0)
        return res[1].gain;

    return res[0].gain;
}

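/*
 * Worked example (illustrative values, not from the source): with entries
 * (100 Hz, 0) and (1000 Hz, -6), a query at 550 Hz gives
 *   d = 900, d0 = 450, d1 = 450
 *   gain = (d0 * -6 + d1 * 0) / d = -3
 * i.e. the midpoint of the segment, as expected for linear interpolation
 * (with the default linlog scale these gain values are interpreted as dB).
 */
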
/* cubic_interpolate(f): cubic Hermite interpolation over the gain table,
 * with neighbouring slopes blended (weighted by magnitude) at the knots */
static double cubic_interpolate_func(void *p, double freq)
{
    AVFilterContext *ctx = p;
    FIREqualizerContext *s = ctx->priv;
    GainEntry *res;
    double x, x2, x3;
    double a, b, c, d;
    double m0, m1, m2, msum, unit;

    if (!s->nb_gain_entry)
        return 0;

    if (freq <= s->gain_entry_tbl[0].freq)
        return s->gain_entry_tbl[0].gain;

    if (freq >= s->gain_entry_tbl[s->nb_gain_entry-1].freq)
        return s->gain_entry_tbl[s->nb_gain_entry-1].gain;

    res = bsearch(&freq, &s->gain_entry_tbl, s->nb_gain_entry - 1, sizeof(*res), gain_entry_compare);
    av_assert0(res);

    unit = res[1].freq - res[0].freq;
    m0 = res != s->gain_entry_tbl ?
         unit * (res[0].gain - res[-1].gain) / (res[0].freq - res[-1].freq) : 0;
    m1 = res[1].gain - res[0].gain;
    m2 = res != s->gain_entry_tbl + s->nb_gain_entry - 2 ?
         unit * (res[2].gain - res[1].gain) / (res[2].freq - res[1].freq) : 0;

    msum = fabs(m0) + fabs(m1);
    m0 = msum > 0 ? (fabs(m0) * m1 + fabs(m1) * m0) / msum : 0;
    msum = fabs(m1) + fabs(m2);
    m1 = msum > 0 ? (fabs(m1) * m2 + fabs(m2) * m1) / msum : 0;

    d = res[0].gain;
    c = m0;
    b = 3 * res[1].gain - m1 - 2 * c - 3 * d;
    a = res[1].gain - b - c - d;

    x  = (freq - res[0].freq) / unit;
    x2 = x * x;
    x3 = x2 * x;

    return a * x3 + b * x2 + c * x + d;
}

static const char *const var_names[] = {
    "f",
    "sr",
    "ch",
    "chid",
    "chs",
    "chlayout",
    NULL
};

enum VarOffset {
    VAR_F,
    VAR_SR,
    VAR_CH,
    VAR_CHID,
    VAR_CHS,
    VAR_CHLAYOUT,
    VAR_NB
};

static int generate_kernel(AVFilterContext *ctx, const char *gain, const char *gain_entry)
{
    FIREqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const char *gain_entry_func_names[] = { "entry", NULL };
    const char *gain_func_names[] = { "gain_interpolate", "cubic_interpolate", NULL };
    double (*gain_entry_funcs[])(void *, double, double) = { entry_func, NULL };
    double (*gain_funcs[])(void *, double) = { gain_interpolate_func, cubic_interpolate_func, NULL };
    double vars[VAR_NB];
    AVExpr *gain_expr;
    int ret, k, center, ch;
    int xlog = s->scale == SCALE_LOGLIN || s->scale == SCALE_LOGLOG;
    int ylog = s->scale == SCALE_LINLOG || s->scale == SCALE_LOGLOG;
    FILE *dump_fp = NULL;

    s->nb_gain_entry = 0;
    s->gain_entry_err = 0;
    if (gain_entry) {
        double result = 0.0;
        ret = av_expr_parse_and_eval(&result, gain_entry, NULL, NULL, NULL, NULL,
                                     gain_entry_func_names, gain_entry_funcs, ctx, 0, ctx);
        if (ret < 0)
            return ret;
        if (s->gain_entry_err < 0)
            return s->gain_entry_err;
    }
    av_log(ctx, AV_LOG_DEBUG, "nb_gain_entry = %d.\n", s->nb_gain_entry);

    ret = av_expr_parse(&gain_expr, gain, var_names,
                        gain_func_names, gain_funcs, NULL, NULL, 0, ctx);
    if (ret < 0)
        return ret;

    if (s->dumpfile && (!s->dump_buf || !s->analysis_rdft || !(dump_fp = fopen(s->dumpfile, "w"))))
        av_log(ctx, AV_LOG_WARNING, "dumping failed.\n");

    vars[VAR_CHS] = inlink->channels;
    vars[VAR_CHLAYOUT] = inlink->channel_layout;
    vars[VAR_SR] = inlink->sample_rate;
    for (ch = 0; ch < inlink->channels; ch++) {
        float *rdft_buf = s->kernel_tmp_buf + ch * s->rdft_len;
        double result;
        vars[VAR_CH] = ch;
        vars[VAR_CHID] = av_channel_layout_extract_channel(inlink->channel_layout, ch);

        /* sample the desired magnitude response on the analysis grid; with
         * av_rdft packing element 0 is DC, element 1 is Nyquist, and bin k
         * occupies the (real, imag) pair at 2k/2k+1 (imaginary parts stay 0) */
        vars[VAR_F] = 0.0;
        if (xlog)
            vars[VAR_F] = log2(0.05 * vars[VAR_F]);
        result = av_expr_eval(gain_expr, vars, ctx);
        s->analysis_buf[0] = ylog ? pow(10.0, 0.05 * result) : result;

        vars[VAR_F] = 0.5 * inlink->sample_rate;
        if (xlog)
            vars[VAR_F] = log2(0.05 * vars[VAR_F]);
        result = av_expr_eval(gain_expr, vars, ctx);
        s->analysis_buf[1] = ylog ? pow(10.0, 0.05 * result) : result;

        for (k = 1; k < s->analysis_rdft_len/2; k++) {
            vars[VAR_F] = k * ((double)inlink->sample_rate / (double)s->analysis_rdft_len);
            if (xlog)
                vars[VAR_F] = log2(0.05 * vars[VAR_F]);
            result = av_expr_eval(gain_expr, vars, ctx);
            s->analysis_buf[2*k] = ylog ? pow(10.0, 0.05 * result) : result;
            s->analysis_buf[2*k+1] = 0.0;
        }

        if (s->dump_buf)
            memcpy(s->dump_buf, s->analysis_buf, s->analysis_rdft_len * sizeof(*s->analysis_buf));

        /* back to the time domain, then apply the selected window to truncate
         * the impulse response to fir_len taps */
        av_rdft_calc(s->analysis_irdft, s->analysis_buf);
        center = s->fir_len / 2;

        for (k = 0; k <= center; k++) {
            double u = k * (M_PI/center);
            double win;
            switch (s->wfunc) {
            case WFUNC_RECTANGULAR:
                win = 1.0;
                break;
            case WFUNC_HANN:
                win = 0.5 + 0.5 * cos(u);
                break;
            case WFUNC_HAMMING:
                win = 0.53836 + 0.46164 * cos(u);
                break;
            case WFUNC_BLACKMAN:
                win = 0.42 + 0.5 * cos(u) + 0.08 * cos(2*u);
                break;
            case WFUNC_NUTTALL3:
                win = 0.40897 + 0.5 * cos(u) + 0.09103 * cos(2*u);
                break;
            case WFUNC_MNUTTALL3:
                win = 0.4243801 + 0.4973406 * cos(u) + 0.0782793 * cos(2*u);
                break;
            case WFUNC_NUTTALL:
                win = 0.355768 + 0.487396 * cos(u) + 0.144232 * cos(2*u) + 0.012604 * cos(3*u);
                break;
            case WFUNC_BNUTTALL:
                win = 0.3635819 + 0.4891775 * cos(u) + 0.1365995 * cos(2*u) + 0.0106411 * cos(3*u);
                break;
            case WFUNC_BHARRIS:
                win = 0.35875 + 0.48829 * cos(u) + 0.14128 * cos(2*u) + 0.01168 * cos(3*u);
                break;
            case WFUNC_TUKEY:
                win = (u <= 0.5 * M_PI) ? 1.0 : (0.5 + 0.5 * cos(2*u - M_PI));
                break;
            default:
                av_assert0(0);
            }
            s->analysis_buf[k] *= (2.0/s->analysis_rdft_len) * (2.0/s->rdft_len) * win;
            if (k)
                s->analysis_buf[s->analysis_rdft_len - k] = s->analysis_buf[k];
        }

        memset(s->analysis_buf + center + 1, 0, (s->analysis_rdft_len - s->fir_len) * sizeof(*s->analysis_buf));
        memcpy(rdft_buf, s->analysis_buf, s->rdft_len/2 * sizeof(*s->analysis_buf));
        memcpy(rdft_buf + s->rdft_len/2, s->analysis_buf + s->analysis_rdft_len - s->rdft_len/2, s->rdft_len/2 * sizeof(*s->analysis_buf));
        av_rdft_calc(s->rdft, rdft_buf);

        for (k = 0; k < s->rdft_len; k++) {
            if (isnan(rdft_buf[k]) || isinf(rdft_buf[k])) {
                av_log(ctx, AV_LOG_ERROR, "filter kernel contains nan or infinity.\n");
                av_expr_free(gain_expr);
                if (dump_fp)
                    fclose(dump_fp);
                return AVERROR(EINVAL);
            }
        }

        /* the windowed kernel is symmetric, so its spectrum is real: pack the
         * real parts into rdft_buf[0..rdft_len/2] as expected by fast_convolute() */
        rdft_buf[s->rdft_len-1] = rdft_buf[1];
        for (k = 0; k < s->rdft_len/2; k++)
            rdft_buf[k] = rdft_buf[2*k];
        rdft_buf[s->rdft_len/2] = rdft_buf[s->rdft_len-1];

        if (dump_fp)
            dump_fir(ctx, dump_fp, ch);

        if (!s->multi)
            break;
    }

    memcpy(s->kernel_buf, s->kernel_tmp_buf, (s->multi ? inlink->channels : 1) * s->rdft_len * sizeof(*s->kernel_buf));
    av_expr_free(gain_expr);
    if (dump_fp)
        fclose(dump_fp);

    return 0;
}

#define SELECT_GAIN(s) (s->gain_cmd ? s->gain_cmd : s->gain)
#define SELECT_GAIN_ENTRY(s) (s->gain_entry_cmd ? s->gain_entry_cmd : s->gain_entry)

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    FIREqualizerContext *s = ctx->priv;
    int rdft_bits;

    common_uninit(s);

    s->next_pts = 0;
    s->frame_nsamples_max = 0;

    /* the FIR length follows from the requested delay (in seconds) */
    s->fir_len = FFMAX(2 * (int)(inlink->sample_rate * s->delay) + 1, 3);
    s->remaining = s->fir_len - 1;

    /* pick the smallest FFT size that still leaves a usable block size for
     * overlap-add convolution */
    for (rdft_bits = RDFT_BITS_MIN; rdft_bits <= RDFT_BITS_MAX; rdft_bits++) {
        s->rdft_len = 1 << rdft_bits;
        s->nsamples_max = s->rdft_len - s->fir_len + 1;
        if (s->nsamples_max * 2 >= s->fir_len)
            break;
    }

    if (rdft_bits > RDFT_BITS_MAX) {
        av_log(ctx, AV_LOG_ERROR, "too large delay, please decrease it.\n");
        return AVERROR(EINVAL);
    }

    if (!(s->rdft = av_rdft_init(rdft_bits, DFT_R2C)) || !(s->irdft = av_rdft_init(rdft_bits, IDFT_C2R)))
        return AVERROR(ENOMEM);

    /* the analysis FFT must be fine enough that its bin spacing does not
     * exceed the requested accuracy (in Hz) */
    for ( ; rdft_bits <= RDFT_BITS_MAX; rdft_bits++) {
        s->analysis_rdft_len = 1 << rdft_bits;
        if (inlink->sample_rate <= s->accuracy * s->analysis_rdft_len)
            break;
    }

    if (rdft_bits > RDFT_BITS_MAX) {
        av_log(ctx, AV_LOG_ERROR, "too small accuracy, please increase it.\n");
        return AVERROR(EINVAL);
    }

    if (!(s->analysis_irdft = av_rdft_init(rdft_bits, IDFT_C2R)))
        return AVERROR(ENOMEM);

    if (s->dumpfile) {
        s->analysis_rdft = av_rdft_init(rdft_bits, DFT_R2C);
        s->dump_buf = av_malloc_array(s->analysis_rdft_len, sizeof(*s->dump_buf));
    }

    s->analysis_buf = av_malloc_array(s->analysis_rdft_len, sizeof(*s->analysis_buf));
    s->kernel_tmp_buf = av_malloc_array(s->rdft_len * (s->multi ? inlink->channels : 1), sizeof(*s->kernel_tmp_buf));
    s->kernel_buf = av_malloc_array(s->rdft_len * (s->multi ? inlink->channels : 1), sizeof(*s->kernel_buf));
    s->conv_buf = av_calloc(2 * s->rdft_len * inlink->channels, sizeof(*s->conv_buf));
    s->conv_idx = av_calloc(inlink->channels, sizeof(*s->conv_idx));
    if (!s->analysis_buf || !s->kernel_tmp_buf || !s->kernel_buf || !s->conv_buf || !s->conv_idx)
        return AVERROR(ENOMEM);

    av_log(ctx, AV_LOG_DEBUG, "sample_rate = %d, channels = %d, analysis_rdft_len = %d, rdft_len = %d, fir_len = %d, nsamples_max = %d.\n",
           inlink->sample_rate, inlink->channels, s->analysis_rdft_len, s->rdft_len, s->fir_len, s->nsamples_max);

    if (s->fixed)
        inlink->min_samples = inlink->max_samples = inlink->partial_buf_size = s->nsamples_max;

    return generate_kernel(ctx, SELECT_GAIN(s), SELECT_GAIN_ENTRY(s));
}

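/*
 * Sizing example (illustrative, not from the source): with the default
 * options at 44100 Hz, delay = 0.01 gives
 *   fir_len      = 2 * 441 + 1 = 883 taps
 *   rdft_len     = 2048  (smallest power of two with nsamples_max * 2 >= fir_len)
 *   nsamples_max = 2048 - 883 + 1 = 1166 samples per convolution block
 * and accuracy = 5 pushes analysis_rdft_len up to 16384, so the analysis bin
 * spacing 44100 / 16384 ~= 2.7 Hz stays below the requested 5 Hz.
 */
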
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    FIREqualizerContext *s = ctx->priv;
    int ch;

    for (ch = 0; ch < inlink->channels; ch++) {
        fast_convolute(s, s->kernel_buf + (s->multi ? ch * s->rdft_len : 0),
                       s->conv_buf + 2 * ch * s->rdft_len, s->conv_idx + ch,
                       (float *) frame->extended_data[ch], frame->nb_samples);
    }

    s->next_pts = AV_NOPTS_VALUE;
    if (frame->pts != AV_NOPTS_VALUE) {
        s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, av_make_q(1, inlink->sample_rate), inlink->time_base);
        /* in zero-phase mode, shift the timestamps back to compensate for the
         * filter's group delay of fir_len/2 samples */
        if (s->zero_phase)
            frame->pts -= av_rescale_q(s->fir_len/2, av_make_q(1, inlink->sample_rate), inlink->time_base);
    }
    s->frame_nsamples_max = FFMAX(s->frame_nsamples_max, frame->nb_samples);
    return ff_filter_frame(ctx->outputs[0], frame);
}

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FIREqualizerContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);
    /* at EOF, feed silence through the filter so the tail of the impulse
     * response (fir_len - 1 samples) is flushed to the output */
    if (ret == AVERROR_EOF && s->remaining > 0 && s->frame_nsamples_max > 0) {
        AVFrame *frame = ff_get_audio_buffer(outlink, FFMIN(s->remaining, s->frame_nsamples_max));

        if (!frame)
            return AVERROR(ENOMEM);

        av_samples_set_silence(frame->extended_data, 0, frame->nb_samples, outlink->channels, frame->format);
        frame->pts = s->next_pts;
        s->remaining -= frame->nb_samples;
        ret = filter_frame(ctx->inputs[0], frame);
    }

    return ret;
}

/* handle the runtime "gain" and "gain_entry" commands: rebuild the kernel
 * only when the new argument actually differs from the current one */
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    FIREqualizerContext *s = ctx->priv;
    int ret = AVERROR(ENOSYS);

    if (!strcmp(cmd, "gain")) {
        char *gain_cmd;

        if (SELECT_GAIN(s) && !strcmp(SELECT_GAIN(s), args)) {
            av_log(ctx, AV_LOG_DEBUG, "equal gain, do not rebuild.\n");
            return 0;
        }

        gain_cmd = av_strdup(args);
        if (!gain_cmd)
            return AVERROR(ENOMEM);

        ret = generate_kernel(ctx, gain_cmd, SELECT_GAIN_ENTRY(s));
        if (ret >= 0) {
            av_freep(&s->gain_cmd);
            s->gain_cmd = gain_cmd;
        } else {
            av_freep(&gain_cmd);
        }
    } else if (!strcmp(cmd, "gain_entry")) {
        char *gain_entry_cmd;

        if (SELECT_GAIN_ENTRY(s) && !strcmp(SELECT_GAIN_ENTRY(s), args)) {
            av_log(ctx, AV_LOG_DEBUG, "equal gain_entry, do not rebuild.\n");
            return 0;
        }

        gain_entry_cmd = av_strdup(args);
        if (!gain_entry_cmd)
            return AVERROR(ENOMEM);

        ret = generate_kernel(ctx, SELECT_GAIN(s), gain_entry_cmd);
        if (ret >= 0) {
            av_freep(&s->gain_entry_cmd);
            s->gain_entry_cmd = gain_entry_cmd;
        } else {
            av_freep(&gain_entry_cmd);
        }
    }

    return ret;
}

static const AVFilterPad firequalizer_inputs[] = {
    {
        .name           = "default",
        .config_props   = config_input,
        .filter_frame   = filter_frame,
        .type           = AVMEDIA_TYPE_AUDIO,
        .needs_writable = 1,
    },
    { NULL }
};

static const AVFilterPad firequalizer_outputs[] = {
    {
        .name          = "default",
        .request_frame = request_frame,
        .type          = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_firequalizer = {
    .name            = "firequalizer",
    .description     = NULL_IF_CONFIG_SMALL("Finite Impulse Response Equalizer."),
    .uninit          = uninit,
    .query_formats   = query_formats,
    .process_command = process_command,
    .priv_size       = sizeof(FIREqualizerContext),
    .inputs          = firequalizer_inputs,
    .outputs         = firequalizer_outputs,
    .priv_class      = &firequalizer_class,
};