You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

599 lines
20KB

  1. /*
  2. * Copyright (c) 2016 Muhammad Faiz <mfcc64@gmail.com>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include "libavutil/opt.h"
  21. #include "libavutil/eval.h"
  22. #include "libavutil/avassert.h"
  23. #include "libavcodec/avfft.h"
  24. #include "avfilter.h"
  25. #include "internal.h"
  26. #include "audio.h"
/* Allowed range of log2(FFT size) for both the convolution and analysis RDFTs. */
#define RDFT_BITS_MIN 4
#define RDFT_BITS_MAX 16

/* Window functions selectable via the "wfunc" option; applied to the
 * impulse response in generate_kernel(). */
enum WindowFunc {
    WFUNC_RECTANGULAR,
    WFUNC_HANN,
    WFUNC_HAMMING,
    WFUNC_BLACKMAN,
    WFUNC_NUTTALL3,
    WFUNC_MNUTTALL3,
    WFUNC_NUTTALL,
    WFUNC_BNUTTALL,
    WFUNC_BHARRIS,
    NB_WFUNC
};

/* Maximum number of (freq, gain) points accepted by the entry() expression
 * function before entry_func() reports an overflow. */
#define NB_GAIN_ENTRY_MAX 4096
/* One point of the user-supplied gain curve: gain (dB) at freq (Hz). */
typedef struct {
    double freq;
    double gain;
} GainEntry;

/* Per-channel overlap-add bookkeeping for fast_convolute():
 * buf_idx selects which half of the double buffer holds the current block,
 * overlap_idx is where the previous block's tail starts in the other half. */
typedef struct {
    int buf_idx;
    int overlap_idx;
} OverlapIndex;

typedef struct {
    const AVClass *class;

    RDFTContext *analysis_irdft;    /* inverse RDFT: gain curve -> impulse response */
    RDFTContext *rdft;              /* forward RDFT for convolution blocks */
    RDFTContext *irdft;             /* inverse RDFT for convolution blocks */
    int analysis_rdft_len;          /* FFT size used to sample the gain curve */
    int rdft_len;                   /* FFT size used for block convolution */

    float *analysis_buf;            /* scratch of analysis_rdft_len floats */
    float *kernel_tmp_buf;          /* per-channel kernels being built */
    float *kernel_buf;              /* committed frequency-domain kernel(s) */
    float *conv_buf;                /* 2 * rdft_len floats per channel (double buffer) */
    OverlapIndex *conv_idx;         /* one OverlapIndex per channel */
    int fir_len;                    /* FIR tap count (odd, >= 3) */
    int nsamples_max;               /* max samples convolvable in one FFT block */
    int64_t next_pts;               /* pts of the next expected input frame */
    int frame_nsamples_max;         /* largest frame seen; sizes the flush frame */
    int remaining;                  /* samples of filter tail still to flush at EOF */

    /* strings set at option time vs. via process_command (cmd takes precedence) */
    char *gain_cmd;
    char *gain_entry_cmd;
    const char *gain;
    const char *gain_entry;
    double delay;                   /* desired delay in seconds; determines fir_len */
    double accuracy;                /* frequency resolution in Hz; determines analysis_rdft_len */
    int wfunc;                      /* enum WindowFunc */
    int fixed;                      /* force fixed-size input frames */
    int multi;                      /* independent kernel per channel */
    int zero_phase;                 /* shift pts back to hide the filter delay */

    int nb_gain_entry;              /* valid entries in gain_entry_tbl */
    int gain_entry_err;             /* sticky error from entry_func() */
    GainEntry gain_entry_tbl[NB_GAIN_ENTRY_MAX];
} FIREqualizerContext;
#define OFFSET(x) offsetof(FIREqualizerContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* User options; "gain" and "gain_entry" are expression strings evaluated in
 * generate_kernel(), the rest map directly onto context fields. */
static const AVOption firequalizer_options[] = {
    { "gain", "set gain curve", OFFSET(gain), AV_OPT_TYPE_STRING, { .str = "gain_interpolate(f)" }, 0, 0, FLAGS },
    { "gain_entry", "set gain entry", OFFSET(gain_entry), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "delay", "set delay", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.0, 1e10, FLAGS },
    { "accuracy", "set accuracy", OFFSET(accuracy), AV_OPT_TYPE_DOUBLE, { .dbl = 5.0 }, 0.0, 1e10, FLAGS },
    { "wfunc", "set window function", OFFSET(wfunc), AV_OPT_TYPE_INT, { .i64 = WFUNC_HANN }, 0, NB_WFUNC-1, FLAGS, "wfunc" },
    { "rectangular", "rectangular window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_RECTANGULAR }, 0, 0, FLAGS, "wfunc" },
    { "hann", "hann window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_HANN }, 0, 0, FLAGS, "wfunc" },
    { "hamming", "hamming window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_HAMMING }, 0, 0, FLAGS, "wfunc" },
    { "blackman", "blackman window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BLACKMAN }, 0, 0, FLAGS, "wfunc" },
    { "nuttall3", "3-term nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_NUTTALL3 }, 0, 0, FLAGS, "wfunc" },
    { "mnuttall3", "minimum 3-term nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_MNUTTALL3 }, 0, 0, FLAGS, "wfunc" },
    { "nuttall", "nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_NUTTALL }, 0, 0, FLAGS, "wfunc" },
    { "bnuttall", "blackman-nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BNUTTALL }, 0, 0, FLAGS, "wfunc" },
    { "bharris", "blackman-harris window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BHARRIS }, 0, 0, FLAGS, "wfunc" },
    { "fixed", "set fixed frame samples", OFFSET(fixed), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "multi", "set multi channels mode", OFFSET(multi), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "zero_phase", "set zero phase mode", OFFSET(zero_phase), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { NULL }
};
  103. AVFILTER_DEFINE_CLASS(firequalizer);
  104. static void common_uninit(FIREqualizerContext *s)
  105. {
  106. av_rdft_end(s->analysis_irdft);
  107. av_rdft_end(s->rdft);
  108. av_rdft_end(s->irdft);
  109. s->analysis_irdft = s->rdft = s->irdft = NULL;
  110. av_freep(&s->analysis_buf);
  111. av_freep(&s->kernel_tmp_buf);
  112. av_freep(&s->kernel_buf);
  113. av_freep(&s->conv_buf);
  114. av_freep(&s->conv_idx);
  115. }
  116. static av_cold void uninit(AVFilterContext *ctx)
  117. {
  118. FIREqualizerContext *s = ctx->priv;
  119. common_uninit(s);
  120. av_freep(&s->gain_cmd);
  121. av_freep(&s->gain_entry_cmd);
  122. }
  123. static int query_formats(AVFilterContext *ctx)
  124. {
  125. AVFilterChannelLayouts *layouts;
  126. AVFilterFormats *formats;
  127. static const enum AVSampleFormat sample_fmts[] = {
  128. AV_SAMPLE_FMT_FLTP,
  129. AV_SAMPLE_FMT_NONE
  130. };
  131. int ret;
  132. layouts = ff_all_channel_counts();
  133. if (!layouts)
  134. return AVERROR(ENOMEM);
  135. ret = ff_set_common_channel_layouts(ctx, layouts);
  136. if (ret < 0)
  137. return ret;
  138. formats = ff_make_format_list(sample_fmts);
  139. if (!formats)
  140. return AVERROR(ENOMEM);
  141. ret = ff_set_common_formats(ctx, formats);
  142. if (ret < 0)
  143. return ret;
  144. formats = ff_all_samplerates();
  145. if (!formats)
  146. return AVERROR(ENOMEM);
  147. return ff_set_common_samplerates(ctx, formats);
  148. }
/**
 * FFT overlap-add convolution of one channel, in place.
 *
 * kernel_buf holds the frequency-domain kernel in FFmpeg rdft half-complex
 * packing: [0] = DC, [1] = Nyquist, then interleaved re/im pairs.
 * conv_buf is a double buffer of 2 * rdft_len floats; idx tracks which half
 * holds the current block and where the previous block's tail begins.
 */
static void fast_convolute(FIREqualizerContext *s, const float *kernel_buf, float *conv_buf,
                           OverlapIndex *idx, float *data, int nsamples)
{
    if (nsamples <= s->nsamples_max) {
        float *buf = conv_buf + idx->buf_idx * s->rdft_len;
        float *obuf = conv_buf + !idx->buf_idx * s->rdft_len + idx->overlap_idx;
        int k;

        /* zero-pad the input block to rdft_len and transform */
        memcpy(buf, data, nsamples * sizeof(*data));
        memset(buf + nsamples, 0, (s->rdft_len - nsamples) * sizeof(*data));
        av_rdft_calc(s->rdft, buf);

        /* pointwise complex multiply with the kernel; slots 0 and 1 are the
         * purely real DC and Nyquist bins */
        buf[0] *= kernel_buf[0];
        buf[1] *= kernel_buf[1];
        for (k = 2; k < s->rdft_len; k += 2) {
            float re, im;
            re = buf[k] * kernel_buf[k] - buf[k+1] * kernel_buf[k+1];
            im = buf[k] * kernel_buf[k+1] + buf[k+1] * kernel_buf[k];
            buf[k] = re;
            buf[k+1] = im;
        }
        av_rdft_calc(s->irdft, buf);

        /* overlap-add the tail left over from the previous block */
        for (k = 0; k < s->rdft_len - idx->overlap_idx; k++)
            buf[k] += obuf[k];

        /* first nsamples are fully convolved output; the rest of buf becomes
         * the tail for the next call (flip the double buffer) */
        memcpy(data, buf, nsamples * sizeof(*data));
        idx->buf_idx = !idx->buf_idx;
        idx->overlap_idx = nsamples;
    } else {
        /* block too large for one FFT: peel off full-size chunks, then split
         * the remainder in two so both recursive calls fit */
        while (nsamples > s->nsamples_max * 2) {
            fast_convolute(s, kernel_buf, conv_buf, idx, data, s->nsamples_max);
            data += s->nsamples_max;
            nsamples -= s->nsamples_max;
        }
        fast_convolute(s, kernel_buf, conv_buf, idx, data, nsamples/2);
        fast_convolute(s, kernel_buf, conv_buf, idx, data + nsamples/2, nsamples - nsamples/2);
    }
}
  184. static double entry_func(void *p, double freq, double gain)
  185. {
  186. AVFilterContext *ctx = p;
  187. FIREqualizerContext *s = ctx->priv;
  188. if (s->nb_gain_entry >= NB_GAIN_ENTRY_MAX) {
  189. av_log(ctx, AV_LOG_ERROR, "entry table overflow.\n");
  190. s->gain_entry_err = AVERROR(EINVAL);
  191. return 0;
  192. }
  193. if (isnan(freq)) {
  194. av_log(ctx, AV_LOG_ERROR, "nan frequency (%g, %g).\n", freq, gain);
  195. s->gain_entry_err = AVERROR(EINVAL);
  196. return 0;
  197. }
  198. if (s->nb_gain_entry > 0 && freq <= s->gain_entry_tbl[s->nb_gain_entry - 1].freq) {
  199. av_log(ctx, AV_LOG_ERROR, "unsorted frequency (%g, %g).\n", freq, gain);
  200. s->gain_entry_err = AVERROR(EINVAL);
  201. return 0;
  202. }
  203. s->gain_entry_tbl[s->nb_gain_entry].freq = freq;
  204. s->gain_entry_tbl[s->nb_gain_entry].gain = gain;
  205. s->nb_gain_entry++;
  206. return 0;
  207. }
/**
 * bsearch comparator used by gain_interpolate_func() to find the segment
 * that contains *key. Note the deliberate entry[1] access: the search is
 * over nb_gain_entry - 1 *segments*, and a match means
 * entry[0].freq <= *freq <= entry[1].freq.
 */
static int gain_entry_compare(const void *key, const void *memb)
{
    const double *freq = key;
    const GainEntry *entry = memb;

    if (*freq < entry[0].freq)
        return -1;
    if (*freq > entry[1].freq)
        return 1;
    return 0;
}
  218. static double gain_interpolate_func(void *p, double freq)
  219. {
  220. AVFilterContext *ctx = p;
  221. FIREqualizerContext *s = ctx->priv;
  222. GainEntry *res;
  223. double d0, d1, d;
  224. if (isnan(freq))
  225. return freq;
  226. if (!s->nb_gain_entry)
  227. return 0;
  228. if (freq <= s->gain_entry_tbl[0].freq)
  229. return s->gain_entry_tbl[0].gain;
  230. if (freq >= s->gain_entry_tbl[s->nb_gain_entry-1].freq)
  231. return s->gain_entry_tbl[s->nb_gain_entry-1].gain;
  232. res = bsearch(&freq, &s->gain_entry_tbl, s->nb_gain_entry - 1, sizeof(*res), gain_entry_compare);
  233. av_assert0(res);
  234. d = res[1].freq - res[0].freq;
  235. d0 = freq - res[0].freq;
  236. d1 = res[1].freq - freq;
  237. if (d0 && d1)
  238. return (d0 * res[1].gain + d1 * res[0].gain) / d;
  239. if (d0)
  240. return res[1].gain;
  241. return res[0].gain;
  242. }
/* Variables available inside the "gain" expression; must stay in the same
 * order as enum VarOffset below (parallel arrays). */
static const char *const var_names[] = {
    "f",        /* evaluation frequency in Hz */
    "sr",       /* input sample rate */
    "ch",       /* current channel index */
    "chid",     /* channel id extracted from the layout */
    "chs",      /* number of channels */
    "chlayout", /* channel layout as a number */
    NULL
};

enum VarOffset {
    VAR_F,
    VAR_SR,
    VAR_CH,
    VAR_CHID,
    VAR_CHS,
    VAR_CHLAYOUT,
    VAR_NB
};
/**
 * Build the frequency-domain FIR kernel(s) from the gain expressions.
 *
 * Pipeline, per channel: sample the desired magnitude response on the
 * analysis FFT grid, inverse-transform it into an impulse response, apply
 * the selected window, mirror it into a symmetric (linear-phase) FIR of
 * fir_len taps, zero-pad to rdft_len and forward-transform into the kernel
 * used by fast_convolute(). With multi=0 only channel 0's kernel is built
 * and shared.
 *
 * @param gain       expression for the gain curve (dB as a function of f)
 * @param gain_entry optional expression whose entry() calls fill the table
 * @return 0 on success, negative AVERROR on parse/eval failure or if the
 *         resulting kernel contains NaN/infinity
 */
static int generate_kernel(AVFilterContext *ctx, const char *gain, const char *gain_entry)
{
    FIREqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const char *gain_entry_func_names[] = { "entry", NULL };
    const char *gain_func_names[] = { "gain_interpolate", NULL };
    double (*gain_entry_funcs[])(void *, double, double) = { entry_func, NULL };
    double (*gain_funcs[])(void *, double) = { gain_interpolate_func, NULL };
    double vars[VAR_NB];
    AVExpr *gain_expr;
    int ret, k, center, ch;

    s->nb_gain_entry = 0;
    s->gain_entry_err = 0;
    if (gain_entry) {
        double result = 0.0;
        /* evaluating this expression populates gain_entry_tbl via entry_func() */
        ret = av_expr_parse_and_eval(&result, gain_entry, NULL, NULL, NULL, NULL,
                                     gain_entry_func_names, gain_entry_funcs, ctx, 0, ctx);
        if (ret < 0)
            return ret;
        if (s->gain_entry_err < 0)
            return s->gain_entry_err;
    }
    av_log(ctx, AV_LOG_DEBUG, "nb_gain_entry = %d.\n", s->nb_gain_entry);

    ret = av_expr_parse(&gain_expr, gain, var_names,
                        gain_func_names, gain_funcs, NULL, NULL, 0, ctx);
    if (ret < 0)
        return ret;

    vars[VAR_CHS] = inlink->channels;
    vars[VAR_CHLAYOUT] = inlink->channel_layout;
    vars[VAR_SR] = inlink->sample_rate;
    for (ch = 0; ch < inlink->channels; ch++) {
        vars[VAR_CH] = ch;
        vars[VAR_CHID] = av_channel_layout_extract_channel(inlink->channel_layout, ch);
        /* sample the desired magnitude response (dB -> linear amplitude);
         * rdft half-complex packing: [0] = DC, [1] = Nyquist */
        vars[VAR_F] = 0.0;
        s->analysis_buf[0] = pow(10.0, 0.05 * av_expr_eval(gain_expr, vars, ctx));
        vars[VAR_F] = 0.5 * inlink->sample_rate;
        s->analysis_buf[1] = pow(10.0, 0.05 * av_expr_eval(gain_expr, vars, ctx));
        for (k = 1; k < s->analysis_rdft_len/2; k++) {
            vars[VAR_F] = k * ((double)inlink->sample_rate /(double)s->analysis_rdft_len);
            s->analysis_buf[2*k] = pow(10.0, 0.05 * av_expr_eval(gain_expr, vars, ctx));
            s->analysis_buf[2*k+1] = 0.0; /* imaginary part 0: zero-phase response */
        }

        /* inverse transform: magnitude response -> impulse response */
        av_rdft_calc(s->analysis_irdft, s->analysis_buf);
        center = s->fir_len / 2;

        /* window the first half of the impulse response */
        for (k = 0; k <= center; k++) {
            double u = k * (M_PI/center);
            double win;
            switch (s->wfunc) {
            case WFUNC_RECTANGULAR:
                win = 1.0;
                break;
            case WFUNC_HANN:
                win = 0.5 + 0.5 * cos(u);
                break;
            case WFUNC_HAMMING:
                win = 0.53836 + 0.46164 * cos(u);
                break;
            case WFUNC_BLACKMAN:
                win = 0.42 + 0.5 * cos(u) + 0.08 * cos(2*u);
                break;
            case WFUNC_NUTTALL3:
                win = 0.40897 + 0.5 * cos(u) + 0.09103 * cos(2*u);
                break;
            case WFUNC_MNUTTALL3:
                win = 0.4243801 + 0.4973406 * cos(u) + 0.0782793 * cos(2*u);
                break;
            case WFUNC_NUTTALL:
                win = 0.355768 + 0.487396 * cos(u) + 0.144232 * cos(2*u) + 0.012604 * cos(3*u);
                break;
            case WFUNC_BNUTTALL:
                win = 0.3635819 + 0.4891775 * cos(u) + 0.1365995 * cos(2*u) + 0.0106411 * cos(3*u);
                break;
            case WFUNC_BHARRIS:
                win = 0.35875 + 0.48829 * cos(u) + 0.14128 * cos(2*u) + 0.01168 * cos(3*u);
                break;
            default:
                av_assert0(0);
            }
            /* fold in the normalization of both (unnormalized) rdft passes */
            s->analysis_buf[k] *= (2.0/s->analysis_rdft_len) * (2.0/s->rdft_len) * win;
        }

        /* reverse [0..center] so the center tap lands at index 'center' ... */
        for (k = 0; k < center - k; k++) {
            float tmp = s->analysis_buf[k];
            s->analysis_buf[k] = s->analysis_buf[center - k];
            s->analysis_buf[center - k] = tmp;
        }
        /* ... then mirror around it: symmetric, linear-phase FIR of fir_len taps */
        for (k = 1; k <= center; k++)
            s->analysis_buf[center + k] = s->analysis_buf[center - k];

        /* zero-pad to rdft_len and forward-transform into the convolution kernel */
        memset(s->analysis_buf + s->fir_len, 0, (s->rdft_len - s->fir_len) * sizeof(*s->analysis_buf));
        av_rdft_calc(s->rdft, s->analysis_buf);

        /* reject kernels the expression turned into NaN/inf */
        for (k = 0; k < s->rdft_len; k++) {
            if (isnan(s->analysis_buf[k]) || isinf(s->analysis_buf[k])) {
                av_log(ctx, AV_LOG_ERROR, "filter kernel contains nan or infinity.\n");
                av_expr_free(gain_expr);
                return AVERROR(EINVAL);
            }
        }

        memcpy(s->kernel_tmp_buf + ch * s->rdft_len, s->analysis_buf, s->rdft_len * sizeof(*s->analysis_buf));
        if (!s->multi)
            break; /* one shared kernel for all channels */
    }

    /* commit only after every kernel built successfully */
    memcpy(s->kernel_buf, s->kernel_tmp_buf, (s->multi ? inlink->channels : 1) * s->rdft_len * sizeof(*s->kernel_buf));
    av_expr_free(gain_expr);
    return 0;
}
/**
 * Input configuration: size the FFTs, allocate all buffers and build the
 * initial kernel. Called on (re)configuration, so it starts by freeing any
 * previous state.
 */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    FIREqualizerContext *s = ctx->priv;
    int rdft_bits;

    common_uninit(s);

    s->next_pts = 0;
    s->frame_nsamples_max = 0;

    /* odd tap count derived from the requested delay; at least 3 taps */
    s->fir_len = FFMAX(2 * (int)(inlink->sample_rate * s->delay) + 1, 3);
    /* filter tail to flush at EOF */
    s->remaining = s->fir_len - 1;

    /* smallest FFT whose usable block (rdft_len - fir_len + 1) is at least
     * half the FIR length, bounding the overlap-add overhead */
    for (rdft_bits = RDFT_BITS_MIN; rdft_bits <= RDFT_BITS_MAX; rdft_bits++) {
        s->rdft_len = 1 << rdft_bits;
        s->nsamples_max = s->rdft_len - s->fir_len + 1;
        if (s->nsamples_max * 2 >= s->fir_len)
            break;
    }

    if (rdft_bits > RDFT_BITS_MAX) {
        av_log(ctx, AV_LOG_ERROR, "too large delay, please decrease it.\n");
        return AVERROR(EINVAL);
    }

    if (!(s->rdft = av_rdft_init(rdft_bits, DFT_R2C)) || !(s->irdft = av_rdft_init(rdft_bits, IDFT_C2R)))
        return AVERROR(ENOMEM);

    /* analysis FFT is at least as large; grow it until the bin spacing
     * sample_rate / analysis_rdft_len is no coarser than "accuracy" Hz */
    for ( ; rdft_bits <= RDFT_BITS_MAX; rdft_bits++) {
        s->analysis_rdft_len = 1 << rdft_bits;
        if (inlink->sample_rate <= s->accuracy * s->analysis_rdft_len)
            break;
    }

    if (rdft_bits > RDFT_BITS_MAX) {
        av_log(ctx, AV_LOG_ERROR, "too small accuracy, please increase it.\n");
        return AVERROR(EINVAL);
    }

    if (!(s->analysis_irdft = av_rdft_init(rdft_bits, IDFT_C2R)))
        return AVERROR(ENOMEM);

    s->analysis_buf = av_malloc_array(s->analysis_rdft_len, sizeof(*s->analysis_buf));
    s->kernel_tmp_buf = av_malloc_array(s->rdft_len * (s->multi ? inlink->channels : 1), sizeof(*s->kernel_tmp_buf));
    s->kernel_buf = av_malloc_array(s->rdft_len * (s->multi ? inlink->channels : 1), sizeof(*s->kernel_buf));
    /* conv_buf/conv_idx are zeroed: overlap state must start clean */
    s->conv_buf = av_calloc(2 * s->rdft_len * inlink->channels, sizeof(*s->conv_buf));
    s->conv_idx = av_calloc(inlink->channels, sizeof(*s->conv_idx));
    if (!s->analysis_buf || !s->kernel_tmp_buf || !s->kernel_buf || !s->conv_buf || !s->conv_idx)
        return AVERROR(ENOMEM);

    av_log(ctx, AV_LOG_DEBUG, "sample_rate = %d, channels = %d, analysis_rdft_len = %d, rdft_len = %d, fir_len = %d, nsamples_max = %d.\n",
           inlink->sample_rate, inlink->channels, s->analysis_rdft_len, s->rdft_len, s->fir_len, s->nsamples_max);

    /* fixed mode: force frames of exactly one FFT block of samples */
    if (s->fixed)
        inlink->min_samples = inlink->max_samples = inlink->partial_buf_size = s->nsamples_max;

    /* command strings (if any) override the option strings */
    return generate_kernel(ctx, s->gain_cmd ? s->gain_cmd : s->gain,
                           s->gain_entry_cmd ? s->gain_entry_cmd : s->gain_entry);
}
/**
 * Convolve each channel of the (writable) frame in place and forward it.
 * In zero_phase mode the pts is shifted back by the fir_len/2 sample group
 * delay of the symmetric kernel built in generate_kernel().
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    FIREqualizerContext *s = ctx->priv;
    int ch;

    for (ch = 0; ch < inlink->channels; ch++) {
        /* multi=0: all channels share kernel 0 */
        fast_convolute(s, s->kernel_buf + (s->multi ? ch * s->rdft_len : 0),
                       s->conv_buf + 2 * ch * s->rdft_len, s->conv_idx + ch,
                       (float *) frame->extended_data[ch], frame->nb_samples);
    }

    s->next_pts = AV_NOPTS_VALUE;
    if (frame->pts != AV_NOPTS_VALUE) {
        /* remember where the next frame should start, for the EOF flush */
        s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, av_make_q(1, inlink->sample_rate), inlink->time_base);
        if (s->zero_phase)
            frame->pts -= av_rescale_q(s->fir_len/2, av_make_q(1, inlink->sample_rate), inlink->time_base);
    }
    /* track the largest frame seen; request_frame() sizes its flush frame by it */
    s->frame_nsamples_max = FFMAX(s->frame_nsamples_max, frame->nb_samples);
    return ff_filter_frame(ctx->outputs[0], frame);
}
/**
 * On EOF, feed silence through filter_frame() to flush the remaining
 * fir_len - 1 samples of convolution tail (at most frame_nsamples_max per
 * call; this is re-invoked until s->remaining is exhausted).
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FIREqualizerContext *s= ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);
    if (ret == AVERROR_EOF && s->remaining > 0 && s->frame_nsamples_max > 0) {
        AVFrame *frame = ff_get_audio_buffer(outlink, FFMIN(s->remaining, s->frame_nsamples_max));

        if (!frame)
            return AVERROR(ENOMEM);

        av_samples_set_silence(frame->extended_data, 0, frame->nb_samples, outlink->channels, frame->format);
        /* continue the timeline from the last real frame (may be AV_NOPTS_VALUE) */
        frame->pts = s->next_pts;
        s->remaining -= frame->nb_samples;
        ret = filter_frame(ctx->inputs[0], frame);
    }

    return ret;
}
  448. static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
  449. char *res, int res_len, int flags)
  450. {
  451. FIREqualizerContext *s = ctx->priv;
  452. int ret = AVERROR(ENOSYS);
  453. if (!strcmp(cmd, "gain")) {
  454. char *gain_cmd;
  455. gain_cmd = av_strdup(args);
  456. if (!gain_cmd)
  457. return AVERROR(ENOMEM);
  458. ret = generate_kernel(ctx, gain_cmd, s->gain_entry_cmd ? s->gain_entry_cmd : s->gain_entry);
  459. if (ret >= 0) {
  460. av_freep(&s->gain_cmd);
  461. s->gain_cmd = gain_cmd;
  462. } else {
  463. av_freep(&gain_cmd);
  464. }
  465. } else if (!strcmp(cmd, "gain_entry")) {
  466. char *gain_entry_cmd;
  467. gain_entry_cmd = av_strdup(args);
  468. if (!gain_entry_cmd)
  469. return AVERROR(ENOMEM);
  470. ret = generate_kernel(ctx, s->gain_cmd ? s->gain_cmd : s->gain, gain_entry_cmd);
  471. if (ret >= 0) {
  472. av_freep(&s->gain_entry_cmd);
  473. s->gain_entry_cmd = gain_entry_cmd;
  474. } else {
  475. av_freep(&gain_entry_cmd);
  476. }
  477. }
  478. return ret;
  479. }
/* needs_writable: filter_frame() convolves the samples in place. */
static const AVFilterPad firequalizer_inputs[] = {
    {
        .name = "default",
        .config_props = config_input,
        .filter_frame = filter_frame,
        .type = AVMEDIA_TYPE_AUDIO,
        .needs_writable = 1,
    },
    { NULL }
};
/* request_frame flushes the filter tail with silence at EOF. */
static const AVFilterPad firequalizer_outputs[] = {
    {
        .name = "default",
        .request_frame = request_frame,
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter registration entry. No init callback: all setup happens in
 * config_input() once the link parameters are known. */
AVFilter ff_af_firequalizer = {
    .name = "firequalizer",
    .description = NULL_IF_CONFIG_SMALL("Finite Impulse Response Equalizer."),
    .uninit = uninit,
    .query_formats = query_formats,
    .process_command = process_command,
    .priv_size = sizeof(FIREqualizerContext),
    .inputs = firequalizer_inputs,
    .outputs = firequalizer_outputs,
    .priv_class = &firequalizer_class,
};