/*
 * Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "libavutil/avstring.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"

#define FILTER_ORDER 4

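/*
 * Per-band IIR prototype, selected with the 't' field of a band
 * specification: the value maps onto the enum below (0 = Butterworth,
 * 1 = Chebyshev type 1, 2 = Chebyshev type 2) and is clipped to the valid
 * range in config_input().
 */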
enum FilterType {
    BUTTERWORTH,
    CHEBYSHEV1,
    CHEBYSHEV2,
    NB_TYPES
};

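/*
 * One fourth-order IIR section: b0..b4 are numerator (zero) coefficients,
 * a0..a4 denominator (pole) coefficients; num[] and denum[] hold the last
 * four inputs and outputs used by section_process() further below.
 */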
typedef struct FoSection {
    double a0, a1, a2, a3, a4;
    double b0, b1, b2, b3, b4;

    double num[4];
    double denum[4];
} FoSection;

typedef struct EqualizatorFilter {
    int ignore;
    int channel;
    int type;

    double freq;
    double gain;
    double width;

    FoSection section[2];
} EqualizatorFilter;

typedef struct AudioNEqualizerContext {
    const AVClass *class;
    char *args;
    char *colors;
    int draw_curves;
    int w, h;
    double mag;
    int fscale;
    int nb_filters;
    int nb_allocated;
    EqualizatorFilter *filters;
    AVFrame *video;
} AudioNEqualizerContext;

#define OFFSET(x) offsetof(AudioNEqualizerContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define V AV_OPT_FLAG_VIDEO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM

static const AVOption anequalizer_options[] = {
    { "params", NULL, OFFSET(args), AV_OPT_TYPE_STRING, {.str=""}, 0, 0, A|F },
    { "curves", "draw frequency response curves", OFFSET(draw_curves), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, V|F },
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, V|F },
    { "mgain", "set max gain", OFFSET(mag), AV_OPT_TYPE_DOUBLE, {.dbl=60}, -900, 900, V|F },
    { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, V|F, "fscale" },
    { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, V|F, "fscale" },
    { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, V|F, "fscale" },
    { "colors", "set channels curves colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, V|F },
    { NULL }
};

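/*
 * The "params" option takes '|'-separated band specifications in the format
 * parsed by config_input() below.  An illustrative example (values chosen
 * arbitrarily) would be:
 *
 *   anequalizer=c0 f=200 w=100 g=-10 t=1|c1 f=200 w=100 g=-10 t=1
 *
 * i.e. attenuate 200 Hz by 10 dB with a 100 Hz wide Chebyshev type 1 band
 * on channels 0 and 1.
 */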
AVFILTER_DEFINE_CLASS(anequalizer);

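/*
 * Render the combined magnitude response of each channel's bands into the
 * RGBA curve frame: for every output column the cascaded sections are
 * evaluated on the unit circle, the magnitude is converted to dB and scaled
 * by 'mgain', and a vertical segment is drawn from the previous column's
 * level to the current one.
 */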
static void draw_curves(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *out)
{
    AudioNEqualizerContext *s = ctx->priv;
    char *colors, *color, *saveptr = NULL;
    int ch, i, n;

    colors = av_strdup(s->colors);
    if (!colors)
        return;

    memset(out->data[0], 0, s->h * out->linesize[0]);

    for (ch = 0; ch < inlink->channels; ch++) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
        int prev_v = -1;
        double f;

        color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
        if (color)
            av_parse_color(fg, color, -1, ctx);

        for (f = 0; f < s->w; f++) {
            double zr, zi, zr2, zi2;
            double Hr, Hi;
            double Hmag = 1;
            double w;
            int v, y, x;

            w = M_PI * (s->fscale ? pow(s->w - 1, f / s->w) : f) / (s->w - 1);
            zr = cos(w);
            zr2 = zr * zr;
            zi = -sin(w);
            zi2 = zi * zi;

            for (n = 0; n < s->nb_filters; n++) {
                if (s->filters[n].channel != ch ||
                    s->filters[n].ignore)
                    continue;

                for (i = 0; i < FILTER_ORDER / 2; i++) {
                    FoSection *S = &s->filters[n].section[i];

                    /* H *= (((((S->b4 * z + S->b3) * z + S->b2) * z + S->b1) * z + S->b0) /
                             ((((S->a4 * z + S->a3) * z + S->a2) * z + S->a1) * z + S->a0)); */

                    Hr = S->b4*(1-8*zr2*zi2) + S->b2*(zr2-zi2) + zr*(S->b1+S->b3*(zr2-3*zi2))+ S->b0;
                    Hi = zi*(S->b3*(3*zr2-zi2) + S->b1 + 2*zr*(2*S->b4*(zr2-zi2) + S->b2));
                    Hmag *= hypot(Hr, Hi);
                    Hr = S->a4*(1-8*zr2*zi2) + S->a2*(zr2-zi2) + zr*(S->a1+S->a3*(zr2-3*zi2))+ S->a0;
                    Hi = zi*(S->a3*(3*zr2-zi2) + S->a1 + 2*zr*(2*S->a4*(zr2-zi2) + S->a2));
                    Hmag /= hypot(Hr, Hi);
                }
            }

            v = av_clip((1. + -20 * log10(Hmag) / s->mag) * s->h / 2, 0, s->h - 1);
            x = lrint(f);
            if (prev_v == -1)
                prev_v = v;
            if (v <= prev_v) {
                for (y = v; y <= prev_v; y++)
                    AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
            } else {
                for (y = prev_v; y <= v; y++)
                    AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
            }

            prev_v = v;
        }
    }

    av_free(colors);
}

static int config_video(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFrame *out;

    outlink->w = s->w;
    outlink->h = s->h;

    av_frame_free(&s->video);
    s->video = out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    outlink->sample_aspect_ratio = (AVRational){1,1};

    draw_curves(ctx, inlink, out);

    return 0;
}

static av_cold int init(AVFilterContext *ctx)
{
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterPad pad, vpad;
    int ret;

    pad = (AVFilterPad){
        .name = "out0",
        .type = AVMEDIA_TYPE_AUDIO,
    };
    ret = ff_insert_outpad(ctx, 0, &pad);
    if (ret < 0)
        return ret;

    if (s->draw_curves) {
        vpad = (AVFilterPad){
            .name = "out1",
            .type = AVMEDIA_TYPE_VIDEO,
            .config_props = config_video,
        };
        ret = ff_insert_outpad(ctx, 1, &vpad);
        if (ret < 0)
            return ret;
    }

    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    if (s->draw_curves) {
        AVFilterLink *videolink = ctx->outputs[1];
        formats = ff_make_format_list(pix_fmts);
        if ((ret = ff_formats_ref(formats, &videolink->incfg.formats)) < 0)
            return ret;
    }

    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0 ||
        (ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
        return ret;

    layouts = ff_all_channel_counts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0 ||
        (ret = ff_channel_layouts_ref(layouts, &outlink->incfg.channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0 ||
        (ret = ff_formats_ref(formats, &outlink->incfg.samplerates)) < 0)
        return ret;

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    AudioNEqualizerContext *s = ctx->priv;

    av_frame_free(&s->video);
    av_freep(&s->filters);
    s->nb_filters = 0;
    s->nb_allocated = 0;
}

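/*
 * The band designs below follow a high-order analog parametric-EQ scheme:
 * an order-N prototype (here N = FILTER_ORDER = 4) with peak gain G,
 * reference gain G0 and bandwidth gain Gb is mapped to a band centred at w0
 * with bandwidth wb and realised as N/2 fourth-order digital sections.
 * Each *_fo_section() helper fills in the coefficients of one such section;
 * c0 = cos(w0), and the degenerate case c0 = +/-1 (band centred at DC or
 * Nyquist) collapses to a second-order section.
 */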
static void butterworth_fo_section(FoSection *S, double beta,
                                   double si, double g, double g0,
                                   double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = 2*c0*(g*g*beta*beta - g0*g0)/D;
        S->b2 = (g*g*beta*beta - 2*g0*g*beta*si + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(beta*beta - 1)/D;
        S->a2 = (beta*beta - 2*beta*si + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g*g0*si*beta)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - g*g*beta*beta)/D;
        S->b3 = -4*c0*(g0*g0 - g*g0*si*beta)/D;
        S->b4 = (g*g*beta*beta - 2*g*g0*si*beta + g0*g0)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(1 + si*beta)/D;
        S->a2 = 2*(1 + 2*c0*c0 - beta*beta)/D;
        S->a3 = -4*c0*(1 - si*beta)/D;
        S->a4 = (beta*beta - 2*si*beta + 1)/D;
    }
}

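/*
 * Design an order-N Butterworth band: gains arrive in dB and are converted
 * to linear, epsilon is derived from the peak/bandwidth/reference gains,
 * and L = N/2 fourth-order sections are produced.  A band with zero gain is
 * left as an identity filter.
 */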
static void butterworth_bp_filter(EqualizatorFilter *f,
                                  int N, double w0, double wb,
                                  double G, double Gb, double G0)
{
    double g, c0, g0, beta;
    double epsilon;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G * G - Gb * Gb) / (Gb * Gb - G0 * G0));
    g  = pow(G,  1.0 / N);
    g0 = pow(G0, 1.0 / N);
    beta = pow(epsilon, -1.0 / N) * tan(wb/2);
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0 * i - 1) / N;
        double si = sin(M_PI * ui / 2.0);
        double Di = beta * beta + 2 * si * beta + 1;

        butterworth_fo_section(&f->section[i - 1], beta, si, g, g0, Di, c0);
    }
}

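/*
 * Chebyshev type 1 variant: equiripple behaviour inside the band; the
 * section coefficients additionally depend on the prototype parameters
 * a and b and on tan(wb/2) (tetta_b).
 */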
static void chebyshev1_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g0, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) + 2*g0*b*si*tetta_b*tetta_b + g0*g0)/D;
        S->b1 = 2*c0*(tetta_b*tetta_b*(b*b+g0*g0*c*c) - g0*g0)/D;
        S->b2 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) - 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b*(a*a+c*c) - 1)/D;
        S->a2 = (tetta_b*tetta_b*(a*a+c*c) - 2*a*si*tetta_b + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b + 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g0*b*si*tetta_b)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - (b*b + g0*g0*c*c)*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(g0*g0 - g0*b*si*tetta_b)/D;
        S->b4 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b - 2*g0*b*si*tetta_b + g0*g0)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(1 + a*si*tetta_b)/D;
        S->a2 = 2*(1 + 2*c0*c0 - (a*a + c*c)*tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(1 - a*si*tetta_b)/D;
        S->a4 = ((a*a + c*c)*tetta_b*tetta_b - 2*a*si*tetta_b + 1)/D;
    }
}

static void chebyshev1_bp_filter(EqualizatorFilter *f,
                                 int N, double w0, double wb,
                                 double G, double Gb, double G0)
{
    double a, b, c0, g0, alfa, beta, tetta_b;
    double epsilon;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
    g0 = pow(G0, 1.0/N);
    alfa = pow(1.0/epsilon + sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
    beta = pow(G/epsilon + Gb * sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
    a = 0.5 * (alfa - 1.0/alfa);
    b = 0.5 * (beta - g0*g0*(1/beta));
    tetta_b = tan(wb/2);
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0*i-1.0)/N;
        double ci = cos(M_PI*ui/2.0);
        double si = sin(M_PI*ui/2.0);
        double Di = (a*a + ci*ci)*tetta_b*tetta_b + 2.0*a*si*tetta_b + 1;

        chebyshev1_fo_section(&f->section[i - 1], a, ci, tetta_b, g0, si, b, Di, c0);
    }
}

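/*
 * Chebyshev type 2 variant: the ripple is pushed outside the band (around
 * the reference gain) instead of inside it.
 */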
static void chebyshev2_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*tetta_b*tetta_b + 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b1 = 2*c0*(g*g*tetta_b*tetta_b - b*b - g*g*c*c)/D;
        S->b2 = (g*g*tetta_b*tetta_b - 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b - a*a - c*c)/D;
        S->a2 = (tetta_b*tetta_b - 2*tetta_b*a*si + a*a + c*c)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*tetta_b*tetta_b + 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;
        S->b1 = -4*c0*(b*b + g*g*c*c + g*b*si*tetta_b)/D;
        S->b2 = 2*((b*b + g*g*c*c)*(1 + 2*c0*c0) - g*g*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(b*b + g*g*c*c - g*b*si*tetta_b)/D;
        S->b4 = (g*g*tetta_b*tetta_b - 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(a*a + c*c + a*si*tetta_b)/D;
        S->a2 = 2*((a*a + c*c)*(1 + 2*c0*c0) - tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(a*a + c*c - a*si*tetta_b)/D;
        S->a4 = (tetta_b*tetta_b - 2*a*si*tetta_b + a*a + c*c)/D;
    }
}

static void chebyshev2_bp_filter(EqualizatorFilter *f,
                                 int N, double w0, double wb,
                                 double G, double Gb, double G0)
{
    double a, b, c0, tetta_b;
    double epsilon, g, eu, ew;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
    g  = pow(G, 1.0 / N);
    eu = pow(epsilon + sqrt(1 + epsilon*epsilon), 1.0/N);
    ew = pow(G0*epsilon + Gb*sqrt(1 + epsilon*epsilon), 1.0/N);
    a = (eu - 1.0/eu)/2.0;
    b = (ew - g*g/ew)/2.0;
    tetta_b = tan(wb/2);
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0 * i - 1.0)/N;
        double ci = cos(M_PI * ui / 2.0);
        double si = sin(M_PI * ui / 2.0);
        double Di = tetta_b*tetta_b + 2*a*si*tetta_b + a*a + ci*ci;

        chebyshev2_fo_section(&f->section[i - 1], a, ci, tetta_b, g, si, b, Di, c0);
    }
}

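/*
 * The bandwidth gain Gb (the gain that defines the band edges) is not
 * user-configurable; it is derived from the requested peak gain with a
 * simple per-type rule of thumb below.
 */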
static double butterworth_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain <= -6)
        bw_gain = gain + 3;
    else if (gain > -6 && gain < 6)
        bw_gain = gain * 0.5;
    else if (gain >= 6)
        bw_gain = gain - 3;

    return bw_gain;
}

static double chebyshev1_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain <= -6)
        bw_gain = gain + 1;
    else if (gain > -6 && gain < 6)
        bw_gain = gain * 0.9;
    else if (gain >= 6)
        bw_gain = gain - 1;

    return bw_gain;
}

static double chebyshev2_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain <= -6)
        bw_gain = -3;
    else if (gain > -6 && gain < 6)
        bw_gain = gain * 0.3;
    else if (gain >= 6)
        bw_gain = 3;

    return bw_gain;
}

static inline double hz_2_rad(double x, double fs)
{
    return 2 * M_PI * x / fs;
}

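/* (Re)compute the two fourth-order sections of one band from its current
 * frequency, width, gain and type; w0 and wb are in radians per sample and
 * the reference gain G0 is fixed at 0 dB. */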
static void equalizer(EqualizatorFilter *f, double sample_rate)
{
    double w0 = hz_2_rad(f->freq, sample_rate);
    double wb = hz_2_rad(f->width, sample_rate);
    double bw_gain;

    switch (f->type) {
    case BUTTERWORTH:
        bw_gain = butterworth_compute_bw_gain_db(f->gain);
        butterworth_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    case CHEBYSHEV1:
        bw_gain = chebyshev1_compute_bw_gain_db(f->gain);
        chebyshev1_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    case CHEBYSHEV2:
        bw_gain = chebyshev2_compute_bw_gain_db(f->gain);
        chebyshev2_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    }
}

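/* Commit the band that was just parsed into filters[nb_filters]: design its
 * coefficients, then grow the zero-initialised array by doubling when it is
 * about to run out of space. */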
static int add_filter(AudioNEqualizerContext *s, AVFilterLink *inlink)
{
    equalizer(&s->filters[s->nb_filters], inlink->sample_rate);
    if (s->nb_filters >= s->nb_allocated - 1) {
        EqualizatorFilter *filters;

        filters = av_calloc(s->nb_allocated, 2 * sizeof(*s->filters));
        if (!filters)
            return AVERROR(ENOMEM);
        memcpy(filters, s->filters, sizeof(*s->filters) * s->nb_allocated);
        av_free(s->filters);
        s->filters = filters;
        s->nb_allocated *= 2;
    }
    s->nb_filters++;

    return 0;
}

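/*
 * Parse the "params" string: one band per '|'-separated entry of the form
 * "c<channel> f=<freq> w=<width> g=<gain>" with an optional " t=<type>".
 * Bands whose frequency or channel is out of range are kept but flagged
 * 'ignore'.
 */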
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioNEqualizerContext *s = ctx->priv;
    char *args = av_strdup(s->args);
    char *saveptr = NULL;
    int ret = 0;

    if (!args)
        return AVERROR(ENOMEM);

    s->nb_allocated = 32 * inlink->channels;
    s->filters = av_calloc(inlink->channels, 32 * sizeof(*s->filters));
    if (!s->filters) {
        s->nb_allocated = 0;
        av_free(args);
        return AVERROR(ENOMEM);
    }

    while (1) {
        char *arg = av_strtok(s->nb_filters == 0 ? args : NULL, "|", &saveptr);

        if (!arg)
            break;

        s->filters[s->nb_filters].type = 0;
        if (sscanf(arg, "c%d f=%lf w=%lf g=%lf t=%d", &s->filters[s->nb_filters].channel,
                                                      &s->filters[s->nb_filters].freq,
                                                      &s->filters[s->nb_filters].width,
                                                      &s->filters[s->nb_filters].gain,
                                                      &s->filters[s->nb_filters].type) != 5 &&
            sscanf(arg, "c%d f=%lf w=%lf g=%lf", &s->filters[s->nb_filters].channel,
                                                 &s->filters[s->nb_filters].freq,
                                                 &s->filters[s->nb_filters].width,
                                                 &s->filters[s->nb_filters].gain) != 4) {
            av_free(args);
            return AVERROR(EINVAL);
        }

        if (s->filters[s->nb_filters].freq < 0 ||
            s->filters[s->nb_filters].freq > inlink->sample_rate / 2.0)
            s->filters[s->nb_filters].ignore = 1;

        if (s->filters[s->nb_filters].channel < 0 ||
            s->filters[s->nb_filters].channel >= inlink->channels)
            s->filters[s->nb_filters].ignore = 1;

        s->filters[s->nb_filters].type = av_clip(s->filters[s->nb_filters].type, 0, NB_TYPES - 1);
        ret = add_filter(s, inlink);
        if (ret < 0)
            break;
    }

    av_free(args);

    return ret;
}

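/*
 * The "change" command re-tunes an existing band at runtime; its argument
 * follows the form matched by the sscanf below,
 * "<band index>|f=<freq>|w=<width>|g=<gain>".  The curve frame is redrawn
 * when curve drawing is enabled.
 */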
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret = AVERROR(ENOSYS);

    if (!strcmp(cmd, "change")) {
        double freq, width, gain;
        int filter;

        if (sscanf(args, "%d|f=%lf|w=%lf|g=%lf", &filter, &freq, &width, &gain) != 4)
            return AVERROR(EINVAL);

        if (filter < 0 || filter >= s->nb_filters)
            return AVERROR(EINVAL);

        if (freq < 0 || freq > inlink->sample_rate / 2.0)
            return AVERROR(EINVAL);

        s->filters[filter].freq  = freq;
        s->filters[filter].width = width;
        s->filters[filter].gain  = gain;
        equalizer(&s->filters[filter], inlink->sample_rate);
        if (s->draw_curves)
            draw_curves(ctx, inlink, s->video);

        ret = 0;
    }

    return ret;
}

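/*
 * Run one sample through a single fourth-order section in direct form I:
 * num[] holds the last four inputs, denum[] the last four outputs.
 * process_sample() below cascades the FILTER_ORDER/2 sections of a band.
 */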
static inline double section_process(FoSection *S, double in)
{
    double out;

    out = S->b0 * in;
    out += S->b1 * S->num[0] - S->denum[0] * S->a1;
    out += S->b2 * S->num[1] - S->denum[1] * S->a2;
    out += S->b3 * S->num[2] - S->denum[2] * S->a3;
    out += S->b4 * S->num[3] - S->denum[3] * S->a4;

    S->num[3] = S->num[2];
    S->num[2] = S->num[1];
    S->num[1] = S->num[0];
    S->num[0] = in;

    S->denum[3] = S->denum[2];
    S->denum[2] = S->denum[1];
    S->denum[1] = S->denum[0];
    S->denum[0] = out;

    return out;
}

static double process_sample(FoSection *s1, double in)
{
    double p0 = in, p1;
    int i;

    for (i = 0; i < FILTER_ORDER / 2; i++) {
        p1 = section_process(&s1[i], p0);
        p0 = p1;
    }

    return p1;
}

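/*
 * Slice-threaded worker: channels are split across jobs, and every active
 * band whose channel falls in this job's range is applied in place to that
 * channel's samples.
 */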
static int filter_channels(AVFilterContext *ctx, void *arg,
                           int jobnr, int nb_jobs)
{
    AudioNEqualizerContext *s = ctx->priv;
    AVFrame *buf = arg;
    const int start = (buf->channels * jobnr) / nb_jobs;
    const int end = (buf->channels * (jobnr+1)) / nb_jobs;

    for (int i = 0; i < s->nb_filters; i++) {
        EqualizatorFilter *f = &s->filters[i];
        double *bptr;

        if (f->gain == 0. || f->ignore)
            continue;
        if (f->channel < start ||
            f->channel >= end)
            continue;

        bptr = (double *)buf->extended_data[f->channel];
        for (int n = 0; n < buf->nb_samples; n++) {
            double sample = bptr[n];

            sample = process_sample(f->section, sample);
            bptr[n] = sample;
        }
    }

    return 0;
}

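/*
 * Audio is filtered in place (the input pad requests a writable frame).
 * When curve drawing is enabled, a clone of the prerendered curve frame is
 * also sent on the video output, timestamped at the end of the current
 * audio frame.
 */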
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    if (!ctx->is_disabled)
        ctx->internal->execute(ctx, filter_channels, buf, NULL, FFMIN(inlink->channels,
                                                                      ff_filter_get_nb_threads(ctx)));

    if (s->draw_curves) {
        AVFrame *clone;
        const int64_t pts = buf->pts +
            av_rescale_q(buf->nb_samples, (AVRational){ 1, inlink->sample_rate },
                         outlink->time_base);
        int ret;

        s->video->pts = pts;
        clone = av_frame_clone(s->video);
        if (!clone)
            return AVERROR(ENOMEM);
        ret = ff_filter_frame(ctx->outputs[1], clone);
        if (ret < 0)
            return ret;
    }

    return ff_filter_frame(outlink, buf);
}

static const AVFilterPad inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_AUDIO,
        .config_props   = config_input,
        .filter_frame   = filter_frame,
        .needs_writable = 1,
    },
    { NULL }
};

AVFilter ff_af_anequalizer = {
    .name            = "anequalizer",
    .description     = NULL_IF_CONFIG_SMALL("Apply high-order audio parametric multi band equalizer."),
    .priv_size       = sizeof(AudioNEqualizerContext),
    .priv_class      = &anequalizer_class,
    .init            = init,
    .uninit          = uninit,
    .query_formats   = query_formats,
    .inputs          = inputs,
    .outputs         = NULL,
    .process_command = process_command,
    .flags           = AVFILTER_FLAG_DYNAMIC_OUTPUTS |
                       AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
                       AVFILTER_FLAG_SLICE_THREADS,
};