You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1513 lines
53KB

  1. /*
  2. * Copyright (c) 2018 Paul B Mahol
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include <float.h>
  21. #include "libavutil/avassert.h"
  22. #include "libavutil/avstring.h"
  23. #include "libavutil/intreadwrite.h"
  24. #include "libavutil/opt.h"
  25. #include "libavutil/xga_font_data.h"
  26. #include "audio.h"
  27. #include "avfilter.h"
  28. #include "internal.h"
/* Per-invocation payload handed to the per-channel worker jobs. */
typedef struct ThreadData {
    AVFrame *in, *out;  /* input frame to read, output frame to fill */
} ThreadData;
/* Indices of two entries in a pole/zero list that form a complex-conjugate
 * pair; -1 means "not found/unset" (see decompose_zp2biquads()). */
typedef struct Pair {
    int a, b;
} Pair;
/* One second-order IIR section (biquad), processed in transposed
 * direct form II by the SERIAL/PARALLEL kernels below. */
typedef struct BiquadContext {
    double a[3];   /* denominator coefficients; a[0] is set to 1 */
    double b[3];   /* numerator coefficients */
    double w1, w2; /* filter state (delay registers), preserved across frames */
} BiquadContext;
/* Per-channel filter description and run-time state. */
typedef struct IIRChannel {
    int nb_ab[2];           /* entry counts for ab[0] / ab[1] */
    double *ab[2];          /* [0]: feedback (a) side, [1]: feed-forward (b) side;
                             * holds coefficients, or (re, im) pairs before
                             * zero/pole conversion */
    double g;               /* per-channel gain */
    double *cache[2];       /* [0]: output history, [1]: input history for the
                             * direct-form kernel */
    double fir;             /* direct feed-through term of the parallel form */
    BiquadContext *biquads; /* second-order sections (serial/parallel modes) */
    int clippings;          /* clipped-sample counter (integer formats only) */
} IIRChannel;
/* Filter private context. */
typedef struct AudioIIRContext {
    const AVClass *class;
    char *a_str, *b_str, *g_str;  /* raw option strings: a/b coefficient lists
                                   * and per-channel gains */
    double dry_gain, wet_gain;    /* input / output gain */
    double mix;                   /* wet/dry mix factor (1 = fully filtered) */
    int normalize;                /* normalize so DC gains of a and b match */
    int format;                   /* coefficient format; indexes format[] below
                                   * (0 = plain transfer-function coefficients) */
    int process;                  /* realization selector — presumably
                                   * direct/serial/parallel; set by options */
    int precision;                /* precision option; drives sample_format —
                                   * NOTE(review): mapping set outside this chunk */
    int response;                 /* when set, a second output plots the response */
    int w, h;                     /* response video width/height */
    int ir_channel;               /* channel whose response is plotted */
    AVRational rate;              /* response video frame rate */

    AVFrame *video;               /* cached response frame */
    IIRChannel *iir;              /* per-channel state, s->channels entries */
    int channels;
    enum AVSampleFormat sample_format;

    /* per-channel processing kernel selected for the sample format/mode */
    int (*iir_channel)(AVFilterContext *ctx, void *arg, int ch, int nb_jobs);
} AudioIIRContext;
/*
 * Negotiate formats on all links: RGB0 for the optional response video
 * output, any channel count/layout and sample rate for audio, and the
 * single planar sample format selected via s->sample_format.
 */
static int query_formats(AVFilterContext *ctx)
{
    AudioIIRContext *s = ctx->priv;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGB0,
        AV_PIX_FMT_NONE
    };
    int ret;

    /* Second output (the response plot) exists only when requested. */
    if (s->response) {
        AVFilterLink *videolink = ctx->outputs[1];

        formats = ff_make_format_list(pix_fmts);
        if ((ret = ff_formats_ref(formats, &videolink->incfg.formats)) < 0)
            return ret;
    }

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    /* Replace the default DBLP with the format chosen by the options. */
    sample_fmts[0] = s->sample_format;
    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/*
 * Direct-form IIR kernel, instantiated once per sample format.
 * Keeps nb_b input samples in ic[] and nb_a output samples in oc[] and
 * applies the difference equation
 *     y[n] = sum_x b[x]*in[n-x] - sum_{x>=1} a[x]*y[n-x]
 * followed by wet gain, channel gain and dry/wet mixing.
 * need_clipping enables saturation plus clip counting for integer formats.
 */
#define IIR_CH(name, type, min, max, need_clipping)                     \
static int iir_ch_## name(AVFilterContext *ctx, void *arg, int ch, int nb_jobs) \
{                                                                       \
    AudioIIRContext *s = ctx->priv;                                     \
    const double ig = s->dry_gain;                                      \
    const double og = s->wet_gain;                                      \
    const double mix = s->mix;                                          \
    ThreadData *td = arg;                                               \
    AVFrame *in = td->in, *out = td->out;                               \
    const type *src = (const type *)in->extended_data[ch];              \
    double *oc = (double *)s->iir[ch].cache[0];                         \
    double *ic = (double *)s->iir[ch].cache[1];                         \
    const int nb_a = s->iir[ch].nb_ab[0];                               \
    const int nb_b = s->iir[ch].nb_ab[1];                               \
    const double *a = s->iir[ch].ab[0];                                 \
    const double *b = s->iir[ch].ab[1];                                 \
    const double g = s->iir[ch].g;                                      \
    int *clippings = &s->iir[ch].clippings;                             \
    type *dst = (type *)out->extended_data[ch];                         \
    int n;                                                              \
                                                                        \
    for (n = 0; n < in->nb_samples; n++) {                              \
        double sample = 0.;                                             \
        int x;                                                          \
                                                                        \
        memmove(&ic[1], &ic[0], (nb_b - 1) * sizeof(*ic));              \
        memmove(&oc[1], &oc[0], (nb_a - 1) * sizeof(*oc));              \
        ic[0] = src[n] * ig;                                            \
        for (x = 0; x < nb_b; x++)                                      \
            sample += b[x] * ic[x];                                     \
                                                                        \
        for (x = 1; x < nb_a; x++)                                      \
            sample -= a[x] * oc[x];                                     \
                                                                        \
        oc[0] = sample;                                                 \
        sample *= og * g;                                               \
        sample = sample * mix + ic[0] * (1. - mix);                     \
        if (need_clipping && sample < min) {                            \
            (*clippings)++;                                             \
            dst[n] = min;                                               \
        } else if (need_clipping && sample > max) {                     \
            (*clippings)++;                                             \
            dst[n] = max;                                               \
        } else {                                                        \
            dst[n] = sample;                                            \
        }                                                               \
    }                                                                   \
                                                                        \
    return 0;                                                           \
}

IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
IIR_CH(fltp, float, -1., 1., 0)
IIR_CH(dblp, double, -1., 1., 0)
/*
 * Cascaded (serial) biquad kernel, instantiated per sample format.
 * Sections are processed highest-index first; every section after the
 * first reads its input from dst (the previous section's output),
 * giving an in-place cascade. Each biquad runs in transposed direct
 * form II with its state kept in w1/w2 across frames.
 */
#define SERIAL_IIR_CH(name, type, min, max, need_clipping)              \
static int iir_ch_serial_## name(AVFilterContext *ctx, void *arg,       \
                                 int ch, int nb_jobs)                   \
{                                                                       \
    AudioIIRContext *s = ctx->priv;                                     \
    const double ig = s->dry_gain;                                      \
    const double og = s->wet_gain;                                      \
    const double mix = s->mix;                                          \
    const double imix = 1. - mix;                                       \
    ThreadData *td = arg;                                               \
    AVFrame *in = td->in, *out = td->out;                               \
    const type *src = (const type *)in->extended_data[ch];              \
    type *dst = (type *)out->extended_data[ch];                         \
    IIRChannel *iir = &s->iir[ch];                                      \
    const double g = iir->g;                                            \
    int *clippings = &iir->clippings;                                   \
    int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;     \
    int n, i;                                                           \
                                                                        \
    for (i = nb_biquads - 1; i >= 0; i--) {                             \
        const double a1 = -iir->biquads[i].a[1];                        \
        const double a2 = -iir->biquads[i].a[2];                        \
        const double b0 = iir->biquads[i].b[0];                         \
        const double b1 = iir->biquads[i].b[1];                         \
        const double b2 = iir->biquads[i].b[2];                         \
        double w1 = iir->biquads[i].w1;                                 \
        double w2 = iir->biquads[i].w2;                                 \
                                                                        \
        for (n = 0; n < in->nb_samples; n++) {                          \
            double i0 = ig * (i ? dst[n] : src[n]);                     \
            double o0 = i0 * b0 + w1;                                   \
                                                                        \
            w1 = b1 * i0 + w2 + a1 * o0;                                \
            w2 = b2 * i0 + a2 * o0;                                     \
            o0 *= og * g;                                               \
                                                                        \
            o0 = o0 * mix + imix * i0;                                  \
            if (need_clipping && o0 < min) {                            \
                (*clippings)++;                                         \
                dst[n] = min;                                           \
            } else if (need_clipping && o0 > max) {                     \
                (*clippings)++;                                         \
                dst[n] = max;                                           \
            } else {                                                    \
                dst[n] = o0;                                            \
            }                                                           \
        }                                                               \
        iir->biquads[i].w1 = w1;                                        \
        iir->biquads[i].w2 = w2;                                        \
    }                                                                   \
                                                                        \
    return 0;                                                           \
}

SERIAL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
SERIAL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
SERIAL_IIR_CH(fltp, float, -1., 1., 0)
SERIAL_IIR_CH(dblp, double, -1., 1., 0)
/*
 * Parallel biquad kernel, instantiated per sample format.
 * Each section filters the same input and accumulates into dst; the
 * direct FIR term (iir->fir, computed by convert_serial2parallel())
 * and the dry/wet mix are applied in a final pass.
 * NOTE(review): dst is used as the accumulator, so it is expected to
 * start zeroed — presumably guaranteed by the caller; confirm.
 */
#define PARALLEL_IIR_CH(name, type, min, max, need_clipping)            \
static int iir_ch_parallel_## name(AVFilterContext *ctx, void *arg,     \
                                   int ch, int nb_jobs)                 \
{                                                                       \
    AudioIIRContext *s = ctx->priv;                                     \
    const double ig = s->dry_gain;                                      \
    const double og = s->wet_gain;                                      \
    const double mix = s->mix;                                          \
    const double imix = 1. - mix;                                       \
    ThreadData *td = arg;                                               \
    AVFrame *in = td->in, *out = td->out;                               \
    const type *src = (const type *)in->extended_data[ch];              \
    type *dst = (type *)out->extended_data[ch];                         \
    IIRChannel *iir = &s->iir[ch];                                      \
    const double g = iir->g;                                            \
    const double fir = iir->fir;                                        \
    int *clippings = &iir->clippings;                                   \
    int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;     \
    int n, i;                                                           \
                                                                        \
    for (i = 0; i < nb_biquads; i++) {                                  \
        const double a1 = -iir->biquads[i].a[1];                        \
        const double a2 = -iir->biquads[i].a[2];                        \
        const double b1 = iir->biquads[i].b[1];                         \
        const double b2 = iir->biquads[i].b[2];                         \
        double w1 = iir->biquads[i].w1;                                 \
        double w2 = iir->biquads[i].w2;                                 \
                                                                        \
        for (n = 0; n < in->nb_samples; n++) {                          \
            double i0 = ig * src[n];                                    \
            double o0 = w1;                                             \
                                                                        \
            w1 = b1 * i0 + w2 + a1 * o0;                                \
            w2 = b2 * i0 + a2 * o0;                                     \
            o0 *= og * g;                                               \
            o0 += dst[n];                                               \
                                                                        \
            if (need_clipping && o0 < min) {                            \
                (*clippings)++;                                         \
                dst[n] = min;                                           \
            } else if (need_clipping && o0 > max) {                     \
                (*clippings)++;                                         \
                dst[n] = max;                                           \
            } else {                                                    \
                dst[n] = o0;                                            \
            }                                                           \
        }                                                               \
        iir->biquads[i].w1 = w1;                                        \
        iir->biquads[i].w2 = w2;                                        \
    }                                                                   \
                                                                        \
    for (n = 0; n < in->nb_samples; n++) {                              \
        dst[n] += fir * src[n];                                         \
        dst[n] = dst[n] * mix + imix * src[n];                          \
    }                                                                   \
                                                                        \
    return 0;                                                           \
}

PARALLEL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
PARALLEL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
PARALLEL_IIR_CH(fltp, float, -1., 1., 0)
PARALLEL_IIR_CH(dblp, double, -1., 1., 0)
  279. static void count_coefficients(char *item_str, int *nb_items)
  280. {
  281. char *p;
  282. if (!item_str)
  283. return;
  284. *nb_items = 1;
  285. for (p = item_str; *p && *p != '|'; p++) {
  286. if (*p == ' ')
  287. (*nb_items)++;
  288. }
  289. }
  290. static int read_gains(AVFilterContext *ctx, char *item_str, int nb_items)
  291. {
  292. AudioIIRContext *s = ctx->priv;
  293. char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
  294. int i;
  295. p = old_str = av_strdup(item_str);
  296. if (!p)
  297. return AVERROR(ENOMEM);
  298. for (i = 0; i < nb_items; i++) {
  299. if (!(arg = av_strtok(p, "|", &saveptr)))
  300. arg = prev_arg;
  301. if (!arg) {
  302. av_freep(&old_str);
  303. return AVERROR(EINVAL);
  304. }
  305. p = NULL;
  306. if (av_sscanf(arg, "%lf", &s->iir[i].g) != 1) {
  307. av_log(ctx, AV_LOG_ERROR, "Invalid gains supplied: %s\n", arg);
  308. av_freep(&old_str);
  309. return AVERROR(EINVAL);
  310. }
  311. prev_arg = arg;
  312. }
  313. av_freep(&old_str);
  314. return 0;
  315. }
  316. static int read_tf_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
  317. {
  318. char *p, *arg, *old_str, *saveptr = NULL;
  319. int i;
  320. p = old_str = av_strdup(item_str);
  321. if (!p)
  322. return AVERROR(ENOMEM);
  323. for (i = 0; i < nb_items; i++) {
  324. if (!(arg = av_strtok(p, " ", &saveptr)))
  325. break;
  326. p = NULL;
  327. if (av_sscanf(arg, "%lf", &dst[i]) != 1) {
  328. av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
  329. av_freep(&old_str);
  330. return AVERROR(EINVAL);
  331. }
  332. }
  333. av_freep(&old_str);
  334. return 0;
  335. }
  336. static int read_zp_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst, const char *format)
  337. {
  338. char *p, *arg, *old_str, *saveptr = NULL;
  339. int i;
  340. p = old_str = av_strdup(item_str);
  341. if (!p)
  342. return AVERROR(ENOMEM);
  343. for (i = 0; i < nb_items; i++) {
  344. if (!(arg = av_strtok(p, " ", &saveptr)))
  345. break;
  346. p = NULL;
  347. if (av_sscanf(arg, format, &dst[i*2], &dst[i*2+1]) != 2) {
  348. av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
  349. av_freep(&old_str);
  350. return AVERROR(EINVAL);
  351. }
  352. }
  353. av_freep(&old_str);
  354. return 0;
  355. }
/* av_sscanf() templates for each coefficient format, indexed by s->format
 * (0 is plain real coefficients; the others parse complex pairs). */
static const char *format[] = { "%lf", "%lf %lfi", "%lf %lfr", "%lf %lfd", "%lf %lfi" };
/*
 * Parse a '|'-separated per-channel coefficient list into s->iir[i].ab[ab]
 * for all channels; ab selects the feedback (0) or feed-forward (1) side.
 * When fewer entries than channels are supplied, the last entry is reused.
 * For s->format > 0 the entries are complex pairs parsed with the matching
 * format[] template; otherwise plain transfer-function coefficients.
 */
static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int ab)
{
    AudioIIRContext *s = ctx->priv;
    char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
    int i, ret;

    p = old_str = av_strdup(item_str);
    if (!p)
        return AVERROR(ENOMEM);

    for (i = 0; i < channels; i++) {
        IIRChannel *iir = &s->iir[i];

        if (!(arg = av_strtok(p, "|", &saveptr)))
            arg = prev_arg;  /* reuse previous channel's coefficients */

        if (!arg) {
            av_freep(&old_str);
            return AVERROR(EINVAL);
        }

        count_coefficients(arg, &iir->nb_ab[ab]);

        p = NULL;
        /* one extra cache slot for history; complex formats need
         * 2 doubles per coefficient */
        iir->cache[ab] = av_calloc(iir->nb_ab[ab] + 1, sizeof(double));
        iir->ab[ab] = av_calloc(iir->nb_ab[ab] * (!!s->format + 1), sizeof(double));
        if (!iir->ab[ab] || !iir->cache[ab]) {
            av_freep(&old_str);
            return AVERROR(ENOMEM);
        }

        if (s->format > 0) {
            ret = read_zp_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab], format[s->format]);
        } else {
            ret = read_tf_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab]);
        }
        if (ret < 0) {
            av_freep(&old_str);
            return ret;
        }

        prev_arg = arg;
    }

    av_freep(&old_str);
    return 0;
}
  395. static void cmul(double re, double im, double re2, double im2, double *RE, double *IM)
  396. {
  397. *RE = re * re2 - im * im2;
  398. *IM = re * im2 + re2 * im;
  399. }
/*
 * Multiply out n complex roots pz (stored as (re, im) pairs) into the
 * complex coefficients of the monic polynomial prod (z - pz[i]); coefs
 * holds n+1 (re, im) pairs and must be zero-initialized by the caller
 * (all call sites use av_calloc or "= { 0 }").
 * Returns AVERROR(EINVAL) when any resulting coefficient has a non-zero
 * imaginary part, i.e. the roots do not come in conjugate pairs.
 */
static int expand(AVFilterContext *ctx, double *pz, int n, double *coefs)
{
    coefs[2 * n] = 1.0;

    /* incrementally multiply in (z - root_i) */
    for (int i = 1; i <= n; i++) {
        for (int j = n - i; j < n; j++) {
            double re, im;

            cmul(coefs[2 * (j + 1)], coefs[2 * (j + 1) + 1],
                 pz[2 * (i - 1)], pz[2 * (i - 1) + 1], &re, &im);

            coefs[2 * j]     -= re;
            coefs[2 * j + 1] -= im;
        }
    }

    /* a valid real filter must yield (near-)real coefficients */
    for (int i = 0; i < n + 1; i++) {
        if (fabs(coefs[2 * i + 1]) > FLT_EPSILON) {
            av_log(ctx, AV_LOG_ERROR, "coefs: %f of z^%d is not real; poles/zeros are not complex conjugates.\n",
                   coefs[2 * i + 1], i);
            return AVERROR(EINVAL);
        }
    }

    return 0;
}
  421. static void normalize_coeffs(AVFilterContext *ctx, int ch)
  422. {
  423. AudioIIRContext *s = ctx->priv;
  424. IIRChannel *iir = &s->iir[ch];
  425. double sum_den = 0.;
  426. if (!s->normalize)
  427. return;
  428. for (int i = 0; i < iir->nb_ab[1]; i++) {
  429. sum_den += iir->ab[1][i];
  430. }
  431. if (sum_den > 1e-6) {
  432. double factor, sum_num = 0.;
  433. for (int i = 0; i < iir->nb_ab[0]; i++) {
  434. sum_num += iir->ab[0][i];
  435. }
  436. factor = sum_num / sum_den;
  437. for (int i = 0; i < iir->nb_ab[1]; i++) {
  438. iir->ab[1][i] *= factor;
  439. }
  440. }
  441. }
/*
 * Convert each channel's zero/pole lists (complex pairs in ab[0]/ab[1])
 * into real transfer-function coefficients in place, by expanding the
 * root products and keeping the real parts (highest order first).
 * nb_ab[] grows by one (order n has n+1 coefficients).
 * Returns 0 or a negative AVERROR code.
 */
static int convert_zp2tf(AVFilterContext *ctx, int channels)
{
    AudioIIRContext *s = ctx->priv;
    int ch, i, j, ret = 0;

    for (ch = 0; ch < channels; ch++) {
        IIRChannel *iir = &s->iir[ch];
        double *topc, *botc;

        /* scratch for the complex expanded polynomials */
        topc = av_calloc((iir->nb_ab[1] + 1) * 2, sizeof(*topc));
        botc = av_calloc((iir->nb_ab[0] + 1) * 2, sizeof(*botc));
        if (!topc || !botc) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ret = expand(ctx, iir->ab[0], iir->nb_ab[0], botc);
        if (ret < 0) {
            goto fail;
        }

        ret = expand(ctx, iir->ab[1], iir->nb_ab[1], topc);
        if (ret < 0) {
            goto fail;
        }

        /* copy back the real parts, highest-order coefficient first */
        for (j = 0, i = iir->nb_ab[1]; i >= 0; j++, i--) {
            iir->ab[1][j] = topc[2 * i];
        }
        iir->nb_ab[1]++;

        for (j = 0, i = iir->nb_ab[0]; i >= 0; j++, i--) {
            iir->ab[0][j] = botc[2 * i];
        }
        iir->nb_ab[0]++;

        normalize_coeffs(ctx, ch);

fail:
        av_free(topc);
        av_free(botc);
        if (ret < 0)
            break;
    }

    return ret;
}
/*
 * Split each channel's zero/pole description into a cascade of biquads.
 * Sections are built by repeatedly pairing the remaining pole of largest
 * magnitude (plus its conjugate) with the nearest remaining zero (plus
 * its conjugate); consumed roots are marked with NaN. The overall channel
 * gain iir->g is folded into the first section's numerator.
 * NOTE(review): on the EINVAL paths iir->biquads stays allocated —
 * presumably freed by the filter's uninit; confirm.
 */
static int decompose_zp2biquads(AVFilterContext *ctx, int channels)
{
    AudioIIRContext *s = ctx->priv;
    int ch, ret;

    for (ch = 0; ch < channels; ch++) {
        IIRChannel *iir = &s->iir[ch];
        int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
        int current_biquad = 0;

        iir->biquads = av_calloc(nb_biquads, sizeof(BiquadContext));
        if (!iir->biquads)
            return AVERROR(ENOMEM);

        while (nb_biquads--) {
            Pair outmost_pole = { -1, -1 };
            Pair nearest_zero = { -1, -1 };
            double zeros[4] = { 0 };
            double poles[4] = { 0 };
            double b[6] = { 0 };
            double a[6] = { 0 };
            double min_distance = DBL_MAX;
            double max_mag = 0;
            double factor;
            int i;

            /* pick the unconsumed pole with the largest magnitude */
            for (i = 0; i < iir->nb_ab[0]; i++) {
                double mag;

                if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
                    continue;
                mag = hypot(iir->ab[0][2 * i], iir->ab[0][2 * i + 1]);

                if (mag > max_mag) {
                    max_mag = mag;
                    outmost_pole.a = i;
                }
            }

            /* locate its complex conjugate (may be the same entry) */
            for (i = 0; i < iir->nb_ab[0]; i++) {
                if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
                    continue;

                if (iir->ab[0][2 * i     ] ==  iir->ab[0][2 * outmost_pole.a    ] &&
                    iir->ab[0][2 * i + 1] == -iir->ab[0][2 * outmost_pole.a + 1]) {
                    outmost_pole.b = i;
                    break;
                }
            }

            av_log(ctx, AV_LOG_VERBOSE, "outmost_pole is %d.%d\n", outmost_pole.a, outmost_pole.b);

            if (outmost_pole.a < 0 || outmost_pole.b < 0)
                return AVERROR(EINVAL);

            /* pick the unconsumed zero closest to the selected pole */
            for (i = 0; i < iir->nb_ab[1]; i++) {
                double distance;

                if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
                    continue;
                distance = hypot(iir->ab[0][2 * outmost_pole.a    ] - iir->ab[1][2 * i    ],
                                 iir->ab[0][2 * outmost_pole.a + 1] - iir->ab[1][2 * i + 1]);

                if (distance < min_distance) {
                    min_distance = distance;
                    nearest_zero.a = i;
                }
            }

            /* and that zero's conjugate */
            for (i = 0; i < iir->nb_ab[1]; i++) {
                if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
                    continue;

                if (iir->ab[1][2 * i     ] ==  iir->ab[1][2 * nearest_zero.a    ] &&
                    iir->ab[1][2 * i + 1] == -iir->ab[1][2 * nearest_zero.a + 1]) {
                    nearest_zero.b = i;
                    break;
                }
            }

            av_log(ctx, AV_LOG_VERBOSE, "nearest_zero is %d.%d\n", nearest_zero.a, nearest_zero.b);

            if (nearest_zero.a < 0 || nearest_zero.b < 0)
                return AVERROR(EINVAL);

            poles[0] = iir->ab[0][2 * outmost_pole.a    ];
            poles[1] = iir->ab[0][2 * outmost_pole.a + 1];

            zeros[0] = iir->ab[1][2 * nearest_zero.a    ];
            zeros[1] = iir->ab[1][2 * nearest_zero.a + 1];

            /* a real root is its own conjugate: pair it with z = 0 */
            if (nearest_zero.a == nearest_zero.b && outmost_pole.a == outmost_pole.b) {
                zeros[2] = 0;
                zeros[3] = 0;

                poles[2] = 0;
                poles[3] = 0;
            } else {
                poles[2] = iir->ab[0][2 * outmost_pole.b    ];
                poles[3] = iir->ab[0][2 * outmost_pole.b + 1];

                zeros[2] = iir->ab[1][2 * nearest_zero.b    ];
                zeros[3] = iir->ab[1][2 * nearest_zero.b + 1];
            }

            ret = expand(ctx, zeros, 2, b);
            if (ret < 0)
                return ret;

            ret = expand(ctx, poles, 2, a);
            if (ret < 0)
                return ret;

            /* mark the consumed roots */
            iir->ab[0][2 * outmost_pole.a] = iir->ab[0][2 * outmost_pole.a + 1] = NAN;
            iir->ab[0][2 * outmost_pole.b] = iir->ab[0][2 * outmost_pole.b + 1] = NAN;
            iir->ab[1][2 * nearest_zero.a] = iir->ab[1][2 * nearest_zero.a + 1] = NAN;
            iir->ab[1][2 * nearest_zero.b] = iir->ab[1][2 * nearest_zero.b + 1] = NAN;

            /* normalize on the leading denominator coefficient a[4] */
            iir->biquads[current_biquad].a[0] = 1.;
            iir->biquads[current_biquad].a[1] = a[2] / a[4];
            iir->biquads[current_biquad].a[2] = a[0] / a[4];
            iir->biquads[current_biquad].b[0] = b[4] / a[4];
            iir->biquads[current_biquad].b[1] = b[2] / a[4];
            iir->biquads[current_biquad].b[2] = b[0] / a[4];

            if (s->normalize &&
                fabs(iir->biquads[current_biquad].b[0] +
                     iir->biquads[current_biquad].b[1] +
                     iir->biquads[current_biquad].b[2]) > 1e-6) {
                factor = (iir->biquads[current_biquad].a[0] +
                          iir->biquads[current_biquad].a[1] +
                          iir->biquads[current_biquad].a[2]) /
                         (iir->biquads[current_biquad].b[0] +
                          iir->biquads[current_biquad].b[1] +
                          iir->biquads[current_biquad].b[2]);

                av_log(ctx, AV_LOG_VERBOSE, "factor=%f\n", factor);

                iir->biquads[current_biquad].b[0] *= factor;
                iir->biquads[current_biquad].b[1] *= factor;
                iir->biquads[current_biquad].b[2] *= factor;
            }

            /* fold the channel gain into the first section only */
            iir->biquads[current_biquad].b[0] *= (current_biquad ? 1.0 : iir->g);
            iir->biquads[current_biquad].b[1] *= (current_biquad ? 1.0 : iir->g);
            iir->biquads[current_biquad].b[2] *= (current_biquad ? 1.0 : iir->g);

            av_log(ctx, AV_LOG_VERBOSE, "a=%f %f %f:b=%f %f %f\n",
                   iir->biquads[current_biquad].a[0],
                   iir->biquads[current_biquad].a[1],
                   iir->biquads[current_biquad].a[2],
                   iir->biquads[current_biquad].b[0],
                   iir->biquads[current_biquad].b[1],
                   iir->biquads[current_biquad].b[2]);

            current_biquad++;
        }
    }

    return 0;
}
  608. static void biquad_process(double *x, double *y, int length,
  609. double b0, double b1, double b2,
  610. double a1, double a2)
  611. {
  612. double w1 = 0., w2 = 0.;
  613. a1 = -a1;
  614. a2 = -a2;
  615. for (int n = 0; n < length; n++) {
  616. double out, in = x[n];
  617. y[n] = out = in * b0 + w1;
  618. w1 = b1 * in + w2 + a1 * out;
  619. w2 = b2 * in + a2 * out;
  620. }
  621. }
  622. static void solve(double *matrix, double *vector, int n, double *y, double *x, double *lu)
  623. {
  624. double sum = 0.;
  625. for (int i = 0; i < n; i++) {
  626. for (int j = i; j < n; j++) {
  627. sum = 0.;
  628. for (int k = 0; k < i; k++)
  629. sum += lu[i * n + k] * lu[k * n + j];
  630. lu[i * n + j] = matrix[j * n + i] - sum;
  631. }
  632. for (int j = i + 1; j < n; j++) {
  633. sum = 0.;
  634. for (int k = 0; k < i; k++)
  635. sum += lu[j * n + k] * lu[k * n + i];
  636. lu[j * n + i] = (1. / lu[i * n + i]) * (matrix[i * n + j] - sum);
  637. }
  638. }
  639. for (int i = 0; i < n; i++) {
  640. sum = 0.;
  641. for (int k = 0; k < i; k++)
  642. sum += lu[i * n + k] * y[k];
  643. y[i] = vector[i] - sum;
  644. }
  645. for (int i = n - 1; i >= 0; i--) {
  646. sum = 0.;
  647. for (int k = i + 1; k < n; k++)
  648. sum += lu[i * n + k] * x[k];
  649. x[i] = (1 / lu[i * n + i]) * (y[i] - sum);
  650. }
  651. }
  652. static int convert_serial2parallel(AVFilterContext *ctx, int channels)
  653. {
  654. AudioIIRContext *s = ctx->priv;
  655. int ret = 0;
  656. for (int ch = 0; ch < channels; ch++) {
  657. IIRChannel *iir = &s->iir[ch];
  658. int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
  659. int length = nb_biquads * 2 + 1;
  660. double *impulse = av_calloc(length, sizeof(*impulse));
  661. double *y = av_calloc(length, sizeof(*y));
  662. double *resp = av_calloc(length, sizeof(*resp));
  663. double *M = av_calloc((length - 1) * 2 * nb_biquads, sizeof(*M));
  664. double *W = av_calloc((length - 1) * 2 * nb_biquads, sizeof(*W));
  665. if (!impulse || !y || !resp || !M) {
  666. av_free(impulse);
  667. av_free(y);
  668. av_free(resp);
  669. av_free(M);
  670. av_free(W);
  671. return AVERROR(ENOMEM);
  672. }
  673. impulse[0] = 1.;
  674. for (int n = 0; n < nb_biquads; n++) {
  675. BiquadContext *biquad = &iir->biquads[n];
  676. biquad_process(n ? y : impulse, y, length,
  677. biquad->b[0], biquad->b[1], biquad->b[2],
  678. biquad->a[1], biquad->a[2]);
  679. }
  680. for (int n = 0; n < nb_biquads; n++) {
  681. BiquadContext *biquad = &iir->biquads[n];
  682. biquad_process(impulse, resp, length - 1,
  683. 1., 0., 0., biquad->a[1], biquad->a[2]);
  684. memcpy(M + n * 2 * (length - 1), resp, sizeof(*resp) * (length - 1));
  685. memcpy(M + n * 2 * (length - 1) + length, resp, sizeof(*resp) * (length - 2));
  686. memset(resp, 0, length * sizeof(*resp));
  687. }
  688. solve(M, &y[1], length - 1, &impulse[1], resp, W);
  689. iir->fir = y[0];
  690. for (int n = 0; n < nb_biquads; n++) {
  691. BiquadContext *biquad = &iir->biquads[n];
  692. biquad->b[0] = 0.;
  693. biquad->b[1] = resp[n * 2 + 0];
  694. biquad->b[2] = resp[n * 2 + 1];
  695. }
  696. av_free(impulse);
  697. av_free(y);
  698. av_free(resp);
  699. av_free(M);
  700. av_free(W);
  701. if (ret < 0)
  702. return ret;
  703. }
  704. return 0;
  705. }
  706. static void convert_pr2zp(AVFilterContext *ctx, int channels)
  707. {
  708. AudioIIRContext *s = ctx->priv;
  709. int ch;
  710. for (ch = 0; ch < channels; ch++) {
  711. IIRChannel *iir = &s->iir[ch];
  712. int n;
  713. for (n = 0; n < iir->nb_ab[0]; n++) {
  714. double r = iir->ab[0][2*n];
  715. double angle = iir->ab[0][2*n+1];
  716. iir->ab[0][2*n] = r * cos(angle);
  717. iir->ab[0][2*n+1] = r * sin(angle);
  718. }
  719. for (n = 0; n < iir->nb_ab[1]; n++) {
  720. double r = iir->ab[1][2*n];
  721. double angle = iir->ab[1][2*n+1];
  722. iir->ab[1][2*n] = r * cos(angle);
  723. iir->ab[1][2*n+1] = r * sin(angle);
  724. }
  725. }
  726. }
  727. static void convert_sp2zp(AVFilterContext *ctx, int channels)
  728. {
  729. AudioIIRContext *s = ctx->priv;
  730. int ch;
  731. for (ch = 0; ch < channels; ch++) {
  732. IIRChannel *iir = &s->iir[ch];
  733. int n;
  734. for (n = 0; n < iir->nb_ab[0]; n++) {
  735. double sr = iir->ab[0][2*n];
  736. double si = iir->ab[0][2*n+1];
  737. iir->ab[0][2*n] = exp(sr) * cos(si);
  738. iir->ab[0][2*n+1] = exp(sr) * sin(si);
  739. }
  740. for (n = 0; n < iir->nb_ab[1]; n++) {
  741. double sr = iir->ab[1][2*n];
  742. double si = iir->ab[1][2*n+1];
  743. iir->ab[1][2*n] = exp(sr) * cos(si);
  744. iir->ab[1][2*n+1] = exp(sr) * sin(si);
  745. }
  746. }
  747. }
  748. static double fact(double i)
  749. {
  750. if (i <= 0.)
  751. return 1.;
  752. return i * fact(i - 1.);
  753. }
  754. static double coef_sf2zf(double *a, int N, int n)
  755. {
  756. double z = 0.;
  757. for (int i = 0; i <= N; i++) {
  758. double acc = 0.;
  759. for (int k = FFMAX(n - N + i, 0); k <= FFMIN(i, n); k++) {
  760. acc += ((fact(i) * fact(N - i)) /
  761. (fact(k) * fact(i - k) * fact(n - k) * fact(N - i - n + k))) *
  762. ((k & 1) ? -1. : 1.);;
  763. }
  764. z += a[i] * pow(2., i) * acc;
  765. }
  766. return z;
  767. }
/*
 * Convert analog (s-domain) transfer-function coefficients to digital
 * (z-domain) ones per channel, via coef_sf2zf() on a temporary copy.
 * On allocation failure the channel is silently left unconverted
 * (existing best-effort behavior, preserved).
 */
static void convert_sf2tf(AVFilterContext *ctx, int channels)
{
    AudioIIRContext *s = ctx->priv;
    int ch;

    for (ch = 0; ch < channels; ch++) {
        IIRChannel *iir = &s->iir[ch];
        double *temp0 = av_calloc(iir->nb_ab[0], sizeof(*temp0));
        double *temp1 = av_calloc(iir->nb_ab[1], sizeof(*temp1));

        if (!temp0 || !temp1)
            goto next;

        /* work from a copy: coef_sf2zf() reads all original coefficients
         * while ab[] is overwritten in place */
        memcpy(temp0, iir->ab[0], iir->nb_ab[0] * sizeof(*temp0));
        memcpy(temp1, iir->ab[1], iir->nb_ab[1] * sizeof(*temp1));

        for (int n = 0; n < iir->nb_ab[0]; n++)
            iir->ab[0][n] = coef_sf2zf(temp0, iir->nb_ab[0] - 1, n);

        for (int n = 0; n < iir->nb_ab[1]; n++)
            iir->ab[1][n] = coef_sf2zf(temp1, iir->nb_ab[1] - 1, n);

next:
        av_free(temp0);
        av_free(temp1);
    }
}
  789. static void convert_pd2zp(AVFilterContext *ctx, int channels)
  790. {
  791. AudioIIRContext *s = ctx->priv;
  792. int ch;
  793. for (ch = 0; ch < channels; ch++) {
  794. IIRChannel *iir = &s->iir[ch];
  795. int n;
  796. for (n = 0; n < iir->nb_ab[0]; n++) {
  797. double r = iir->ab[0][2*n];
  798. double angle = M_PI*iir->ab[0][2*n+1]/180.;
  799. iir->ab[0][2*n] = r * cos(angle);
  800. iir->ab[0][2*n+1] = r * sin(angle);
  801. }
  802. for (n = 0; n < iir->nb_ab[1]; n++) {
  803. double r = iir->ab[1][2*n];
  804. double angle = M_PI*iir->ab[1][2*n+1]/180.;
  805. iir->ab[1][2*n] = r * cos(angle);
  806. iir->ab[1][2*n+1] = r * sin(angle);
  807. }
  808. }
  809. }
  810. static void check_stability(AVFilterContext *ctx, int channels)
  811. {
  812. AudioIIRContext *s = ctx->priv;
  813. int ch;
  814. for (ch = 0; ch < channels; ch++) {
  815. IIRChannel *iir = &s->iir[ch];
  816. for (int n = 0; n < iir->nb_ab[0]; n++) {
  817. double pr = hypot(iir->ab[0][2*n], iir->ab[0][2*n+1]);
  818. if (pr >= 1.) {
  819. av_log(ctx, AV_LOG_WARNING, "pole %d at channel %d is unstable\n", n, ch);
  820. break;
  821. }
  822. }
  823. }
  824. }
/*
 * Render txt at pixel position (x, y) into the 4-bytes-per-pixel frame
 * using the built-in 8x8 CGA bitmap font; each set font bit is written
 * as a little-endian 32-bit pixel of the given color (unset bits leave
 * the background untouched). No clipping is performed.
 */
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
{
    const uint8_t *font;
    int font_height;
    int i;

    font = avpriv_cga_font, font_height = 8;

    for (i = 0; txt[i]; i++) {
        int char_y, mask;
        uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;

        for (char_y = 0; char_y < font_height; char_y++) {
            for (mask = 0x80; mask; mask >>= 1) {
                if (font[txt[i] * font_height + char_y] & mask)
                    AV_WL32(p, color);
                p += 4;
            }
            /* advance to the next glyph row */
            p += pic->linesize[0] - 8 * 4;
        }
    }
}
  844. static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
  845. {
  846. int dx = FFABS(x1-x0);
  847. int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
  848. int err = (dx>dy ? dx : -dy) / 2, e2;
  849. for (;;) {
  850. AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);
  851. if (x0 == x1 && y0 == y1)
  852. break;
  853. e2 = err;
  854. if (e2 >-dx) {
  855. err -= dy;
  856. x0--;
  857. }
  858. if (e2 < dy) {
  859. err += dx;
  860. y0 += sy;
  861. }
  862. }
  863. }
  864. static double distance(double x0, double x1, double y0, double y1)
  865. {
  866. return hypot(x0 - x1, y0 - y1);
  867. }
  868. static void get_response(int channel, int format, double w,
  869. const double *b, const double *a,
  870. int nb_b, int nb_a, double *magnitude, double *phase)
  871. {
  872. double realz, realp;
  873. double imagz, imagp;
  874. double real, imag;
  875. double div;
  876. if (format == 0) {
  877. realz = 0., realp = 0.;
  878. imagz = 0., imagp = 0.;
  879. for (int x = 0; x < nb_a; x++) {
  880. realz += cos(-x * w) * a[x];
  881. imagz += sin(-x * w) * a[x];
  882. }
  883. for (int x = 0; x < nb_b; x++) {
  884. realp += cos(-x * w) * b[x];
  885. imagp += sin(-x * w) * b[x];
  886. }
  887. div = realp * realp + imagp * imagp;
  888. real = (realz * realp + imagz * imagp) / div;
  889. imag = (imagz * realp - imagp * realz) / div;
  890. *magnitude = hypot(real, imag);
  891. *phase = atan2(imag, real);
  892. } else {
  893. double p = 1., z = 1.;
  894. double acc = 0.;
  895. for (int x = 0; x < nb_a; x++) {
  896. z *= distance(cos(w), a[2 * x], sin(w), a[2 * x + 1]);
  897. acc += atan2(sin(w) - a[2 * x + 1], cos(w) - a[2 * x]);
  898. }
  899. for (int x = 0; x < nb_b; x++) {
  900. p *= distance(cos(w), b[2 * x], sin(w), b[2 * x + 1]);
  901. acc -= atan2(sin(w) - b[2 * x + 1], cos(w) - b[2 * x]);
  902. }
  903. *magnitude = z / p;
  904. *phase = acc;
  905. }
  906. }
/* Render the filter's frequency response for one channel into the video
 * frame: magnitude, unwrapped phase and group-delay curves, plus numeric
 * min/max labels when the frame is large enough. */
static void draw_response(AVFilterContext *ctx, AVFrame *out, int sample_rate)
{
    AudioIIRContext *s = ctx->priv;
    double *mag, *phase, *temp, *delay, min = DBL_MAX, max = -DBL_MAX;
    double min_delay = DBL_MAX, max_delay = -DBL_MAX, min_phase, max_phase;
    int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
    char text[32];
    int ch, i;

    /* Clear the whole frame before drawing. */
    memset(out->data[0], 0, s->h * out->linesize[0]);

    phase = av_malloc_array(s->w, sizeof(*phase));
    temp = av_malloc_array(s->w, sizeof(*temp));
    mag = av_malloc_array(s->w, sizeof(*mag));
    delay = av_malloc_array(s->w, sizeof(*delay));
    if (!mag || !phase || !delay || !temp)
        goto end;

    /* Clamp the requested display channel to what is actually available. */
    ch = av_clip(s->ir_channel, 0, s->channels - 1);

    /* Sample the response at s->w points on w in [0, PI] (DC..Nyquist). */
    for (i = 0; i < s->w; i++) {
        const double *b = s->iir[ch].ab[0];
        const double *a = s->iir[ch].ab[1];
        const int nb_b = s->iir[ch].nb_ab[0];
        const int nb_a = s->iir[ch].nb_ab[1];
        double w = i * M_PI / (s->w - 1);
        double m, p;

        get_response(ch, s->format, w, b, a, nb_b, nb_a, &m, &p);

        mag[i] = s->iir[ch].g * m;
        phase[i] = p;
        min = fmin(min, mag[i]);
        max = fmax(max, mag[i]);
    }

    /* Unwrap the phase: accumulate a +/- 2*PI*k correction in temp[] wherever
     * consecutive samples jump by more than PI. */
    temp[0] = 0.;
    for (i = 0; i < s->w - 1; i++) {
        double d = phase[i] - phase[i + 1];
        temp[i + 1] = ceil(fabs(d) / (2. * M_PI)) * 2. * M_PI * ((d > M_PI) - (d < -M_PI));
    }

    min_phase = phase[0];
    max_phase = phase[0];
    for (i = 1; i < s->w; i++) {
        temp[i] += temp[i - 1];  /* running correction */
        phase[i] += temp[i];
        min_phase = fmin(min_phase, phase[i]);
        max_phase = fmax(max_phase, phase[i]);
    }

    /* Group delay: negative derivative of the unwrapped phase w.r.t.
     * frequency, with the frequency step expressed via sample_rate. */
    for (i = 0; i < s->w - 1; i++) {
        double div = s->w / (double)sample_rate;

        delay[i + 1] = -(phase[i] - phase[i + 1]) / div;
        min_delay = fmin(min_delay, delay[i + 1]);
        max_delay = fmax(max_delay, delay[i + 1]);
    }
    delay[0] = delay[1];  /* no left neighbor for the first sample */

    /* Map each curve to pixel rows (y grows downward, hence s->h - 1 - y)
     * and draw it as connected segments; each curve gets its own color. */
    for (i = 0; i < s->w; i++) {
        int ymag = mag[i] / max * (s->h - 1);
        int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
        int yphase = (phase[i] - min_phase) / (max_phase - min_phase) * (s->h - 1);

        ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
        yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
        ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);

        /* Seed the previous point with the first point so the first segment
         * is degenerate instead of reaching from -1. */
        if (prev_ymag < 0)
            prev_ymag = ymag;
        if (prev_yphase < 0)
            prev_yphase = yphase;
        if (prev_ydelay < 0)
            prev_ydelay = ydelay;

        draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
        draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
        draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);

        prev_ymag = ymag;
        prev_yphase = yphase;
        prev_ydelay = ydelay;
    }

    /* Only draw the text overlay when it fits (8x? bitmap font rows). */
    if (s->w > 400 && s->h > 100) {
        drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", max);
        drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);

        drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", min);
        drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);

        drawtext(out, 2, 22, "Max Phase:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", max_phase);
        drawtext(out, 15 * 8 + 2, 22, text, 0xDDDDDDDD);

        drawtext(out, 2, 32, "Min Phase:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", min_phase);
        drawtext(out, 15 * 8 + 2, 32, text, 0xDDDDDDDD);

        drawtext(out, 2, 42, "Max Delay:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", max_delay);
        drawtext(out, 11 * 8 + 2, 42, text, 0xDDDDDDDD);

        drawtext(out, 2, 52, "Min Delay:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", min_delay);
        drawtext(out, 11 * 8 + 2, 52, text, 0xDDDDDDDD);
    }

end:
    av_free(delay);
    av_free(temp);
    av_free(phase);
    av_free(mag);
}
  1002. static int config_output(AVFilterLink *outlink)
  1003. {
  1004. AVFilterContext *ctx = outlink->src;
  1005. AudioIIRContext *s = ctx->priv;
  1006. AVFilterLink *inlink = ctx->inputs[0];
  1007. int ch, ret, i;
  1008. s->channels = inlink->channels;
  1009. s->iir = av_calloc(s->channels, sizeof(*s->iir));
  1010. if (!s->iir)
  1011. return AVERROR(ENOMEM);
  1012. ret = read_gains(ctx, s->g_str, inlink->channels);
  1013. if (ret < 0)
  1014. return ret;
  1015. ret = read_channels(ctx, inlink->channels, s->a_str, 0);
  1016. if (ret < 0)
  1017. return ret;
  1018. ret = read_channels(ctx, inlink->channels, s->b_str, 1);
  1019. if (ret < 0)
  1020. return ret;
  1021. if (s->format == -1) {
  1022. convert_sf2tf(ctx, inlink->channels);
  1023. s->format = 0;
  1024. } else if (s->format == 2) {
  1025. convert_pr2zp(ctx, inlink->channels);
  1026. } else if (s->format == 3) {
  1027. convert_pd2zp(ctx, inlink->channels);
  1028. } else if (s->format == 4) {
  1029. convert_sp2zp(ctx, inlink->channels);
  1030. }
  1031. if (s->format > 0) {
  1032. check_stability(ctx, inlink->channels);
  1033. }
  1034. av_frame_free(&s->video);
  1035. if (s->response) {
  1036. s->video = ff_get_video_buffer(ctx->outputs[1], s->w, s->h);
  1037. if (!s->video)
  1038. return AVERROR(ENOMEM);
  1039. draw_response(ctx, s->video, inlink->sample_rate);
  1040. }
  1041. if (s->format == 0)
  1042. av_log(ctx, AV_LOG_WARNING, "transfer function coefficients format is not recommended for too high number of zeros/poles.\n");
  1043. if (s->format > 0 && s->process == 0) {
  1044. av_log(ctx, AV_LOG_WARNING, "Direct processsing is not recommended for zp coefficients format.\n");
  1045. ret = convert_zp2tf(ctx, inlink->channels);
  1046. if (ret < 0)
  1047. return ret;
  1048. } else if (s->format <= 0 && s->process == 1) {
  1049. av_log(ctx, AV_LOG_ERROR, "Serial processing is not implemented for transfer function.\n");
  1050. return AVERROR_PATCHWELCOME;
  1051. } else if (s->format <= 0 && s->process == 2) {
  1052. av_log(ctx, AV_LOG_ERROR, "Parallel processing is not implemented for transfer function.\n");
  1053. return AVERROR_PATCHWELCOME;
  1054. } else if (s->format > 0 && s->process == 1) {
  1055. ret = decompose_zp2biquads(ctx, inlink->channels);
  1056. if (ret < 0)
  1057. return ret;
  1058. } else if (s->format > 0 && s->process == 2) {
  1059. if (s->precision > 1)
  1060. av_log(ctx, AV_LOG_WARNING, "Parallel processing is not recommended for fixed-point precisions.\n");
  1061. ret = decompose_zp2biquads(ctx, inlink->channels);
  1062. if (ret < 0)
  1063. return ret;
  1064. ret = convert_serial2parallel(ctx, inlink->channels);
  1065. if (ret < 0)
  1066. return ret;
  1067. }
  1068. for (ch = 0; s->format == 0 && ch < inlink->channels; ch++) {
  1069. IIRChannel *iir = &s->iir[ch];
  1070. for (i = 1; i < iir->nb_ab[0]; i++) {
  1071. iir->ab[0][i] /= iir->ab[0][0];
  1072. }
  1073. iir->ab[0][0] = 1.0;
  1074. for (i = 0; i < iir->nb_ab[1]; i++) {
  1075. iir->ab[1][i] *= iir->g;
  1076. }
  1077. normalize_coeffs(ctx, ch);
  1078. }
  1079. switch (inlink->format) {
  1080. case AV_SAMPLE_FMT_DBLP: s->iir_channel = s->process == 2 ? iir_ch_parallel_dblp : s->process == 1 ? iir_ch_serial_dblp : iir_ch_dblp; break;
  1081. case AV_SAMPLE_FMT_FLTP: s->iir_channel = s->process == 2 ? iir_ch_parallel_fltp : s->process == 1 ? iir_ch_serial_fltp : iir_ch_fltp; break;
  1082. case AV_SAMPLE_FMT_S32P: s->iir_channel = s->process == 2 ? iir_ch_parallel_s32p : s->process == 1 ? iir_ch_serial_s32p : iir_ch_s32p; break;
  1083. case AV_SAMPLE_FMT_S16P: s->iir_channel = s->process == 2 ? iir_ch_parallel_s16p : s->process == 1 ? iir_ch_serial_s16p : iir_ch_s16p; break;
  1084. }
  1085. return 0;
  1086. }
/* Filter one audio frame: run the selected per-channel IIR worker over all
 * channels (threaded), report clipping, optionally emit the response video
 * frame, and push the filtered audio downstream. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AudioIIRContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    ThreadData td;
    AVFrame *out;
    int ch, ret;

    /* In-place only when the input is writable and we are not in parallel
     * mode (parallel processing needs a separate output buffer). */
    if (av_frame_is_writable(in) && s->process != 2) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    td.in = in;
    td.out = out;
    /* One job per channel. */
    ctx->internal->execute(ctx, s->iir_channel, &td, NULL, outlink->channels);

    /* Report and reset the per-channel clipping counters. */
    for (ch = 0; ch < outlink->channels; ch++) {
        if (s->iir[ch].clippings > 0)
            av_log(ctx, AV_LOG_WARNING, "Channel %d clipping %d times. Please reduce gain.\n",
                   ch, s->iir[ch].clippings);
        s->iir[ch].clippings = 0;
    }

    if (in != out)
        av_frame_free(&in);

    if (s->response) {
        AVFilterLink *outlink = ctx->outputs[1];
        int64_t old_pts = s->video->pts;
        int64_t new_pts = av_rescale_q(out->pts, ctx->inputs[0]->time_base, outlink->time_base);

        /* Emit a new video frame only when the clock advanced, so the video
         * output runs at its own frame rate. */
        if (new_pts > old_pts) {
            AVFrame *clone;

            s->video->pts = new_pts;
            clone = av_frame_clone(s->video);
            if (!clone)
                return AVERROR(ENOMEM);

            ret = ff_filter_frame(outlink, clone);
            if (ret < 0)
                return ret;
        }
    }

    return ff_filter_frame(outlink, out);
}
  1133. static int config_video(AVFilterLink *outlink)
  1134. {
  1135. AVFilterContext *ctx = outlink->src;
  1136. AudioIIRContext *s = ctx->priv;
  1137. outlink->sample_aspect_ratio = (AVRational){1,1};
  1138. outlink->w = s->w;
  1139. outlink->h = s->h;
  1140. outlink->frame_rate = s->rate;
  1141. outlink->time_base = av_inv_q(outlink->frame_rate);
  1142. return 0;
  1143. }
  1144. static av_cold int init(AVFilterContext *ctx)
  1145. {
  1146. AudioIIRContext *s = ctx->priv;
  1147. AVFilterPad pad, vpad;
  1148. int ret;
  1149. if (!s->a_str || !s->b_str || !s->g_str) {
  1150. av_log(ctx, AV_LOG_ERROR, "Valid coefficients are mandatory.\n");
  1151. return AVERROR(EINVAL);
  1152. }
  1153. switch (s->precision) {
  1154. case 0: s->sample_format = AV_SAMPLE_FMT_DBLP; break;
  1155. case 1: s->sample_format = AV_SAMPLE_FMT_FLTP; break;
  1156. case 2: s->sample_format = AV_SAMPLE_FMT_S32P; break;
  1157. case 3: s->sample_format = AV_SAMPLE_FMT_S16P; break;
  1158. default: return AVERROR_BUG;
  1159. }
  1160. pad = (AVFilterPad){
  1161. .name = "default",
  1162. .type = AVMEDIA_TYPE_AUDIO,
  1163. .config_props = config_output,
  1164. };
  1165. ret = ff_insert_outpad(ctx, 0, &pad);
  1166. if (ret < 0)
  1167. return ret;
  1168. if (s->response) {
  1169. vpad = (AVFilterPad){
  1170. .name = "filter_response",
  1171. .type = AVMEDIA_TYPE_VIDEO,
  1172. .config_props = config_video,
  1173. };
  1174. ret = ff_insert_outpad(ctx, 1, &vpad);
  1175. if (ret < 0)
  1176. return ret;
  1177. }
  1178. return 0;
  1179. }
  1180. static av_cold void uninit(AVFilterContext *ctx)
  1181. {
  1182. AudioIIRContext *s = ctx->priv;
  1183. int ch;
  1184. if (s->iir) {
  1185. for (ch = 0; ch < s->channels; ch++) {
  1186. IIRChannel *iir = &s->iir[ch];
  1187. av_freep(&iir->ab[0]);
  1188. av_freep(&iir->ab[1]);
  1189. av_freep(&iir->cache[0]);
  1190. av_freep(&iir->cache[1]);
  1191. av_freep(&iir->biquads);
  1192. }
  1193. }
  1194. av_freep(&s->iir);
  1195. av_frame_free(&s->video);
  1196. }
/* Single audio input pad. */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
#define OFFSET(x) offsetof(AudioIIRContext, x)
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* User options; each long name has a short alias writing the same field. */
static const AVOption aiir_options[] = {
    { "zeros", "set B/numerator/zeros coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
    { "z", "set B/numerator/zeros coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
    { "poles", "set A/denominator/poles coefficients", OFFSET(a_str),AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
    { "p", "set A/denominator/poles coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
    { "gains", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
    { "k", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
    { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
    { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
    /* Coefficient representation; values match the s->format checks in
     * config_output (-1 analog tf, 0 digital tf, 1..4 zero/pole variants). */
    { "format", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, -1, 4, AF, "format" },
    { "f", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, -1, 4, AF, "format" },
    { "sf", "analog transfer function", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, AF, "format" },
    { "tf", "digital transfer function", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "format" },
    { "zp", "Z-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "format" },
    { "pr", "Z-plane zeros/poles (polar radians)", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "format" },
    { "pd", "Z-plane zeros/poles (polar degrees)", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, "format" },
    { "sp", "S-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, AF, "format" },
    /* Processing mode: 0 direct, 1 serial biquads, 2 parallel biquads. */
    { "process", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, AF, "process" },
    { "r", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, AF, "process" },
    { "d", "direct", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "process" },
    { "s", "serial", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "process" },
    { "p", "parallel", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "process" },
    /* Precision selects the internal sample format (see init()). */
    { "precision", "set filtering precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, "precision" },
    { "e", "set precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, "precision" },
    { "dbl", "double-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "precision" },
    { "flt", "single-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "precision" },
    { "i32", "32-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "precision" },
    { "i16", "16-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, "precision" },
    { "normalize", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
    { "n", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
    { "mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
    /* Response-video options (only used when response=1). */
    { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
    { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
    { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
    { NULL },
};
AVFILTER_DEFINE_CLASS(aiir);

/* Filter definition; outputs are dynamic because the video pad is only
 * created when the response option is enabled (see init()). */
AVFilter ff_af_aiir = {
    .name          = "aiir",
    .description   = NULL_IF_CONFIG_SMALL("Apply Infinite Impulse Response filter with supplied coefficients."),
    .priv_size     = sizeof(AudioIIRContext),
    .priv_class    = &aiir_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = inputs,
    .flags         = AVFILTER_FLAG_DYNAMIC_OUTPUTS |
                     AVFILTER_FLAG_SLICE_THREADS,
};