You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1290 lines
44KB

  1. /*
  2. * Copyright (c) 2018 Paul B Mahol
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include <float.h>
  21. #include "libavutil/avassert.h"
  22. #include "libavutil/avstring.h"
  23. #include "libavutil/intreadwrite.h"
  24. #include "libavutil/opt.h"
  25. #include "libavutil/xga_font_data.h"
  26. #include "audio.h"
  27. #include "avfilter.h"
  28. #include "internal.h"
/* Per-call payload handed to the threaded per-channel workers by
 * filter_frame(). */
typedef struct ThreadData {
    AVFrame *in, *out;  /* input and output frames; identical when filtering in place */
} ThreadData;
/* Indices of a complex-conjugate pole/zero pair inside a channel's
 * root list; -1 means "not found yet" (see decompose_zp2biquads()). */
typedef struct Pair {
    int a, b;
} Pair;
/* One second-order (biquad) filter section plus its running state. */
typedef struct BiquadContext {
    double a[3];    /* denominator coefficients (a[0] is kept at 1.) */
    double b[3];    /* numerator coefficients */
    double i1, i2;  /* previous two input samples */
    double o1, o2;  /* previous two output samples */
} BiquadContext;
/* Per-channel filter description and runtime state. */
typedef struct IIRChannel {
    int nb_ab[2];           /* coefficient counts: [0] denominator/poles, [1] numerator/zeros */
    double *ab[2];          /* coefficients, or re/im root pairs for zp formats */
    double g;               /* per-channel gain */
    double *cache[2];       /* direct-form histories: [0] past outputs, [1] past inputs */
    BiquadContext *biquads; /* cascade sections, used only for serial processing */
    int clippings;          /* clipped-sample counter, reset after each frame's warning */
} IIRChannel;
/* Filter instance state. */
typedef struct AudioIIRContext {
    const AVClass *class;
    char *a_str, *b_str, *g_str;  /* raw option strings: denominator, numerator, per-channel gains */
    double dry_gain, wet_gain;    /* input and output gains */
    double mix;                   /* dry/wet blend factor */
    int normalize;                /* when set, scale numerators for unity DC gain */
    int format;                   /* coefficient format: 0 = tf, 1..4 = zero/pole variants (see format[]) */
    int process;                  /* 0 = direct processing, 1 = serial cascaded biquads */
    int precision;                /* precision option; presumably selects sample_format — confirm against option table */
    int response;                 /* when set, a second output carries a frequency-response video */
    int w, h;                     /* response video dimensions */
    int ir_channel;               /* channel whose response is drawn */
    AVRational rate;              /* response video frame rate */
    AVFrame *video;               /* cached response frame, cloned out as its pts advances */
    IIRChannel *iir;              /* per-channel state, 'channels' entries */
    int channels;
    enum AVSampleFormat sample_format;
    int (*iir_channel)(AVFilterContext *ctx, void *arg, int ch, int nb_jobs); /* selected worker */
} AudioIIRContext;
/**
 * Negotiate formats: audio I/O is restricted to the single planar
 * sample format selected by the precision option, with any channel
 * count/layout and any sample rate; the optional second output
 * (response video) is RGB0.
 */
static int query_formats(AVFilterContext *ctx)
{
    AudioIIRContext *s = ctx->priv;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGB0,
        AV_PIX_FMT_NONE
    };
    int ret;

    if (s->response) {
        /* second output pad exists only when the response option is set */
        AVFilterLink *videolink = ctx->outputs[1];

        formats = ff_make_format_list(pix_fmts);
        if ((ret = ff_formats_ref(formats, &videolink->in_formats)) < 0)
            return ret;
    }

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    /* overwrite the DBLP placeholder with the precision-selected format */
    sample_fmts[0] = s->sample_format;
    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/*
 * Define a direct-form per-channel IIR worker for one sample type:
 *   y[n] = sum_{x=0..nb_b-1} b[x]*in[n-x] - sum_{x=1..nb_a-1} a[x]*y[n-x]
 * with input/output histories in iir->cache[1]/cache[0]. The result is
 * scaled by the wet gain and channel gain g, then blended with the
 * dry-gain-scaled input according to mix. Integer variants
 * (need_clipping != 0) clamp to [min, max] and count clipped samples.
 * Comments inside the macro use block form; // would swallow the
 * continuation backslashes.
 */
#define IIR_CH(name, type, min, max, need_clipping)                     \
static int iir_ch_## name(AVFilterContext *ctx, void *arg, int ch, int nb_jobs) \
{                                                                       \
    AudioIIRContext *s = ctx->priv;                                     \
    const double ig = s->dry_gain;                                      \
    const double og = s->wet_gain;                                      \
    const double mix = s->mix;                                          \
    ThreadData *td = arg;                                               \
    AVFrame *in = td->in, *out = td->out;                               \
    const type *src = (const type *)in->extended_data[ch];              \
    double *oc = (double *)s->iir[ch].cache[0];                         \
    double *ic = (double *)s->iir[ch].cache[1];                         \
    const int nb_a = s->iir[ch].nb_ab[0];                               \
    const int nb_b = s->iir[ch].nb_ab[1];                               \
    const double *a = s->iir[ch].ab[0];                                 \
    const double *b = s->iir[ch].ab[1];                                 \
    const double g = s->iir[ch].g;                                      \
    int *clippings = &s->iir[ch].clippings;                             \
    type *dst = (type *)out->extended_data[ch];                         \
    int n;                                                              \
                                                                        \
    for (n = 0; n < in->nb_samples; n++) {                              \
        double sample = 0.;                                             \
        int x;                                                          \
                                                                        \
        /* shift both histories by one sample */                        \
        memmove(&ic[1], &ic[0], (nb_b - 1) * sizeof(*ic));              \
        memmove(&oc[1], &oc[0], (nb_a - 1) * sizeof(*oc));              \
        ic[0] = src[n] * ig;                                            \
        for (x = 0; x < nb_b; x++)                                      \
            sample += b[x] * ic[x];                                     \
                                                                        \
        for (x = 1; x < nb_a; x++)                                      \
            sample -= a[x] * oc[x];                                     \
                                                                        \
        oc[0] = sample;                                                 \
        sample *= og * g;                                               \
        /* dry/wet blend against the dry-gain-scaled input */           \
        sample = sample * mix + ic[0] * (1. - mix);                     \
        if (need_clipping && sample < min) {                            \
            (*clippings)++;                                             \
            dst[n] = min;                                               \
        } else if (need_clipping && sample > max) {                     \
            (*clippings)++;                                             \
            dst[n] = max;                                               \
        } else {                                                        \
            dst[n] = sample;                                            \
        }                                                               \
    }                                                                   \
                                                                        \
    return 0;                                                           \
}

IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
IIR_CH(fltp, float, -1., 1., 0)
IIR_CH(dblp, double, -1., 1., 0)
  160. #define SERIAL_IIR_CH(name, type, min, max, need_clipping) \
  161. static int iir_ch_serial_## name(AVFilterContext *ctx, void *arg, int ch, int nb_jobs) \
  162. { \
  163. AudioIIRContext *s = ctx->priv; \
  164. const double ig = s->dry_gain; \
  165. const double og = s->wet_gain; \
  166. const double mix = s->mix; \
  167. ThreadData *td = arg; \
  168. AVFrame *in = td->in, *out = td->out; \
  169. const type *src = (const type *)in->extended_data[ch]; \
  170. type *dst = (type *)out->extended_data[ch]; \
  171. IIRChannel *iir = &s->iir[ch]; \
  172. const double g = iir->g; \
  173. int *clippings = &iir->clippings; \
  174. int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2; \
  175. int n, i; \
  176. \
  177. for (i = 0; i < nb_biquads; i++) { \
  178. const double a1 = -iir->biquads[i].a[1]; \
  179. const double a2 = -iir->biquads[i].a[2]; \
  180. const double b0 = iir->biquads[i].b[0]; \
  181. const double b1 = iir->biquads[i].b[1]; \
  182. const double b2 = iir->biquads[i].b[2]; \
  183. double i1 = iir->biquads[i].i1; \
  184. double i2 = iir->biquads[i].i2; \
  185. double o1 = iir->biquads[i].o1; \
  186. double o2 = iir->biquads[i].o2; \
  187. \
  188. for (n = 0; n < in->nb_samples; n++) { \
  189. double sample = ig * (i ? dst[n] : src[n]); \
  190. double o0 = sample * b0 + i1 * b1 + i2 * b2 + o1 * a1 + o2 * a2; \
  191. \
  192. i2 = i1; \
  193. i1 = src[n]; \
  194. o2 = o1; \
  195. o1 = o0; \
  196. o0 *= og * g; \
  197. \
  198. o0 = o0 * mix + (1. - mix) * sample; \
  199. if (need_clipping && o0 < min) { \
  200. (*clippings)++; \
  201. dst[n] = min; \
  202. } else if (need_clipping && o0 > max) { \
  203. (*clippings)++; \
  204. dst[n] = max; \
  205. } else { \
  206. dst[n] = o0; \
  207. } \
  208. } \
  209. iir->biquads[i].i1 = i1; \
  210. iir->biquads[i].i2 = i2; \
  211. iir->biquads[i].o1 = o1; \
  212. iir->biquads[i].o2 = o2; \
  213. } \
  214. \
  215. return 0; \
  216. }
  217. SERIAL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
  218. SERIAL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
  219. SERIAL_IIR_CH(fltp, float, -1., 1., 0)
  220. SERIAL_IIR_CH(dblp, double, -1., 1., 0)
  221. static void count_coefficients(char *item_str, int *nb_items)
  222. {
  223. char *p;
  224. if (!item_str)
  225. return;
  226. *nb_items = 1;
  227. for (p = item_str; *p && *p != '|'; p++) {
  228. if (*p == ' ')
  229. (*nb_items)++;
  230. }
  231. }
/**
 * Parse the '|'-separated per-channel gain string into s->iir[i].g for
 * nb_items channels. When fewer entries than channels are supplied,
 * the last entry is reused for the remaining channels.
 *
 * Returns 0 on success, AVERROR(ENOMEM)/AVERROR(EINVAL) on failure.
 */
static int read_gains(AVFilterContext *ctx, char *item_str, int nb_items)
{
    AudioIIRContext *s = ctx->priv;
    char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
    int i;

    p = old_str = av_strdup(item_str);
    if (!p)
        return AVERROR(ENOMEM);

    for (i = 0; i < nb_items; i++) {
        if (!(arg = av_strtok(p, "|", &saveptr)))
            arg = prev_arg; /* ran out of entries: reuse the previous one */

        if (!arg) {
            av_freep(&old_str);
            return AVERROR(EINVAL);
        }

        p = NULL; /* subsequent av_strtok calls continue from saveptr */
        if (sscanf(arg, "%lf", &s->iir[i].g) != 1) {
            av_log(ctx, AV_LOG_ERROR, "Invalid gains supplied: %s\n", arg);
            av_freep(&old_str);
            return AVERROR(EINVAL);
        }

        prev_arg = arg;
    }

    av_freep(&old_str);

    return 0;
}
/**
 * Parse up to nb_items space-separated real (transfer-function)
 * coefficients from item_str into dst. Fewer items than expected is
 * not an error: parsing simply stops.
 *
 * Returns 0 on success, a negative AVERROR code on failure.
 */
static int read_tf_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
{
    char *p, *arg, *old_str, *saveptr = NULL;
    int i;

    p = old_str = av_strdup(item_str);
    if (!p)
        return AVERROR(ENOMEM);

    for (i = 0; i < nb_items; i++) {
        if (!(arg = av_strtok(p, " ", &saveptr)))
            break;

        p = NULL;
        if (sscanf(arg, "%lf", &dst[i]) != 1) {
            av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
            av_freep(&old_str);
            return AVERROR(EINVAL);
        }
    }

    av_freep(&old_str);

    return 0;
}
/**
 * Parse up to nb_items space-separated complex roots from item_str
 * into dst as interleaved pairs (dst[2i], dst[2i+1]), using the
 * format-specific sscanf pattern (see format[]). Each entry must
 * yield exactly two values.
 *
 * Returns 0 on success, a negative AVERROR code on failure.
 */
static int read_zp_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst, const char *format)
{
    char *p, *arg, *old_str, *saveptr = NULL;
    int i;

    p = old_str = av_strdup(item_str);
    if (!p)
        return AVERROR(ENOMEM);

    for (i = 0; i < nb_items; i++) {
        if (!(arg = av_strtok(p, " ", &saveptr)))
            break;

        p = NULL;
        if (sscanf(arg, format, &dst[i*2], &dst[i*2+1]) != 2) {
            av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
            av_freep(&old_str);
            return AVERROR(EINVAL);
        }
    }

    av_freep(&old_str);

    return 0;
}
/* sscanf patterns per s->format value: [0] tf (single real), [1]..[4]
 * complex pairs whose second value is tagged i/r/d/i — presumably
 * matching the zp/pr/pd/sp coefficient formats handled in
 * config_output(); confirm against the option table. */
static const char *format[] = { "%lf", "%lf %lfi", "%lf %lfr", "%lf %lfd", "%lf %lfi" };
/**
 * Parse a '|'-separated list of per-channel coefficient strings into
 * s->iir[i].ab[ab]; ab == 0 holds the denominator/poles (a_str),
 * ab == 1 the numerator/zeros (b_str). The last entry is reused when
 * fewer entries than channels are supplied. Zero/pole formats
 * (s->format != 0) store one re/im pair per root, hence the doubled
 * allocation and the format-specific scanner.
 *
 * Returns 0 on success, a negative AVERROR code on failure.
 */
static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int ab)
{
    AudioIIRContext *s = ctx->priv;
    char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
    int i, ret;

    p = old_str = av_strdup(item_str);
    if (!p)
        return AVERROR(ENOMEM);

    for (i = 0; i < channels; i++) {
        IIRChannel *iir = &s->iir[i];

        if (!(arg = av_strtok(p, "|", &saveptr)))
            arg = prev_arg; /* reuse previous channel's entry */

        if (!arg) {
            av_freep(&old_str);
            return AVERROR(EINVAL);
        }

        count_coefficients(arg, &iir->nb_ab[ab]);

        p = NULL;
        /* cache gets one extra slot; ab is doubled for complex pairs */
        iir->cache[ab] = av_calloc(iir->nb_ab[ab] + 1, sizeof(double));
        iir->ab[ab] = av_calloc(iir->nb_ab[ab] * (!!s->format + 1), sizeof(double));
        if (!iir->ab[ab] || !iir->cache[ab]) {
            av_freep(&old_str);
            return AVERROR(ENOMEM);
        }

        if (s->format) {
            ret = read_zp_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab], format[s->format]);
        } else {
            ret = read_tf_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab]);
        }
        if (ret < 0) {
            av_freep(&old_str);
            return ret;
        }

        prev_arg = arg;
    }

    av_freep(&old_str);

    return 0;
}
  337. static void multiply(double wre, double wim, int npz, double *coeffs)
  338. {
  339. double nwre = -wre, nwim = -wim;
  340. double cre, cim;
  341. int i;
  342. for (i = npz; i >= 1; i--) {
  343. cre = coeffs[2 * i + 0];
  344. cim = coeffs[2 * i + 1];
  345. coeffs[2 * i + 0] = (nwre * cre - nwim * cim) + coeffs[2 * (i - 1) + 0];
  346. coeffs[2 * i + 1] = (nwre * cim + nwim * cre) + coeffs[2 * (i - 1) + 1];
  347. }
  348. cre = coeffs[0];
  349. cim = coeffs[1];
  350. coeffs[0] = nwre * cre - nwim * cim;
  351. coeffs[1] = nwre * cim + nwim * cre;
  352. }
  353. static int expand(AVFilterContext *ctx, double *pz, int nb, double *coeffs)
  354. {
  355. int i;
  356. coeffs[0] = 1.0;
  357. coeffs[1] = 0.0;
  358. for (i = 0; i < nb; i++) {
  359. coeffs[2 * (i + 1) ] = 0.0;
  360. coeffs[2 * (i + 1) + 1] = 0.0;
  361. }
  362. for (i = 0; i < nb; i++)
  363. multiply(pz[2 * i], pz[2 * i + 1], nb, coeffs);
  364. for (i = 0; i < nb + 1; i++) {
  365. if (fabs(coeffs[2 * i + 1]) > FLT_EPSILON) {
  366. av_log(ctx, AV_LOG_ERROR, "coeff: %f of z^%d is not real; poles/zeros are not complex conjugates.\n",
  367. coeffs[2 * i + 1], i);
  368. return AVERROR(EINVAL);
  369. }
  370. }
  371. return 0;
  372. }
  373. static void normalize_coeffs(AVFilterContext *ctx, int ch)
  374. {
  375. AudioIIRContext *s = ctx->priv;
  376. IIRChannel *iir = &s->iir[ch];
  377. double sum_den = 0.;
  378. if (!s->normalize)
  379. return;
  380. for (int i = 0; i < iir->nb_ab[1]; i++) {
  381. sum_den += iir->ab[1][i];
  382. }
  383. if (sum_den > 1e-6) {
  384. double factor, sum_num = 0.;
  385. for (int i = 0; i < iir->nb_ab[0]; i++) {
  386. sum_num += iir->ab[0][i];
  387. }
  388. factor = sum_num / sum_den;
  389. for (int i = 0; i < iir->nb_ab[1]; i++) {
  390. iir->ab[1][i] *= factor;
  391. }
  392. }
  393. }
  394. static int convert_zp2tf(AVFilterContext *ctx, int channels)
  395. {
  396. AudioIIRContext *s = ctx->priv;
  397. int ch, i, j, ret = 0;
  398. for (ch = 0; ch < channels; ch++) {
  399. IIRChannel *iir = &s->iir[ch];
  400. double *topc, *botc;
  401. topc = av_calloc((iir->nb_ab[0] + 1) * 2, sizeof(*topc));
  402. botc = av_calloc((iir->nb_ab[1] + 1) * 2, sizeof(*botc));
  403. if (!topc || !botc) {
  404. ret = AVERROR(ENOMEM);
  405. goto fail;
  406. }
  407. ret = expand(ctx, iir->ab[0], iir->nb_ab[0], botc);
  408. if (ret < 0) {
  409. goto fail;
  410. }
  411. ret = expand(ctx, iir->ab[1], iir->nb_ab[1], topc);
  412. if (ret < 0) {
  413. goto fail;
  414. }
  415. for (j = 0, i = iir->nb_ab[1]; i >= 0; j++, i--) {
  416. iir->ab[1][j] = topc[2 * i];
  417. }
  418. iir->nb_ab[1]++;
  419. for (j = 0, i = iir->nb_ab[0]; i >= 0; j++, i--) {
  420. iir->ab[0][j] = botc[2 * i];
  421. }
  422. iir->nb_ab[0]++;
  423. normalize_coeffs(ctx, ch);
  424. fail:
  425. av_free(topc);
  426. av_free(botc);
  427. if (ret < 0)
  428. break;
  429. }
  430. return ret;
  431. }
/**
 * Pair up each channel's poles and zeros into second-order sections
 * for serial processing: repeatedly take the not-yet-consumed pole
 * with the largest magnitude together with its complex conjugate,
 * match it with the nearest remaining zero (and that zero's
 * conjugate), expand both pairs into quadratics, and store the result
 * as a biquad. Consumed roots are marked with NaN. The channel gain g
 * is folded into the first biquad only.
 *
 * Returns 0 on success, AVERROR(ENOMEM)/AVERROR(EINVAL) on failure.
 */
static int decompose_zp2biquads(AVFilterContext *ctx, int channels)
{
    AudioIIRContext *s = ctx->priv;
    int ch, ret;

    for (ch = 0; ch < channels; ch++) {
        IIRChannel *iir = &s->iir[ch];
        /* each biquad consumes up to two poles and two zeros */
        int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
        int current_biquad = 0;

        iir->biquads = av_calloc(nb_biquads, sizeof(BiquadContext));
        if (!iir->biquads)
            return AVERROR(ENOMEM);

        while (nb_biquads--) {
            Pair outmost_pole = { -1, -1 };
            Pair nearest_zero = { -1, -1 };
            double zeros[4] = { 0 };
            double poles[4] = { 0 };
            double b[6] = { 0 };
            double a[6] = { 0 };
            double min_distance = DBL_MAX;
            double max_mag = 0;
            double factor;
            int i;

            /* pick the remaining pole with the largest magnitude
             * (NaN entries are already consumed) */
            for (i = 0; i < iir->nb_ab[0]; i++) {
                double mag;

                if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
                    continue;
                mag = hypot(iir->ab[0][2 * i], iir->ab[0][2 * i + 1]);

                if (mag > max_mag) {
                    max_mag = mag;
                    outmost_pole.a = i;
                }
            }

            /* find its conjugate; a real pole matches itself */
            for (i = 0; i < iir->nb_ab[0]; i++) {
                if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
                    continue;

                if (iir->ab[0][2 * i    ] ==  iir->ab[0][2 * outmost_pole.a    ] &&
                    iir->ab[0][2 * i + 1] == -iir->ab[0][2 * outmost_pole.a + 1]) {
                    outmost_pole.b = i;
                    break;
                }
            }

            av_log(ctx, AV_LOG_VERBOSE, "outmost_pole is %d.%d\n", outmost_pole.a, outmost_pole.b);

            if (outmost_pole.a < 0 || outmost_pole.b < 0)
                return AVERROR(EINVAL);

            /* pair the pole with the closest remaining zero */
            for (i = 0; i < iir->nb_ab[1]; i++) {
                double distance;

                if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
                    continue;
                distance = hypot(iir->ab[0][2 * outmost_pole.a    ] - iir->ab[1][2 * i    ],
                                 iir->ab[0][2 * outmost_pole.a + 1] - iir->ab[1][2 * i + 1]);

                if (distance < min_distance) {
                    min_distance = distance;
                    nearest_zero.a = i;
                }
            }

            /* and that zero's conjugate */
            for (i = 0; i < iir->nb_ab[1]; i++) {
                if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
                    continue;

                if (iir->ab[1][2 * i    ] ==  iir->ab[1][2 * nearest_zero.a    ] &&
                    iir->ab[1][2 * i + 1] == -iir->ab[1][2 * nearest_zero.a + 1]) {
                    nearest_zero.b = i;
                    break;
                }
            }

            av_log(ctx, AV_LOG_VERBOSE, "nearest_zero is %d.%d\n", nearest_zero.a, nearest_zero.b);

            if (nearest_zero.a < 0 || nearest_zero.b < 0)
                return AVERROR(EINVAL);

            poles[0] = iir->ab[0][2 * outmost_pole.a    ];
            poles[1] = iir->ab[0][2 * outmost_pole.a + 1];

            zeros[0] = iir->ab[1][2 * nearest_zero.a    ];
            zeros[1] = iir->ab[1][2 * nearest_zero.a + 1];

            if (nearest_zero.a == nearest_zero.b && outmost_pole.a == outmost_pole.b) {
                /* both roots real: pad with a root at the origin */
                zeros[2] = 0;
                zeros[3] = 0;

                poles[2] = 0;
                poles[3] = 0;
            } else {
                poles[2] = iir->ab[0][2 * outmost_pole.b    ];
                poles[3] = iir->ab[0][2 * outmost_pole.b + 1];

                zeros[2] = iir->ab[1][2 * nearest_zero.b    ];
                zeros[3] = iir->ab[1][2 * nearest_zero.b + 1];
            }

            /* expand both conjugate pairs into quadratics */
            ret = expand(ctx, zeros, 2, b);
            if (ret < 0)
                return ret;

            ret = expand(ctx, poles, 2, a);
            if (ret < 0)
                return ret;

            /* mark the consumed roots */
            iir->ab[0][2 * outmost_pole.a] = iir->ab[0][2 * outmost_pole.a + 1] = NAN;
            iir->ab[0][2 * outmost_pole.b] = iir->ab[0][2 * outmost_pole.b + 1] = NAN;
            iir->ab[1][2 * nearest_zero.a] = iir->ab[1][2 * nearest_zero.a + 1] = NAN;
            iir->ab[1][2 * nearest_zero.b] = iir->ab[1][2 * nearest_zero.b + 1] = NAN;

            /* normalize so the leading denominator coefficient is 1
             * (expand() stores re/im pairs, hence indices 0/2/4) */
            iir->biquads[current_biquad].a[0] = 1.;
            iir->biquads[current_biquad].a[1] = a[2] / a[4];
            iir->biquads[current_biquad].a[2] = a[0] / a[4];
            iir->biquads[current_biquad].b[0] = b[4] / a[4];
            iir->biquads[current_biquad].b[1] = b[2] / a[4];
            iir->biquads[current_biquad].b[2] = b[0] / a[4];

            /* optional unity-DC-gain normalization per section */
            if (s->normalize &&
                fabs(iir->biquads[current_biquad].b[0] +
                     iir->biquads[current_biquad].b[1] +
                     iir->biquads[current_biquad].b[2]) > 1e-6) {
                factor = (iir->biquads[current_biquad].a[0] +
                          iir->biquads[current_biquad].a[1] +
                          iir->biquads[current_biquad].a[2]) /
                         (iir->biquads[current_biquad].b[0] +
                          iir->biquads[current_biquad].b[1] +
                          iir->biquads[current_biquad].b[2]);

                av_log(ctx, AV_LOG_VERBOSE, "factor=%f\n", factor);

                iir->biquads[current_biquad].b[0] *= factor;
                iir->biquads[current_biquad].b[1] *= factor;
                iir->biquads[current_biquad].b[2] *= factor;
            }

            /* fold the channel gain into the first section only */
            iir->biquads[current_biquad].b[0] *= (current_biquad ? 1.0 : iir->g);
            iir->biquads[current_biquad].b[1] *= (current_biquad ? 1.0 : iir->g);
            iir->biquads[current_biquad].b[2] *= (current_biquad ? 1.0 : iir->g);

            av_log(ctx, AV_LOG_VERBOSE, "a=%f %f %f:b=%f %f %f\n",
                   iir->biquads[current_biquad].a[0],
                   iir->biquads[current_biquad].a[1],
                   iir->biquads[current_biquad].a[2],
                   iir->biquads[current_biquad].b[0],
                   iir->biquads[current_biquad].b[1],
                   iir->biquads[current_biquad].b[2]);

            current_biquad++;
        }
    }

    return 0;
}
  560. static void convert_pr2zp(AVFilterContext *ctx, int channels)
  561. {
  562. AudioIIRContext *s = ctx->priv;
  563. int ch;
  564. for (ch = 0; ch < channels; ch++) {
  565. IIRChannel *iir = &s->iir[ch];
  566. int n;
  567. for (n = 0; n < iir->nb_ab[0]; n++) {
  568. double r = iir->ab[0][2*n];
  569. double angle = iir->ab[0][2*n+1];
  570. iir->ab[0][2*n] = r * cos(angle);
  571. iir->ab[0][2*n+1] = r * sin(angle);
  572. }
  573. for (n = 0; n < iir->nb_ab[1]; n++) {
  574. double r = iir->ab[1][2*n];
  575. double angle = iir->ab[1][2*n+1];
  576. iir->ab[1][2*n] = r * cos(angle);
  577. iir->ab[1][2*n+1] = r * sin(angle);
  578. }
  579. }
  580. }
  581. static void convert_sp2zp(AVFilterContext *ctx, int channels)
  582. {
  583. AudioIIRContext *s = ctx->priv;
  584. int ch;
  585. for (ch = 0; ch < channels; ch++) {
  586. IIRChannel *iir = &s->iir[ch];
  587. int n;
  588. for (n = 0; n < iir->nb_ab[0]; n++) {
  589. double sr = iir->ab[0][2*n];
  590. double si = iir->ab[0][2*n+1];
  591. double snr = 1. + sr;
  592. double sdr = 1. - sr;
  593. double div = sdr * sdr + si * si;
  594. iir->ab[0][2*n] = (snr * sdr - si * si) / div;
  595. iir->ab[0][2*n+1] = (sdr * si + snr * si) / div;
  596. }
  597. for (n = 0; n < iir->nb_ab[1]; n++) {
  598. double sr = iir->ab[1][2*n];
  599. double si = iir->ab[1][2*n+1];
  600. double snr = 1. + sr;
  601. double sdr = 1. - sr;
  602. double div = sdr * sdr + si * si;
  603. iir->ab[1][2*n] = (snr * sdr - si * si) / div;
  604. iir->ab[1][2*n+1] = (sdr * si + snr * si) / div;
  605. }
  606. }
  607. }
  608. static void convert_pd2zp(AVFilterContext *ctx, int channels)
  609. {
  610. AudioIIRContext *s = ctx->priv;
  611. int ch;
  612. for (ch = 0; ch < channels; ch++) {
  613. IIRChannel *iir = &s->iir[ch];
  614. int n;
  615. for (n = 0; n < iir->nb_ab[0]; n++) {
  616. double r = iir->ab[0][2*n];
  617. double angle = M_PI*iir->ab[0][2*n+1]/180.;
  618. iir->ab[0][2*n] = r * cos(angle);
  619. iir->ab[0][2*n+1] = r * sin(angle);
  620. }
  621. for (n = 0; n < iir->nb_ab[1]; n++) {
  622. double r = iir->ab[1][2*n];
  623. double angle = M_PI*iir->ab[1][2*n+1]/180.;
  624. iir->ab[1][2*n] = r * cos(angle);
  625. iir->ab[1][2*n+1] = r * sin(angle);
  626. }
  627. }
  628. }
  629. static void check_stability(AVFilterContext *ctx, int channels)
  630. {
  631. AudioIIRContext *s = ctx->priv;
  632. int ch;
  633. for (ch = 0; ch < channels; ch++) {
  634. IIRChannel *iir = &s->iir[ch];
  635. for (int n = 0; n < iir->nb_ab[0]; n++) {
  636. double pr = hypot(iir->ab[0][2*n], iir->ab[0][2*n+1]);
  637. if (pr >= 1.) {
  638. av_log(ctx, AV_LOG_WARNING, "pole %d at channel %d is unstable\n", n, ch);
  639. break;
  640. }
  641. }
  642. }
  643. }
  644. static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
  645. {
  646. const uint8_t *font;
  647. int font_height;
  648. int i;
  649. font = avpriv_cga_font, font_height = 8;
  650. for (i = 0; txt[i]; i++) {
  651. int char_y, mask;
  652. uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
  653. for (char_y = 0; char_y < font_height; char_y++) {
  654. for (mask = 0x80; mask; mask >>= 1) {
  655. if (font[txt[i] * font_height + char_y] & mask)
  656. AV_WL32(p, color);
  657. p += 4;
  658. }
  659. p += pic->linesize[0] - 8 * 4;
  660. }
  661. }
  662. }
  663. static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
  664. {
  665. int dx = FFABS(x1-x0);
  666. int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
  667. int err = (dx>dy ? dx : -dy) / 2, e2;
  668. for (;;) {
  669. AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);
  670. if (x0 == x1 && y0 == y1)
  671. break;
  672. e2 = err;
  673. if (e2 >-dx) {
  674. err -= dy;
  675. x0--;
  676. }
  677. if (e2 < dy) {
  678. err += dx;
  679. y0 += sy;
  680. }
  681. }
  682. }
  683. static double distance(double x0, double x1, double y0, double y1)
  684. {
  685. return hypot(x0 - x1, y0 - y1);
  686. }
  687. static void get_response(int channel, int format, double w,
  688. const double *b, const double *a,
  689. int nb_b, int nb_a, double *magnitude, double *phase)
  690. {
  691. double realz, realp;
  692. double imagz, imagp;
  693. double real, imag;
  694. double div;
  695. if (format == 0) {
  696. realz = 0., realp = 0.;
  697. imagz = 0., imagp = 0.;
  698. for (int x = 0; x < nb_a; x++) {
  699. realz += cos(-x * w) * a[x];
  700. imagz += sin(-x * w) * a[x];
  701. }
  702. for (int x = 0; x < nb_b; x++) {
  703. realp += cos(-x * w) * b[x];
  704. imagp += sin(-x * w) * b[x];
  705. }
  706. div = realp * realp + imagp * imagp;
  707. real = (realz * realp + imagz * imagp) / div;
  708. imag = (imagz * realp - imagp * realz) / div;
  709. *magnitude = hypot(real, imag);
  710. *phase = atan2(imag, real);
  711. } else {
  712. double p = 1., z = 1.;
  713. double acc = 0.;
  714. for (int x = 0; x < nb_a; x++) {
  715. z *= distance(cos(w), a[2 * x], sin(w), a[2 * x + 1]);
  716. acc += atan2(sin(w) - a[2 * x + 1], cos(w) - a[2 * x]);
  717. }
  718. for (int x = 0; x < nb_b; x++) {
  719. p *= distance(cos(w), b[2 * x], sin(w), b[2 * x + 1]);
  720. acc -= atan2(sin(w) - b[2 * x + 1], cos(w) - b[2 * x]);
  721. }
  722. *magnitude = z / p;
  723. *phase = acc;
  724. }
  725. }
/**
 * Render the magnitude, unwrapped phase and group-delay curves of
 * channel s->ir_channel into the response video frame (one color per
 * curve), with min/max legends when the frame is large enough.
 */
static void draw_response(AVFilterContext *ctx, AVFrame *out, int sample_rate)
{
    AudioIIRContext *s = ctx->priv;
    double *mag, *phase, *temp, *delay, min = DBL_MAX, max = -DBL_MAX;
    double min_delay = DBL_MAX, max_delay = -DBL_MAX, min_phase, max_phase;
    int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
    char text[32];
    int ch, i;

    memset(out->data[0], 0, s->h * out->linesize[0]);

    phase = av_malloc_array(s->w, sizeof(*phase));
    temp = av_malloc_array(s->w, sizeof(*temp));
    mag = av_malloc_array(s->w, sizeof(*mag));
    delay = av_malloc_array(s->w, sizeof(*delay));
    if (!mag || !phase || !delay || !temp)
        goto end;

    ch = av_clip(s->ir_channel, 0, s->channels - 1);
    /* sample the response at s->w points spanning [0, pi] */
    for (i = 0; i < s->w; i++) {
        const double *b = s->iir[ch].ab[0];
        const double *a = s->iir[ch].ab[1];
        const int nb_b = s->iir[ch].nb_ab[0];
        const int nb_a = s->iir[ch].nb_ab[1];
        double w = i * M_PI / (s->w - 1);
        double m, p;

        get_response(ch, s->format, w, b, a, nb_b, nb_a, &m, &p);

        mag[i] = s->iir[ch].g * m;
        phase[i] = p;
        min = fmin(min, mag[i]);
        max = fmax(max, mag[i]);
    }

    /* unwrap the phase: accumulate a +/-2*pi correction per wrap */
    temp[0] = 0.;
    for (i = 0; i < s->w - 1; i++) {
        double d = phase[i] - phase[i + 1];
        temp[i + 1] = ceil(fabs(d) / (2. * M_PI)) * 2. * M_PI * ((d > M_PI) - (d < -M_PI));
    }

    min_phase = phase[0];
    max_phase = phase[0];
    for (i = 1; i < s->w; i++) {
        temp[i] += temp[i - 1];
        phase[i] += temp[i];
        min_phase = fmin(min_phase, phase[i]);
        max_phase = fmax(max_phase, phase[i]);
    }

    /* group delay: negative phase slope, scaled to samples */
    for (i = 0; i < s->w - 1; i++) {
        double div = s->w / (double)sample_rate;
        delay[i + 1] = -(phase[i] - phase[i + 1]) / div;
        min_delay = fmin(min_delay, delay[i + 1]);
        max_delay = fmax(max_delay, delay[i + 1]);
    }
    delay[0] = delay[1];

    /* scale each curve to the frame height and draw it
     * NOTE(review): divides by max and by (max_delay - min_delay) /
     * (max_phase - min_phase); a degenerate (flat or all-zero)
     * response would divide by zero here — confirm expected inputs */
    for (i = 0; i < s->w; i++) {
        int ymag = mag[i] / max * (s->h - 1);
        int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
        int yphase = (phase[i] - min_phase) / (max_phase - min_phase) * (s->h - 1);

        /* flip: row 0 is the top of the image */
        ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
        yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
        ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);

        if (prev_ymag < 0)
            prev_ymag = ymag;
        if (prev_yphase < 0)
            prev_yphase = yphase;
        if (prev_ydelay < 0)
            prev_ydelay = ydelay;

        draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
        draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
        draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);

        prev_ymag = ymag;
        prev_yphase = yphase;
        prev_ydelay = ydelay;
    }

    /* legends only when there is room for the 8x8 font */
    if (s->w > 400 && s->h > 100) {
        drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", max);
        drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);

        drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", min);
        drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);

        drawtext(out, 2, 22, "Max Phase:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", max_phase);
        drawtext(out, 15 * 8 + 2, 22, text, 0xDDDDDDDD);

        drawtext(out, 2, 32, "Min Phase:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", min_phase);
        drawtext(out, 15 * 8 + 2, 32, text, 0xDDDDDDDD);

        drawtext(out, 2, 42, "Max Delay:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", max_delay);
        drawtext(out, 11 * 8 + 2, 42, text, 0xDDDDDDDD);

        drawtext(out, 2, 52, "Min Delay:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", min_delay);
        drawtext(out, 11 * 8 + 2, 52, text, 0xDDDDDDDD);
    }

end:
    av_free(delay);
    av_free(temp);
    av_free(phase);
    av_free(mag);
}
/**
 * Link configuration: parse the option strings, convert all
 * coefficient formats into the processing domain (tf for direct,
 * biquads for serial), render the optional response video, and select
 * the per-channel worker for the negotiated sample format.
 *
 * Returns 0 on success or a negative AVERROR code.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioIIRContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ch, ret, i;

    s->channels = inlink->channels;
    s->iir = av_calloc(s->channels, sizeof(*s->iir));
    if (!s->iir)
        return AVERROR(ENOMEM);

    ret = read_gains(ctx, s->g_str, inlink->channels);
    if (ret < 0)
        return ret;

    ret = read_channels(ctx, inlink->channels, s->a_str, 0);
    if (ret < 0)
        return ret;

    ret = read_channels(ctx, inlink->channels, s->b_str, 1);
    if (ret < 0)
        return ret;

    /* bring every polar/S-plane variant into cartesian Z-plane form */
    if (s->format == 2) {
        convert_pr2zp(ctx, inlink->channels);
    } else if (s->format == 3) {
        convert_pd2zp(ctx, inlink->channels);
    } else if (s->format == 4) {
        convert_sp2zp(ctx, inlink->channels);
    }

    if (s->format > 0) {
        check_stability(ctx, inlink->channels);
    }

    av_frame_free(&s->video);
    if (s->response) {
        s->video = ff_get_video_buffer(ctx->outputs[1], s->w, s->h);
        if (!s->video)
            return AVERROR(ENOMEM);

        draw_response(ctx, s->video, inlink->sample_rate);
    }

    if (s->format == 0)
        av_log(ctx, AV_LOG_WARNING, "tf coefficients format is not recommended for too high number of zeros/poles.\n");

    if (s->format > 0 && s->process == 0) {
        av_log(ctx, AV_LOG_WARNING, "Direct processsing is not recommended for zp coefficients format.\n");

        ret = convert_zp2tf(ctx, inlink->channels);
        if (ret < 0)
            return ret;
    } else if (s->format == 0 && s->process == 1) {
        av_log(ctx, AV_LOG_ERROR, "Serial cascading is not implemented for transfer function.\n");
        return AVERROR_PATCHWELCOME;
    } else if (s->format > 0 && s->process == 1) {
        if (inlink->format == AV_SAMPLE_FMT_S16P)
            av_log(ctx, AV_LOG_WARNING, "Serial cascading is not recommended for i16 precision.\n");

        ret = decompose_zp2biquads(ctx, inlink->channels);
        if (ret < 0)
            return ret;
    }

    /* tf path: make the denominator monic and fold the channel gain
     * into the numerator */
    for (ch = 0; s->format == 0 && ch < inlink->channels; ch++) {
        IIRChannel *iir = &s->iir[ch];

        for (i = 1; i < iir->nb_ab[0]; i++) {
            iir->ab[0][i] /= iir->ab[0][0];
        }

        iir->ab[0][0] = 1.0;
        for (i = 0; i < iir->nb_ab[1]; i++) {
            iir->ab[1][i] *= iir->g;
        }

        normalize_coeffs(ctx, ch);
    }

    switch (inlink->format) {
    case AV_SAMPLE_FMT_DBLP: s->iir_channel = s->process == 1 ? iir_ch_serial_dblp : iir_ch_dblp; break;
    case AV_SAMPLE_FMT_FLTP: s->iir_channel = s->process == 1 ? iir_ch_serial_fltp : iir_ch_fltp; break;
    case AV_SAMPLE_FMT_S32P: s->iir_channel = s->process == 1 ? iir_ch_serial_s32p : iir_ch_s32p; break;
    case AV_SAMPLE_FMT_S16P: s->iir_channel = s->process == 1 ? iir_ch_serial_s16p : iir_ch_s16p; break;
    }

    return 0;
}
/**
 * Process one audio frame: run the selected per-channel worker across
 * all channels (in place when the input frame is writable), warn
 * about and reset clipping counters, and re-send the cached response
 * video whenever its rescaled pts advances.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AudioIIRContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    ThreadData td;
    AVFrame *out;
    int ch, ret;

    if (av_frame_is_writable(in)) {
        out = in; /* filter in place */
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    td.in = in;
    td.out = out;
    ctx->internal->execute(ctx, s->iir_channel, &td, NULL, outlink->channels);

    for (ch = 0; ch < outlink->channels; ch++) {
        if (s->iir[ch].clippings > 0)
            av_log(ctx, AV_LOG_WARNING, "Channel %d clipping %d times. Please reduce gain.\n",
                   ch, s->iir[ch].clippings);
        s->iir[ch].clippings = 0;
    }

    if (in != out)
        av_frame_free(&in);

    if (s->response) {
        AVFilterLink *outlink = ctx->outputs[1]; /* shadows the audio outlink */
        int64_t old_pts = s->video->pts;
        int64_t new_pts = av_rescale_q(out->pts, ctx->inputs[0]->time_base, outlink->time_base);

        if (new_pts > old_pts) {
            AVFrame *clone;

            s->video->pts = new_pts;
            clone = av_frame_clone(s->video);
            if (!clone)
                return AVERROR(ENOMEM);
            /* NOTE(review): 'out' is not freed on this or the next
             * error return — looks like a leak; confirm ownership
             * rules before changing */

            ret = ff_filter_frame(outlink, clone);
            if (ret < 0)
                return ret;
        }
    }

    return ff_filter_frame(outlink, out);
}
  939. static int config_video(AVFilterLink *outlink)
  940. {
  941. AVFilterContext *ctx = outlink->src;
  942. AudioIIRContext *s = ctx->priv;
  943. outlink->sample_aspect_ratio = (AVRational){1,1};
  944. outlink->w = s->w;
  945. outlink->h = s->h;
  946. outlink->frame_rate = s->rate;
  947. outlink->time_base = av_inv_q(outlink->frame_rate);
  948. return 0;
  949. }
  950. static av_cold int init(AVFilterContext *ctx)
  951. {
  952. AudioIIRContext *s = ctx->priv;
  953. AVFilterPad pad, vpad;
  954. int ret;
  955. if (!s->a_str || !s->b_str || !s->g_str) {
  956. av_log(ctx, AV_LOG_ERROR, "Valid coefficients are mandatory.\n");
  957. return AVERROR(EINVAL);
  958. }
  959. switch (s->precision) {
  960. case 0: s->sample_format = AV_SAMPLE_FMT_DBLP; break;
  961. case 1: s->sample_format = AV_SAMPLE_FMT_FLTP; break;
  962. case 2: s->sample_format = AV_SAMPLE_FMT_S32P; break;
  963. case 3: s->sample_format = AV_SAMPLE_FMT_S16P; break;
  964. default: return AVERROR_BUG;
  965. }
  966. pad = (AVFilterPad){
  967. .name = av_strdup("default"),
  968. .type = AVMEDIA_TYPE_AUDIO,
  969. .config_props = config_output,
  970. };
  971. if (!pad.name)
  972. return AVERROR(ENOMEM);
  973. ret = ff_insert_outpad(ctx, 0, &pad);
  974. if (ret < 0)
  975. return ret;
  976. if (s->response) {
  977. vpad = (AVFilterPad){
  978. .name = av_strdup("filter_response"),
  979. .type = AVMEDIA_TYPE_VIDEO,
  980. .config_props = config_video,
  981. };
  982. if (!vpad.name)
  983. return AVERROR(ENOMEM);
  984. ret = ff_insert_outpad(ctx, 1, &vpad);
  985. if (ret < 0)
  986. return ret;
  987. }
  988. return 0;
  989. }
  990. static av_cold void uninit(AVFilterContext *ctx)
  991. {
  992. AudioIIRContext *s = ctx->priv;
  993. int ch;
  994. if (s->iir) {
  995. for (ch = 0; ch < s->channels; ch++) {
  996. IIRChannel *iir = &s->iir[ch];
  997. av_freep(&iir->ab[0]);
  998. av_freep(&iir->ab[1]);
  999. av_freep(&iir->cache[0]);
  1000. av_freep(&iir->cache[1]);
  1001. av_freep(&iir->biquads);
  1002. }
  1003. }
  1004. av_freep(&s->iir);
  1005. av_freep(&ctx->output_pads[0].name);
  1006. if (s->response)
  1007. av_freep(&ctx->output_pads[1].name);
  1008. av_frame_free(&s->video);
  1009. }
/* Single audio input pad; all processing happens in filter_frame(). */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
#define OFFSET(x) offsetof(AudioIIRContext, x)
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* User options; long/short name pairs map to the same context field. */
static const AVOption aiir_options[] = {
    { "zeros", "set B/numerator/zeros coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
    { "z", "set B/numerator/zeros coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
    { "poles", "set A/denominator/poles coefficients", OFFSET(a_str),AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
    { "p", "set A/denominator/poles coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
    /* Per-channel gains, '|'-separated. */
    { "gains", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
    { "k", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
    { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
    { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
    /* Coefficient input format: transfer function or various zero/pole forms. */
    { "format", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, 0, 4, AF, "format" },
    { "f", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, 0, 4, AF, "format" },
    { "tf", "digital transfer function", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "format" },
    { "zp", "Z-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "format" },
    { "pr", "Z-plane zeros/poles (polar radians)", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "format" },
    { "pd", "Z-plane zeros/poles (polar degrees)", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, "format" },
    { "sp", "S-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, AF, "format" },
    /* Direct evaluation vs. serial cascading of second-order sections. */
    { "process", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, AF, "process" },
    { "r", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, AF, "process" },
    { "d", "direct", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "process" },
    { "s", "serial cascading", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "process" },
    /* Internal sample precision; mapped to a planar format in init(). */
    { "precision", "set filtering precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, "precision" },
    { "e", "set precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, "precision" },
    { "dbl", "double-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "precision" },
    { "flt", "single-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "precision" },
    { "i32", "32-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "precision" },
    { "i16", "16-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, "precision" },
    { "normalize", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
    { "n", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
    { "mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
    /* Options below control the optional frequency-response video output. */
    { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
    { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
    { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
    { NULL },
};
AVFILTER_DEFINE_CLASS(aiir);

/* Filter registration. No static .outputs table: pads are created at
 * runtime in init() (hence AVFILTER_FLAG_DYNAMIC_OUTPUTS), since the
 * optional response-video pad depends on the "response" option. */
AVFilter ff_af_aiir = {
    .name          = "aiir",
    .description   = NULL_IF_CONFIG_SMALL("Apply Infinite Impulse Response filter with supplied coefficients."),
    .priv_size     = sizeof(AudioIIRContext),
    .priv_class    = &aiir_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = inputs,
    .flags         = AVFILTER_FLAG_DYNAMIC_OUTPUTS |
                     AVFILTER_FLAG_SLICE_THREADS,
};