You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

335 lines
11KB

  1. /*
  2. * Copyright (c) 2018 Paul B Mahol
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include "libavutil/avassert.h"
  21. #include "libavutil/avstring.h"
  22. #include "libavutil/opt.h"
  23. #include "audio.h"
  24. #include "avfilter.h"
  25. #include "internal.h"
/* Per-instance state for the aiir (arbitrary-coefficient IIR) filter. */
typedef struct AudioIIRContext {
    const AVClass *class;
    char *a_str, *b_str;       /* raw coefficient strings from the "a"/"b" options */
    double dry_gain, wet_gain; /* input ("dry") and output ("wet") gains */

    int *nb_a, *nb_b;          /* per-channel coefficient counts */
    double **a, **b;           /* per-channel denominator/numerator coefficients */
    double **input, **output;  /* per-channel delay lines (input/output history) */
    int clippings;             /* clipped-sample count, logged and reset each frame */
    int channels;              /* channel count cached at config time, used by uninit() */

    /* Format-specific worker selected in config_output(). */
    void (*iir_frame)(AVFilterContext *ctx, AVFrame *in, AVFrame *out);
} AudioIIRContext;
  37. static int query_formats(AVFilterContext *ctx)
  38. {
  39. AVFilterFormats *formats;
  40. AVFilterChannelLayouts *layouts;
  41. static const enum AVSampleFormat sample_fmts[] = {
  42. AV_SAMPLE_FMT_DBLP,
  43. AV_SAMPLE_FMT_FLTP,
  44. AV_SAMPLE_FMT_S32P,
  45. AV_SAMPLE_FMT_S16P,
  46. AV_SAMPLE_FMT_NONE
  47. };
  48. int ret;
  49. layouts = ff_all_channel_counts();
  50. if (!layouts)
  51. return AVERROR(ENOMEM);
  52. ret = ff_set_common_channel_layouts(ctx, layouts);
  53. if (ret < 0)
  54. return ret;
  55. formats = ff_make_format_list(sample_fmts);
  56. if (!formats)
  57. return AVERROR(ENOMEM);
  58. ret = ff_set_common_formats(ctx, formats);
  59. if (ret < 0)
  60. return ret;
  61. formats = ff_all_samplerates();
  62. if (!formats)
  63. return AVERROR(ENOMEM);
  64. return ff_set_common_samplerates(ctx, formats);
  65. }
/* Template for the per-format filtering routine.  Expands to a function that
 * runs a direct-form I IIR filter over every channel of a planar frame:
 * the input sample (scaled by dry gain) enters the input history, the
 * numerator (b) taps and negated denominator (a) taps are accumulated in
 * double precision, and the result (scaled by wet gain) is written out.
 *
 * name          - suffix of the generated function (iir_frame_<name>)
 * type          - sample type of the input/output planes
 * min, max      - clamping range for integer formats
 * need_clipping - non-zero to clamp out-of-range output and count clippings
 */
#define IIR_FRAME(name, type, min, max, need_clipping) \
static void iir_frame_## name(AVFilterContext *ctx, AVFrame *in, AVFrame *out) \
{ \
    AudioIIRContext *s = ctx->priv; \
    const double ig = s->dry_gain; \
    const double og = s->wet_gain; \
    int ch, n; \
 \
    for (ch = 0; ch < out->channels; ch++) { \
        const type *src = (const type *)in->extended_data[ch]; \
        double *ic = (double *)s->input[ch];  /* input history, ic[0] = newest */ \
        double *oc = (double *)s->output[ch]; /* output history, oc[0] = newest */ \
        const int nb_a = s->nb_a[ch]; \
        const int nb_b = s->nb_b[ch]; \
        const double *a = s->a[ch]; \
        const double *b = s->b[ch]; \
        type *dst = (type *)out->extended_data[ch]; \
 \
        for (n = 0; n < in->nb_samples; n++) { \
            double sample = 0.; \
            int x; \
 \
            /* shift both delay lines one slot to make room for the new sample */ \
            memmove(&ic[1], &ic[0], (nb_b - 1) * sizeof(*ic)); \
            memmove(&oc[1], &oc[0], (nb_a - 1) * sizeof(*oc)); \
            ic[0] = src[n] * ig; \
            /* feed-forward (numerator) taps */ \
            for (x = 0; x < nb_b; x++) \
                sample += b[x] * ic[x]; \
 \
            /* feed-back (denominator) taps; a[0] was normalized to 1 at config */ \
            for (x = 1; x < nb_a; x++) \
                sample -= a[x] * oc[x]; \
 \
            oc[0] = sample; \
            sample *= og; \
            if (need_clipping && sample < min) { \
                s->clippings++; \
                dst[n] = min; \
            } else if (need_clipping && sample > max) { \
                s->clippings++; \
                dst[n] = max; \
            } else { \
                dst[n] = sample; \
            } \
        } \
    } \
}

/* One filtering routine per supported planar sample format. */
IIR_FRAME(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
IIR_FRAME(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
IIR_FRAME(fltp, float, -1., 1., 0)
IIR_FRAME(dblp, double, -1., 1., 0)
  115. static void count_coefficients(char *item_str, int *nb_items)
  116. {
  117. char *p;
  118. *nb_items = 1;
  119. for (p = item_str; *p && *p != '|'; p++) {
  120. if (*p == ' ')
  121. (*nb_items)++;
  122. }
  123. }
  124. static int read_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
  125. {
  126. char *p, *arg, *old_str, *saveptr = NULL;
  127. int i;
  128. p = old_str = av_strdup(item_str);
  129. if (!p)
  130. return AVERROR(ENOMEM);
  131. for (i = 0; i < nb_items; i++) {
  132. if (!(arg = av_strtok(p, " ", &saveptr)))
  133. break;
  134. p = NULL;
  135. if (sscanf(arg, "%lf", &dst[i]) != 1) {
  136. av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
  137. return AVERROR(EINVAL);
  138. }
  139. }
  140. av_freep(&old_str);
  141. return 0;
  142. }
  143. static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int *nb, double **c, double **cache)
  144. {
  145. char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
  146. int i, ret;
  147. p = old_str = av_strdup(item_str);
  148. if (!p)
  149. return AVERROR(ENOMEM);
  150. for (i = 0; i < channels; i++) {
  151. if (!(arg = av_strtok(p, "|", &saveptr)))
  152. arg = prev_arg;
  153. p = NULL;
  154. count_coefficients(arg, &nb[i]);
  155. cache[i] = av_calloc(nb[i], sizeof(cache[i]));
  156. c[i] = av_calloc(nb[i], sizeof(c[i]));
  157. if (!c[i] || !cache[i])
  158. return AVERROR(ENOMEM);
  159. ret = read_coefficients(ctx, arg, nb[i], c[i]);
  160. if (ret < 0)
  161. return ret;
  162. prev_arg = arg;
  163. }
  164. av_freep(&old_str);
  165. return 0;
  166. }
  167. static int config_output(AVFilterLink *outlink)
  168. {
  169. AVFilterContext *ctx = outlink->src;
  170. AudioIIRContext *s = ctx->priv;
  171. AVFilterLink *inlink = ctx->inputs[0];
  172. int ch, ret, i;
  173. s->channels = inlink->channels;
  174. s->a = av_calloc(inlink->channels, sizeof(*s->a));
  175. s->b = av_calloc(inlink->channels, sizeof(*s->b));
  176. s->nb_a = av_calloc(inlink->channels, sizeof(*s->nb_a));
  177. s->nb_b = av_calloc(inlink->channels, sizeof(*s->nb_b));
  178. s->input = av_calloc(inlink->channels, sizeof(*s->input));
  179. s->output = av_calloc(inlink->channels, sizeof(*s->output));
  180. if (!s->a || !s->b || !s->nb_a || !s->nb_b || !s->input || !s->output)
  181. return AVERROR(ENOMEM);
  182. ret = read_channels(ctx, inlink->channels, s->a_str, s->nb_a, s->a, s->output);
  183. if (ret < 0)
  184. return ret;
  185. ret = read_channels(ctx, inlink->channels, s->b_str, s->nb_b, s->b, s->input);
  186. if (ret < 0)
  187. return ret;
  188. for (ch = 0; ch < inlink->channels; ch++) {
  189. for (i = 1; i < s->nb_a[ch]; i++) {
  190. s->a[ch][i] /= s->a[ch][0];
  191. }
  192. for (i = 0; i < s->nb_b[ch]; i++) {
  193. s->b[ch][i] /= s->a[ch][0];
  194. }
  195. }
  196. switch (inlink->format) {
  197. case AV_SAMPLE_FMT_DBLP: s->iir_frame = iir_frame_dblp; break;
  198. case AV_SAMPLE_FMT_FLTP: s->iir_frame = iir_frame_fltp; break;
  199. case AV_SAMPLE_FMT_S32P: s->iir_frame = iir_frame_s32p; break;
  200. case AV_SAMPLE_FMT_S16P: s->iir_frame = iir_frame_s16p; break;
  201. }
  202. return 0;
  203. }
  204. static int filter_frame(AVFilterLink *inlink, AVFrame *in)
  205. {
  206. AVFilterContext *ctx = inlink->dst;
  207. AudioIIRContext *s = ctx->priv;
  208. AVFilterLink *outlink = ctx->outputs[0];
  209. AVFrame *out;
  210. if (av_frame_is_writable(in)) {
  211. out = in;
  212. } else {
  213. out = ff_get_audio_buffer(outlink, in->nb_samples);
  214. if (!out) {
  215. av_frame_free(&in);
  216. return AVERROR(ENOMEM);
  217. }
  218. av_frame_copy_props(out, in);
  219. }
  220. s->iir_frame(ctx, in, out);
  221. if (s->clippings > 0)
  222. av_log(ctx, AV_LOG_WARNING, "clipping %d times. Please reduce gain.\n", s->clippings);
  223. s->clippings = 0;
  224. if (in != out)
  225. av_frame_free(&in);
  226. return ff_filter_frame(outlink, out);
  227. }
  228. static av_cold void uninit(AVFilterContext *ctx)
  229. {
  230. AudioIIRContext *s = ctx->priv;
  231. int ch;
  232. if (s->a) {
  233. for (ch = 0; ch < s->channels; ch++) {
  234. av_freep(&s->a[ch]);
  235. av_freep(&s->output[ch]);
  236. }
  237. }
  238. av_freep(&s->a);
  239. if (s->b) {
  240. for (ch = 0; ch < s->channels; ch++) {
  241. av_freep(&s->b[ch]);
  242. av_freep(&s->input[ch]);
  243. }
  244. }
  245. av_freep(&s->b);
  246. av_freep(&s->input);
  247. av_freep(&s->output);
  248. av_freep(&s->nb_a);
  249. av_freep(&s->nb_b);
  250. }
/* Single audio input; processing happens in filter_frame(). */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single audio output; coefficients are parsed and the worker selected
 * in config_output() once the link parameters are known. */
static const AVFilterPad outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};
#define OFFSET(x) offsetof(AudioIIRContext, x)
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Coefficients are space-separated doubles; per-channel entries are
 * separated by '|' (parsed by read_channels(), which reuses the last
 * entry when fewer entries than channels are given). */
static const AVOption aiir_options[] = {
    { "a", "set A/denominator/poles coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1 1"}, 0, 0, AF },
    { "b", "set B/numerator/zeros coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1 1"}, 0, 0, AF },
    { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
    { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
    { NULL },
};
AVFILTER_DEFINE_CLASS(aiir);

/* Filter registration: the aiir audio filter entry point. */
AVFilter ff_af_aiir = {
    .name          = "aiir",
    .description   = NULL_IF_CONFIG_SMALL("Apply Infinite Impulse Response filter with supplied coefficients."),
    .priv_size     = sizeof(AudioIIRContext),
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = outputs,
    .priv_class    = &aiir_class,
};