You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

355 lines
11KB

  1. /*
  2. * Copyright (c) 2018 Paul B Mahol
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include "libavutil/avassert.h"
  21. #include "libavutil/avstring.h"
  22. #include "libavutil/opt.h"
  23. #include "audio.h"
  24. #include "avfilter.h"
  25. #include "internal.h"
/**
 * Private context of the aiir filter: user options plus the per-channel
 * coefficient arrays and delay lines built in config_output().
 */
typedef struct AudioIIRContext {
    const AVClass *class;   ///< AVOptions class (must be the first field)

    char *a_str, *b_str;    ///< raw option strings: denominator (A) / numerator (B) coefficients,
                            ///< space-separated, channels separated by '|'
    double dry_gain, wet_gain;  ///< input (dry) and output (wet) gain factors

    int *nb_a, *nb_b;       ///< per-channel number of A / B coefficients
    double **a, **b;        ///< per-channel A / B coefficient arrays (normalized by a[0])
    double **input, **output;   ///< per-channel delay lines for past inputs / outputs
    int clippings;          ///< clipped-sample counter, reset after each frame's warning
    int channels;           ///< channel count, cached for uninit()

    ///< format-specific filtering routine selected in config_output()
    void (*iir_frame)(AVFilterContext *ctx, AVFrame *in, AVFrame *out);
} AudioIIRContext;
  37. static int query_formats(AVFilterContext *ctx)
  38. {
  39. AVFilterFormats *formats;
  40. AVFilterChannelLayouts *layouts;
  41. static const enum AVSampleFormat sample_fmts[] = {
  42. AV_SAMPLE_FMT_DBLP,
  43. AV_SAMPLE_FMT_FLTP,
  44. AV_SAMPLE_FMT_S32P,
  45. AV_SAMPLE_FMT_S16P,
  46. AV_SAMPLE_FMT_NONE
  47. };
  48. int ret;
  49. layouts = ff_all_channel_counts();
  50. if (!layouts)
  51. return AVERROR(ENOMEM);
  52. ret = ff_set_common_channel_layouts(ctx, layouts);
  53. if (ret < 0)
  54. return ret;
  55. formats = ff_make_format_list(sample_fmts);
  56. if (!formats)
  57. return AVERROR(ENOMEM);
  58. ret = ff_set_common_formats(ctx, formats);
  59. if (ret < 0)
  60. return ret;
  61. formats = ff_all_samplerates();
  62. if (!formats)
  63. return AVERROR(ENOMEM);
  64. return ff_set_common_samplerates(ctx, formats);
  65. }
/*
 * Template for the per-format filtering functions.
 *
 * Implements a direct form I IIR filter per channel:
 *   y[n] = sum(b[x] * x[n-x]) - sum(a[x] * y[n-x], x >= 1)
 * with s->input[]/s->output[] acting as delay lines for past input and
 * output samples (a[] was pre-normalized by a[0] in config_output, so
 * a[0] itself is never read here).
 *
 * @param name          suffix of the generated iir_frame_<name> function
 * @param type          sample type of the format (int16_t .. double)
 * @param min/max       clipping bounds for integer formats
 * @param need_clipping non-zero for integer formats; float/double pass through
 */
#define IIR_FRAME(name, type, min, max, need_clipping)                  \
static void iir_frame_## name(AVFilterContext *ctx, AVFrame *in, AVFrame *out) \
{                                                                       \
    AudioIIRContext *s = ctx->priv;                                     \
    const double ig = s->dry_gain;                                      \
    const double og = s->wet_gain;                                      \
    int ch, n;                                                          \
                                                                        \
    for (ch = 0; ch < out->channels; ch++) {                            \
        const type *src = (const type *)in->extended_data[ch];          \
        double *ic = (double *)s->input[ch];                            \
        double *oc = (double *)s->output[ch];                           \
        const int nb_a = s->nb_a[ch];                                   \
        const int nb_b = s->nb_b[ch];                                   \
        const double *a = s->a[ch];                                     \
        const double *b = s->b[ch];                                     \
        type *dst = (type *)out->extended_data[ch];                     \
                                                                        \
        for (n = 0; n < in->nb_samples; n++) {                          \
            double sample = 0.;                                         \
            int x;                                                      \
                                                                        \
            /* shift the delay lines by one sample (regions overlap) */ \
            memmove(&ic[1], &ic[0], (nb_b - 1) * sizeof(*ic));          \
            memmove(&oc[1], &oc[0], (nb_a - 1) * sizeof(*oc));          \
            ic[0] = src[n] * ig;                                        \
            /* feed-forward (numerator) part */                         \
            for (x = 0; x < nb_b; x++)                                  \
                sample += b[x] * ic[x];                                 \
                                                                        \
            /* feedback (denominator) part, a[0] implicitly 1 */        \
            for (x = 1; x < nb_a; x++)                                  \
                sample -= a[x] * oc[x];                                 \
                                                                        \
            /* store unclipped value so feedback stays exact */         \
            oc[0] = sample;                                             \
            sample *= og;                                               \
            if (need_clipping && sample < min) {                        \
                s->clippings++;                                         \
                dst[n] = min;                                           \
            } else if (need_clipping && sample > max) {                 \
                s->clippings++;                                         \
                dst[n] = max;                                           \
            } else {                                                    \
                dst[n] = sample;                                        \
            }                                                           \
        }                                                               \
    }                                                                   \
}

IIR_FRAME(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
IIR_FRAME(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
IIR_FRAME(fltp, float,   -1., 1., 0)
IIR_FRAME(dblp, double,  -1., 1., 0)
  115. static void count_coefficients(char *item_str, int *nb_items)
  116. {
  117. char *p;
  118. if (!item_str)
  119. return;
  120. *nb_items = 1;
  121. for (p = item_str; *p && *p != '|'; p++) {
  122. if (*p == ' ')
  123. (*nb_items)++;
  124. }
  125. }
  126. static int read_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
  127. {
  128. char *p, *arg, *old_str, *saveptr = NULL;
  129. int i;
  130. p = old_str = av_strdup(item_str);
  131. if (!p)
  132. return AVERROR(ENOMEM);
  133. for (i = 0; i < nb_items; i++) {
  134. if (!(arg = av_strtok(p, " ", &saveptr)))
  135. break;
  136. p = NULL;
  137. if (sscanf(arg, "%lf", &dst[i]) != 1) {
  138. av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
  139. return AVERROR(EINVAL);
  140. }
  141. }
  142. av_freep(&old_str);
  143. return 0;
  144. }
  145. static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int *nb, double **c, double **cache)
  146. {
  147. char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
  148. int i, ret;
  149. p = old_str = av_strdup(item_str);
  150. if (!p)
  151. return AVERROR(ENOMEM);
  152. for (i = 0; i < channels; i++) {
  153. if (!(arg = av_strtok(p, "|", &saveptr)))
  154. arg = prev_arg;
  155. if (!arg)
  156. return AVERROR(EINVAL);
  157. count_coefficients(arg, &nb[i]);
  158. p = NULL;
  159. cache[i] = av_calloc(nb[i] + 1, sizeof(double));
  160. c[i] = av_calloc(nb[i], sizeof(double));
  161. if (!c[i] || !cache[i])
  162. return AVERROR(ENOMEM);
  163. ret = read_coefficients(ctx, arg, nb[i], c[i]);
  164. if (ret < 0)
  165. return ret;
  166. prev_arg = arg;
  167. }
  168. av_freep(&old_str);
  169. return 0;
  170. }
  171. static int config_output(AVFilterLink *outlink)
  172. {
  173. AVFilterContext *ctx = outlink->src;
  174. AudioIIRContext *s = ctx->priv;
  175. AVFilterLink *inlink = ctx->inputs[0];
  176. int ch, ret, i;
  177. s->channels = inlink->channels;
  178. s->a = av_calloc(inlink->channels, sizeof(*s->a));
  179. s->b = av_calloc(inlink->channels, sizeof(*s->b));
  180. s->nb_a = av_calloc(inlink->channels, sizeof(*s->nb_a));
  181. s->nb_b = av_calloc(inlink->channels, sizeof(*s->nb_b));
  182. s->input = av_calloc(inlink->channels, sizeof(*s->input));
  183. s->output = av_calloc(inlink->channels, sizeof(*s->output));
  184. if (!s->a || !s->b || !s->nb_a || !s->nb_b || !s->input || !s->output)
  185. return AVERROR(ENOMEM);
  186. ret = read_channels(ctx, inlink->channels, s->a_str, s->nb_a, s->a, s->output);
  187. if (ret < 0)
  188. return ret;
  189. ret = read_channels(ctx, inlink->channels, s->b_str, s->nb_b, s->b, s->input);
  190. if (ret < 0)
  191. return ret;
  192. for (ch = 0; ch < inlink->channels; ch++) {
  193. for (i = 1; i < s->nb_a[ch]; i++) {
  194. s->a[ch][i] /= s->a[ch][0];
  195. }
  196. for (i = 0; i < s->nb_b[ch]; i++) {
  197. s->b[ch][i] /= s->a[ch][0];
  198. }
  199. }
  200. switch (inlink->format) {
  201. case AV_SAMPLE_FMT_DBLP: s->iir_frame = iir_frame_dblp; break;
  202. case AV_SAMPLE_FMT_FLTP: s->iir_frame = iir_frame_fltp; break;
  203. case AV_SAMPLE_FMT_S32P: s->iir_frame = iir_frame_s32p; break;
  204. case AV_SAMPLE_FMT_S16P: s->iir_frame = iir_frame_s16p; break;
  205. }
  206. return 0;
  207. }
  208. static int filter_frame(AVFilterLink *inlink, AVFrame *in)
  209. {
  210. AVFilterContext *ctx = inlink->dst;
  211. AudioIIRContext *s = ctx->priv;
  212. AVFilterLink *outlink = ctx->outputs[0];
  213. AVFrame *out;
  214. if (av_frame_is_writable(in)) {
  215. out = in;
  216. } else {
  217. out = ff_get_audio_buffer(outlink, in->nb_samples);
  218. if (!out) {
  219. av_frame_free(&in);
  220. return AVERROR(ENOMEM);
  221. }
  222. av_frame_copy_props(out, in);
  223. }
  224. s->iir_frame(ctx, in, out);
  225. if (s->clippings > 0)
  226. av_log(ctx, AV_LOG_WARNING, "clipping %d times. Please reduce gain.\n", s->clippings);
  227. s->clippings = 0;
  228. if (in != out)
  229. av_frame_free(&in);
  230. return ff_filter_frame(outlink, out);
  231. }
  232. static av_cold int init(AVFilterContext *ctx)
  233. {
  234. AudioIIRContext *s = ctx->priv;
  235. if (!s->a_str || !s->b_str) {
  236. av_log(ctx, AV_LOG_ERROR, "Valid coefficients are mandatory.\n");
  237. return AVERROR(EINVAL);
  238. }
  239. return 0;
  240. }
  241. static av_cold void uninit(AVFilterContext *ctx)
  242. {
  243. AudioIIRContext *s = ctx->priv;
  244. int ch;
  245. if (s->a) {
  246. for (ch = 0; ch < s->channels; ch++) {
  247. av_freep(&s->a[ch]);
  248. av_freep(&s->output[ch]);
  249. }
  250. }
  251. av_freep(&s->a);
  252. if (s->b) {
  253. for (ch = 0; ch < s->channels; ch++) {
  254. av_freep(&s->b[ch]);
  255. av_freep(&s->input[ch]);
  256. }
  257. }
  258. av_freep(&s->b);
  259. av_freep(&s->input);
  260. av_freep(&s->output);
  261. av_freep(&s->nb_a);
  262. av_freep(&s->nb_b);
  263. }
/* Single audio input; frames are processed by filter_frame(). */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single audio output; coefficients are parsed in config_output(). */
static const AVFilterPad outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};
#define OFFSET(x) offsetof(AudioIIRContext, x)
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Coefficient strings: values space-separated, channels separated by '|'. */
static const AVOption aiir_options[] = {
    { "a", "set A/denominator/poles coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1 1"}, 0, 0, AF },
    { "b", "set B/numerator/zeros coefficients",   OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1 1"}, 0, 0, AF },
    { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
    { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
    { NULL },
};
AVFILTER_DEFINE_CLASS(aiir);

/* Filter registration entry. */
AVFilter ff_af_aiir = {
    .name          = "aiir",
    .description   = NULL_IF_CONFIG_SMALL("Apply Infinite Impulse Response filter with supplied coefficients."),
    .priv_size     = sizeof(AudioIIRContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = outputs,
    .priv_class    = &aiir_class,
};