/*
 * Copyright (c) 2019 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

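/**
 * @file
 * Normalized Least-Mean-Squares (NLMS) adaptive audio filter.
 *
 * Illustrative invocation (file names and option values are placeholders):
 *   ffmpeg -i input.wav -i desired.wav \
 *          -filter_complex "[0:a][1:a]anlms=order=256:mu=0.75:out_mode=o" out.wav
 */
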
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/float_dsp.h"
#include "libavutil/opt.h"

#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "filters.h"
#include "internal.h"

enum OutModes {
    IN_MODE,
    DESIRED_MODE,
    OUT_MODE,
    NOISE_MODE,
    NB_OMODES
};

typedef struct AudioNLMSContext {
    const AVClass *class;

    int order;               // number of filter taps
    float mu;                // adaptation step size
    float eps;               // regularization added to the input power
    float leakage;           // coefficient leakage factor
    int output_mode;         // which signal to emit (enum OutModes)

    int kernel_size;         // order aligned up for the DSP routines

    AVFrame *offset;         // per-channel write position into the delay line
    AVFrame *delay;          // per-channel delay line (input history)
    AVFrame *coeffs;         // per-channel adaptive filter coefficients
    AVFrame *tmp;            // per-channel scratch buffer
    AVFrame *frame[2];       // queued input and desired frames

    AVFloatDSPContext *fdsp;
} AudioNLMSContext;

#define OFFSET(x) offsetof(AudioNLMSContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption anlms_options[] = {
    { "order",    "set the filter order",   OFFSET(order),       AV_OPT_TYPE_INT,   {.i64=256},      1, INT16_MAX,   A },
    { "mu",       "set the filter mu",      OFFSET(mu),          AV_OPT_TYPE_FLOAT, {.dbl=0.75},     0, 1,           A },
    { "eps",      "set the filter eps",     OFFSET(eps),         AV_OPT_TYPE_FLOAT, {.dbl=1},        0, 1,           A },
    { "leakage",  "set the filter leakage", OFFSET(leakage),     AV_OPT_TYPE_FLOAT, {.dbl=0},        0, 1,           A },
    { "out_mode", "set output mode",        OFFSET(output_mode), AV_OPT_TYPE_INT,   {.i64=OUT_MODE}, 0, NB_OMODES-1, A, "mode" },
    {  "i", "input",   0, AV_OPT_TYPE_CONST, {.i64=IN_MODE},      0, 0, A, "mode" },
    {  "d", "desired", 0, AV_OPT_TYPE_CONST, {.i64=DESIRED_MODE}, 0, 0, A, "mode" },
    {  "o", "output",  0, AV_OPT_TYPE_CONST, {.i64=OUT_MODE},     0, 0, A, "mode" },
    {  "n", "noise",   0, AV_OPT_TYPE_CONST, {.i64=NOISE_MODE},   0, 0, A, "mode" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(anlms);

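/* Restrict the filter to planar float samples; any channel count and sample
 * rate are accepted, as long as all links agree. */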
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}

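/* Run one sample through the current FIR kernel: store it in the circular
 * delay line, take the dot product with the mirrored coefficient buffer,
 * and step the write offset backwards. */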
static float fir_sample(AudioNLMSContext *s, float sample, float *delay,
                        float *coeffs, float *tmp, int *offset)
{
    const int order = s->order;
    float output;

    delay[*offset] = sample;

    memcpy(tmp, coeffs + order - *offset, order * sizeof(float));

    output = s->fdsp->scalarproduct_float(delay, tmp, s->kernel_size);

    if (--(*offset) < 0)
        *offset = order - 1;

    return output;
}

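/* One NLMS iteration per sample:
 *   e(n)   = desired(n) - y(n)
 *   w(n+1) = (1 - mu*leakage) * w(n) + mu * e(n) * x(n) / (eps + x(n).x(n))
 * The second half of the coefficient buffer is kept as a copy of the first,
 * so fir_sample() can read a contiguous slice at any circular offset. */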
static float process_sample(AudioNLMSContext *s, float input, float desired,
                            float *delay, float *coeffs, float *tmp, int *offsetp)
{
    const int order = s->order;
    const float leakage = s->leakage;
    const float mu = s->mu;
    const float a = 1.f - leakage * mu;
    float sum, output, e, norm, b;
    int offset = *offsetp;

    delay[offset + order] = input;

    output = fir_sample(s, input, delay, coeffs, tmp, offsetp);
    e = desired - output;

    sum = s->fdsp->scalarproduct_float(delay, delay, s->kernel_size);
    norm = s->eps + sum;
    b = mu * e / norm;

    memcpy(tmp, delay + offset, order * sizeof(float));

    s->fdsp->vector_fmul_scalar(coeffs, coeffs, a, s->kernel_size);
    s->fdsp->vector_fmac_scalar(coeffs, tmp, b, s->kernel_size);

    memcpy(coeffs + order, coeffs, order * sizeof(float));

    switch (s->output_mode) {
    case IN_MODE:       output = input;            break;
    case DESIRED_MODE:  output = desired;          break;
    case OUT_MODE:    /*output = output;*/         break;
    case NOISE_MODE:    output = desired - output; break;
    }

    return output;
}

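/* Slice-threading worker: each job processes a contiguous range of channels,
 * feeding the per-channel state buffers sample by sample. */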
static int process_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    AudioNLMSContext *s = ctx->priv;
    AVFrame *out = arg;
    const int start = (out->channels * jobnr) / nb_jobs;
    const int end = (out->channels * (jobnr+1)) / nb_jobs;

    for (int c = start; c < end; c++) {
        const float *input = (const float *)s->frame[0]->extended_data[c];
        const float *desired = (const float *)s->frame[1]->extended_data[c];
        float *delay = (float *)s->delay->extended_data[c];
        float *coeffs = (float *)s->coeffs->extended_data[c];
        float *tmp = (float *)s->tmp->extended_data[c];
        int *offset = (int *)s->offset->extended_data[c];
        float *output = (float *)out->extended_data[c];

        for (int n = 0; n < out->nb_samples; n++)
            output[n] = process_sample(s, input[n], desired[n], delay, coeffs, tmp, offset);
    }

    return 0;
}

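/* Consume the same number of samples from both inputs, filter them into one
 * output frame, then forward EOF/status or request more data as needed. */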
static int activate(AVFilterContext *ctx)
{
    AudioNLMSContext *s = ctx->priv;
    int i, ret, status;
    int nb_samples;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

    nb_samples = FFMIN(ff_inlink_queued_samples(ctx->inputs[0]),
                       ff_inlink_queued_samples(ctx->inputs[1]));
    for (i = 0; i < ctx->nb_inputs && nb_samples > 0; i++) {
        if (s->frame[i])
            continue;

        if (ff_inlink_check_available_samples(ctx->inputs[i], nb_samples) > 0) {
            ret = ff_inlink_consume_samples(ctx->inputs[i], nb_samples, nb_samples, &s->frame[i]);
            if (ret < 0)
                return ret;
        }
    }

    if (s->frame[0] && s->frame[1]) {
        AVFrame *out;

        out = ff_get_audio_buffer(ctx->outputs[0], s->frame[0]->nb_samples);
        if (!out) {
            av_frame_free(&s->frame[0]);
            av_frame_free(&s->frame[1]);
            return AVERROR(ENOMEM);
        }

        ctx->internal->execute(ctx, process_channels, out, NULL, FFMIN(ctx->outputs[0]->channels,
                                                                       ff_filter_get_nb_threads(ctx)));

        out->pts = s->frame[0]->pts;

        av_frame_free(&s->frame[0]);
        av_frame_free(&s->frame[1]);

        ret = ff_filter_frame(ctx->outputs[0], out);
        if (ret < 0)
            return ret;
    }

    if (!nb_samples) {
        for (i = 0; i < 2; i++) {
            if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
                ff_outlink_set_status(ctx->outputs[0], status, pts);
                return 0;
            }
        }
    }

    if (ff_outlink_frame_wanted(ctx->outputs[0])) {
        for (i = 0; i < 2; i++) {
            if (ff_inlink_queued_samples(ctx->inputs[i]) > 0)
                continue;
            ff_inlink_request_frame(ctx->inputs[i]);
            return 0;
        }
    }

    return 0;
}

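/* Allocate per-channel state once the output link is configured; delay and
 * coefficient buffers are doubled so circular reads stay contiguous. */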
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioNLMSContext *s = ctx->priv;

    s->kernel_size = FFALIGN(s->order, 16);

    if (!s->offset)
        s->offset = ff_get_audio_buffer(outlink, 1);
    if (!s->delay)
        s->delay = ff_get_audio_buffer(outlink, 2 * s->kernel_size);
    if (!s->coeffs)
        s->coeffs = ff_get_audio_buffer(outlink, 2 * s->kernel_size);
    if (!s->tmp)
        s->tmp = ff_get_audio_buffer(outlink, s->kernel_size);

    if (!s->delay || !s->coeffs || !s->offset || !s->tmp)
        return AVERROR(ENOMEM);

    return 0;
}

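/* Allocate the float DSP context used for the dot products and coefficient
 * updates. */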
static av_cold int init(AVFilterContext *ctx)
{
    AudioNLMSContext *s = ctx->priv;

    s->fdsp = avpriv_float_dsp_alloc(0);
    if (!s->fdsp)
        return AVERROR(ENOMEM);

    return 0;
}

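/* Free the DSP context and all per-channel state buffers. */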
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioNLMSContext *s = ctx->priv;

    av_freep(&s->fdsp);
    av_frame_free(&s->delay);
    av_frame_free(&s->coeffs);
    av_frame_free(&s->offset);
    av_frame_free(&s->tmp);
}

static const AVFilterPad inputs[] = {
    {
        .name = "input",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    {
        .name = "desired",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
    },
    { NULL }
};

AVFilter ff_af_anlms = {
    .name          = "anlms",
    .description   = NULL_IF_CONFIG_SMALL("Apply Normalized Least-Mean-Squares algorithm to first audio stream."),
    .priv_size     = sizeof(AudioNLMSContext),
    .priv_class    = &anlms_class,
    .init          = init,
    .uninit        = uninit,
    .activate      = activate,
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
};