/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/channel_layout.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
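
/*
 * asubboost: boost subwoofer (low) frequencies.
 *
 * The input is low-passed with a second-order biquad; the low-passed signal
 * is accumulated into a short circular buffer with exponential decay, and the
 * delayed buffer content is mixed back with the dry input.
 */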
typedef struct ASubBoostContext {
    const AVClass *class;

    double dry_gain;
    double wet_gain;
    double feedback;
    double decay;
    double delay;
    double cutoff;
    double slope;

    double a0, a1, a2;
    double b0, b1, b2;

    int *write_pos;
    int buffer_samples;

    AVFrame *w;
    AVFrame *buffer;
} ASubBoostContext;

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);

    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    return ff_set_common_samplerates(ctx, formats);
}
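
/*
 * Derive the coefficients of a second-order low-pass biquad (in the style of
 * the RBJ audio EQ cookbook) at the configured cutoff frequency; the "slope"
 * option controls the steepness of the transition band through the alpha/Q
 * term.  All coefficients are normalized by a0 so the filtering loop does not
 * need it.  The delay option (milliseconds) is converted to a buffer length
 * in samples.
 */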
static int get_coeffs(AVFilterContext *ctx)
{
    ASubBoostContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    double w0 = 2 * M_PI * s->cutoff / inlink->sample_rate;
    double alpha = sin(w0) / 2 * sqrt(2. * (1. / s->slope - 1.) + 2.);

    s->a0 =  1 + alpha;
    s->a1 = -2 * cos(w0);
    s->a2 =  1 - alpha;
    s->b0 = (1 - cos(w0)) / 2;
    s->b1 =  1 - cos(w0);
    s->b2 = (1 - cos(w0)) / 2;

    s->a1 /= s->a0;
    s->a2 /= s->a0;
    s->b0 /= s->a0;
    s->b1 /= s->a0;
    s->b2 /= s->a0;

    s->buffer_samples = inlink->sample_rate * s->delay / 1000;

    return 0;
}
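
/*
 * Allocate per-channel state: a circular buffer sized for the maximum delay
 * of 100 ms (sample_rate / 10 samples), two biquad state samples per channel,
 * and one write position per channel.
 */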
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ASubBoostContext *s = ctx->priv;

    s->buffer = ff_get_audio_buffer(inlink, inlink->sample_rate / 10);
    s->w = ff_get_audio_buffer(inlink, 2);
    s->write_pos = av_calloc(inlink->channels, sizeof(*s->write_pos));
    if (!s->buffer || !s->w || !s->write_pos)
        return AVERROR(ENOMEM);

    return get_coeffs(ctx);
}

typedef struct ThreadData {
    AVFrame *in, *out;
} ThreadData;
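
/*
 * Per-slice worker: each job handles a contiguous range of channels.  The
 * biquad runs in transposed direct form II (two state samples in w[]), its
 * output is accumulated into the decaying circular buffer, and the delayed
 * buffer content is mixed with the dry input.  When the filter is disabled
 * via timeline editing, dry/wet collapse to a pass-through.
 */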
static int filter_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ASubBoostContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *out = td->out;
    AVFrame *in = td->in;
    const double wet = ctx->is_disabled ? 0. : s->wet_gain;
    const double dry = ctx->is_disabled ? 1. : s->dry_gain;
    const double feedback = s->feedback, decay = s->decay;
    const double b0 = s->b0;
    const double b1 = s->b1;
    const double b2 = s->b2;
    const double a1 = -s->a1;
    const double a2 = -s->a2;
    const int start = (in->channels * jobnr) / nb_jobs;
    const int end = (in->channels * (jobnr+1)) / nb_jobs;
    const int buffer_samples = s->buffer_samples;

    for (int ch = start; ch < end; ch++) {
        const double *src = (const double *)in->extended_data[ch];
        double *dst = (double *)out->extended_data[ch];
        double *buffer = (double *)s->buffer->extended_data[ch];
        double *w = (double *)s->w->extended_data[ch];
        int write_pos = s->write_pos[ch];

        for (int n = 0; n < in->nb_samples; n++) {
            double out_sample;

            out_sample = src[n] * b0 + w[0];
            w[0] = b1 * src[n] + w[1] + a1 * out_sample;
            w[1] = b2 * src[n] + a2 * out_sample;

            buffer[write_pos] = buffer[write_pos] * decay + out_sample * feedback;
            dst[n] = src[n] * dry + buffer[write_pos] * wet;

            if (++write_pos >= buffer_samples)
                write_pos = 0;
        }

        s->write_pos[ch] = write_pos;
    }

    return 0;
}
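
/*
 * Process one input frame, filtering in place when the frame is writable,
 * otherwise into a freshly allocated output buffer.
 */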
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ThreadData td;
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    td.in = in; td.out = out;
    ctx->internal->execute(ctx, filter_channels, &td, NULL, FFMIN(inlink->channels,
                                                                  ff_filter_get_nb_threads(ctx)));

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ASubBoostContext *s = ctx->priv;

    av_frame_free(&s->buffer);
    av_frame_free(&s->w);
    av_freep(&s->write_pos);
}
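
/*
 * All options are marked as runtime parameters: after the generic option
 * update, re-derive the filter coefficients and the delay length so that
 * commands take effect on the next frame.
 */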
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    int ret;

    ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
    if (ret < 0)
        return ret;

    return get_coeffs(ctx);
}

#define OFFSET(x) offsetof(ASubBoostContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption asubboost_options[] = {
    { "dry",      "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, {.dbl=0.5},      0,   1, FLAGS },
    { "wet",      "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_DOUBLE, {.dbl=0.8},      0,   1, FLAGS },
    { "decay",    "set decay",    OFFSET(decay),    AV_OPT_TYPE_DOUBLE, {.dbl=0.7},      0,   1, FLAGS },
    { "feedback", "set feedback", OFFSET(feedback), AV_OPT_TYPE_DOUBLE, {.dbl=0.5},      0,   1, FLAGS },
    { "cutoff",   "set cutoff",   OFFSET(cutoff),   AV_OPT_TYPE_DOUBLE, {.dbl=100},     50, 900, FLAGS },
    { "slope",    "set slope",    OFFSET(slope),    AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0.0001,   1, FLAGS },
    { "delay",    "set delay",    OFFSET(delay),    AV_OPT_TYPE_DOUBLE, {.dbl=20},       1, 100, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(asubboost);

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_asubboost = {
    .name            = "asubboost",
    .description     = NULL_IF_CONFIG_SMALL("Boost subwoofer frequencies."),
    .query_formats   = query_formats,
    .priv_size       = sizeof(ASubBoostContext),
    .priv_class      = &asubboost_class,
    .uninit          = uninit,
    .inputs          = inputs,
    .outputs         = outputs,
    .process_command = process_command,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
                       AVFILTER_FLAG_SLICE_THREADS,
};