/*
 * Copyright (C) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sidechain compressor filter
 */

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"

#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"

typedef struct SidechainCompressContext {
    const AVClass *class;

    double attack, attack_coeff;
    double release, release_coeff;
    double lin_slope;
    double ratio;
    double threshold;
    double makeup;
    double thres;
    double knee;
    double knee_start;
    double knee_stop;
    double lin_knee_start;
    double compressed_knee_stop;
    int link;
    int detection;

    AVFrame *input_frame[2];
} SidechainCompressContext;

#define OFFSET(x) offsetof(SidechainCompressContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM

static const AVOption sidechaincompress_options[] = {
    { "threshold", "set threshold",    OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0.125},   0.000976563,    1, A|F },
    { "ratio",     "set ratio",        OFFSET(ratio),     AV_OPT_TYPE_DOUBLE, {.dbl=2},                 1,   20, A|F },
    { "attack",    "set attack",       OFFSET(attack),    AV_OPT_TYPE_DOUBLE, {.dbl=20},             0.01, 2000, A|F },
    { "release",   "set release",      OFFSET(release),   AV_OPT_TYPE_DOUBLE, {.dbl=250},            0.01, 9000, A|F },
    { "makeup",    "set make up gain", OFFSET(makeup),    AV_OPT_TYPE_DOUBLE, {.dbl=2},                 1,   64, A|F },
    { "knee",      "set knee",         OFFSET(knee),      AV_OPT_TYPE_DOUBLE, {.dbl=2.82843},           1,    8, A|F },
    { "link",      "set link type",    OFFSET(link),      AV_OPT_TYPE_INT,    {.i64=0},                 0,    1, A|F, "link" },
    {   "average", 0,                  0,                 AV_OPT_TYPE_CONST,  {.i64=0},                 0,    0, A|F, "link" },
    {   "maximum", 0,                  0,                 AV_OPT_TYPE_CONST,  {.i64=1},                 0,    0, A|F, "link" },
    { "detection", "set detection",    OFFSET(detection), AV_OPT_TYPE_INT,    {.i64=1},                 0,    1, A|F, "detection" },
    {   "peak",    0,                  0,                 AV_OPT_TYPE_CONST,  {.i64=0},                 0,    0, A|F, "detection" },
    {   "rms",     0,                  0,                 AV_OPT_TYPE_CONST,  {.i64=1},                 0,    0, A|F, "detection" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(sidechaincompress);
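
/* Precompute the log-domain threshold and the soft-knee boundaries once,
 * so the per-sample gain computation does not have to repeat them. */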
static av_cold int init(AVFilterContext *ctx)
{
    SidechainCompressContext *s = ctx->priv;

    s->thres = log(s->threshold);
    s->lin_knee_start = s->threshold / sqrt(s->knee);
    s->knee_start = log(s->lin_knee_start);
    s->knee_stop = log(s->threshold * sqrt(s->knee));
    s->compressed_knee_stop = (s->knee_stop - s->thres) / s->ratio + s->thres;

    return 0;
}
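
/* Cubic Hermite interpolation between (x0, p0) and (x1, p1) with endpoint
 * slopes m0 and m1; used to blend the gain curve smoothly across the knee. */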
static inline float hermite_interpolation(float x, float x0, float x1,
                                          float p0, float p1,
                                          float m0, float m1)
{
    float width = x1 - x0;
    float t = (x - x0) / width;
    float t2, t3;
    float ct0, ct1, ct2, ct3;

    m0 *= width;
    m1 *= width;

    t2 = t*t;
    t3 = t2*t;
    ct0 = p0;
    ct1 = m0;
    ct2 = -3 * p0 - 2 * m0 + 3 * p1 - m1;
    ct3 = 2 * p0 + m0 - 2 * p1 + m1;

    return ct3 * t3 + ct2 * t2 + ct1 * t + ct0;
}

// A fake infinity value (because real infinity may break some hosts)
#define FAKE_INFINITY (65536.0 * 65536.0)

// Check for infinity (with appropriate-ish tolerance)
#define IS_FAKE_INFINITY(value) (fabs(value-FAKE_INFINITY) < 1.0)
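
/* Map the detected level to a linear gain factor.  The transfer curve is
 * applied in the log domain: above the threshold the slope is reduced by
 * the ratio, and inside the knee the curve is smoothed with Hermite
 * interpolation.  With RMS detection the squared level is halved in the
 * log domain, which is equivalent to taking its square root. */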
static double output_gain(double lin_slope, double ratio, double thres,
                          double knee, double knee_start, double knee_stop,
                          double compressed_knee_stop, int detection)
{
    double slope = log(lin_slope);
    double gain = 0.0;
    double delta = 0.0;

    if (detection)
        slope *= 0.5;

    if (IS_FAKE_INFINITY(ratio)) {
        gain = thres;
        delta = 0.0;
    } else {
        gain = (slope - thres) / ratio + thres;
        delta = 1.0 / ratio;
    }

    if (knee > 1.0 && slope < knee_stop)
        gain = hermite_interpolation(slope, knee_start, knee_stop,
                                     knee_start, compressed_knee_stop,
                                     1.0, delta);

    return exp(gain - slope);
}
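
/* Called once per incoming frame on either input.  Frames are buffered
 * until one is available on both the main and the sidechain input; the
 * sidechain then drives the level detector and the resulting gain is
 * applied in place to the main input, which is sent downstream. */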
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *ctx = link->dst;
    SidechainCompressContext *s = ctx->priv;
    AVFilterLink *sclink = ctx->inputs[1];
    AVFilterLink *outlink = ctx->outputs[0];
    const double makeup = s->makeup;
    const double *scsrc;
    double *sample;
    int nb_samples;
    int ret, i, c;

    for (i = 0; i < 2; i++)
        if (link == ctx->inputs[i])
            break;
    av_assert0(!s->input_frame[i]);
    s->input_frame[i] = frame;

    if (!s->input_frame[0] || !s->input_frame[1])
        return 0;

    nb_samples = FFMIN(s->input_frame[0]->nb_samples,
                       s->input_frame[1]->nb_samples);

    sample = (double *)s->input_frame[0]->data[0];
    scsrc = (const double *)s->input_frame[1]->data[0];

    for (i = 0; i < nb_samples; i++) {
        double abs_sample, gain = 1.0;

        abs_sample = FFABS(scsrc[0]);

        if (s->link == 1) {
            for (c = 1; c < sclink->channels; c++)
                abs_sample = FFMAX(FFABS(scsrc[c]), abs_sample);
        } else {
            for (c = 1; c < sclink->channels; c++)
                abs_sample += FFABS(scsrc[c]);

            abs_sample /= sclink->channels;
        }

        if (s->detection)
            abs_sample *= abs_sample;

        s->lin_slope += (abs_sample - s->lin_slope) * (abs_sample > s->lin_slope ? s->attack_coeff : s->release_coeff);

        if (s->lin_slope > 0.0 && s->lin_slope > s->lin_knee_start)
            gain = output_gain(s->lin_slope, s->ratio, s->thres, s->knee,
                               s->knee_start, s->knee_stop,
                               s->compressed_knee_stop, s->detection);

        for (c = 0; c < outlink->channels; c++)
            sample[c] *= gain * makeup;

        sample += outlink->channels;
        scsrc += sclink->channels;
    }

    ret = ff_filter_frame(outlink, s->input_frame[0]);

    s->input_frame[0] = NULL;
    av_frame_free(&s->input_frame[1]);

    return ret;
}
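
/* Pull a frame from whichever input does not have one buffered yet, and
 * ask the sidechain for the same number of samples as the main input. */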
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SidechainCompressContext *s = ctx->priv;
    int i, ret;

    /* get a frame on each input */
    for (i = 0; i < 2; i++) {
        AVFilterLink *inlink = ctx->inputs[i];
        if (!s->input_frame[i] &&
            (ret = ff_request_frame(inlink)) < 0)
            return ret;

        /* request the same number of samples on all inputs */
        if (i == 0)
            ctx->inputs[1]->request_samples = s->input_frame[0]->nb_samples;
    }

    return 0;
}
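
/* Restrict processing to interleaved doubles, propagate the first channel
 * layout offered on the main input to the output, and accept any layout
 * and any common sample rate on the inputs. */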
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    int ret, i;

    if (!ctx->inputs[0]->in_channel_layouts ||
        !ctx->inputs[0]->in_channel_layouts->nb_channel_layouts) {
        av_log(ctx, AV_LOG_WARNING,
               "No channel layout for input 1\n");
        return AVERROR(EAGAIN);
    }

    ff_add_channel_layout(&layouts, ctx->inputs[0]->in_channel_layouts->channel_layouts[0]);
    if (!layouts)
        return AVERROR(ENOMEM);
    ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts);

    for (i = 0; i < 2; i++) {
        layouts = ff_all_channel_layouts();
        if (!layouts)
            return AVERROR(ENOMEM);
        ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts);
    }

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
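
/* Both inputs must run at the same sample rate; the output inherits the
 * main input's properties.  The attack and release times (in milliseconds)
 * are turned into per-sample smoothing coefficients here. */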
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SidechainCompressContext *s = ctx->priv;

    if (ctx->inputs[0]->sample_rate != ctx->inputs[1]->sample_rate) {
        av_log(ctx, AV_LOG_ERROR,
               "Inputs must have the same sample rate "
               "%d for in0 vs %d for in1\n",
               ctx->inputs[0]->sample_rate, ctx->inputs[1]->sample_rate);
        return AVERROR(EINVAL);
    }

    outlink->sample_rate = ctx->inputs[0]->sample_rate;
    outlink->time_base = ctx->inputs[0]->time_base;
    outlink->channel_layout = ctx->inputs[0]->channel_layout;
    outlink->channels = ctx->inputs[0]->channels;

    s->attack_coeff = FFMIN(1.f, 1.f / (s->attack * outlink->sample_rate / 4000.f));
    s->release_coeff = FFMIN(1.f, 1.f / (s->release * outlink->sample_rate / 4000.f));

    return 0;
}
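
/* The main input is modified in place (needs_writable) and both inputs are
 * FIFO-buffered so that frames can be paired up in filter_frame(). */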
static const AVFilterPad sidechaincompress_inputs[] = {
    {
        .name           = "main",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = filter_frame,
        .needs_writable = 1,
        .needs_fifo     = 1,
    },{
        .name           = "sidechain",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = filter_frame,
        .needs_fifo     = 1,
    },
    { NULL }
};

static const AVFilterPad sidechaincompress_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_af_sidechaincompress = {
    .name           = "sidechaincompress",
    .description    = NULL_IF_CONFIG_SMALL("Sidechain compressor."),
    .priv_size      = sizeof(SidechainCompressContext),
    .priv_class     = &sidechaincompress_class,
    .init           = init,
    .query_formats  = query_formats,
    .inputs         = sidechaincompress_inputs,
    .outputs        = sidechaincompress_outputs,
};