/*
 * Copyright (C) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Audio (Sidechain) Compressor filter
 */
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"

#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "hermite.h"
#include "internal.h"
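
/* Shared filter state: the user options plus the log-domain threshold,
 * knee boundaries and envelope coefficients derived from them at
 * configuration time. */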
typedef struct SidechainCompressContext {
    const AVClass *class;

    double level_in;
    double level_sc;
    double attack, attack_coeff;
    double release, release_coeff;
    double lin_slope;
    double ratio;
    double threshold;
    double makeup;
    double mix;
    double thres;
    double knee;
    double knee_start;
    double knee_stop;
    double lin_knee_start;
    double adj_knee_start;
    double compressed_knee_stop;
    int link;
    int detection;

    AVFrame *input_frame[2];
} SidechainCompressContext;
#define OFFSET(x) offsetof(SidechainCompressContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM

static const AVOption options[] = {
    { "level_in",  "set input gain",     OFFSET(level_in),  AV_OPT_TYPE_DOUBLE, {.dbl=1},       0.015625,   64, A|F },
    { "threshold", "set threshold",      OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0.125},   0.000976563, 1, A|F },
    { "ratio",     "set ratio",          OFFSET(ratio),     AV_OPT_TYPE_DOUBLE, {.dbl=2},       1,          20, A|F },
    { "attack",    "set attack",         OFFSET(attack),    AV_OPT_TYPE_DOUBLE, {.dbl=20},      0.01,     2000, A|F },
    { "release",   "set release",        OFFSET(release),   AV_OPT_TYPE_DOUBLE, {.dbl=250},     0.01,     9000, A|F },
    { "makeup",    "set make up gain",   OFFSET(makeup),    AV_OPT_TYPE_DOUBLE, {.dbl=2},       1,          64, A|F },
    { "knee",      "set knee",           OFFSET(knee),      AV_OPT_TYPE_DOUBLE, {.dbl=2.82843}, 1,           8, A|F },
    { "link",      "set link type",      OFFSET(link),      AV_OPT_TYPE_INT,    {.i64=0},       0,           1, A|F, "link" },
    {   "average", 0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=0},       0,           0, A|F, "link" },
    {   "maximum", 0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=1},       0,           0, A|F, "link" },
    { "detection", "set detection",      OFFSET(detection), AV_OPT_TYPE_INT,    {.i64=1},       0,           1, A|F, "detection" },
    {   "peak",    0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=0},       0,           0, A|F, "detection" },
    {   "rms",     0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=1},       0,           0, A|F, "detection" },
    { "level_sc",  "set sidechain gain", OFFSET(level_sc),  AV_OPT_TYPE_DOUBLE, {.dbl=1},       0.015625,   64, A|F },
    { "mix",       "set mix",            OFFSET(mix),       AV_OPT_TYPE_DOUBLE, {.dbl=1},       0,           1, A|F },
    { NULL }
};

#define sidechaincompress_options options
AVFILTER_DEFINE_CLASS(sidechaincompress);
// A fake infinity value (because real infinity may break some hosts)
#define FAKE_INFINITY (65536.0 * 65536.0)

// Check for infinity (with appropriate-ish tolerance)
#define IS_FAKE_INFINITY(value) (fabs(value-FAKE_INFINITY) < 1.0)
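
/*
 * Compute the gain reduction for a given detector level. Everything is done
 * in the natural-log domain: above the threshold the slope is scaled by
 * 1/ratio (or clamped to the threshold for an "infinite" ratio), and inside
 * the knee region the transition is smoothed with a Hermite interpolation.
 * The return value is the linear gain factor to apply.
 */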
static double output_gain(double lin_slope, double ratio, double thres,
                          double knee, double knee_start, double knee_stop,
                          double compressed_knee_stop, int detection)
{
    double slope = log(lin_slope);
    double gain = 0.0;
    double delta = 0.0;

    if (detection)
        slope *= 0.5;

    if (IS_FAKE_INFINITY(ratio)) {
        gain = thres;
        delta = 0.0;
    } else {
        gain = (slope - thres) / ratio + thres;
        delta = 1.0 / ratio;
    }

    if (knee > 1.0 && slope < knee_stop)
        gain = hermite_interpolation(slope, knee_start, knee_stop,
                                     knee_start, compressed_knee_stop,
                                     1.0, delta);

    return exp(gain - slope);
}
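
/*
 * Precompute the values that depend only on the user options and the output
 * sample rate: the log-domain threshold, the knee boundaries around it, and
 * the one-pole attack/release smoothing coefficients (attack and release are
 * given in milliseconds).
 */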
static int compressor_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SidechainCompressContext *s = ctx->priv;

    s->thres = log(s->threshold);
    s->lin_knee_start = s->threshold / sqrt(s->knee);
    s->adj_knee_start = s->lin_knee_start * s->lin_knee_start;
    s->knee_start = log(s->lin_knee_start);
    s->knee_stop = log(s->threshold * sqrt(s->knee));
    s->compressed_knee_stop = (s->knee_stop - s->thres) / s->ratio + s->thres;

    s->attack_coeff  = FFMIN(1., 1. / (s->attack  * outlink->sample_rate / 4000.));
    s->release_coeff = FFMIN(1., 1. / (s->release * outlink->sample_rate / 4000.));

    return 0;
}
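
/*
 * Core per-sample loop shared by both filters. The detector level is taken
 * from the sidechain samples (maximum across channels for "maximum" link,
 * mean for "average"), optionally squared for RMS detection, then smoothed
 * by the attack/release envelope follower. The resulting gain is applied to
 * every channel of the main signal together with makeup gain and the
 * dry/wet mix.
 */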
static void compressor(SidechainCompressContext *s,
                       const double *src, double *dst, const double *scsrc, int nb_samples,
                       double level_in, double level_sc,
                       AVFilterLink *inlink, AVFilterLink *sclink)
{
    const double makeup = s->makeup;
    const double mix = s->mix;
    int i, c;

    for (i = 0; i < nb_samples; i++) {
        double abs_sample, gain = 1.0;

        abs_sample = fabs(scsrc[0] * level_sc);

        if (s->link == 1) {
            for (c = 1; c < sclink->channels; c++)
                abs_sample = FFMAX(fabs(scsrc[c] * level_sc), abs_sample);
        } else {
            for (c = 1; c < sclink->channels; c++)
                abs_sample += fabs(scsrc[c] * level_sc);

            abs_sample /= sclink->channels;
        }

        if (s->detection)
            abs_sample *= abs_sample;

        s->lin_slope += (abs_sample - s->lin_slope) * (abs_sample > s->lin_slope ? s->attack_coeff : s->release_coeff);

        if (s->lin_slope > 0.0 && s->lin_slope > (s->detection ? s->adj_knee_start : s->lin_knee_start))
            gain = output_gain(s->lin_slope, s->ratio, s->thres, s->knee,
                               s->knee_start, s->knee_stop,
                               s->compressed_knee_stop, s->detection);

        for (c = 0; c < inlink->channels; c++)
            dst[c] = src[c] * level_in * (gain * makeup * mix + (1. - mix));

        src += inlink->channels;
        dst += inlink->channels;
        scsrc += sclink->channels;
    }
}
#if CONFIG_SIDECHAINCOMPRESS_FILTER
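/*
 * sidechaincompress: two-input variant. A frame must be available on both
 * the main and the sidechain input before a block is processed; the main
 * frame is compressed in place and the sidechain frame is then freed.
 */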
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *ctx = link->dst;
    SidechainCompressContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    const double *scsrc;
    double *sample;
    int nb_samples;
    int ret, i;

    for (i = 0; i < 2; i++)
        if (link == ctx->inputs[i])
            break;
    av_assert0(i < 2 && !s->input_frame[i]);
    s->input_frame[i] = frame;

    if (!s->input_frame[0] || !s->input_frame[1])
        return 0;

    nb_samples = FFMIN(s->input_frame[0]->nb_samples,
                       s->input_frame[1]->nb_samples);

    sample = (double *)s->input_frame[0]->data[0];
    scsrc = (const double *)s->input_frame[1]->data[0];

    compressor(s, sample, sample, scsrc, nb_samples,
               s->level_in, s->level_sc,
               ctx->inputs[0], ctx->inputs[1]);
    ret = ff_filter_frame(outlink, s->input_frame[0]);

    s->input_frame[0] = NULL;
    av_frame_free(&s->input_frame[1]);

    return ret;
}
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SidechainCompressContext *s = ctx->priv;
    int i, ret;

    /* get a frame on each input */
    for (i = 0; i < 2; i++) {
        AVFilterLink *inlink = ctx->inputs[i];
        if (!s->input_frame[i] &&
            (ret = ff_request_frame(inlink)) < 0)
            return ret;

        /* request the same number of samples on all inputs */
        if (i == 0)
            ctx->inputs[1]->request_samples = s->input_frame[0]->nb_samples;
    }

    return 0;
}
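
/*
 * Format negotiation: the output channel layout is pinned to whatever the
 * main input advertises, both inputs accept any channel count, and only
 * interleaved double-precision samples are supported.
 */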
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    int ret, i;

    if (!ctx->inputs[0]->in_channel_layouts ||
        !ctx->inputs[0]->in_channel_layouts->nb_channel_layouts) {
        av_log(ctx, AV_LOG_WARNING,
               "No channel layout for input 1\n");
        return AVERROR(EAGAIN);
    }

    if ((ret = ff_add_channel_layout(&layouts, ctx->inputs[0]->in_channel_layouts->channel_layouts[0])) < 0 ||
        (ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
        return ret;

    for (i = 0; i < 2; i++) {
        layouts = ff_all_channel_counts();
        if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
            return ret;
    }

    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;

    formats = ff_all_samplerates();
    return ff_set_common_samplerates(ctx, formats);
}
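
/*
 * The main and sidechain inputs must run at the same sample rate; the output
 * otherwise mirrors the main input's properties.
 */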
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;

    if (ctx->inputs[0]->sample_rate != ctx->inputs[1]->sample_rate) {
        av_log(ctx, AV_LOG_ERROR,
               "Inputs must have the same sample rate "
               "%d for in0 vs %d for in1\n",
               ctx->inputs[0]->sample_rate, ctx->inputs[1]->sample_rate);
        return AVERROR(EINVAL);
    }

    outlink->sample_rate = ctx->inputs[0]->sample_rate;
    outlink->time_base = ctx->inputs[0]->time_base;
    outlink->channel_layout = ctx->inputs[0]->channel_layout;
    outlink->channels = ctx->inputs[0]->channels;

    compressor_config_output(outlink);

    return 0;
}
static const AVFilterPad sidechaincompress_inputs[] = {
    {
        .name           = "main",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = filter_frame,
        .needs_writable = 1,
        .needs_fifo     = 1,
    },{
        .name           = "sidechain",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = filter_frame,
        .needs_fifo     = 1,
    },
    { NULL }
};

static const AVFilterPad sidechaincompress_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_af_sidechaincompress = {
    .name           = "sidechaincompress",
    .description    = NULL_IF_CONFIG_SMALL("Sidechain compressor."),
    .priv_size      = sizeof(SidechainCompressContext),
    .priv_class     = &sidechaincompress_class,
    .query_formats  = query_formats,
    .inputs         = sidechaincompress_inputs,
    .outputs        = sidechaincompress_outputs,
};
#endif /* CONFIG_SIDECHAINCOMPRESS_FILTER */
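
/*
 * acompressor: single-input variant that uses the input signal itself as
 * its own sidechain.
 */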
#if CONFIG_ACOMPRESSOR_FILTER
static int acompressor_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    const double *src = (const double *)in->data[0];
    AVFilterContext *ctx = inlink->dst;
    SidechainCompressContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    double *dst;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    dst = (double *)out->data[0];
    compressor(s, src, dst, src, in->nb_samples,
               s->level_in, s->level_in,
               inlink, inlink);

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
static int acompressor_query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
#define acompressor_options options
AVFILTER_DEFINE_CLASS(acompressor);

static const AVFilterPad acompressor_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = acompressor_filter_frame,
    },
    { NULL }
};

static const AVFilterPad acompressor_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = compressor_config_output,
    },
    { NULL }
};

AVFilter ff_af_acompressor = {
    .name           = "acompressor",
    .description    = NULL_IF_CONFIG_SMALL("Audio compressor."),
    .priv_size      = sizeof(SidechainCompressContext),
    .priv_class     = &acompressor_class,
    .query_formats  = acompressor_query_formats,
    .inputs         = acompressor_inputs,
    .outputs        = acompressor_outputs,
};
#endif /* CONFIG_ACOMPRESSOR_FILTER */