/*
 * Copyright (C) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file
 * Audio (Sidechain) Compressor filter
 */
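
/*
 * Illustrative usage sketch (not part of the original file; the option
 * values below are arbitrary examples within the ranges declared further
 * down in the AVOption table):
 *
 *   ffmpeg -i main.wav -i side.wav \
 *       -filter_complex "[0:a][1:a]sidechaincompress=threshold=0.1:ratio=4" out.wav
 *
 *   ffmpeg -i in.wav -af "acompressor=threshold=0.125:ratio=2:attack=20:release=250" out.wav
 */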
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"

#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "hermite.h"
#include "internal.h"
typedef struct SidechainCompressContext {
    const AVClass *class;

    double level_in;
    double level_sc;
    double attack, attack_coeff;
    double release, release_coeff;
    double lin_slope;
    double ratio;
    double threshold;
    double makeup;
    double mix;
    double thres;
    double knee;
    double knee_start;
    double knee_stop;
    double lin_knee_start;
    double adj_knee_start;
    double compressed_knee_stop;
    int link;
    int detection;

    AVFrame *input_frame[2];
} SidechainCompressContext;
#define OFFSET(x) offsetof(SidechainCompressContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM

static const AVOption options[] = {
    { "level_in",  "set input gain",     OFFSET(level_in),  AV_OPT_TYPE_DOUBLE, {.dbl=1},       0.015625,    64, A|F },
    { "threshold", "set threshold",      OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0.125},   0.000976563,  1, A|F },
    { "ratio",     "set ratio",          OFFSET(ratio),     AV_OPT_TYPE_DOUBLE, {.dbl=2},       1,           20, A|F },
    { "attack",    "set attack",         OFFSET(attack),    AV_OPT_TYPE_DOUBLE, {.dbl=20},      0.01,      2000, A|F },
    { "release",   "set release",        OFFSET(release),   AV_OPT_TYPE_DOUBLE, {.dbl=250},     0.01,      9000, A|F },
    { "makeup",    "set make up gain",   OFFSET(makeup),    AV_OPT_TYPE_DOUBLE, {.dbl=2},       1,           64, A|F },
    { "knee",      "set knee",           OFFSET(knee),      AV_OPT_TYPE_DOUBLE, {.dbl=2.82843}, 1,            8, A|F },
    { "link",      "set link type",      OFFSET(link),      AV_OPT_TYPE_INT,    {.i64=0},       0,            1, A|F, "link" },
    {   "average", 0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=0},       0,            0, A|F, "link" },
    {   "maximum", 0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=1},       0,            0, A|F, "link" },
    { "detection", "set detection",      OFFSET(detection), AV_OPT_TYPE_INT,    {.i64=1},       0,            1, A|F, "detection" },
    {   "peak",    0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=0},       0,            0, A|F, "detection" },
    {   "rms",     0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=1},       0,            0, A|F, "detection" },
    { "level_sc",  "set sidechain gain", OFFSET(level_sc),  AV_OPT_TYPE_DOUBLE, {.dbl=1},       0.015625,    64, A|F },
    { "mix",       "set mix",            OFFSET(mix),       AV_OPT_TYPE_DOUBLE, {.dbl=1},       0,            1, A|F },
    { NULL }
};
#define sidechaincompress_options options
AVFILTER_DEFINE_CLASS(sidechaincompress);
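
/*
 * Precompute the soft-knee boundaries.  The knee spans
 * [threshold / sqrt(knee), threshold * sqrt(knee)] in the linear domain;
 * its start/stop points and the compressed endpoint are cached in the
 * log domain so the per-sample code only has to interpolate.
 */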
static av_cold int init(AVFilterContext *ctx)
{
    SidechainCompressContext *s = ctx->priv;

    s->thres = log(s->threshold);
    s->lin_knee_start = s->threshold / sqrt(s->knee);
    s->adj_knee_start = s->lin_knee_start * s->lin_knee_start;
    s->knee_start = log(s->lin_knee_start);
    s->knee_stop = log(s->threshold * sqrt(s->knee));
    s->compressed_knee_stop = (s->knee_stop - s->thres) / s->ratio + s->thres;

    return 0;
}
// A fake infinity value (because real infinity may break some hosts)
#define FAKE_INFINITY (65536.0 * 65536.0)

// Check for infinity (with appropriate-ish tolerance)
#define IS_FAKE_INFINITY(value) (fabs(value-FAKE_INFINITY) < 1.0)
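
/*
 * Compute the gain factor for a given detector level.  The work is done in
 * the log domain: above the threshold the slope is divided by the ratio,
 * inside the knee a Hermite curve blends smoothly between the uncompressed
 * and compressed segments, and exp(gain - slope) converts the result back
 * to a linear multiplier.  For RMS detection the slope is halved because
 * lin_slope then holds the squared level.
 */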
static double output_gain(double lin_slope, double ratio, double thres,
                          double knee, double knee_start, double knee_stop,
                          double compressed_knee_stop, int detection)
{
    double slope = log(lin_slope);
    double gain = 0.0;
    double delta = 0.0;

    if (detection)
        slope *= 0.5;

    if (IS_FAKE_INFINITY(ratio)) {
        gain = thres;
        delta = 0.0;
    } else {
        gain = (slope - thres) / ratio + thres;
        delta = 1.0 / ratio;
    }

    if (knee > 1.0 && slope < knee_stop)
        gain = hermite_interpolation(slope, knee_start, knee_stop,
                                     knee_start, compressed_knee_stop,
                                     1.0, delta);

    return exp(gain - slope);
}
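
/*
 * Derive the attack/release smoothing coefficients from the configured times
 * (in milliseconds) and the output sample rate; FFMIN clamps them to 1 so
 * very short times degenerate to an instantaneous response.
 */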
static int compressor_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SidechainCompressContext *s = ctx->priv;

    s->attack_coeff  = FFMIN(1., 1. / (s->attack  * outlink->sample_rate / 4000.));
    s->release_coeff = FFMIN(1., 1. / (s->release * outlink->sample_rate / 4000.));

    return 0;
}
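
/*
 * Per-sample processing shared by both filters: the sidechain level is either
 * averaged or max-linked across channels, optionally squared for RMS
 * detection, smoothed with the attack/release envelope, turned into a gain
 * factor, and finally applied to the main signal together with the makeup
 * gain and the dry/wet mix.
 */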
static void compressor(SidechainCompressContext *s,
                       const double *src, double *dst, const double *scsrc, int nb_samples,
                       double level_in, double level_sc,
                       AVFilterLink *inlink, AVFilterLink *sclink)
{
    const double makeup = s->makeup;
    const double mix = s->mix;
    int i, c;

    for (i = 0; i < nb_samples; i++) {
        double abs_sample, gain = 1.0;

        abs_sample = fabs(scsrc[0] * level_sc);

        if (s->link == 1) {
            for (c = 1; c < sclink->channels; c++)
                abs_sample = FFMAX(fabs(scsrc[c] * level_sc), abs_sample);
        } else {
            for (c = 1; c < sclink->channels; c++)
                abs_sample += fabs(scsrc[c] * level_sc);

            abs_sample /= sclink->channels;
        }

        if (s->detection)
            abs_sample *= abs_sample;

        s->lin_slope += (abs_sample - s->lin_slope) * (abs_sample > s->lin_slope ? s->attack_coeff : s->release_coeff);

        if (s->lin_slope > 0.0 && s->lin_slope > (s->detection ? s->adj_knee_start : s->lin_knee_start))
            gain = output_gain(s->lin_slope, s->ratio, s->thres, s->knee,
                               s->knee_start, s->knee_stop,
                               s->compressed_knee_stop, s->detection);

        for (c = 0; c < inlink->channels; c++)
            dst[c] = src[c] * level_in * (gain * makeup * mix + (1. - mix));

        src += inlink->channels;
        dst += inlink->channels;
        scsrc += sclink->channels;
    }
}
#if CONFIG_SIDECHAINCOMPRESS_FILTER
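/*
 * Buffer one frame per input; once both the main and the sidechain frame are
 * available, process min(nb_samples) of the two and pass the main frame on.
 */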
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *ctx = link->dst;
    SidechainCompressContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    const double *scsrc;
    double *sample;
    int nb_samples;
    int ret, i;

    for (i = 0; i < 2; i++)
        if (link == ctx->inputs[i])
            break;
    av_assert0(i < 2 && !s->input_frame[i]);
    s->input_frame[i] = frame;

    if (!s->input_frame[0] || !s->input_frame[1])
        return 0;

    nb_samples = FFMIN(s->input_frame[0]->nb_samples,
                       s->input_frame[1]->nb_samples);

    sample = (double *)s->input_frame[0]->data[0];
    scsrc = (const double *)s->input_frame[1]->data[0];

    compressor(s, sample, sample, scsrc, nb_samples,
               s->level_in, s->level_sc,
               ctx->inputs[0], ctx->inputs[1]);
    ret = ff_filter_frame(outlink, s->input_frame[0]);

    s->input_frame[0] = NULL;
    av_frame_free(&s->input_frame[1]);

    return ret;
}
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SidechainCompressContext *s = ctx->priv;
    int i, ret;

    /* get a frame on each input */
    for (i = 0; i < 2; i++) {
        AVFilterLink *inlink = ctx->inputs[i];
        if (!s->input_frame[i] &&
            (ret = ff_request_frame(inlink)) < 0)
            return ret;

        /* request the same number of samples on all inputs */
        if (i == 0)
            ctx->inputs[1]->request_samples = s->input_frame[0]->nb_samples;
    }

    return 0;
}
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    int ret, i;

    if (!ctx->inputs[0]->in_channel_layouts ||
        !ctx->inputs[0]->in_channel_layouts->nb_channel_layouts) {
        av_log(ctx, AV_LOG_WARNING,
               "No channel layout for input 1\n");
        return AVERROR(EAGAIN);
    }

    if ((ret = ff_add_channel_layout(&layouts, ctx->inputs[0]->in_channel_layouts->channel_layouts[0])) < 0 ||
        (ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
        return ret;

    for (i = 0; i < 2; i++) {
        layouts = ff_all_channel_counts();
        if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
            return ret;
    }

    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;

    formats = ff_all_samplerates();
    return ff_set_common_samplerates(ctx, formats);
}
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;

    if (ctx->inputs[0]->sample_rate != ctx->inputs[1]->sample_rate) {
        av_log(ctx, AV_LOG_ERROR,
               "Inputs must have the same sample rate "
               "%d for in0 vs %d for in1\n",
               ctx->inputs[0]->sample_rate, ctx->inputs[1]->sample_rate);
        return AVERROR(EINVAL);
    }

    outlink->sample_rate    = ctx->inputs[0]->sample_rate;
    outlink->time_base      = ctx->inputs[0]->time_base;
    outlink->channel_layout = ctx->inputs[0]->channel_layout;
    outlink->channels       = ctx->inputs[0]->channels;

    compressor_config_output(outlink);

    return 0;
}
static const AVFilterPad sidechaincompress_inputs[] = {
    {
        .name           = "main",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = filter_frame,
        .needs_writable = 1,
        .needs_fifo     = 1,
    },{
        .name           = "sidechain",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = filter_frame,
        .needs_fifo     = 1,
    },
    { NULL }
};

static const AVFilterPad sidechaincompress_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_af_sidechaincompress = {
    .name           = "sidechaincompress",
    .description    = NULL_IF_CONFIG_SMALL("Sidechain compressor."),
    .priv_size      = sizeof(SidechainCompressContext),
    .priv_class     = &sidechaincompress_class,
    .init           = init,
    .query_formats  = query_formats,
    .inputs         = sidechaincompress_inputs,
    .outputs        = sidechaincompress_outputs,
};
#endif /* CONFIG_SIDECHAINCOMPRESS_FILTER */

#if CONFIG_ACOMPRESSOR_FILTER
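/*
 * Single-input variant: the signal acts as its own sidechain, so compressor()
 * is called with the same buffer (and level_in) for both the main and the
 * sidechain arguments.
 */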
static int acompressor_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    const double *src = (const double *)in->data[0];
    AVFilterContext *ctx = inlink->dst;
    SidechainCompressContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    double *dst;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    dst = (double *)out->data[0];
    compressor(s, src, dst, src, in->nb_samples,
               s->level_in, s->level_in,
               inlink, inlink);

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
static int acompressor_query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
#define acompressor_options options
AVFILTER_DEFINE_CLASS(acompressor);

static const AVFilterPad acompressor_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = acompressor_filter_frame,
    },
    { NULL }
};

static const AVFilterPad acompressor_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = compressor_config_output,
    },
    { NULL }
};

AVFilter ff_af_acompressor = {
    .name           = "acompressor",
    .description    = NULL_IF_CONFIG_SMALL("Audio compressor."),
    .priv_size      = sizeof(SidechainCompressContext),
    .priv_class     = &acompressor_class,
    .init           = init,
    .query_formats  = acompressor_query_formats,
    .inputs         = acompressor_inputs,
    .outputs        = acompressor_outputs,
};
#endif /* CONFIG_ACOMPRESSOR_FILTER */