/*
 * Copyright (C) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Audio (Sidechain) Compressor filter
 */
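
/*
 * Illustrative command-line usage (editor's sketch, not part of the upstream
 * source; the option values are arbitrary examples, not recommendations):
 *
 *   ffmpeg -i main.wav -i sidechain.wav \
 *          -filter_complex "sidechaincompress=threshold=0.1:ratio=4:attack=5:release=100" out.wav
 *
 *   ffmpeg -i input.wav -af "acompressor=threshold=0.125:ratio=2:makeup=2" out.wav
 */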

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"

#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "hermite.h"
#include "internal.h"

typedef struct SidechainCompressContext {
    const AVClass *class;

    double level_in;
    double level_sc;
    double attack, attack_coeff;
    double release, release_coeff;
    double lin_slope;
    double ratio;
    double threshold;
    double makeup;
    double mix;
    double thres;
    double knee;
    double knee_start;
    double knee_stop;
    double lin_knee_start;
    double adj_knee_start;
    double compressed_knee_stop;
    int link;
    int detection;

    AVFrame *input_frame[2];
} SidechainCompressContext;

#define OFFSET(x) offsetof(SidechainCompressContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM

static const AVOption options[] = {
    { "level_in",  "set input gain",     OFFSET(level_in),  AV_OPT_TYPE_DOUBLE, {.dbl=1},       0.015625,   64, A|F },
    { "threshold", "set threshold",      OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0.125},   0.000976563, 1, A|F },
    { "ratio",     "set ratio",          OFFSET(ratio),     AV_OPT_TYPE_DOUBLE, {.dbl=2},       1,          20, A|F },
    { "attack",    "set attack",         OFFSET(attack),    AV_OPT_TYPE_DOUBLE, {.dbl=20},      0.01,     2000, A|F },
    { "release",   "set release",        OFFSET(release),   AV_OPT_TYPE_DOUBLE, {.dbl=250},     0.01,     9000, A|F },
    { "makeup",    "set make up gain",   OFFSET(makeup),    AV_OPT_TYPE_DOUBLE, {.dbl=2},       1,          64, A|F },
    { "knee",      "set knee",           OFFSET(knee),      AV_OPT_TYPE_DOUBLE, {.dbl=2.82843}, 1,           8, A|F },
    { "link",      "set link type",      OFFSET(link),      AV_OPT_TYPE_INT,    {.i64=0},       0,           1, A|F, "link" },
    {   "average", 0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=0},       0,           0, A|F, "link" },
    {   "maximum", 0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=1},       0,           0, A|F, "link" },
    { "detection", "set detection",      OFFSET(detection), AV_OPT_TYPE_INT,    {.i64=1},       0,           1, A|F, "detection" },
    {   "peak",    0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=0},       0,           0, A|F, "detection" },
    {   "rms",     0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=1},       0,           0, A|F, "detection" },
    { "level_sc",  "set sidechain gain", OFFSET(level_sc),  AV_OPT_TYPE_DOUBLE, {.dbl=1},       0.015625,   64, A|F },
    { "mix",       "set mix",            OFFSET(mix),       AV_OPT_TYPE_DOUBLE, {.dbl=1},       0,           1, A|F },
    { NULL }
};

#define sidechaincompress_options options
AVFILTER_DEFINE_CLASS(sidechaincompress);

static av_cold int init(AVFilterContext *ctx)
{
    SidechainCompressContext *s = ctx->priv;

    /* Precompute the soft-knee boundaries: the knee region runs from
     * threshold/sqrt(knee) to threshold*sqrt(knee) in linear level, and the
     * gain computation works in the log domain. adj_knee_start is the squared
     * counterpart used when RMS detection is active. */
    s->thres = log(s->threshold);
    s->lin_knee_start = s->threshold / sqrt(s->knee);
    s->adj_knee_start = s->lin_knee_start * s->lin_knee_start;
    s->knee_start = log(s->lin_knee_start);
    s->knee_stop = log(s->threshold * sqrt(s->knee));
    s->compressed_knee_stop = (s->knee_stop - s->thres) / s->ratio + s->thres;

    return 0;
}

// A fake infinity value (because real infinity may break some hosts)
#define FAKE_INFINITY (65536.0 * 65536.0)

// Check for infinity (with appropriate-ish tolerance)
#define IS_FAKE_INFINITY(value) (fabs(value-FAKE_INFINITY) < 1.0)

static double output_gain(double lin_slope, double ratio, double thres,
                          double knee, double knee_start, double knee_stop,
                          double compressed_knee_stop, int detection)
{
    double slope = log(lin_slope);
    double gain = 0.0;
    double delta = 0.0;

    /* In RMS mode lin_slope holds a squared level, so halve the log. */
    if (detection)
        slope *= 0.5;

    if (IS_FAKE_INFINITY(ratio)) {
        gain = thres;
        delta = 0.0;
    } else {
        gain = (slope - thres) / ratio + thres;
        delta = 1.0 / ratio;
    }

    /* Inside the knee region, smooth the transition around the threshold
     * with a cubic hermite spline instead of the hard-ratio line. */
    if (knee > 1.0 && slope < knee_stop)
        gain = hermite_interpolation(slope, knee_start, knee_stop,
                                     knee_start, compressed_knee_stop,
                                     1.0, delta);

    return exp(gain - slope);
}
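
/*
 * Worked example (editor's illustration; peak detection, above the knee):
 * with threshold = 0.125 (thres = log(0.125) ~= -2.079) and ratio = 2, a
 * detector level of 0.5 gives slope = log(0.5) ~= -0.693, which is above
 * knee_stop, so gain = (slope - thres) / ratio + thres ~= -1.386 and the
 * returned multiplier is exp(gain - slope) = exp(-0.693) ~= 0.5: a signal
 * 12 dB above the threshold is pushed down to 6 dB above it.
 */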

static int compressor_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SidechainCompressContext *s = ctx->priv;

    s->attack_coeff = FFMIN(1., 1. / (s->attack * outlink->sample_rate / 4000.));
    s->release_coeff = FFMIN(1., 1. / (s->release * outlink->sample_rate / 4000.));

    return 0;
}
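
/*
 * Illustration (editor's note, assumed sample rate not taken from the
 * source): with attack = 20 ms at 44100 Hz, attack_coeff =
 * min(1, 1 / (20 * 44100 / 4000)) ~= 0.0045, so the detector envelope below
 * moves about 0.45% of the remaining distance towards the instantaneous
 * sidechain level on every sample while the level is rising.
 */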

static void compressor(SidechainCompressContext *s,
                       double *sample, const double *scsrc, int nb_samples,
                       double level_in, double level_sc,
                       AVFilterLink *inlink, AVFilterLink *sclink)
{
    const double makeup = s->makeup;
    const double mix = s->mix;
    int i, c;

    for (i = 0; i < nb_samples; i++) {
        double abs_sample, gain = 1.0;

        /* Collapse the sidechain channels into one detector value:
         * per-channel maximum or per-channel average, depending on link. */
        abs_sample = fabs(scsrc[0] * level_sc);

        if (s->link == 1) {
            for (c = 1; c < sclink->channels; c++)
                abs_sample = FFMAX(fabs(scsrc[c] * level_sc), abs_sample);
        } else {
            for (c = 1; c < sclink->channels; c++)
                abs_sample += fabs(scsrc[c] * level_sc);

            abs_sample /= sclink->channels;
        }

        /* RMS detection tracks the squared level. */
        if (s->detection)
            abs_sample *= abs_sample;

        /* One-pole envelope follower with separate attack/release rates. */
        s->lin_slope += (abs_sample - s->lin_slope) * (abs_sample > s->lin_slope ? s->attack_coeff : s->release_coeff);

        if (s->lin_slope > 0.0 && s->lin_slope > (s->detection ? s->adj_knee_start : s->lin_knee_start))
            gain = output_gain(s->lin_slope, s->ratio, s->thres, s->knee,
                               s->knee_start, s->knee_stop,
                               s->compressed_knee_stop, s->detection);

        /* Apply gain, make-up and dry/wet mix to every main-input channel. */
        for (c = 0; c < inlink->channels; c++)
            sample[c] *= level_in * (gain * makeup * mix + (1. - mix));

        sample += inlink->channels;
        scsrc += sclink->channels;
    }
}

#if CONFIG_SIDECHAINCOMPRESS_FILTER
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *ctx = link->dst;
    SidechainCompressContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    const double *scsrc;
    double *sample;
    int nb_samples;
    int ret, i;

    for (i = 0; i < 2; i++)
        if (link == ctx->inputs[i])
            break;
    av_assert0(i < 2 && !s->input_frame[i]);
    s->input_frame[i] = frame;

    if (!s->input_frame[0] || !s->input_frame[1])
        return 0;

    nb_samples = FFMIN(s->input_frame[0]->nb_samples,
                       s->input_frame[1]->nb_samples);

    sample = (double *)s->input_frame[0]->data[0];
    scsrc = (const double *)s->input_frame[1]->data[0];

    compressor(s, sample, scsrc, nb_samples,
               s->level_in, s->level_sc,
               ctx->inputs[0], ctx->inputs[1]);
    ret = ff_filter_frame(outlink, s->input_frame[0]);

    s->input_frame[0] = NULL;
    av_frame_free(&s->input_frame[1]);

    return ret;
}

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SidechainCompressContext *s = ctx->priv;
    int i, ret;

    /* get a frame on each input */
    for (i = 0; i < 2; i++) {
        AVFilterLink *inlink = ctx->inputs[i];
        if (!s->input_frame[i] &&
            (ret = ff_request_frame(inlink)) < 0)
            return ret;

        /* request the same number of samples on all inputs */
        if (i == 0)
            ctx->inputs[1]->request_samples = s->input_frame[0]->nb_samples;
    }

    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    int ret, i;

    if (!ctx->inputs[0]->in_channel_layouts ||
        !ctx->inputs[0]->in_channel_layouts->nb_channel_layouts) {
        av_log(ctx, AV_LOG_WARNING,
               "No channel layout for input 1\n");
        return AVERROR(EAGAIN);
    }

    if ((ret = ff_add_channel_layout(&layouts, ctx->inputs[0]->in_channel_layouts->channel_layouts[0])) < 0 ||
        (ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
        return ret;

    for (i = 0; i < 2; i++) {
        layouts = ff_all_channel_counts();
        if (!layouts)
            return AVERROR(ENOMEM);
        if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
            return ret;
    }

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);

    return ff_set_common_samplerates(ctx, formats);
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;

    if (ctx->inputs[0]->sample_rate != ctx->inputs[1]->sample_rate) {
        av_log(ctx, AV_LOG_ERROR,
               "Inputs must have the same sample rate "
               "%d for in0 vs %d for in1\n",
               ctx->inputs[0]->sample_rate, ctx->inputs[1]->sample_rate);
        return AVERROR(EINVAL);
    }

    outlink->sample_rate = ctx->inputs[0]->sample_rate;
    outlink->time_base = ctx->inputs[0]->time_base;
    outlink->channel_layout = ctx->inputs[0]->channel_layout;
    outlink->channels = ctx->inputs[0]->channels;

    compressor_config_output(outlink);

    return 0;
}

static const AVFilterPad sidechaincompress_inputs[] = {
    {
        .name           = "main",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = filter_frame,
        .needs_writable = 1,
        .needs_fifo     = 1,
    },{
        .name           = "sidechain",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = filter_frame,
        .needs_fifo     = 1,
    },
    { NULL }
};

static const AVFilterPad sidechaincompress_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_af_sidechaincompress = {
    .name          = "sidechaincompress",
    .description   = NULL_IF_CONFIG_SMALL("Sidechain compressor."),
    .priv_size     = sizeof(SidechainCompressContext),
    .priv_class    = &sidechaincompress_class,
    .init          = init,
    .query_formats = query_formats,
    .inputs        = sidechaincompress_inputs,
    .outputs       = sidechaincompress_outputs,
};
#endif /* CONFIG_SIDECHAINCOMPRESS_FILTER */

#if CONFIG_ACOMPRESSOR_FILTER
static int acompressor_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    SidechainCompressContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    double *sample;

    sample = (double *)frame->data[0];
    compressor(s, sample, sample, frame->nb_samples,
               s->level_in, s->level_in,
               inlink, inlink);

    return ff_filter_frame(outlink, frame);
}

static int acompressor_query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);

    return ff_set_common_samplerates(ctx, formats);
}

#define acompressor_options options
AVFILTER_DEFINE_CLASS(acompressor);

static const AVFilterPad acompressor_inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = acompressor_filter_frame,
        .needs_writable = 1,
    },
    { NULL }
};

static const AVFilterPad acompressor_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = compressor_config_output,
    },
    { NULL }
};

AVFilter ff_af_acompressor = {
    .name          = "acompressor",
    .description   = NULL_IF_CONFIG_SMALL("Audio compressor."),
    .priv_size     = sizeof(SidechainCompressContext),
    .priv_class    = &acompressor_class,
    .init          = init,
    .query_formats = acompressor_query_formats,
    .inputs        = acompressor_inputs,
    .outputs       = acompressor_outputs,
};
#endif /* CONFIG_ACOMPRESSOR_FILTER */