/*
 * Copyright (C) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Audio (Sidechain) Compressor filter
 */

#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"

#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "hermite.h"
#include "internal.h"

typedef struct SidechainCompressContext {
    const AVClass *class;

    double level_in;
    double level_sc;
    double attack, attack_coeff;
    double release, release_coeff;
    double lin_slope;
    double ratio;
    double threshold;
    double makeup;
    double mix;
    double thres;

    double knee;
    double knee_start;
    double knee_stop;
    double lin_knee_start;
    double adj_knee_start;
    double compressed_knee_stop;
    int link;
    int detection;

    AVAudioFifo *fifo[2];
    int64_t pts;
} SidechainCompressContext;

#define OFFSET(x) offsetof(SidechainCompressContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
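
/*
 * Option units: attack and release are expressed in milliseconds; level_in,
 * level_sc, threshold and makeup are linear amplitude factors (not dB); mix
 * blends the compressed signal with the dry input (1 = fully compressed).
 */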
static const AVOption options[] = {
    { "level_in",  "set input gain",     OFFSET(level_in),  AV_OPT_TYPE_DOUBLE, {.dbl=1},       0.015625,    64, A|F },
    { "threshold", "set threshold",      OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0.125},   0.000976563,  1, A|F },
    { "ratio",     "set ratio",          OFFSET(ratio),     AV_OPT_TYPE_DOUBLE, {.dbl=2},       1,           20, A|F },
    { "attack",    "set attack",         OFFSET(attack),    AV_OPT_TYPE_DOUBLE, {.dbl=20},      0.01,      2000, A|F },
    { "release",   "set release",        OFFSET(release),   AV_OPT_TYPE_DOUBLE, {.dbl=250},     0.01,      9000, A|F },
    { "makeup",    "set make up gain",   OFFSET(makeup),    AV_OPT_TYPE_DOUBLE, {.dbl=1},       1,           64, A|F },
    { "knee",      "set knee",           OFFSET(knee),      AV_OPT_TYPE_DOUBLE, {.dbl=2.82843}, 1,            8, A|F },
    { "link",      "set link type",      OFFSET(link),      AV_OPT_TYPE_INT,    {.i64=0},       0,            1, A|F, "link" },
    {   "average", 0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=0},       0,            0, A|F, "link" },
    {   "maximum", 0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=1},       0,            0, A|F, "link" },
    { "detection", "set detection",      OFFSET(detection), AV_OPT_TYPE_INT,    {.i64=1},       0,            1, A|F, "detection" },
    {   "peak",    0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=0},       0,            0, A|F, "detection" },
    {   "rms",     0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=1},       0,            0, A|F, "detection" },
    { "level_sc",  "set sidechain gain", OFFSET(level_sc),  AV_OPT_TYPE_DOUBLE, {.dbl=1},       0.015625,    64, A|F },
    { "mix",       "set mix",            OFFSET(mix),       AV_OPT_TYPE_DOUBLE, {.dbl=1},       0,            1, A|F },
    { NULL }
};

#define sidechaincompress_options options
AVFILTER_DEFINE_CLASS(sidechaincompress);

// A fake infinity value (because real infinity may break some hosts)
#define FAKE_INFINITY (65536.0 * 65536.0)

// Check for infinity (with appropriate-ish tolerance)
#define IS_FAKE_INFINITY(value) (fabs(value-FAKE_INFINITY) < 1.0)
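
/*
 * Gain computer, operating on the logarithm of the smoothed detector level
 * (lin_slope). For RMS detection the level was squared, so the log slope is
 * halved here. Above the knee the target output level follows the 1/ratio
 * line through the threshold (or is clamped to the threshold for a
 * fake-infinity ratio); inside the knee a Hermite spline blends smoothly
 * between unity slope and 1/ratio. The return value exp(gain - slope) is the
 * linear gain to apply to the signal.
 */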
static double output_gain(double lin_slope, double ratio, double thres,
                          double knee, double knee_start, double knee_stop,
                          double compressed_knee_stop, int detection)
{
    double slope = log(lin_slope);
    double gain = 0.0;
    double delta = 0.0;

    if (detection)
        slope *= 0.5;

    if (IS_FAKE_INFINITY(ratio)) {
        gain = thres;
        delta = 0.0;
    } else {
        gain = (slope - thres) / ratio + thres;
        delta = 1.0 / ratio;
    }

    if (knee > 1.0 && slope < knee_stop)
        gain = hermite_interpolation(slope, knee_start, knee_stop,
                                     knee_start, compressed_knee_stop,
                                     1.0, delta);

    return exp(gain - slope);
}
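
/*
 * Precompute the constants used by the gain computer. The soft knee spans
 * threshold/sqrt(knee) .. threshold*sqrt(knee), i.e. it is centered on the
 * threshold in the log domain; adj_knee_start is the squared lower bound
 * used when the detector level is an RMS (squared) value. Attack and release
 * times (in ms) are turned into one-pole smoothing coefficients, clamped to
 * 1 so very short times update the envelope instantly.
 */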
static int compressor_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SidechainCompressContext *s = ctx->priv;

    s->thres = log(s->threshold);
    s->lin_knee_start = s->threshold / sqrt(s->knee);
    s->adj_knee_start = s->lin_knee_start * s->lin_knee_start;
    s->knee_start = log(s->lin_knee_start);
    s->knee_stop = log(s->threshold * sqrt(s->knee));
    s->compressed_knee_stop = (s->knee_stop - s->thres) / s->ratio + s->thres;

    s->attack_coeff = FFMIN(1., 1. / (s->attack * outlink->sample_rate / 4000.));
    s->release_coeff = FFMIN(1., 1. / (s->release * outlink->sample_rate / 4000.));

    return 0;
}
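
/*
 * Process nb_samples interleaved frames. The detector level is taken from
 * the sidechain buffer: the per-channel maximum when link == 1 ("maximum"),
 * otherwise the channel average; it is squared for RMS detection. The level
 * is then smoothed with the attack/release envelope, converted to a gain
 * once it rises above the knee start, and applied (with makeup) to every
 * input channel, crossfaded with the dry signal according to mix.
 */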
static void compressor(SidechainCompressContext *s,
                       const double *src, double *dst, const double *scsrc, int nb_samples,
                       double level_in, double level_sc,
                       AVFilterLink *inlink, AVFilterLink *sclink)
{
    const double makeup = s->makeup;
    const double mix = s->mix;
    int i, c;

    for (i = 0; i < nb_samples; i++) {
        double abs_sample, gain = 1.0;

        abs_sample = fabs(scsrc[0] * level_sc);

        if (s->link == 1) {
            for (c = 1; c < sclink->channels; c++)
                abs_sample = FFMAX(fabs(scsrc[c] * level_sc), abs_sample);
        } else {
            for (c = 1; c < sclink->channels; c++)
                abs_sample += fabs(scsrc[c] * level_sc);

            abs_sample /= sclink->channels;
        }

        if (s->detection)
            abs_sample *= abs_sample;

        s->lin_slope += (abs_sample - s->lin_slope) * (abs_sample > s->lin_slope ? s->attack_coeff : s->release_coeff);

        if (s->lin_slope > 0.0 && s->lin_slope > (s->detection ? s->adj_knee_start : s->lin_knee_start))
            gain = output_gain(s->lin_slope, s->ratio, s->thres, s->knee,
                               s->knee_start, s->knee_stop,
                               s->compressed_knee_stop, s->detection);

        for (c = 0; c < inlink->channels; c++)
            dst[c] = src[c] * level_in * (gain * makeup * mix + (1. - mix));

        src += inlink->channels;
        dst += inlink->channels;
        scsrc += sclink->channels;
    }
}

#if CONFIG_SIDECHAINCOMPRESS_FILTER
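/*
 * Frame-synchronizing activate callback: frames from the main and sidechain
 * inputs are queued in per-input FIFOs and processed in chunks of
 * min(available) samples, so the two streams stay sample-aligned. EOF on
 * either input is forwarded to the output; otherwise a frame is requested
 * on whichever input currently has an empty FIFO.
 */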
static int activate(AVFilterContext *ctx)
{
    SidechainCompressContext *s = ctx->priv;
    AVFrame *out = NULL, *in[2] = { NULL };
    int ret, i, status, nb_samples;
    double *dst;
    int64_t pts;

    if ((ret = ff_inlink_consume_frame(ctx->inputs[0], &in[0])) > 0) {
        av_audio_fifo_write(s->fifo[0], (void **)in[0]->extended_data,
                            in[0]->nb_samples);
        av_frame_free(&in[0]);
    }
    if (ret < 0)
        return ret;
    if ((ret = ff_inlink_consume_frame(ctx->inputs[1], &in[1])) > 0) {
        av_audio_fifo_write(s->fifo[1], (void **)in[1]->extended_data,
                            in[1]->nb_samples);
        av_frame_free(&in[1]);
    }
    if (ret < 0)
        return ret;

    nb_samples = FFMIN(av_audio_fifo_size(s->fifo[0]), av_audio_fifo_size(s->fifo[1]));
    if (nb_samples) {
        out = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
        if (!out)
            return AVERROR(ENOMEM);
        for (i = 0; i < 2; i++) {
            in[i] = ff_get_audio_buffer(ctx->inputs[i], nb_samples);
            if (!in[i]) {
                av_frame_free(&in[0]);
                av_frame_free(&in[1]);
                av_frame_free(&out);
                return AVERROR(ENOMEM);
            }
            av_audio_fifo_read(s->fifo[i], (void **)in[i]->data, nb_samples);
        }

        dst = (double *)out->data[0];
        out->pts = s->pts;
        s->pts += nb_samples;

        compressor(s, (double *)in[0]->data[0], dst,
                   (double *)in[1]->data[0], nb_samples,
                   s->level_in, s->level_sc,
                   ctx->inputs[0], ctx->inputs[1]);

        av_frame_free(&in[0]);
        av_frame_free(&in[1]);

        ret = ff_filter_frame(ctx->outputs[0], out);
        if (ret < 0)
            return ret;
    }

    if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
        ff_outlink_set_status(ctx->outputs[0], status, pts);
        return 0;
    } else if (ff_inlink_acknowledge_status(ctx->inputs[1], &status, &pts)) {
        ff_outlink_set_status(ctx->outputs[0], status, pts);
        return 0;
    } else {
        if (ff_outlink_frame_wanted(ctx->outputs[0])) {
            if (!av_audio_fifo_size(s->fifo[0]))
                ff_inlink_request_frame(ctx->inputs[0]);
            if (!av_audio_fifo_size(s->fifo[1]))
                ff_inlink_request_frame(ctx->inputs[1]);
        }
        return 0;
    }
}
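
/*
 * Format negotiation: the output channel layout is pinned to the main
 * input's (first) layout, while both inputs themselves accept any channel
 * count; samples are negotiated as interleaved doubles. EAGAIN is returned
 * until the main input's channel layout is known.
 */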
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    int ret, i;

    if (!ctx->inputs[0]->in_channel_layouts ||
        !ctx->inputs[0]->in_channel_layouts->nb_channel_layouts) {
        av_log(ctx, AV_LOG_WARNING,
               "No channel layout for input 1\n");
        return AVERROR(EAGAIN);
    }

    if ((ret = ff_add_channel_layout(&layouts, ctx->inputs[0]->in_channel_layouts->channel_layouts[0])) < 0 ||
        (ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
        return ret;

    for (i = 0; i < 2; i++) {
        layouts = ff_all_channel_counts();
        if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
            return ret;
    }

    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;

    formats = ff_all_samplerates();
    return ff_set_common_samplerates(ctx, formats);
}
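
/*
 * Output link setup: both inputs must share a sample rate; the output copies
 * the main input's sample rate, time base and channel layout. One FIFO per
 * input is allocated before the compressor constants are derived.
 */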
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SidechainCompressContext *s = ctx->priv;

    if (ctx->inputs[0]->sample_rate != ctx->inputs[1]->sample_rate) {
        av_log(ctx, AV_LOG_ERROR,
               "Inputs must have the same sample rate "
               "%d for in0 vs %d for in1\n",
               ctx->inputs[0]->sample_rate, ctx->inputs[1]->sample_rate);
        return AVERROR(EINVAL);
    }

    outlink->sample_rate    = ctx->inputs[0]->sample_rate;
    outlink->time_base      = ctx->inputs[0]->time_base;
    outlink->channel_layout = ctx->inputs[0]->channel_layout;
    outlink->channels       = ctx->inputs[0]->channels;

    s->fifo[0] = av_audio_fifo_alloc(ctx->inputs[0]->format, ctx->inputs[0]->channels, 1024);
    s->fifo[1] = av_audio_fifo_alloc(ctx->inputs[1]->format, ctx->inputs[1]->channels, 1024);
    if (!s->fifo[0] || !s->fifo[1])
        return AVERROR(ENOMEM);

    compressor_config_output(outlink);

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SidechainCompressContext *s = ctx->priv;

    av_audio_fifo_free(s->fifo[0]);
    av_audio_fifo_free(s->fifo[1]);
}

static const AVFilterPad sidechaincompress_inputs[] = {
    {
        .name = "main",
        .type = AVMEDIA_TYPE_AUDIO,
    },{
        .name = "sidechain",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

static const AVFilterPad sidechaincompress_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
    },
    { NULL }
};

AVFilter ff_af_sidechaincompress = {
    .name           = "sidechaincompress",
    .description    = NULL_IF_CONFIG_SMALL("Sidechain compressor."),
    .priv_size      = sizeof(SidechainCompressContext),
    .priv_class     = &sidechaincompress_class,
    .query_formats  = query_formats,
    .activate       = activate,
    .uninit         = uninit,
    .inputs         = sidechaincompress_inputs,
    .outputs        = sidechaincompress_outputs,
};
#endif /* CONFIG_SIDECHAINCOMPRESS_FILTER */

#if CONFIG_ACOMPRESSOR_FILTER
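/*
 * acompressor: single-input variant that uses the input signal as its own
 * sidechain, so compressor() is called with the same buffer and link for
 * both the program and detector paths.
 */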
static int acompressor_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    const double *src = (const double *)in->data[0];
    AVFilterContext *ctx = inlink->dst;
    SidechainCompressContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    double *dst;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    dst = (double *)out->data[0];
    compressor(s, src, dst, src, in->nb_samples,
               s->level_in, s->level_in,
               inlink, inlink);

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static int acompressor_query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}

#define acompressor_options options
AVFILTER_DEFINE_CLASS(acompressor);

static const AVFilterPad acompressor_inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = acompressor_filter_frame,
    },
    { NULL }
};

static const AVFilterPad acompressor_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = compressor_config_output,
    },
    { NULL }
};

AVFilter ff_af_acompressor = {
    .name           = "acompressor",
    .description    = NULL_IF_CONFIG_SMALL("Audio compressor."),
    .priv_size      = sizeof(SidechainCompressContext),
    .priv_class     = &acompressor_class,
    .query_formats  = acompressor_query_formats,
    .inputs         = acompressor_inputs,
    .outputs        = acompressor_outputs,
};
#endif /* CONFIG_ACOMPRESSOR_FILTER */