You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

604 lines
17KB

  1. /*
  2. * Copyright (c) 1999 Chris Bagwell
  3. * Copyright (c) 1999 Nick Bailey
  4. * Copyright (c) 2007 Rob Sykes <robs@users.sourceforge.net>
  5. * Copyright (c) 2013 Paul B Mahol
  6. * Copyright (c) 2014 Andrew Kelley
  7. *
  8. * This file is part of libav.
  9. *
  10. * Libav is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU Lesser General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2.1 of the License, or (at your option) any later version.
  14. *
  15. * Libav is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * Lesser General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU Lesser General Public
  21. * License along with Libav; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  23. */
  24. /**
  25. * @file
  26. * audio compand filter
  27. */
  28. #include <string.h>
  29. #include "libavutil/avstring.h"
  30. #include "libavutil/channel_layout.h"
  31. #include "libavutil/common.h"
  32. #include "libavutil/mathematics.h"
  33. #include "libavutil/mem.h"
  34. #include "libavutil/opt.h"
  35. #include "audio.h"
  36. #include "avfilter.h"
  37. #include "formats.h"
  38. #include "internal.h"
/* Per-channel envelope-follower state. */
typedef struct ChanParam {
    float attack; /* parsed as seconds, converted in config_output() to a
                   * per-sample smoothing coefficient used when level rises */
    float decay;  /* same, used when the level falls */
    float volume; /* currently tracked volume (linear), updated per sample */
} ChanParam;
/*
 * One piece of the piecewise transfer function, in the natural-log domain:
 * out = y + t * (a*t + b) with t = in_log - x (see get_volume()).
 */
typedef struct CompandSegment {
    float x, y; /* segment start point (log-domain input / output) */
    float a, b; /* quadratic / linear interpolation coefficients */
} CompandSegment;
/* Filter private state. */
typedef struct CompandContext {
    const AVClass *class;
    int nb_channels;                  /* channel count, fixed in config_output() */
    int nb_segments;                  /* number of entries in segments[] */
    char *attacks, *decays, *points;  /* raw '|'-separated option strings */
    CompandSegment *segments;         /* compiled transfer function */
    ChanParam *channels;              /* per-channel envelope state */
    float in_min_lin;                 /* inputs below this map straight to out_min_lin */
    float out_min_lin;                /* floor of the output transfer function */
    double curve_dB;                  /* soft-knee width ("soft-knee" option) */
    double gain_dB;                   /* extra output gain ("gain" option) */
    double initial_volume;            /* initial volume, dB ("volume" option) */
    double delay;                     /* look-ahead delay, seconds ("delay" option) */
    AVFrame *delay_frame;             /* ring buffer holding the delayed samples */
    int delay_samples;                /* ring capacity = delay * sample_rate */
    int delay_count;                  /* samples currently buffered in the ring */
    int delay_index;                  /* ring write position */
    int64_t pts;                      /* next output pts (delay path only) */
    /* processing callback selected in config_output(): delay or no-delay */
    int (*compand)(AVFilterContext *ctx, AVFrame *frame);
} CompandContext;
#define OFFSET(x) offsetof(CompandContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM

/* User-visible options; times are in seconds, levels in dB. */
static const AVOption compand_options[] = {
    { "attacks", "set time over which increase of volume is determined", OFFSET(attacks), AV_OPT_TYPE_STRING, { .str = "0.3" }, 0, 0, A },
    { "decays", "set time over which decrease of volume is determined", OFFSET(decays), AV_OPT_TYPE_STRING, { .str = "0.8" }, 0, 0, A },
    { "points", "set points of transfer function", OFFSET(points), AV_OPT_TYPE_STRING, { .str = "-70/-70|-60/-20" }, 0, 0, A },
    { "soft-knee", "set soft-knee", OFFSET(curve_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.01, 900, A },
    { "gain", "set output gain", OFFSET(gain_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 900, A },
    { "volume", "set initial volume", OFFSET(initial_volume), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 0, A },
    { "delay", "set delay for samples before sending them to volume adjuster", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, 20, A },
    { NULL }
};
/* AVClass binding the option table to the private context for av_opt. */
static const AVClass compand_class = {
    .class_name = "compand filter",
    .item_name  = av_default_item_name,
    .option     = compand_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
  86. static av_cold int init(AVFilterContext *ctx)
  87. {
  88. CompandContext *s = ctx->priv;
  89. s->pts = AV_NOPTS_VALUE;
  90. return 0;
  91. }
  92. static av_cold void uninit(AVFilterContext *ctx)
  93. {
  94. CompandContext *s = ctx->priv;
  95. av_freep(&s->channels);
  96. av_freep(&s->segments);
  97. av_frame_free(&s->delay_frame);
  98. }
/*
 * Advertise the formats this filter supports: any channel layout,
 * planar float samples only, any sample rate.
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *formats;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_NONE
    };

    layouts = ff_all_channel_layouts();
    if (!layouts)
        return AVERROR(ENOMEM);
    /* NOTE(review): the ff_set_common_* return values are ignored here —
     * confirm they cannot fail in this libav version. */
    ff_set_common_channel_layouts(ctx, layouts);

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_set_common_formats(ctx, formats);

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    ff_set_common_samplerates(ctx, formats);

    return 0;
}
  121. static void count_items(char *item_str, int *nb_items)
  122. {
  123. char *p;
  124. *nb_items = 1;
  125. for (p = item_str; *p; p++) {
  126. if (*p == '|')
  127. (*nb_items)++;
  128. }
  129. }
  130. static void update_volume(ChanParam *cp, float in)
  131. {
  132. float delta = in - cp->volume;
  133. if (delta > 0.0)
  134. cp->volume += delta * cp->attack;
  135. else
  136. cp->volume += delta * cp->decay;
  137. }
  138. static float get_volume(CompandContext *s, float in_lin)
  139. {
  140. CompandSegment *cs;
  141. float in_log, out_log;
  142. int i;
  143. if (in_lin < s->in_min_lin)
  144. return s->out_min_lin;
  145. in_log = logf(in_lin);
  146. for (i = 1; i < s->nb_segments; i++)
  147. if (in_log <= s->segments[i].x)
  148. break;
  149. cs = &s->segments[i - 1];
  150. in_log -= cs->x;
  151. out_log = cs->y + in_log * (cs->a * in_log + cs->b);
  152. return expf(out_log);
  153. }
/*
 * Companding without look-ahead delay: apply the gain computed from the
 * envelope directly to each sample. Processes in place when the input
 * frame is writable, otherwise into a freshly allocated frame.
 * Takes ownership of @frame in all cases.
 */
static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
{
    CompandContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const int channels = s->nb_channels;
    const int nb_samples = frame->nb_samples;
    AVFrame *out_frame;
    int chan, i;
    int err;

    if (av_frame_is_writable(frame)) {
        out_frame = frame;
    } else {
        out_frame = ff_get_audio_buffer(inlink, nb_samples);
        if (!out_frame) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        err = av_frame_copy_props(out_frame, frame);
        if (err < 0) {
            av_frame_free(&out_frame);
            av_frame_free(&frame);
            return err;
        }
    }

    for (chan = 0; chan < channels; chan++) {
        const float *src = (float *)frame->extended_data[chan];
        float *dst = (float *)out_frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];

        for (i = 0; i < nb_samples; i++) {
            /* Envelope tracks |sample|; gain is looked up from the curve. */
            update_volume(cp, fabs(src[i]));

            dst[i] = av_clipf(src[i] * get_volume(s, cp->volume), -1.0f, 1.0f);
        }
    }

    /* Only free the input if we produced a separate output frame. */
    if (frame != out_frame)
        av_frame_free(&frame);

    return ff_filter_frame(ctx->outputs[0], out_frame);
}
/* Cheap modulo for ring-buffer stepping; valid only while a < 2*b. */
#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))

/*
 * Companding with look-ahead delay. Each incoming sample updates the
 * envelope immediately, but the gain is applied to the sample that entered
 * the per-channel ring buffer delay_samples ago — so the gain change leads
 * the audio it affects. While the ring is still filling no output is
 * produced for those samples (they are emitted by compand_drain() at EOF).
 * Takes ownership of @frame.
 */
static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
{
    CompandContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const int channels = s->nb_channels;
    const int nb_samples = frame->nb_samples;
    int chan, i, dindex = 0, oindex, count = 0;
    AVFrame *out_frame = NULL;
    int err;

    /* Establish the output timebase origin from the first frame seen. */
    if (s->pts == AV_NOPTS_VALUE) {
        s->pts = (frame->pts == AV_NOPTS_VALUE) ? 0 : frame->pts;
    }

    for (chan = 0; chan < channels; chan++) {
        AVFrame *delay_frame = s->delay_frame;
        const float *src = (float *)frame->extended_data[chan];
        float *dbuf = (float *)delay_frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];
        float *dst;

        /* count/dindex advance identically for every channel; reload the
         * shared values per channel and store the final ones after the loop. */
        count = s->delay_count;
        dindex = s->delay_index;
        for (i = 0, oindex = 0; i < nb_samples; i++) {
            const float in = src[i];
            update_volume(cp, fabs(in));

            if (count >= s->delay_samples) {
                /* Ring is full: pop the oldest sample with the current gain.
                 * The output frame is allocated lazily on the first popped
                 * sample and covers the remaining nb_samples - i samples. */
                if (!out_frame) {
                    out_frame = ff_get_audio_buffer(inlink, nb_samples - i);
                    if (!out_frame) {
                        av_frame_free(&frame);
                        return AVERROR(ENOMEM);
                    }
                    err = av_frame_copy_props(out_frame, frame);
                    if (err < 0) {
                        av_frame_free(&out_frame);
                        av_frame_free(&frame);
                        return err;
                    }
                    out_frame->pts = s->pts;
                    s->pts += av_rescale_q(nb_samples - i,
                            (AVRational){ 1, inlink->sample_rate },
                            inlink->time_base);
                }

                dst = (float *)out_frame->extended_data[chan];
                dst[oindex++] = av_clipf(dbuf[dindex] *
                        get_volume(s, cp->volume), -1.0f, 1.0f);
            } else {
                /* Still priming the delay line; just count the sample. */
                count++;
            }

            /* Overwrite the popped (or empty) slot with the new sample. */
            dbuf[dindex] = in;
            dindex = MOD(dindex + 1, s->delay_samples);
        }
    }
    s->delay_count = count;
    s->delay_index = dindex;

    av_frame_free(&frame);
    return out_frame ? ff_filter_frame(ctx->outputs[0], out_frame) : 0;
}
/*
 * Flush samples still held in the delay ring at EOF. Called from
 * request_frame() while delay_count > 0; emits at most 2048 samples per
 * call, so it may be invoked repeatedly until the ring is empty.
 */
static int compand_drain(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    CompandContext *s = ctx->priv;
    const int channels = s->nb_channels;
    AVFrame *frame = NULL;
    int chan, i, dindex;

    /* 2048 is to limit output frame size during drain */
    frame = ff_get_audio_buffer(outlink, FFMIN(2048, s->delay_count));
    if (!frame)
        return AVERROR(ENOMEM);
    frame->pts = s->pts;
    s->pts += av_rescale_q(frame->nb_samples,
            (AVRational){ 1, outlink->sample_rate }, outlink->time_base);

    for (chan = 0; chan < channels; chan++) {
        AVFrame *delay_frame = s->delay_frame;
        float *dbuf = (float *)delay_frame->extended_data[chan];
        float *dst = (float *)frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];

        /* Pop buffered samples with the last gain state; the envelope is
         * no longer updated since there is no new input. */
        dindex = s->delay_index;
        for (i = 0; i < frame->nb_samples; i++) {
            dst[i] = av_clipf(dbuf[dindex] * get_volume(s, cp->volume),
                    -1.0f, 1.0f);
            dindex = MOD(dindex + 1, s->delay_samples);
        }
    }
    s->delay_count -= frame->nb_samples;
    s->delay_index = dindex;

    return ff_filter_frame(outlink, frame);
}
/**
 * Output-link configuration: parse the attack/decay/points option strings,
 * compile the transfer-function points into log-domain segments with
 * soft-knee corner rounding, convert per-channel times into smoothing
 * coefficients, and select the processing callback (delay / no-delay).
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    CompandContext *s = ctx->priv;
    const int sample_rate = outlink->sample_rate;
    double radius = s->curve_dB * M_LN10 / 20.0; /* knee radius, dB -> nat. log */
    const char *p;
    const int channels =
        av_get_channel_layout_nb_channels(outlink->channel_layout);
    int nb_attacks, nb_decays, nb_points;
    int new_nb_items, num;
    int i;
    int err;

    count_items(s->attacks, &nb_attacks);
    count_items(s->decays, &nb_decays);
    count_items(s->points, &nb_points);

    if (channels <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid number of channels: %d\n", channels);
        return AVERROR(EINVAL);
    }

    if (nb_attacks > channels || nb_decays > channels) {
        av_log(ctx, AV_LOG_ERROR,
               "Number of attacks/decays bigger than number of channels.\n");
        return AVERROR(EINVAL);
    }

    /* Drop state from any previous configuration before reallocating;
     * uninit() NULLs the pointers, so this is safe on first call too. */
    uninit(ctx);

    s->nb_channels = channels;
    s->channels = av_mallocz_array(channels, sizeof(*s->channels));
    /* Two slots per point plus room for the synthetic head/tail entries
     * added below; zero-filled so unparsed slots read as (0,0). */
    s->nb_segments = (nb_points + 4) * 2;
    s->segments = av_mallocz_array(s->nb_segments, sizeof(*s->segments));

    if (!s->channels || !s->segments) {
        uninit(ctx);
        return AVERROR(ENOMEM);
    }

    /* Parse per-channel attack times (seconds). */
    p = s->attacks;
    for (i = 0, new_nb_items = 0; i < nb_attacks; i++) {
        char *tstr = av_get_token(&p, "|");
        if (!tstr)
            return AVERROR(ENOMEM);
        new_nb_items += sscanf(tstr, "%f", &s->channels[i].attack) == 1;
        av_freep(&tstr);
        if (s->channels[i].attack < 0) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        if (*p)
            p++; /* skip the '|' separator */
    }
    nb_attacks = new_nb_items;

    /* Parse per-channel decay times (seconds). */
    p = s->decays;
    for (i = 0, new_nb_items = 0; i < nb_decays; i++) {
        char *tstr = av_get_token(&p, "|");
        if (!tstr)
            return AVERROR(ENOMEM);
        new_nb_items += sscanf(tstr, "%f", &s->channels[i].decay) == 1;
        av_freep(&tstr);
        if (s->channels[i].decay < 0) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        if (*p)
            p++;
    }
    nb_decays = new_nb_items;

    if (nb_attacks != nb_decays) {
        av_log(ctx, AV_LOG_ERROR,
               "Number of attacks %d differs from number of decays %d.\n",
               nb_attacks, nb_decays);
        uninit(ctx);
        return AVERROR(EINVAL);
    }

/* Points occupy every other slot starting at index 2; the slots between
 * them receive the knee-rounding points computed below. */
#define S(x) s->segments[2 * ((x) + 1)]
    /* Parse the "in_dB/out_dB" transfer-function points. */
    p = s->points;
    for (i = 0, new_nb_items = 0; i < nb_points; i++) {
        char *tstr = av_get_token(&p, "|");
        if (!tstr)
            return AVERROR(ENOMEM);
        err = sscanf(tstr, "%f/%f", &S(i).x, &S(i).y);
        av_freep(&tstr);
        if (err != 2) {
            av_log(ctx, AV_LOG_ERROR,
                   "Invalid and/or missing input/output value.\n");
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        if (i && S(i - 1).x > S(i).x) {
            av_log(ctx, AV_LOG_ERROR,
                   "Transfer function input values must be increasing.\n");
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        /* Store output as an offset (gain) relative to the input level. */
        S(i).y -= S(i).x;
        av_log(ctx, AV_LOG_DEBUG, "%d: x=%f y=%f\n", i, S(i).x, S(i).y);
        new_nb_items++;
        if (*p)
            p++;
    }
    num = new_nb_items;

    /* Add 0,0 if necessary */
    if (num == 0 || S(num - 1).x)
        num++;

#undef S
/* From here on, index the segment slots directly (head entry included). */
#define S(x) s->segments[2 * (x)]
    /* Add a tail off segment at the start */
    S(0).x = S(1).x - 2 * s->curve_dB;
    S(0).y = S(1).y;
    num++;

    /* Join adjacent colinear segments */
    for (i = 2; i < num; i++) {
        double g1 = (S(i - 1).y - S(i - 2).y) * (S(i - 0).x - S(i - 1).x);
        double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);
        int j;

        /* here we purposefully lose precision so that we can compare floats */
        if (fabs(g1 - g2))
            continue;

        num--;
        for (j = --i; j < num; j++)
            S(j) = S(j + 1);
    }

    /* Apply the output gain and convert all points from dB to natural log. */
    for (i = 0; !i || s->segments[i - 2].x; i += 2) {
        s->segments[i].y += s->gain_dB;
        s->segments[i].x *= M_LN10 / 20;
        s->segments[i].y *= M_LN10 / 20;
    }

/* L(x): segment x slots behind the loop cursor i. */
#define L(x) s->segments[i - (x)]
    /* Round the corner between each pair of straight segments with a
     * quadratic soft knee of (at most) the configured radius. */
    for (i = 4; s->segments[i - 2].x; i += 2) {
        double x, y, cx, cy, in1, in2, out1, out2, theta, len, r;

        /* Straight-line coefficients for the two segments meeting at L(2). */
        L(4).a = 0;
        L(4).b = (L(2).y - L(4).y) / (L(2).x - L(4).x);

        L(2).a = 0;
        L(2).b = (L(0).y - L(2).y) / (L(0).x - L(2).x);

        /* Knee start: back off along the incoming segment by r. */
        theta = atan2(L(2).y - L(4).y, L(2).x - L(4).x);
        len = sqrt(pow(L(2).x - L(4).x, 2.) + pow(L(2).y - L(4).y, 2.));
        r = FFMIN(radius, len);
        L(3).x = L(2).x - r * cos(theta);
        L(3).y = L(2).y - r * sin(theta);

        /* Knee end: advance along the outgoing segment by r (half-length cap). */
        theta = atan2(L(0).y - L(2).y, L(0).x - L(2).x);
        len = sqrt(pow(L(0).x - L(2).x, 2.) + pow(L(0).y - L(2).y, 2.));
        r = FFMIN(radius, len / 2);
        x = L(2).x + r * cos(theta);
        y = L(2).y + r * sin(theta);

        /* Centroid of the knee triangle guides the quadratic fit. */
        cx = (L(3).x + L(2).x + x) / 3;
        cy = (L(3).y + L(2).y + y) / 3;

        L(2).x = x;
        L(2).y = y;

        in1  = cx - L(3).x;
        out1 = cy - L(3).y;
        in2  = L(2).x - L(3).x;
        out2 = L(2).y - L(3).y;
        /* Quadratic through the knee region relative to its start L(3). */
        L(3).a = (out2 / in2 - out1 / in1) / (in2 - in1);
        L(3).b = out1 / in1 - L(3).a * in1;
    }
    /* Terminate the table; i retains its value from the loop above. */
    L(3).x = 0;
    L(3).y = L(2).y;

    /* Linear-domain thresholds used by get_volume()'s fast path. */
    s->in_min_lin  = exp(s->segments[1].x);
    s->out_min_lin = exp(s->segments[1].y);

    /* Convert attack/decay times (seconds) to per-sample smoothing factors;
     * times shorter than one sample period become instantaneous (1.0). */
    for (i = 0; i < channels; i++) {
        ChanParam *cp = &s->channels[i];

        if (cp->attack > 1.0 / sample_rate)
            cp->attack = 1.0 - exp(-1.0 / (sample_rate * cp->attack));
        else
            cp->attack = 1.0;
        if (cp->decay > 1.0 / sample_rate)
            cp->decay = 1.0 - exp(-1.0 / (sample_rate * cp->decay));
        else
            cp->decay = 1.0;
        cp->volume = pow(10.0, s->initial_volume / 20);
    }

    s->delay_samples = s->delay * sample_rate;
    if (s->delay_samples <= 0) {
        s->compand = compand_nodelay;
        return 0;
    }

    /* Allocate the look-ahead ring buffer for the delay path. */
    s->delay_frame = av_frame_alloc();
    if (!s->delay_frame) {
        uninit(ctx);
        return AVERROR(ENOMEM);
    }

    s->delay_frame->format         = outlink->format;
    s->delay_frame->nb_samples     = s->delay_samples;
    s->delay_frame->channel_layout = outlink->channel_layout;

    err = av_frame_get_buffer(s->delay_frame, 32);
    if (err)
        return err;

    s->compand = compand_delay;
    return 0;
}
  465. static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
  466. {
  467. AVFilterContext *ctx = inlink->dst;
  468. CompandContext *s = ctx->priv;
  469. return s->compand(ctx, frame);
  470. }
  471. static int request_frame(AVFilterLink *outlink)
  472. {
  473. AVFilterContext *ctx = outlink->src;
  474. CompandContext *s = ctx->priv;
  475. int ret;
  476. ret = ff_request_frame(ctx->inputs[0]);
  477. if (ret == AVERROR_EOF && s->delay_count)
  478. ret = compand_drain(outlink);
  479. return ret;
  480. }
/* Single audio input; frames are pushed through filter_frame(). */
static const AVFilterPad compand_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single audio output; config_output() compiles options per-link. */
static const AVFilterPad compand_outputs[] = {
    {
        .name          = "default",
        .request_frame = request_frame,
        .config_props  = config_output,
        .type          = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter registration entry. */
AVFilter ff_af_compand = {
    .name           = "compand",
    .description    = NULL_IF_CONFIG_SMALL(
            "Compress or expand audio dynamic range."),
    .query_formats  = query_formats,
    .priv_size      = sizeof(CompandContext),
    .priv_class     = &compand_class,
    .init           = init,
    .uninit         = uninit,
    .inputs         = compand_inputs,
    .outputs        = compand_outputs,
};