You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

366 lines
13KB

  1. /*
  2. * Copyright (c) 2013 Paul B Mahol
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include "libavutil/avassert.h"
  21. #include "libavutil/avstring.h"
  22. #include "libavutil/opt.h"
  23. #include "libavutil/samplefmt.h"
  24. #include "avfilter.h"
  25. #include "audio.h"
  26. #include "internal.h"
typedef struct AudioEchoContext {
    const AVClass *class;
    float in_gain, out_gain;    ///< gain applied to dry input / final output
    char *delays, *decays;      ///< raw '|'-separated option strings
    float *delay, *decay;       ///< parsed per-echo delay (ms) and decay (linear gain)
    int nb_echoes;              ///< number of parsed delay/decay pairs
    int delay_index;            ///< current write position in the circular delay buffers
    uint8_t **delayptrs;        ///< per-channel circular delay buffers
    int max_samples, fade_out;  ///< longest delay in samples / remaining tail to flush at EOF
    int *samples;               ///< per-echo delay converted to samples (see config_output)
    int64_t next_pts;           ///< pts for the next EOF fade-out frame
    /* Format-specific processing routine, selected in config_output. */
    void (*echo_samples)(struct AudioEchoContext *ctx, uint8_t **delayptrs,
                         uint8_t * const *src, uint8_t **dst,
                         int nb_samples, int channels);
} AudioEchoContext;
#define OFFSET(x) offsetof(AudioEchoContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* User options: delays are '|'-separated values in milliseconds,
 * decays are '|'-separated linear gains in (0, 1]. */
static const AVOption aecho_options[] = {
    { "in_gain",  "set signal input gain",  OFFSET(in_gain),  AV_OPT_TYPE_FLOAT,  {.dbl=0.6}, 0, 1, A },
    { "out_gain", "set signal output gain", OFFSET(out_gain), AV_OPT_TYPE_FLOAT,  {.dbl=0.3}, 0, 1, A },
    { "delays", "set list of signal delays", OFFSET(delays), AV_OPT_TYPE_STRING, {.str="1000"}, 0, 0, A },
    { "decays", "set list of signal decays", OFFSET(decays), AV_OPT_TYPE_STRING, {.str="0.5"}, 0, 0, A },
    { NULL }
};

AVFILTER_DEFINE_CLASS(aecho);
  52. static void count_items(char *item_str, int *nb_items)
  53. {
  54. char *p;
  55. *nb_items = 1;
  56. for (p = item_str; *p; p++) {
  57. if (*p == '|')
  58. (*nb_items)++;
  59. }
  60. }
  61. static void fill_items(char *item_str, int *nb_items, float *items)
  62. {
  63. char *p, *saveptr = NULL;
  64. int i, new_nb_items = 0;
  65. p = item_str;
  66. for (i = 0; i < *nb_items; i++) {
  67. char *tstr = av_strtok(p, "|", &saveptr);
  68. p = NULL;
  69. if (tstr)
  70. new_nb_items += sscanf(tstr, "%f", &items[new_nb_items]) == 1;
  71. }
  72. *nb_items = new_nb_items;
  73. }
/* Release everything allocated by init() and config_output(). */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioEchoContext *s = ctx->priv;

    av_freep(&s->delay);
    av_freep(&s->decay);
    av_freep(&s->samples);

    /* delayptrs[0] owns the contiguous sample storage (allocated by
     * av_samples_alloc_array_and_samples); free it before the pointer array. */
    if (s->delayptrs)
        av_freep(&s->delayptrs[0]);
    av_freep(&s->delayptrs);
}
/*
 * Parse and validate the delays/decays option strings.
 * Returns 0 on success, AVERROR(EINVAL) on bad input,
 * AVERROR(ENOMEM) on allocation failure.
 */
static av_cold int init(AVFilterContext *ctx)
{
    AudioEchoContext *s = ctx->priv;
    int nb_delays, nb_decays, i;

    if (!s->delays || !s->decays) {
        av_log(ctx, AV_LOG_ERROR, "Missing delays and/or decays.\n");
        return AVERROR(EINVAL);
    }

    /* First pass: size the arrays from the separator count... */
    count_items(s->delays, &nb_delays);
    count_items(s->decays, &nb_decays);

    s->delay = av_realloc_f(s->delay, nb_delays, sizeof(*s->delay));
    s->decay = av_realloc_f(s->decay, nb_decays, sizeof(*s->decay));
    if (!s->delay || !s->decay)
        return AVERROR(ENOMEM);

    /* ...second pass: parse; fill_items() lowers the counts to the number
     * of tokens that actually parsed, so compare only afterwards. */
    fill_items(s->delays, &nb_delays, s->delay);
    fill_items(s->decays, &nb_decays, s->decay);

    if (nb_delays != nb_decays) {
        av_log(ctx, AV_LOG_ERROR, "Number of delays %d differs from number of decays %d.\n", nb_delays, nb_decays);
        return AVERROR(EINVAL);
    }

    s->nb_echoes = nb_delays;
    if (!s->nb_echoes) {
        av_log(ctx, AV_LOG_ERROR, "At least one decay & delay must be set.\n");
        return AVERROR(EINVAL);
    }

    /* Per-echo delay in samples; filled in config_output() once the
     * output sample rate is known. */
    s->samples = av_realloc_f(s->samples, nb_delays, sizeof(*s->samples));
    if (!s->samples)
        return AVERROR(ENOMEM);

    /* Range checks: delay in (0, 90000] ms, decay in (0, 1]. */
    for (i = 0; i < nb_delays; i++) {
        if (s->delay[i] <= 0 || s->delay[i] > 90000) {
            av_log(ctx, AV_LOG_ERROR, "delay[%d]: %f is out of allowed range: (0, 90000]\n", i, s->delay[i]);
            return AVERROR(EINVAL);
        }
        if (s->decay[i] <= 0 || s->decay[i] > 1) {
            av_log(ctx, AV_LOG_ERROR, "decay[%d]: %f is out of allowed range: (0, 1]\n", i, s->decay[i]);
            return AVERROR(EINVAL);
        }
    }

    s->next_pts = AV_NOPTS_VALUE;

    av_log(ctx, AV_LOG_DEBUG, "nb_echoes:%d\n", s->nb_echoes);
    return 0;
}
  126. static int query_formats(AVFilterContext *ctx)
  127. {
  128. AVFilterChannelLayouts *layouts;
  129. AVFilterFormats *formats;
  130. static const enum AVSampleFormat sample_fmts[] = {
  131. AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S32P,
  132. AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP,
  133. AV_SAMPLE_FMT_NONE
  134. };
  135. int ret;
  136. layouts = ff_all_channel_counts();
  137. if (!layouts)
  138. return AVERROR(ENOMEM);
  139. ret = ff_set_common_channel_layouts(ctx, layouts);
  140. if (ret < 0)
  141. return ret;
  142. formats = ff_make_format_list(sample_fmts);
  143. if (!formats)
  144. return AVERROR(ENOMEM);
  145. ret = ff_set_common_formats(ctx, formats);
  146. if (ret < 0)
  147. return ret;
  148. formats = ff_all_samplerates();
  149. if (!formats)
  150. return AVERROR(ENOMEM);
  151. return ff_set_common_samplerates(ctx, formats);
  152. }
/* Wrap a circular-buffer index.  Callers keep a < 2*b, so one conditional
 * subtraction suffices (cheaper than '%'). */
#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))

/*
 * Instantiate one echo routine per sample format.  For each sample:
 *   out = (in * in_gain + sum_j delayed[j] * decay[j]) * out_gain
 * clipped to [min, max].  delayptrs holds one circular buffer of
 * max_samples entries per channel; the current input is written into the
 * buffer after the taps are read, and delay_index advances per sample.
 */
#define ECHO(name, type, min, max)                                          \
static void echo_samples_## name ##p(AudioEchoContext *ctx,                 \
                                     uint8_t **delayptrs,                   \
                                     uint8_t * const *src, uint8_t **dst,   \
                                     int nb_samples, int channels)          \
{                                                                           \
    const double out_gain = ctx->out_gain;                                  \
    const double in_gain = ctx->in_gain;                                    \
    const int nb_echoes = ctx->nb_echoes;                                   \
    const int max_samples = ctx->max_samples;                               \
    int i, j, chan, av_uninit(index);                                       \
                                                                            \
    av_assert1(channels > 0); /* would corrupt delay_index */               \
                                                                            \
    for (chan = 0; chan < channels; chan++) {                               \
        const type *s = (type *)src[chan];                                  \
        type *d = (type *)dst[chan];                                        \
        type *dbuf = (type *)delayptrs[chan];                               \
                                                                            \
        /* every channel starts from the same shared index */               \
        index = ctx->delay_index;                                           \
        for (i = 0; i < nb_samples; i++, s++, d++) {                        \
            double out, in;                                                 \
                                                                            \
            in = *s;                                                        \
            out = in * in_gain;                                             \
            for (j = 0; j < nb_echoes; j++) {                               \
                /* read tap ctx->samples[j] samples behind the write pos */ \
                int ix = index + max_samples - ctx->samples[j];             \
                ix = MOD(ix, max_samples);                                  \
                out += dbuf[ix] * ctx->decay[j];                            \
            }                                                               \
            out *= out_gain;                                                \
                                                                            \
            *d = av_clipd(out, min, max);                                   \
            dbuf[index] = in;                                               \
                                                                            \
            index = MOD(index + 1, max_samples);                            \
        }                                                                   \
    }                                                                       \
    ctx->delay_index = index;                                               \
}

ECHO(dbl, double,  -1.0,      1.0      )
ECHO(flt, float,   -1.0,      1.0      )
ECHO(s16, int16_t, INT16_MIN, INT16_MAX)
ECHO(s32, int32_t, INT32_MIN, INT32_MAX)
/*
 * Finalize the setup once the output sample rate and format are known:
 * convert delays to sample counts, pick the per-format echo routine and
 * allocate the circular delay buffers.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioEchoContext *s = ctx->priv;
    float volume = 1.0;
    int i;

    /* Milliseconds -> samples; the longest delay sets the buffer size. */
    for (i = 0; i < s->nb_echoes; i++) {
        s->samples[i] = s->delay[i] * outlink->sample_rate / 1000.0;
        s->max_samples = FFMAX(s->max_samples, s->samples[i]);
        volume += s->decay[i];
    }

    if (s->max_samples <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Nothing to echo - missing delay samples.\n");
        return AVERROR(EINVAL);
    }
    /* Flush this many samples of echo tail after EOF (see request_frame). */
    s->fade_out = s->max_samples;

    /* Warn when (1 + sum(decays)) * in_gain * out_gain exceeds unity,
     * i.e. the combined gains can clip the output. */
    if (volume * s->in_gain * s->out_gain > 1.0)
        av_log(ctx, AV_LOG_WARNING,
               "out_gain %f can cause saturation of output\n", s->out_gain);

    /* Only these four formats pass negotiation in query_formats(). */
    switch (outlink->format) {
    case AV_SAMPLE_FMT_DBLP: s->echo_samples = echo_samples_dblp; break;
    case AV_SAMPLE_FMT_FLTP: s->echo_samples = echo_samples_fltp; break;
    case AV_SAMPLE_FMT_S16P: s->echo_samples = echo_samples_s16p; break;
    case AV_SAMPLE_FMT_S32P: s->echo_samples = echo_samples_s32p; break;
    }

    /* Drop any buffers from a previous configuration before reallocating
     * (delayptrs[0] owns the sample storage). */
    if (s->delayptrs)
        av_freep(&s->delayptrs[0]);
    av_freep(&s->delayptrs);

    return av_samples_alloc_array_and_samples(&s->delayptrs, NULL,
                                              outlink->channels,
                                              s->max_samples,
                                              outlink->format, 0);
}
  231. static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
  232. {
  233. AVFilterContext *ctx = inlink->dst;
  234. AudioEchoContext *s = ctx->priv;
  235. AVFrame *out_frame;
  236. if (av_frame_is_writable(frame)) {
  237. out_frame = frame;
  238. } else {
  239. out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
  240. if (!out_frame) {
  241. av_frame_free(&frame);
  242. return AVERROR(ENOMEM);
  243. }
  244. av_frame_copy_props(out_frame, frame);
  245. }
  246. s->echo_samples(s, s->delayptrs, frame->extended_data, out_frame->extended_data,
  247. frame->nb_samples, inlink->channels);
  248. s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
  249. if (frame != out_frame)
  250. av_frame_free(&frame);
  251. return ff_filter_frame(ctx->outputs[0], out_frame);
  252. }
/*
 * Pull from upstream; once the input hits EOF, keep pushing silence
 * through the echo so the remaining delay tail (fade_out samples, in
 * chunks of at most 2048) is flushed downstream.
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioEchoContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && !ctx->is_disabled && s->fade_out) {
        int nb_samples = FFMIN(s->fade_out, 2048);
        AVFrame *frame;

        frame = ff_get_audio_buffer(outlink, nb_samples);
        if (!frame)
            return AVERROR(ENOMEM);
        s->fade_out -= nb_samples;

        /* Silent input: only the decaying echoes remain in the output. */
        av_samples_set_silence(frame->extended_data, 0,
                               frame->nb_samples,
                               outlink->channels,
                               frame->format);

        s->echo_samples(s, s->delayptrs, frame->extended_data, frame->extended_data,
                        frame->nb_samples, outlink->channels);

        /* Continue the timeline from the last input frame (next_pts may be
         * AV_NOPTS_VALUE if no frame was ever filtered). */
        frame->pts = s->next_pts;
        if (s->next_pts != AV_NOPTS_VALUE)
            s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);

        return ff_filter_frame(outlink, frame);
    }

    return ret;
}
/* Single audio input pad. */
static const AVFilterPad aecho_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single audio output pad; drives the EOF tail flush via request_frame. */
static const AVFilterPad aecho_outputs[] = {
    {
        .name          = "default",
        .request_frame = request_frame,
        .config_props  = config_output,
        .type          = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter registration entry for "aecho". */
AVFilter ff_af_aecho = {
    .name          = "aecho",
    .description   = NULL_IF_CONFIG_SMALL("Add echoing to the audio."),
    .query_formats = query_formats,
    .priv_size     = sizeof(AudioEchoContext),
    .priv_class    = &aecho_class,
    .init          = init,
    .uninit        = uninit,
    .inputs        = aecho_inputs,
    .outputs       = aecho_outputs,
};