/*
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "avfilter.h"
#include "audio.h"
#include "filters.h"
#include "internal.h"
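
/* Per-channel delay state: "delay" is the delay in samples, "samples" is a
 * buffer of that many samples, "delay_index" counts how far the buffer has
 * been filled during the initial fill phase, and "index" is the ring-buffer
 * position once the buffer is full. */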
typedef struct ChanDelay {
    int delay;
    unsigned delay_index;
    unsigned index;
    uint8_t *samples;
} ChanDelay;
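
/* Filter context: "padding" is the smallest delay shared by all channels and
 * is emitted up front as plain silence, "max_delay" is the largest remaining
 * per-channel delay and determines how many trailing samples are flushed at
 * EOF, and "delay_channel" points to the sample-format specific routine
 * selected in config_input(). */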
typedef struct AudioDelayContext {
    const AVClass *class;
    int all;
    char *delays;
    ChanDelay *chandelay;
    int nb_delays;
    int block_align;
    int64_t padding;
    int64_t max_delay;
    int64_t next_pts;
    int eof;

    void (*delay_channel)(ChanDelay *d, int nb_samples,
                          const uint8_t *src, uint8_t *dst);
} AudioDelayContext;

#define OFFSET(x) offsetof(AudioDelayContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption adelay_options[] = {
    { "delays", "set list of delays for each channel", OFFSET(delays), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
    { "all", "use last available delay for remaining channels", OFFSET(all), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, A },
    { NULL }
};

AVFILTER_DEFINE_CLASS(adelay);
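
/* Format negotiation: any channel count and sample rate is accepted, but
 * only planar sample formats, so each channel can be delayed independently
 * through its own buffer. */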
static int query_formats(AVFilterContext *ctx)
{
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *formats;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S32P,
        AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
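
/* Generate one delay routine per sample format.  While the per-channel
 * buffer is still filling (delay_index < delay), input samples are stored
 * and silence ("fill": 0x80 for unsigned 8-bit, zero otherwise) is written
 * to the output.  Once full, the buffer acts as a ring buffer: each output
 * sample is the oldest stored sample, which is then replaced by the current
 * input sample. */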
#define DELAY(name, type, fill)                                            \
static void delay_channel_## name ##p(ChanDelay *d, int nb_samples,        \
                                      const uint8_t *ssrc, uint8_t *ddst)  \
{                                                                           \
    const type *src = (type *)ssrc;                                         \
    type *dst = (type *)ddst;                                               \
    type *samples = (type *)d->samples;                                     \
                                                                            \
    while (nb_samples) {                                                    \
        if (d->delay_index < d->delay) {                                    \
            const int len = FFMIN(nb_samples, d->delay - d->delay_index);   \
                                                                            \
            memcpy(&samples[d->delay_index], src, len * sizeof(type));      \
            memset(dst, fill, len * sizeof(type));                          \
            d->delay_index += len;                                          \
            src += len;                                                     \
            dst += len;                                                     \
            nb_samples -= len;                                              \
        } else {                                                            \
            *dst = samples[d->index];                                       \
            samples[d->index] = *src;                                       \
            nb_samples--;                                                   \
            d->index++;                                                     \
            src++, dst++;                                                   \
            d->index = d->index >= d->delay ? 0 : d->index;                 \
        }                                                                   \
    }                                                                       \
}

DELAY(u8,  uint8_t,  0x80)
DELAY(s16, int16_t,  0)
DELAY(s32, int32_t,  0)
DELAY(flt, float,    0)
DELAY(dbl, double,   0)
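
/* Parse the "delays" option ('|'-separated, one entry per channel): a plain
 * number is taken as milliseconds, a value with an 's' suffix as seconds,
 * and an integer with an 'S' suffix as a sample count.  The smallest delay
 * common to all channels is factored out into "padding" (emitted later as
 * leading silence), and only the per-channel remainder is buffered. */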
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioDelayContext *s = ctx->priv;
    char *p, *arg, *saveptr = NULL;
    int i;

    s->chandelay = av_calloc(inlink->channels, sizeof(*s->chandelay));
    if (!s->chandelay)
        return AVERROR(ENOMEM);
    s->nb_delays = inlink->channels;
    s->block_align = av_get_bytes_per_sample(inlink->format);

    p = s->delays;
    for (i = 0; i < s->nb_delays; i++) {
        ChanDelay *d = &s->chandelay[i];
        float delay, div;
        char type = 0;
        int ret;

        if (!(arg = av_strtok(p, "|", &saveptr)))
            break;

        p = NULL;

        ret = av_sscanf(arg, "%d%c", &d->delay, &type);
        if (ret != 2 || type != 'S') {
            div = type == 's' ? 1.0 : 1000.0;
            if (av_sscanf(arg, "%f", &delay) != 1) {
                av_log(ctx, AV_LOG_ERROR, "Invalid syntax for delay.\n");
                return AVERROR(EINVAL);
            }
            d->delay = delay * inlink->sample_rate / div;
        }

        if (d->delay < 0) {
            av_log(ctx, AV_LOG_ERROR, "Delay must be a non-negative number.\n");
            return AVERROR(EINVAL);
        }
    }

    if (s->all && i) {
        for (int j = i; j < s->nb_delays; j++)
            s->chandelay[j].delay = s->chandelay[i-1].delay;
    }

    s->padding = s->chandelay[0].delay;
    for (i = 1; i < s->nb_delays; i++) {
        ChanDelay *d = &s->chandelay[i];

        s->padding = FFMIN(s->padding, d->delay);
    }

    if (s->padding) {
        for (i = 0; i < s->nb_delays; i++) {
            ChanDelay *d = &s->chandelay[i];

            d->delay -= s->padding;
        }
    }

    for (i = 0; i < s->nb_delays; i++) {
        ChanDelay *d = &s->chandelay[i];

        if (!d->delay)
            continue;

        d->samples = av_malloc_array(d->delay, s->block_align);
        if (!d->samples)
            return AVERROR(ENOMEM);

        s->max_delay = FFMAX(s->max_delay, d->delay);
    }

    switch (inlink->format) {
    case AV_SAMPLE_FMT_U8P : s->delay_channel = delay_channel_u8p ; break;
    case AV_SAMPLE_FMT_S16P: s->delay_channel = delay_channel_s16p; break;
    case AV_SAMPLE_FMT_S32P: s->delay_channel = delay_channel_s32p; break;
    case AV_SAMPLE_FMT_FLTP: s->delay_channel = delay_channel_fltp; break;
    case AV_SAMPLE_FMT_DBLP: s->delay_channel = delay_channel_dblp; break;
    }

    return 0;
}
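
/* Process one input frame: with no delays configured (or the filter
 * disabled) the frame is passed through untouched; otherwise each channel
 * is either copied directly or routed through its delay buffer, and the
 * output timestamp is taken from the running next_pts counter. */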
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AudioDelayContext *s = ctx->priv;
    AVFrame *out_frame;
    int i;

    if (ctx->is_disabled || !s->delays)
        return ff_filter_frame(ctx->outputs[0], frame);

    out_frame = ff_get_audio_buffer(ctx->outputs[0], frame->nb_samples);
    if (!out_frame) {
        av_frame_free(&frame);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out_frame, frame);

    for (i = 0; i < s->nb_delays; i++) {
        ChanDelay *d = &s->chandelay[i];
        const uint8_t *src = frame->extended_data[i];
        uint8_t *dst = out_frame->extended_data[i];

        if (!d->delay)
            memcpy(dst, src, frame->nb_samples * s->block_align);
        else
            s->delay_channel(d, frame->nb_samples, src, dst);
    }

    out_frame->pts = s->next_pts;
    s->next_pts += av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
    av_frame_free(&frame);
    return ff_filter_frame(ctx->outputs[0], out_frame);
}
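
/* Drive the filter: first emit the common "padding" delay as silence frames
 * (at most 2048 samples at a time), then consume and filter input frames.
 * Once EOF is seen on the input, feed up to "max_delay" samples of silence
 * through filter_frame() so the buffered tails of the delayed channels are
 * flushed before EOF is forwarded downstream. */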
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AudioDelayContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (s->padding) {
        int nb_samples = FFMIN(s->padding, 2048);

        frame = ff_get_audio_buffer(outlink, nb_samples);
        if (!frame)
            return AVERROR(ENOMEM);
        s->padding -= nb_samples;

        av_samples_set_silence(frame->extended_data, 0,
                               frame->nb_samples,
                               outlink->channels,
                               frame->format);

        frame->pts = s->next_pts;
        if (s->next_pts != AV_NOPTS_VALUE)
            s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);

        return ff_filter_frame(outlink, frame);
    }

    ret = ff_inlink_consume_frame(inlink, &frame);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return filter_frame(inlink, frame);

    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF)
            s->eof = 1;
    }

    if (s->eof && s->max_delay) {
        int nb_samples = FFMIN(s->max_delay, 2048);

        frame = ff_get_audio_buffer(outlink, nb_samples);
        if (!frame)
            return AVERROR(ENOMEM);
        s->max_delay -= nb_samples;

        av_samples_set_silence(frame->extended_data, 0,
                               frame->nb_samples,
                               outlink->channels,
                               frame->format);

        frame->pts = s->next_pts;
        return filter_frame(inlink, frame);
    }

    if (s->eof && s->max_delay == 0) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
        return 0;
    }

    if (!s->eof)
        FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    AudioDelayContext *s = ctx->priv;

    if (s->chandelay) {
        for (int i = 0; i < s->nb_delays; i++)
            av_freep(&s->chandelay[i].samples);
    }
    av_freep(&s->chandelay);
}

static const AVFilterPad adelay_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad adelay_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_adelay = {
    .name          = "adelay",
    .description   = NULL_IF_CONFIG_SMALL("Delay one or more audio channels."),
    .query_formats = query_formats,
    .priv_size     = sizeof(AudioDelayContext),
    .priv_class    = &adelay_class,
    .activate      = activate,
    .uninit        = uninit,
    .inputs        = adelay_inputs,
    .outputs       = adelay_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
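
/* Usage sketch: as the option parsing above shows, the filter takes one
 * delay per channel, separated by '|' (milliseconds by default, 's' for
 * seconds, 'S' for a sample count), e.g.
 *
 *     ffmpeg -i in.wav -af "adelay=1500|0|500" out.wav
 *
 * delays the first channel by 1.5 s and the third by 0.5 s, while
 * "adelay=delays=1S:all=1" would delay every channel by one sample.  The
 * commands are illustrative; the adelay entry in the FFmpeg filters
 * documentation gives the full syntax. */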