/*
 * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Audio merging filter
 */

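/*
 * Typical use from the ffmpeg command line (an illustrative example, not
 * part of this file; input names are placeholders):
 *
 *     ffmpeg -i a.wav -i b.wav \
 *            -filter_complex "[0:a][1:a]amerge=inputs=2[aout]" \
 *            -map "[aout]" out.wav
 */
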
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "filters.h"
#include "audio.h"
#include "internal.h"

#define SWR_CH_MAX 64

typedef struct AMergeContext {
    const AVClass *class;
    int nb_inputs;
    int route[SWR_CH_MAX]; /**< channels routing, see copy_samples */
    int bps;
    struct amerge_input {
        int nb_ch;         /**< number of channels for the input */
    } *in;
} AMergeContext;

#define OFFSET(x) offsetof(AMergeContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption amerge_options[] = {
    { "inputs", "specify the number of inputs", OFFSET(nb_inputs),
      AV_OPT_TYPE_INT, { .i64 = 2 }, 1, SWR_CH_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(amerge);

static av_cold void uninit(AVFilterContext *ctx)
{
    AMergeContext *s = ctx->priv;

    av_freep(&s->in);
    for (unsigned i = 0; i < ctx->nb_inputs; i++)
        av_freep(&ctx->input_pads[i].name);
}

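/*
 * Negotiate formats: only packed (interleaved) sample formats are accepted,
 * each input is pinned to a single channel layout, and the output layout is
 * the union of the input layouts (or the default layout for the total
 * channel count if the inputs overlap).  The route[] table built here maps
 * packed input channel i to output channel route[i]; it is consumed by
 * copy_samples().
 *
 * Worked example (derived by hand from the code below): merging a mono (FC)
 * input with a stereo (FL+FR) input gives a 3.0 output (FL FR FC) and
 * route = { 2, 0, 1 }, so the mono sample lands in output slot 2 and the
 * stereo pair in slots 0 and 1 of each packed output frame.
 */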
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat packed_sample_fmts[] = {
        AV_SAMPLE_FMT_U8,
        AV_SAMPLE_FMT_S16,
        AV_SAMPLE_FMT_S32,
        AV_SAMPLE_FMT_FLT,
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    AMergeContext *s = ctx->priv;
    int64_t inlayout[SWR_CH_MAX], outlayout = 0;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    int i, ret, overlap = 0, nb_ch = 0;

    for (i = 0; i < s->nb_inputs; i++) {
        if (!ctx->inputs[i]->incfg.channel_layouts ||
            !ctx->inputs[i]->incfg.channel_layouts->nb_channel_layouts) {
            av_log(ctx, AV_LOG_WARNING,
                   "No channel layout for input %d\n", i + 1);
            return AVERROR(EAGAIN);
        }
        inlayout[i] = ctx->inputs[i]->incfg.channel_layouts->channel_layouts[0];
        if (ctx->inputs[i]->incfg.channel_layouts->nb_channel_layouts > 1) {
            char buf[256];
            av_get_channel_layout_string(buf, sizeof(buf), 0, inlayout[i]);
            av_log(ctx, AV_LOG_INFO, "Using \"%s\" for input %d\n", buf, i + 1);
        }
        s->in[i].nb_ch = FF_LAYOUT2COUNT(inlayout[i]);
        if (s->in[i].nb_ch) {
            overlap++;
        } else {
            s->in[i].nb_ch = av_get_channel_layout_nb_channels(inlayout[i]);
            if (outlayout & inlayout[i])
                overlap++;
            outlayout |= inlayout[i];
        }
        nb_ch += s->in[i].nb_ch;
    }
    if (nb_ch > SWR_CH_MAX) {
        av_log(ctx, AV_LOG_ERROR, "Too many channels (max %d)\n", SWR_CH_MAX);
        return AVERROR(EINVAL);
    }
    if (overlap) {
        av_log(ctx, AV_LOG_WARNING,
               "Input channel layouts overlap: "
               "output layout will be determined by the number of distinct input channels\n");
        for (i = 0; i < nb_ch; i++)
            s->route[i] = i;
        outlayout = av_get_default_channel_layout(nb_ch);
        if (!outlayout && nb_ch)
            outlayout = 0xFFFFFFFFFFFFFFFFULL >> (64 - nb_ch);
    } else {
        int *route[SWR_CH_MAX];
        int c, out_ch_number = 0;

        route[0] = s->route;
        for (i = 1; i < s->nb_inputs; i++)
            route[i] = route[i - 1] + s->in[i - 1].nb_ch;
        for (c = 0; c < 64; c++)
            for (i = 0; i < s->nb_inputs; i++)
                if ((inlayout[i] >> c) & 1)
                    *(route[i]++) = out_ch_number++;
    }
    formats = ff_make_format_list(packed_sample_fmts);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;
    for (i = 0; i < s->nb_inputs; i++) {
        layouts = NULL;
        if ((ret = ff_add_channel_layout(&layouts, inlayout[i])) < 0)
            return ret;
        if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->outcfg.channel_layouts)) < 0)
            return ret;
    }
    layouts = NULL;
    if ((ret = ff_add_channel_layout(&layouts, outlayout)) < 0)
        return ret;
    if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->incfg.channel_layouts)) < 0)
        return ret;

    return ff_set_common_samplerates(ctx, ff_all_samplerates());
}

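/*
 * Configure the output link: all inputs must share the first input's sample
 * rate, which (together with its time base) is propagated to the output;
 * the resulting in -> out channel mapping is logged at verbose level.
 */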
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AMergeContext *s = ctx->priv;
    AVBPrint bp;
    int i;

    for (i = 1; i < s->nb_inputs; i++) {
        if (ctx->inputs[i]->sample_rate != ctx->inputs[0]->sample_rate) {
            av_log(ctx, AV_LOG_ERROR,
                   "Inputs must have the same sample rate "
                   "%d for in%d vs %d\n",
                   ctx->inputs[i]->sample_rate, i, ctx->inputs[0]->sample_rate);
            return AVERROR(EINVAL);
        }
    }
    s->bps = av_get_bytes_per_sample(ctx->outputs[0]->format);
    outlink->sample_rate = ctx->inputs[0]->sample_rate;
    outlink->time_base   = ctx->inputs[0]->time_base;

    av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
    for (i = 0; i < s->nb_inputs; i++) {
        av_bprintf(&bp, "%sin%d:", i ? " + " : "", i);
        av_bprint_channel_layout(&bp, -1, ctx->inputs[i]->channel_layout);
    }
    av_bprintf(&bp, " -> out:");
    av_bprint_channel_layout(&bp, -1, ctx->outputs[0]->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE, "%s\n", bp.str);

    return 0;
}

/**
 * Copy samples from several input streams to one output stream.
 * @param nb_inputs number of inputs
 * @param in        inputs; used only for the nb_ch field;
 * @param route     routing values;
 *                  input channel i goes to output channel route[i];
 *                  i <  in[0].nb_ch are the channels from the first input;
 *                  i >= in[0].nb_ch are the channels from the second input
 * @param ins       pointer to the samples of each input, in packed format;
 *                  will be left at the end of the copied samples
 * @param outs      pointer to the samples of the output, in packed format;
 *                  must point to a buffer big enough;
 *                  will be left at the end of the copied samples
 * @param ns        number of samples to copy
 * @param bps       bytes per sample
 */
static inline void copy_samples(int nb_inputs, struct amerge_input in[],
                                int *route, uint8_t *ins[],
                                uint8_t **outs, int ns, int bps)
{
    int *route_cur;
    int i, c, nb_ch = 0;

    for (i = 0; i < nb_inputs; i++)
        nb_ch += in[i].nb_ch;
    while (ns--) {
        route_cur = route;
        for (i = 0; i < nb_inputs; i++) {
            for (c = 0; c < in[i].nb_ch; c++) {
                memcpy((*outs) + bps * *(route_cur++), ins[i], bps);
                ins[i] += bps;
            }
        }
        *outs += nb_ch * bps;
    }
}

static void free_frames(int nb_inputs, AVFrame **input_frames)
{
    int i;
    for (i = 0; i < nb_inputs; i++)
        av_frame_free(&input_frames[i]);
}

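/*
 * Consume exactly nb_samples from every input, interleave them into a single
 * packed output frame according to route[], and send the frame downstream.
 * The pts of the first input's frame is reused for the output frame.
 */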
static int try_push_frame(AVFilterContext *ctx, int nb_samples)
{
    AMergeContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int i, ret;
    AVFrame *outbuf, *inbuf[SWR_CH_MAX] = { NULL };
    uint8_t *outs, *ins[SWR_CH_MAX];

    for (i = 0; i < ctx->nb_inputs; i++) {
        ret = ff_inlink_consume_samples(ctx->inputs[i], nb_samples, nb_samples, &inbuf[i]);
        if (ret < 0) {
            free_frames(i, inbuf);
            return ret;
        }
        ins[i] = inbuf[i]->data[0];
    }

    outbuf = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
    if (!outbuf) {
        free_frames(s->nb_inputs, inbuf);
        return AVERROR(ENOMEM);
    }

    outs = outbuf->data[0];
    outbuf->pts = inbuf[0]->pts;

    outbuf->nb_samples     = nb_samples;
    outbuf->channel_layout = outlink->channel_layout;
    outbuf->channels       = outlink->channels;

    while (nb_samples) {
        /* Unroll the most common sample formats: speed +~350% for the loop,
           +~13% overall (including two common decoders) */
        switch (s->bps) {
        case 1:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, nb_samples, 1);
            break;
        case 2:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, nb_samples, 2);
            break;
        case 4:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, nb_samples, 4);
            break;
        default:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, nb_samples, s->bps);
            break;
        }

        nb_samples = 0;
    }

    free_frames(s->nb_inputs, inbuf);
    return ff_filter_frame(ctx->outputs[0], outbuf);
}

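/*
 * Activation callback: push one frame sized to the smallest number of queued
 * samples available on all inputs, then, for any input that has run dry,
 * either forward its EOF/status to the output or request more data from it
 * when the output wants a frame.
 */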
static int activate(AVFilterContext *ctx)
{
    int i, status;
    int ret, nb_samples;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

    nb_samples = ff_inlink_queued_samples(ctx->inputs[0]);
    for (i = 1; i < ctx->nb_inputs && nb_samples > 0; i++) {
        nb_samples = FFMIN(ff_inlink_queued_samples(ctx->inputs[i]), nb_samples);
    }

    if (nb_samples) {
        ret = try_push_frame(ctx, nb_samples);
        if (ret < 0)
            return ret;
    }

    for (i = 0; i < ctx->nb_inputs; i++) {
        if (ff_inlink_queued_samples(ctx->inputs[i]))
            continue;

        if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
            ff_outlink_set_status(ctx->outputs[0], status, pts);
            return 0;
        } else if (ff_outlink_frame_wanted(ctx->outputs[0])) {
            ff_inlink_request_frame(ctx->inputs[i]);
            return 0;
        }
    }

    return 0;
}

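/*
 * Create the requested number of audio input pads, named "in0", "in1", ...;
 * the pad names allocated here are freed again in uninit().
 */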
static av_cold int init(AVFilterContext *ctx)
{
    AMergeContext *s = ctx->priv;
    int i, ret;

    s->in = av_calloc(s->nb_inputs, sizeof(*s->in));
    if (!s->in)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->nb_inputs; i++) {
        char *name = av_asprintf("in%d", i);
        AVFilterPad pad = {
            .name = name,
            .type = AVMEDIA_TYPE_AUDIO,
        };
        if (!name)
            return AVERROR(ENOMEM);
        if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }
    return 0;
}

static const AVFilterPad amerge_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
    },
    { NULL }
};

AVFilter ff_af_amerge = {
    .name          = "amerge",
    .description   = NULL_IF_CONFIG_SMALL("Merge two or more audio streams into "
                                          "a single multi-channel stream."),
    .priv_size     = sizeof(AMergeContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = NULL,
    .outputs       = amerge_outputs,
    .priv_class    = &amerge_class,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
};