/*
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file
 * Audio join filter
 *
 * Join multiple audio inputs as different channels in
 * a single output
 */
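
/*
 * Illustrative usage sketch (not part of the original source): with the
 * options defined below, two mono inputs could be joined into one stereo
 * stream, e.g.
 *
 *   avconv -i left.wav -i right.wav -filter_complex \
 *       "join=inputs=2:channel_layout=stereo:map=0.0-FL|1.0-FR" out.wav
 *
 * The command-line invocation above is an assumption for illustration; the
 * option names and map syntax follow the join_options table and parse_maps()
 * in this file.
 */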
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"

#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
typedef struct ChannelMap {
    int input;                ///< input stream index
    int in_channel_idx;       ///< index of in_channel in the input stream data
    uint64_t in_channel;      ///< layout describing the input channel
    uint64_t out_channel;     ///< layout describing the output channel
} ChannelMap;

typedef struct JoinContext {
    const AVClass *class;

    int inputs;
    char *map;
    char *channel_layout_str;
    uint64_t channel_layout;

    int nb_channels;
    ChannelMap *channels;

    /**
     * Temporary storage for input frames, until we get one on each input.
     */
    AVFrame **input_frames;

    /**
     * Temporary storage for buffer references, for assembling the output frame.
     */
    AVBufferRef **buffers;
} JoinContext;
#define OFFSET(x) offsetof(JoinContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption join_options[] = {
    { "inputs",         "Number of input streams.", OFFSET(inputs),             AV_OPT_TYPE_INT,    { .i64 = 2 }, 1, INT_MAX, A|F },
    { "channel_layout", "Channel layout of the "
                        "output stream.",           OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, { .str = "stereo" }, 0, 0, A|F },
    { "map",            "A comma-separated list of channel maps in the format "
                        "'input_stream.input_channel-output_channel'.",
                        OFFSET(map),                AV_OPT_TYPE_STRING,         .flags = A|F },
    { NULL },
};

static const AVClass join_class = {
    .class_name = "join filter",
    .item_name  = av_default_item_name,
    .option     = join_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *ctx = link->dst;
    JoinContext *s       = ctx->priv;
    int i;

    for (i = 0; i < ctx->nb_inputs; i++)
        if (link == ctx->inputs[i])
            break;
    av_assert0(i < ctx->nb_inputs);
    av_assert0(!s->input_frames[i]);
    s->input_frames[i] = frame;

    return 0;
}
static int parse_maps(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    char separator = '|';
    char *cur      = s->map;

#if FF_API_OLD_FILTER_OPTS
    if (cur && strchr(cur, ',')) {
        av_log(ctx, AV_LOG_WARNING, "This syntax is deprecated, use '|' to "
               "separate the mappings.\n");
        separator = ',';
    }
#endif
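
    /* Illustrative note (comment added for clarity, not in the original
     * source): a map string such as "0.FL-FL|1.0-FR" is split on '|' (or ','
     * with the deprecated syntax); each entry names an input stream index,
     * then either a channel name or a channel index within that stream, then
     * the output channel it should feed. */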
    while (cur && *cur) {
        char *sep, *next, *p;
        uint64_t in_channel = 0, out_channel = 0;
        int input_idx, out_ch_idx, in_ch_idx;

        next = strchr(cur, separator);
        if (next)
            *next++ = 0;

        /* split the map into input and output parts */
        if (!(sep = strchr(cur, '-'))) {
            av_log(ctx, AV_LOG_ERROR, "Missing separator '-' in channel "
                   "map '%s'\n", cur);
            return AVERROR(EINVAL);
        }
        *sep++ = 0;

#define PARSE_CHANNEL(str, var, inout)                                         \
        if (!(var = av_get_channel_layout(str))) {                             \
            av_log(ctx, AV_LOG_ERROR, "Invalid " inout " channel: %s.\n", str);\
            return AVERROR(EINVAL);                                            \
        }                                                                      \
        if (av_get_channel_layout_nb_channels(var) != 1) {                     \
            av_log(ctx, AV_LOG_ERROR, "Channel map describes more than one "   \
                   inout " channel.\n");                                       \
            return AVERROR(EINVAL);                                            \
        }

        /* parse output channel */
        PARSE_CHANNEL(sep, out_channel, "output");
        if (!(out_channel & s->channel_layout)) {
            av_log(ctx, AV_LOG_ERROR, "Output channel '%s' is not present in "
                   "requested channel layout.\n", sep);
            return AVERROR(EINVAL);
        }

        out_ch_idx = av_get_channel_layout_channel_index(s->channel_layout,
                                                         out_channel);
        if (s->channels[out_ch_idx].input >= 0) {
            av_log(ctx, AV_LOG_ERROR, "Multiple maps for output channel "
                   "'%s'.\n", sep);
            return AVERROR(EINVAL);
        }

        /* parse input channel */
        input_idx = strtol(cur, &cur, 0);
        if (input_idx < 0 || input_idx >= s->inputs) {
            av_log(ctx, AV_LOG_ERROR, "Invalid input stream index: %d.\n",
                   input_idx);
            return AVERROR(EINVAL);
        }

        if (*cur)
            cur++;

        in_ch_idx = strtol(cur, &p, 0);
        if (p == cur) {
            /* channel specifier is not a number,
             * try to parse as channel name */
            PARSE_CHANNEL(cur, in_channel, "input");
        }

        s->channels[out_ch_idx].input = input_idx;
        if (in_channel)
            s->channels[out_ch_idx].in_channel = in_channel;
        else
            s->channels[out_ch_idx].in_channel_idx = in_ch_idx;

        cur = next;
    }
    return 0;
}
static int join_init(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    int ret, i;

    if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) {
        av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n",
               s->channel_layout_str);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    s->nb_channels  = av_get_channel_layout_nb_channels(s->channel_layout);
    s->channels     = av_mallocz(sizeof(*s->channels) * s->nb_channels);
    s->buffers      = av_mallocz(sizeof(*s->buffers)  * s->nb_channels);
    s->input_frames = av_mallocz(sizeof(*s->input_frames) * s->inputs);
    if (!s->channels || !s->buffers || !s->input_frames) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    for (i = 0; i < s->nb_channels; i++) {
        s->channels[i].out_channel = av_channel_layout_extract_channel(s->channel_layout, i);
        s->channels[i].input       = -1;
    }

    if ((ret = parse_maps(ctx)) < 0)
        goto fail;

    for (i = 0; i < s->inputs; i++) {
        char name[32];
        AVFilterPad pad = { 0 };

        snprintf(name, sizeof(name), "input%d", i);
        pad.type         = AVMEDIA_TYPE_AUDIO;
        pad.name         = av_strdup(name);
        pad.filter_frame = filter_frame;

        pad.needs_fifo = 1;

        ff_insert_inpad(ctx, i, &pad);
    }

fail:
    av_opt_free(s);
    return ret;
}
static void join_uninit(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    int i;

    for (i = 0; i < ctx->nb_inputs; i++) {
        av_freep(&ctx->input_pads[i].name);
        av_frame_free(&s->input_frames[i]);
    }

    av_freep(&s->channels);
    av_freep(&s->buffers);
    av_freep(&s->input_frames);
}
static int join_query_formats(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    AVFilterChannelLayouts *layouts = NULL;
    int i;

    ff_add_channel_layout(&layouts, s->channel_layout);
    ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts);

    for (i = 0; i < ctx->nb_inputs; i++)
        ff_channel_layouts_ref(ff_all_channel_layouts(),
                               &ctx->inputs[i]->out_channel_layouts);

    ff_set_common_formats    (ctx, ff_planar_sample_fmts());
    ff_set_common_samplerates(ctx, ff_all_samplerates());

    return 0;
}
static void guess_map_matching(AVFilterContext *ctx, ChannelMap *ch,
                               uint64_t *inputs)
{
    int i;

    for (i = 0; i < ctx->nb_inputs; i++) {
        AVFilterLink *link = ctx->inputs[i];

        if (ch->out_channel & link->channel_layout &&
            !(ch->out_channel & inputs[i])) {
            ch->input      = i;
            ch->in_channel = ch->out_channel;
            inputs[i]     |= ch->out_channel;
            return;
        }
    }
}

static void guess_map_any(AVFilterContext *ctx, ChannelMap *ch,
                          uint64_t *inputs)
{
    int i;

    for (i = 0; i < ctx->nb_inputs; i++) {
        AVFilterLink *link = ctx->inputs[i];

        if ((inputs[i] & link->channel_layout) != link->channel_layout) {
            uint64_t unused = link->channel_layout & ~inputs[i];

            ch->input      = i;
            ch->in_channel = av_channel_layout_extract_channel(unused, 0);
            inputs[i]     |= ch->in_channel;
            return;
        }
    }
}
static int join_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    JoinContext *s       = ctx->priv;
    uint64_t *inputs;   // nth element tracks which channels are used from nth input
    int i, ret = 0;

    /* initialize inputs to user-specified mappings */
    if (!(inputs = av_mallocz(sizeof(*inputs) * ctx->nb_inputs)))
        return AVERROR(ENOMEM);
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];
        AVFilterLink *inlink;

        if (ch->input < 0)
            continue;

        inlink = ctx->inputs[ch->input];

        if (!ch->in_channel)
            ch->in_channel = av_channel_layout_extract_channel(inlink->channel_layout,
                                                               ch->in_channel_idx);

        if (!(ch->in_channel & inlink->channel_layout)) {
            av_log(ctx, AV_LOG_ERROR, "Requested channel %s is not present in "
                   "input stream #%d.\n", av_get_channel_name(ch->in_channel),
                   ch->input);
            ret = AVERROR(EINVAL);
            goto fail;
        }

        inputs[ch->input] |= ch->in_channel;
    }

    /* guess channel maps when not explicitly defined */
    /* first try unused matching channels */
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];

        if (ch->input < 0)
            guess_map_matching(ctx, ch, inputs);
    }

    /* if the above failed, try to find _any_ unused input channel */
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];

        if (ch->input < 0)
            guess_map_any(ctx, ch, inputs);
        if (ch->input < 0) {
            av_log(ctx, AV_LOG_ERROR, "Could not find input channel for "
                   "output channel '%s'.\n",
                   av_get_channel_name(ch->out_channel));
            ret = AVERROR(EINVAL);
            goto fail;
        }
        ch->in_channel_idx = av_get_channel_layout_channel_index(ctx->inputs[ch->input]->channel_layout,
                                                                 ch->in_channel);
    }

    /* print mappings */
    av_log(ctx, AV_LOG_VERBOSE, "mappings: ");
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];
        av_log(ctx, AV_LOG_VERBOSE, "%d.%s => %s ", ch->input,
               av_get_channel_name(ch->in_channel),
               av_get_channel_name(ch->out_channel));
    }
    av_log(ctx, AV_LOG_VERBOSE, "\n");

    for (i = 0; i < ctx->nb_inputs; i++) {
        if (!inputs[i])
            av_log(ctx, AV_LOG_WARNING, "No channels are used from input "
                   "stream %d.\n", i);
    }

fail:
    av_freep(&inputs);
    return ret;
}
static int join_request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    JoinContext *s       = ctx->priv;
    AVFrame *frame;
    int linesize   = INT_MAX;
    int nb_samples = 0;
    int nb_buffers = 0;
    int i, j, ret;

    /* get a frame on each input */
    for (i = 0; i < ctx->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];

        if (!s->input_frames[i] &&
            (ret = ff_request_frame(inlink)) < 0)
            return ret;

        /* request the same number of samples on all inputs */
        if (i == 0) {
            nb_samples = s->input_frames[0]->nb_samples;

            for (j = 1; !i && j < ctx->nb_inputs; j++)
                ctx->inputs[j]->request_samples = nb_samples;
        }
    }
    /* setup the output frame */
    frame = av_frame_alloc();
    if (!frame)
        return AVERROR(ENOMEM);
    if (s->nb_channels > FF_ARRAY_ELEMS(frame->data)) {
        frame->extended_data = av_mallocz(s->nb_channels *
                                          sizeof(*frame->extended_data));
        if (!frame->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* copy the data pointers */
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];
        AVFrame *cur   = s->input_frames[ch->input];
        AVBufferRef *buf;

        frame->extended_data[i] = cur->extended_data[ch->in_channel_idx];
        linesize = FFMIN(linesize, cur->linesize[0]);
        /* add the buffer where this plane is stored to the list if it's
         * not already there */
        buf = av_frame_get_plane_buffer(cur, ch->in_channel_idx);
        if (!buf) {
            ret = AVERROR(EINVAL);
            goto fail;
        }

        for (j = 0; j < nb_buffers; j++)
            if (s->buffers[j]->buffer == buf->buffer)
                break;
        if (j == nb_buffers)
            s->buffers[nb_buffers++] = buf;
    }
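
    /* Note (explanatory comment, not in the original source): the output
     * planes assigned above only point into the input frames' data, so the
     * output frame must take its own references on every distinct underlying
     * AVBufferRef collected in s->buffers, keeping that data alive after the
     * input frames are freed below. */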
    /* create references to the buffers we copied to output */
    if (nb_buffers > FF_ARRAY_ELEMS(frame->buf)) {
        frame->nb_extended_buf = nb_buffers - FF_ARRAY_ELEMS(frame->buf);
        frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) *
                                         frame->nb_extended_buf);
        if (!frame->extended_buf) {
            frame->nb_extended_buf = 0;
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }
    for (i = 0; i < FFMIN(FF_ARRAY_ELEMS(frame->buf), nb_buffers); i++) {
        frame->buf[i] = av_buffer_ref(s->buffers[i]);
        if (!frame->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }
    for (i = 0; i < frame->nb_extended_buf; i++) {
        frame->extended_buf[i] = av_buffer_ref(s->buffers[i +
                                               FF_ARRAY_ELEMS(frame->buf)]);
        if (!frame->extended_buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    frame->nb_samples     = nb_samples;
    frame->channel_layout = outlink->channel_layout;
    av_frame_set_channels(frame, outlink->channels);
    frame->format         = outlink->format;
    frame->sample_rate    = outlink->sample_rate;
    frame->pts            = s->input_frames[0]->pts;
    frame->linesize[0]    = linesize;

    if (frame->data != frame->extended_data) {
        memcpy(frame->data, frame->extended_data, sizeof(*frame->data) *
               FFMIN(FF_ARRAY_ELEMS(frame->data), s->nb_channels));
    }

    ret = ff_filter_frame(outlink, frame);

    for (i = 0; i < ctx->nb_inputs; i++)
        av_frame_free(&s->input_frames[i]);

    return ret;

fail:
    av_frame_free(&frame);
    return ret;
}
static const AVFilterPad avfilter_af_join_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = join_config_output,
        .request_frame = join_request_frame,
    },
    { NULL }
};

AVFilter avfilter_af_join = {
    .name           = "join",
    .description    = NULL_IF_CONFIG_SMALL("Join multiple audio streams into "
                                           "multi-channel output"),
    .priv_size      = sizeof(JoinContext),
    .priv_class     = &join_class,

    .init           = join_init,
    .uninit         = join_uninit,
    .query_formats  = join_query_formats,

    .inputs         = NULL,
    .outputs        = avfilter_af_join_outputs,

    .flags          = AVFILTER_FLAG_DYNAMIC_INPUTS,
};