/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Audio join filter
 *
 * Join multiple audio inputs as different channels in
 * a single output
 */
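
/*
 * Example usage (illustrative; the file names are placeholders): join two
 * mono inputs into a single stereo stream with an explicit channel map:
 *
 *   ffmpeg -i left.wav -i right.wav -filter_complex
 *     "join=inputs=2:channel_layout=stereo:map=0.0-FL|1.0-FR" out.wav
 */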

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"

#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"

typedef struct ChannelMap {
    int input;                ///< input stream index
    int in_channel_idx;       ///< index of in_channel in the input stream data
    uint64_t in_channel;      ///< layout describing the input channel
    uint64_t out_channel;     ///< layout describing the output channel
} ChannelMap;

typedef struct JoinContext {
    const AVClass *class;

    int inputs;
    char *map;
    char *channel_layout_str;
    uint64_t channel_layout;

    int nb_channels;
    ChannelMap *channels;

    /**
     * Temporary storage for input frames, until we get one on each input.
     */
    AVFrame **input_frames;

    /**
     * Temporary storage for buffer references, for assembling the output frame.
     */
    AVBufferRef **buffers;
} JoinContext;

#define OFFSET(x) offsetof(JoinContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption join_options[] = {
    { "inputs", "Number of input streams.", OFFSET(inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, A|F },
    { "channel_layout", "Channel layout of the "
                        "output stream.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, A|F },
    { "map", "A '|'-separated list of channel maps in the format "
             "'input_stream.input_channel-output_channel'.",
        OFFSET(map), AV_OPT_TYPE_STRING, .flags = A|F },
    { NULL }
};
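
/*
 * Map syntax example (illustrative): with channel_layout=stereo,
 * "map=0.0-FL|1.0-FR" takes channel 0 of input stream 0 as the front-left
 * output channel and channel 0 of input stream 1 as the front-right one.
 * The input channel may also be given by name, e.g. "0.FL-FL".
 */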

AVFILTER_DEFINE_CLASS(join);

/* Store the incoming frame until a frame has arrived on every input. */
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *ctx = link->dst;
    JoinContext *s       = ctx->priv;
    int i;

    for (i = 0; i < ctx->nb_inputs; i++)
        if (link == ctx->inputs[i])
            break;
    av_assert0(i < ctx->nb_inputs);
    av_assert0(!s->input_frames[i]);
    s->input_frames[i] = frame;

    return 0;
}

static int parse_maps(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    char separator = '|';
    char *cur      = s->map;

#if FF_API_OLD_FILTER_OPTS
    if (cur && strchr(cur, ',')) {
        av_log(ctx, AV_LOG_WARNING, "This syntax is deprecated, use '|' to "
               "separate the mappings.\n");
        separator = ',';
    }
#endif

    while (cur && *cur) {
        char *sep, *next, *p;
        uint64_t in_channel = 0, out_channel = 0;
        int input_idx, out_ch_idx, in_ch_idx;

        next = strchr(cur, separator);
        if (next)
            *next++ = 0;

        /* split the map into input and output parts */
        if (!(sep = strchr(cur, '-'))) {
            av_log(ctx, AV_LOG_ERROR, "Missing separator '-' in channel "
                   "map '%s'\n", cur);
            return AVERROR(EINVAL);
        }
        *sep++ = 0;

#define PARSE_CHANNEL(str, var, inout)                                         \
        if (!(var = av_get_channel_layout(str))) {                             \
            av_log(ctx, AV_LOG_ERROR, "Invalid " inout " channel: %s.\n", str); \
            return AVERROR(EINVAL);                                             \
        }                                                                       \
        if (av_get_channel_layout_nb_channels(var) != 1) {                      \
            av_log(ctx, AV_LOG_ERROR, "Channel map describes more than one "    \
                   inout " channel.\n");                                        \
            return AVERROR(EINVAL);                                             \
        }

        /* parse output channel */
        PARSE_CHANNEL(sep, out_channel, "output");
        if (!(out_channel & s->channel_layout)) {
            av_log(ctx, AV_LOG_ERROR, "Output channel '%s' is not present in "
                   "requested channel layout.\n", sep);
            return AVERROR(EINVAL);
        }

        out_ch_idx = av_get_channel_layout_channel_index(s->channel_layout,
                                                         out_channel);
        if (s->channels[out_ch_idx].input >= 0) {
            av_log(ctx, AV_LOG_ERROR, "Multiple maps for output channel "
                   "'%s'.\n", sep);
            return AVERROR(EINVAL);
        }

        /* parse input channel */
        input_idx = strtol(cur, &cur, 0);
        if (input_idx < 0 || input_idx >= s->inputs) {
            av_log(ctx, AV_LOG_ERROR, "Invalid input stream index: %d.\n",
                   input_idx);
            return AVERROR(EINVAL);
        }

        if (*cur)
            cur++;

        in_ch_idx = strtol(cur, &p, 0);
        if (p == cur) {
            /* channel specifier is not a number,
             * try to parse as channel name */
            PARSE_CHANNEL(cur, in_channel, "input");
        }

        s->channels[out_ch_idx].input = input_idx;
        if (in_channel)
            s->channels[out_ch_idx].in_channel = in_channel;
        else
            s->channels[out_ch_idx].in_channel_idx = in_ch_idx;

        cur = next;
    }
    return 0;
}

static av_cold int join_init(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    int ret, i;

    if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) {
        av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n",
               s->channel_layout_str);
        return AVERROR(EINVAL);
    }

    s->nb_channels  = av_get_channel_layout_nb_channels(s->channel_layout);
    s->channels     = av_mallocz_array(s->nb_channels, sizeof(*s->channels));
    s->buffers      = av_mallocz_array(s->nb_channels, sizeof(*s->buffers));
    s->input_frames = av_mallocz_array(s->inputs, sizeof(*s->input_frames));
    if (!s->channels || !s->buffers || !s->input_frames)
        return AVERROR(ENOMEM);

    for (i = 0; i < s->nb_channels; i++) {
        s->channels[i].out_channel = av_channel_layout_extract_channel(s->channel_layout, i);
        s->channels[i].input       = -1;
    }

    if ((ret = parse_maps(ctx)) < 0)
        return ret;

    for (i = 0; i < s->inputs; i++) {
        char name[32];
        AVFilterPad pad = { 0 };

        snprintf(name, sizeof(name), "input%d", i);
        pad.type = AVMEDIA_TYPE_AUDIO;
        pad.name = av_strdup(name);
        if (!pad.name)
            return AVERROR(ENOMEM);

        pad.filter_frame = filter_frame;
        pad.needs_fifo   = 1;

        ff_insert_inpad(ctx, i, &pad);
    }

    return 0;
}

static av_cold void join_uninit(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    int i;

    for (i = 0; i < ctx->nb_inputs; i++) {
        av_freep(&ctx->input_pads[i].name);
        av_frame_free(&s->input_frames[i]);
    }

    av_freep(&s->channels);
    av_freep(&s->buffers);
    av_freep(&s->input_frames);
}

static int join_query_formats(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    AVFilterChannelLayouts *layouts = NULL;
    int i, ret;

    if ((ret = ff_add_channel_layout(&layouts, s->channel_layout)) < 0 ||
        (ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
        return ret;

    for (i = 0; i < ctx->nb_inputs; i++) {
        layouts = ff_all_channel_layouts();
        if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
            return ret;
    }

    if ((ret = ff_set_common_formats(ctx, ff_planar_sample_fmts())) < 0 ||
        (ret = ff_set_common_samplerates(ctx, ff_all_samplerates())) < 0)
        return ret;

    return 0;
}

/* Map an output channel to the same channel of an input stream on which that
 * channel is present and not yet used. */
static void guess_map_matching(AVFilterContext *ctx, ChannelMap *ch,
                               uint64_t *inputs)
{
    int i;

    for (i = 0; i < ctx->nb_inputs; i++) {
        AVFilterLink *link = ctx->inputs[i];

        if (ch->out_channel & link->channel_layout &&
            !(ch->out_channel & inputs[i])) {
            ch->input      = i;
            ch->in_channel = ch->out_channel;
            inputs[i]     |= ch->out_channel;
            return;
        }
    }
}

/* Map an output channel to any input channel that has not been used yet. */
static void guess_map_any(AVFilterContext *ctx, ChannelMap *ch,
                          uint64_t *inputs)
{
    int i;

    for (i = 0; i < ctx->nb_inputs; i++) {
        AVFilterLink *link = ctx->inputs[i];

        if ((inputs[i] & link->channel_layout) != link->channel_layout) {
            uint64_t unused = link->channel_layout & ~inputs[i];

            ch->input      = i;
            ch->in_channel = av_channel_layout_extract_channel(unused, 0);
            inputs[i]     |= ch->in_channel;
            return;
        }
    }
}

static int join_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    JoinContext *s       = ctx->priv;
    uint64_t *inputs;   // nth element tracks which channels are used from nth input
    int i, ret = 0;

    /* initialize inputs to user-specified mappings */
    if (!(inputs = av_mallocz_array(ctx->nb_inputs, sizeof(*inputs))))
        return AVERROR(ENOMEM);

    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];
        AVFilterLink *inlink;

        if (ch->input < 0)
            continue;

        inlink = ctx->inputs[ch->input];

        if (!ch->in_channel)
            ch->in_channel = av_channel_layout_extract_channel(inlink->channel_layout,
                                                               ch->in_channel_idx);

        if (!(ch->in_channel & inlink->channel_layout)) {
            av_log(ctx, AV_LOG_ERROR, "Requested channel %s is not present in "
                   "input stream #%d.\n", av_get_channel_name(ch->in_channel),
                   ch->input);
            ret = AVERROR(EINVAL);
            goto fail;
        }

        inputs[ch->input] |= ch->in_channel;
    }

    /* guess channel maps when not explicitly defined */
    /* first try unused matching channels */
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];

        if (ch->input < 0)
            guess_map_matching(ctx, ch, inputs);
    }

    /* if the above failed, try to find _any_ unused input channel */
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];

        if (ch->input < 0)
            guess_map_any(ctx, ch, inputs);

        if (ch->input < 0) {
            av_log(ctx, AV_LOG_ERROR, "Could not find input channel for "
                   "output channel '%s'.\n",
                   av_get_channel_name(ch->out_channel));
            ret = AVERROR(EINVAL);
            goto fail;
        }

        ch->in_channel_idx = av_get_channel_layout_channel_index(ctx->inputs[ch->input]->channel_layout,
                                                                 ch->in_channel);
    }

    /* print mappings */
    av_log(ctx, AV_LOG_VERBOSE, "mappings: ");
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];
        av_log(ctx, AV_LOG_VERBOSE, "%d.%s => %s ", ch->input,
               av_get_channel_name(ch->in_channel),
               av_get_channel_name(ch->out_channel));
    }
    av_log(ctx, AV_LOG_VERBOSE, "\n");

    for (i = 0; i < ctx->nb_inputs; i++) {
        if (!inputs[i])
            av_log(ctx, AV_LOG_WARNING, "No channels are used from input "
                   "stream %d.\n", i);
    }

fail:
    av_freep(&inputs);
    return ret;
}

static int join_request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    JoinContext *s       = ctx->priv;
    AVFrame *frame;
    int linesize   = INT_MAX;
    int nb_samples = 0;
    int nb_buffers = 0;
    int i, j, ret;

    /* get a frame on each input */
    for (i = 0; i < ctx->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];

        if (!s->input_frames[i] &&
            (ret = ff_request_frame(inlink)) < 0)
            return ret;

        /* request the same number of samples on all inputs */
        if (i == 0) {
            nb_samples = s->input_frames[0]->nb_samples;

            for (j = 1; j < ctx->nb_inputs; j++)
                ctx->inputs[j]->request_samples = nb_samples;
        }
    }

    /* setup the output frame */
    frame = av_frame_alloc();
    if (!frame)
        return AVERROR(ENOMEM);
    if (s->nb_channels > FF_ARRAY_ELEMS(frame->data)) {
        frame->extended_data = av_mallocz_array(s->nb_channels,
                                                sizeof(*frame->extended_data));
        if (!frame->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* copy the data pointers */
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];
        AVFrame *cur   = s->input_frames[ch->input];
        AVBufferRef *buf;

        frame->extended_data[i] = cur->extended_data[ch->in_channel_idx];
        linesize = FFMIN(linesize, cur->linesize[0]);

        /* add the buffer where this plane is stored to the list if it's
         * not already there */
        buf = av_frame_get_plane_buffer(cur, ch->in_channel_idx);
        if (!buf) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
        for (j = 0; j < nb_buffers; j++)
            if (s->buffers[j]->buffer == buf->buffer)
                break;
        if (j == nb_buffers)
            s->buffers[nb_buffers++] = buf;
    }

    /* create references to the buffers we copied to output */
    if (nb_buffers > FF_ARRAY_ELEMS(frame->buf)) {
        frame->nb_extended_buf = nb_buffers - FF_ARRAY_ELEMS(frame->buf);
        frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
                                               sizeof(*frame->extended_buf));
        if (!frame->extended_buf) {
            frame->nb_extended_buf = 0;
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }
    for (i = 0; i < FFMIN(FF_ARRAY_ELEMS(frame->buf), nb_buffers); i++) {
        frame->buf[i] = av_buffer_ref(s->buffers[i]);
        if (!frame->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }
    for (i = 0; i < frame->nb_extended_buf; i++) {
        frame->extended_buf[i] = av_buffer_ref(s->buffers[i +
                                               FF_ARRAY_ELEMS(frame->buf)]);
        if (!frame->extended_buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    frame->nb_samples     = nb_samples;
    frame->channel_layout = outlink->channel_layout;
    av_frame_set_channels(frame, outlink->channels);
    frame->sample_rate    = outlink->sample_rate;
    frame->format         = outlink->format;
    frame->pts            = s->input_frames[0]->pts;
    frame->linesize[0]    = linesize;
    if (frame->data != frame->extended_data) {
        memcpy(frame->data, frame->extended_data, sizeof(*frame->data) *
               FFMIN(FF_ARRAY_ELEMS(frame->data), s->nb_channels));
    }

    ret = ff_filter_frame(outlink, frame);

    for (i = 0; i < ctx->nb_inputs; i++)
        av_frame_free(&s->input_frames[i]);

    return ret;

fail:
    av_frame_free(&frame);
    return ret;
}

static const AVFilterPad avfilter_af_join_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = join_config_output,
        .request_frame = join_request_frame,
    },
    { NULL }
};

AVFilter ff_af_join = {
    .name           = "join",
    .description    = NULL_IF_CONFIG_SMALL("Join multiple audio streams into "
                                           "multi-channel output."),
    .priv_size      = sizeof(JoinContext),
    .priv_class     = &join_class,
    .init           = join_init,
    .uninit         = join_uninit,
    .query_formats  = join_query_formats,
    .inputs         = NULL,
    .outputs        = avfilter_af_join_outputs,
    .flags          = AVFILTER_FLAG_DYNAMIC_INPUTS,
};