/*
 * Copyright (c) 2012 Nicolas George
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * concat audio-video filter
 */

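/*
 * Illustrative usage (file names and stream counts are only examples;
 * two inputs, each with one video and one audio stream):
 *
 *   ffmpeg -i part1.mkv -i part2.mkv -filter_complex \
 *     "[0:v][0:a][1:v][1:a] concat=n=2:v=1:a=1 [v][a]" \
 *     -map "[v]" -map "[a]" output.mkv
 */
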
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"

#include "avfilter.h"
#define FF_BUFQUEUE_SIZE 256
#include "bufferqueue.h"
#include "internal.h"
#include "video.h"
#include "audio.h"

#define TYPE_ALL 2

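/*
 * Input pads are grouped by segment: for n segments with v video and a audio
 * streams each, input pad in_no belongs to segment in_no / (v + a) and feeds
 * output pad in_no % (v + a).
 */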
typedef struct {
    const AVClass *class;
    unsigned nb_streams[TYPE_ALL]; /**< number of out streams of each type */
    unsigned nb_segments;
    unsigned cur_idx; /**< index of the first input of current segment */
    int64_t delta_ts; /**< timestamp to add to produce output timestamps */
    unsigned nb_in_active; /**< number of active inputs in current segment */
    unsigned unsafe;
    struct concat_in {
        int64_t pts;
        int64_t nb_frames;
        unsigned eof;
        struct FFBufQueue queue;
    } *in;
} ConcatContext;

#define OFFSET(x) offsetof(ConcatContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
#define V AV_OPT_FLAG_VIDEO_PARAM

static const AVOption concat_options[] = {
    { "n", "specify the number of segments", OFFSET(nb_segments),
      AV_OPT_TYPE_INT, { .i64 = 2 }, 2, INT_MAX, V|A|F},
    { "v", "specify the number of video streams",
      OFFSET(nb_streams[AVMEDIA_TYPE_VIDEO]),
      AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, V|F },
    { "a", "specify the number of audio streams",
      OFFSET(nb_streams[AVMEDIA_TYPE_AUDIO]),
      AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A|F},
    { "unsafe", "enable unsafe mode",
      OFFSET(unsafe),
      AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V|A|F},
    { 0 }
};

AVFILTER_DEFINE_CLASS(concat);

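/*
 * Pick the supported formats once per output stream and apply the same list
 * to that output and to the matching input of every segment, so all segments
 * are negotiated to identical formats.
 */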
static int query_formats(AVFilterContext *ctx)
{
    ConcatContext *cat = ctx->priv;
    unsigned type, nb_str, idx0 = 0, idx, str, seg;
    AVFilterFormats *formats, *rates = NULL;
    AVFilterChannelLayouts *layouts = NULL;

    for (type = 0; type < TYPE_ALL; type++) {
        nb_str = cat->nb_streams[type];
        for (str = 0; str < nb_str; str++) {
            idx = idx0;

            /* Set the output formats */
            formats = ff_all_formats(type);
            if (!formats)
                return AVERROR(ENOMEM);
            ff_formats_ref(formats, &ctx->outputs[idx]->in_formats);
            if (type == AVMEDIA_TYPE_AUDIO) {
                rates = ff_all_samplerates();
                if (!rates)
                    return AVERROR(ENOMEM);
                ff_formats_ref(rates, &ctx->outputs[idx]->in_samplerates);
                layouts = ff_all_channel_layouts();
                if (!layouts)
                    return AVERROR(ENOMEM);
                ff_channel_layouts_ref(layouts, &ctx->outputs[idx]->in_channel_layouts);
            }

            /* Set the same formats for each corresponding input */
            for (seg = 0; seg < cat->nb_segments; seg++) {
                ff_formats_ref(formats, &ctx->inputs[idx]->out_formats);
                if (type == AVMEDIA_TYPE_AUDIO) {
                    ff_formats_ref(rates, &ctx->inputs[idx]->out_samplerates);
                    ff_channel_layouts_ref(layouts, &ctx->inputs[idx]->out_channel_layouts);
                }
                idx += ctx->nb_outputs;
            }

            idx0++;
        }
    }
    return 0;
}

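/*
 * The output takes its properties from the first segment's input; every
 * later segment must match in size and sample aspect ratio, unless the
 * "unsafe" option is set.
 */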
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ConcatContext *cat = ctx->priv;
    unsigned out_no = FF_OUTLINK_IDX(outlink);
    unsigned in_no  = out_no, seg;
    AVFilterLink *inlink = ctx->inputs[in_no];

    /* enhancement: find a common one */
    outlink->time_base           = AV_TIME_BASE_Q;
    outlink->w                   = inlink->w;
    outlink->h                   = inlink->h;
    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
    outlink->format              = inlink->format;
    for (seg = 1; seg < cat->nb_segments; seg++) {
        inlink = ctx->inputs[in_no += ctx->nb_outputs];
        /* possible enhancement: unsafe mode, do not check */
        if (outlink->w                       != inlink->w                       ||
            outlink->h                       != inlink->h                       ||
            outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num ||
            outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
            av_log(ctx, AV_LOG_ERROR, "Input link %s parameters "
                   "(size %dx%d, SAR %d:%d) do not match the corresponding "
                   "output link %s parameters (%dx%d, SAR %d:%d)\n",
                   ctx->input_pads[in_no].name, inlink->w, inlink->h,
                   inlink->sample_aspect_ratio.num,
                   inlink->sample_aspect_ratio.den,
                   ctx->input_pads[out_no].name, outlink->w, outlink->h,
                   outlink->sample_aspect_ratio.num,
                   outlink->sample_aspect_ratio.den);
            if (!cat->unsafe)
                return AVERROR(EINVAL);
        }
    }

    return 0;
}

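/*
 * Send one frame to the corresponding output: rescale its pts to the output
 * time base, record where this input now ends (using the sample count for
 * audio and the mean frame duration for video), then offset the pts by
 * delta_ts so the segment starts where the previous one stopped.
 */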
static void push_frame(AVFilterContext *ctx, unsigned in_no,
                       AVFrame *buf)
{
    ConcatContext *cat = ctx->priv;
    unsigned out_no = in_no % ctx->nb_outputs;
    AVFilterLink * inlink = ctx-> inputs[in_no];
    AVFilterLink *outlink = ctx->outputs[out_no];
    struct concat_in *in = &cat->in[in_no];

    buf->pts = av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
    in->pts = buf->pts;
    in->nb_frames++;
    /* add duration to input PTS */
    if (inlink->sample_rate)
        /* use number of audio samples */
        in->pts += av_rescale_q(buf->nb_samples,
                                (AVRational){ 1, inlink->sample_rate },
                                outlink->time_base);
    else if (in->nb_frames >= 2)
        /* use mean duration */
        in->pts = av_rescale(in->pts, in->nb_frames, in->nb_frames - 1);

    buf->pts += cat->delta_ts;
    ff_filter_frame(outlink, buf);
}

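/*
 * Route an incoming frame: frames belonging to the current segment are
 * pushed immediately, frames for a future segment are queued, and frames
 * arriving after their segment was flushed are dropped with an error.
 */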
static void process_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    ConcatContext *cat = ctx->priv;
    unsigned in_no = FF_INLINK_IDX(inlink);

    if (in_no < cat->cur_idx) {
        av_log(ctx, AV_LOG_ERROR, "Frame after EOF on input %s\n",
               ctx->input_pads[in_no].name);
        av_frame_free(&buf);
    } else if (in_no >= cat->cur_idx + ctx->nb_outputs) {
        ff_bufqueue_add(ctx, &cat->in[in_no].queue, buf);
    } else {
        push_frame(ctx, in_no, buf);
    }
}

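/*
 * Frame buffers are requested from the corresponding output link, so they
 * are allocated with the negotiated output properties.
 */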
static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
{
    AVFilterContext *ctx = inlink->dst;
    unsigned in_no = FF_INLINK_IDX(inlink);
    AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];

    return ff_get_video_buffer(outlink, w, h);
}

static AVFrame *get_audio_buffer(AVFilterLink *inlink, int nb_samples)
{
    AVFilterContext *ctx = inlink->dst;
    unsigned in_no = FF_INLINK_IDX(inlink);
    AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];

    return ff_get_audio_buffer(outlink, nb_samples);
}

static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    process_frame(inlink, buf);
    return 0; /* enhancement: handle error return */
}

static void close_input(AVFilterContext *ctx, unsigned in_no)
{
    ConcatContext *cat = ctx->priv;

    cat->in[in_no].eof = 1;
    cat->nb_in_active--;
    av_log(ctx, AV_LOG_VERBOSE, "EOF on %s, %d streams left in segment.\n",
           ctx->input_pads[in_no].name, cat->nb_in_active);
}

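/*
 * Compute how far the current segment extends (the largest end pts among its
 * inputs), advance delta_ts accordingly and return that segment duration so
 * the next segment starts where the longest stream of this one ends.
 */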
static int64_t find_next_delta_ts(AVFilterContext *ctx)
{
    ConcatContext *cat = ctx->priv;
    unsigned i = cat->cur_idx;
    unsigned imax = i + ctx->nb_outputs;
    int64_t pts;

    pts = cat->in[i++].pts;
    for (; i < imax; i++)
        pts = FFMAX(pts, cat->in[i].pts);
    cat->delta_ts += pts;
    return pts;
}

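/*
 * Pad an audio stream that ended early with silence up to the end of its
 * segment; seg_delta is the segment duration in the output time base, as
 * returned by find_next_delta_ts().
 */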
static void send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no,
                         int64_t seg_delta)
{
    ConcatContext *cat = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[out_no];
    /* delta_ts already includes seg_delta, so subtract it to get the
       absolute output pts where this input ended */
    int64_t base_pts = cat->in[in_no].pts + cat->delta_ts - seg_delta;
    int64_t nb_samples, sent = 0;
    int frame_nb_samples;
    AVRational rate_tb = { 1, ctx->inputs[in_no]->sample_rate };
    AVFrame *buf;
    int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);

    if (!rate_tb.den)
        return;
    /* silence covers the gap between the end of this input and the end of
       the segment */
    nb_samples = av_rescale_q(seg_delta - cat->in[in_no].pts,
                              outlink->time_base, rate_tb);
    frame_nb_samples = FFMAX(9600, rate_tb.den / 5); /* arbitrary */
    while (nb_samples) {
        frame_nb_samples = FFMIN(frame_nb_samples, nb_samples);
        buf = ff_get_audio_buffer(outlink, frame_nb_samples);
        if (!buf)
            return;
        av_samples_set_silence(buf->extended_data, 0, frame_nb_samples,
                               nb_channels, outlink->format);
        buf->pts = base_pts + av_rescale_q(sent, rate_tb, outlink->time_base);
        ff_filter_frame(outlink, buf);
        sent       += frame_nb_samples;
        nb_samples -= frame_nb_samples;
    }
}

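/*
 * Switch to the next segment once every input of the current one has reached
 * EOF: update delta_ts, pad short audio streams with silence and push the
 * frames already queued for the next segment.
 */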
static void flush_segment(AVFilterContext *ctx)
{
    ConcatContext *cat = ctx->priv;
    unsigned str, str_max;
    int64_t seg_delta;

    seg_delta = find_next_delta_ts(ctx);
    cat->cur_idx += ctx->nb_outputs;
    cat->nb_in_active = ctx->nb_outputs;
    av_log(ctx, AV_LOG_VERBOSE, "Segment finished at pts=%"PRId64"\n",
           cat->delta_ts);

    if (cat->cur_idx < ctx->nb_inputs) {
        /* pad audio streams with silence */
        str = cat->nb_streams[AVMEDIA_TYPE_VIDEO];
        str_max = str + cat->nb_streams[AVMEDIA_TYPE_AUDIO];
        for (; str < str_max; str++)
            send_silence(ctx, cat->cur_idx - ctx->nb_outputs + str, str,
                         seg_delta);
        /* flush queued buffers */
        /* possible enhancement: flush in PTS order */
        str_max = cat->cur_idx + ctx->nb_outputs;
        for (str = cat->cur_idx; str < str_max; str++)
            while (cat->in[str].queue.available)
                push_frame(ctx, str, ff_bufqueue_get(&cat->in[str].queue));
    }
}

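/*
 * Request frames from the current segment's input that feeds this output; on
 * EOF, drain the remaining inputs of the segment, flush it and continue with
 * the same stream in the next segment until all segments are consumed.
 */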
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ConcatContext *cat = ctx->priv;
    unsigned out_no = FF_OUTLINK_IDX(outlink);
    unsigned in_no  = out_no + cat->cur_idx;
    unsigned str, str_max;
    int ret;

    while (1) {
        if (in_no >= ctx->nb_inputs)
            return AVERROR_EOF;
        if (!cat->in[in_no].eof) {
            ret = ff_request_frame(ctx->inputs[in_no]);
            if (ret != AVERROR_EOF)
                return ret;
            close_input(ctx, in_no);
        }
        /* cycle on all inputs to finish the segment */
        /* possible enhancement: request in PTS order */
        str_max = cat->cur_idx + ctx->nb_outputs - 1;
        for (str = cat->cur_idx; cat->nb_in_active;
             str = str == str_max ? cat->cur_idx : str + 1) {
            if (cat->in[str].eof)
                continue;
            ret = ff_request_frame(ctx->inputs[str]);
            if (ret == AVERROR_EOF)
                close_input(ctx, str);
            else if (ret < 0)
                return ret;
        }
        flush_segment(ctx);
        in_no += ctx->nb_outputs;
    }
}

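/*
 * Parse the options, then create one input pad per stream and per segment
 * (named "inN:vM" / "inN:aM") and a single set of output pads
 * ("out:vM" / "out:aM").
 */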
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    ConcatContext *cat = ctx->priv;
    int ret;
    unsigned seg, type, str;

    cat->class = &concat_class;
    av_opt_set_defaults(cat);
    ret = av_set_options_string(cat, args, "=", ":");
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error parsing options: '%s'\n", args);
        return ret;
    }

    /* create input pads */
    for (seg = 0; seg < cat->nb_segments; seg++) {
        for (type = 0; type < TYPE_ALL; type++) {
            for (str = 0; str < cat->nb_streams[type]; str++) {
                AVFilterPad pad = {
                    .type             = type,
                    .get_video_buffer = get_video_buffer,
                    .get_audio_buffer = get_audio_buffer,
                    .filter_frame     = filter_frame,
                };
                pad.name = av_asprintf("in%d:%c%d", seg, "va"[type], str);
                ff_insert_inpad(ctx, ctx->nb_inputs, &pad);
            }
        }
    }
    /* create output pads */
    for (type = 0; type < TYPE_ALL; type++) {
        for (str = 0; str < cat->nb_streams[type]; str++) {
            AVFilterPad pad = {
                .type          = type,
                .config_props  = config_output,
                .request_frame = request_frame,
            };
            pad.name = av_asprintf("out:%c%d", "va"[type], str);
            ff_insert_outpad(ctx, ctx->nb_outputs, &pad);
        }
    }

    cat->in = av_calloc(ctx->nb_inputs, sizeof(*cat->in));
    if (!cat->in)
        return AVERROR(ENOMEM);
    cat->nb_in_active = ctx->nb_outputs;
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ConcatContext *cat = ctx->priv;
    unsigned i;

    for (i = 0; i < ctx->nb_inputs; i++) {
        av_freep(&ctx->input_pads[i].name);
        ff_bufqueue_discard_all(&cat->in[i].queue);
    }
    for (i = 0; i < ctx->nb_outputs; i++)
        av_freep(&ctx->output_pads[i].name);
    av_free(cat->in);
}

AVFilter avfilter_avf_concat = {
    .name          = "concat",
    .description   = NULL_IF_CONFIG_SMALL("Concatenate audio and video streams."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ConcatContext),
    .inputs        = NULL,
    .outputs       = NULL,
    .priv_class    = &concat_class,
};