@@ -533,6 +533,13 @@ static void ffmpeg_cleanup(int ret)
         avcodec_free_context(&ost->enc_ctx);
         avcodec_parameters_free(&ost->ref_par);
 
+        while (av_fifo_size(ost->muxing_queue)) {
+            AVPacket pkt;
+            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
+            av_packet_unref(&pkt);
+        }
+        av_fifo_free(ost->muxing_queue);
+
         av_freep(&output_streams[i]);
     }
 #if HAVE_PTHREADS
@@ -635,11 +642,33 @@ static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream,
     }
 }
 
-static void write_packet(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
+static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
 {
+    AVFormatContext *s = of->ctx;
     AVStream *st = ost->st;
     int ret;
 
+    if (!of->header_written) {
+        AVPacket tmp_pkt;
+        /* the muxer is not initialized yet, buffer the packet */
+        if (!av_fifo_space(ost->muxing_queue)) {
+            int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
+                                 ost->max_muxing_queue_size);
+            if (new_size <= av_fifo_size(ost->muxing_queue)) {
+                av_log(NULL, AV_LOG_ERROR,
+                       "Too many packets buffered for output stream %d:%d.\n",
+                       ost->file_index, ost->st->index);
+                exit_program(1);
+            }
+            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
+            if (ret < 0)
+                exit_program(1);
+        }
+        av_packet_move_ref(&tmp_pkt, pkt);
+        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
+        return;
+    }
+
     if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
         (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
         pkt->pts = pkt->dts = AV_NOPTS_VALUE;
@@ -752,7 +781,7 @@ static void close_output_stream(OutputStream *ost)
     }
 }
 
-static void output_packet(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
+static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
 {
     int ret = 0;
 
@@ -800,10 +829,10 @@ static void output_packet(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
                     goto finish;
                 idx++;
             } else
-                write_packet(s, pkt, ost);
+                write_packet(of, pkt, ost);
         }
     } else
-        write_packet(s, pkt, ost);
+        write_packet(of, pkt, ost);
 
 finish:
     if (ret < 0 && ret != AVERROR_EOF) {
@@ -827,7 +856,7 @@ static int check_recording_time(OutputStream *ost)
     return 1;
 }
 
-static void do_audio_out(AVFormatContext *s, OutputStream *ost,
+static void do_audio_out(OutputFile *of, OutputStream *ost,
                          AVFrame *frame)
 {
     AVCodecContext *enc = ost->enc_ctx;
@@ -878,7 +907,7 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
                    av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
         }
 
-        output_packet(s, &pkt, ost);
+        output_packet(of, &pkt, ost);
     }
 
     return;
@@ -887,7 +916,7 @@ error:
     exit_program(1);
 }
 
-static void do_subtitle_out(AVFormatContext *s,
+static void do_subtitle_out(OutputFile *of,
                             OutputStream *ost,
                             InputStream *ist,
                             AVSubtitle *sub)
@@ -967,11 +996,11 @@ static void do_subtitle_out(AVFormatContext *s,
                 pkt.pts += 90 * sub->end_display_time;
         }
         pkt.dts = pkt.pts;
-        output_packet(s, &pkt, ost);
+        output_packet(of, &pkt, ost);
     }
 }
 
-static void do_video_out(AVFormatContext *s,
+static void do_video_out(OutputFile *of,
                          OutputStream *ost,
                          AVFrame *next_picture,
                          double sync_ipts)
@@ -1020,10 +1049,10 @@ static void do_video_out(AVFormatContext *s,
 
     format_video_sync = video_sync_method;
     if (format_video_sync == VSYNC_AUTO) {
-        if(!strcmp(s->oformat->name, "avi")) {
+        if(!strcmp(of->ctx->oformat->name, "avi")) {
             format_video_sync = VSYNC_VFR;
         } else
-            format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
+            format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
         if (   ist
             && format_video_sync == VSYNC_CFR
             && input_files[ist->file_index]->ctx->nb_streams == 1
@@ -1134,7 +1163,7 @@ static void do_video_out(AVFormatContext *s,
         return;
 
 #if FF_API_LAVF_FMT_RAWPICTURE
-    if (s->oformat->flags & AVFMT_RAWPICTURE &&
+    if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
         enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
         /* raw pictures are written as AVPicture structure to
            avoid any copies. We support temporarily the older
@@ -1148,7 +1177,7 @@ static void do_video_out(AVFormatContext *s,
         pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
         pkt.flags |= AV_PKT_FLAG_KEY;
 
-        output_packet(s, &pkt, ost);
+        output_packet(of, &pkt, ost);
     } else
 #endif
     {
@@ -1251,7 +1280,7 @@ static void do_video_out(AVFormatContext *s,
             }
 
             frame_size = pkt.size;
-            output_packet(s, &pkt, ost);
+            output_packet(of, &pkt, ost);
 
             /* if two pass, output log */
             if (ost->logfile && enc->stats_out) {
@@ -1379,7 +1408,7 @@ static int reap_filters(int flush)
                        "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
             } else if (flush && ret == AVERROR_EOF) {
                 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
-                    do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
+                    do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
             }
             break;
         }
@@ -1419,7 +1448,7 @@ static int reap_filters(int flush)
                            enc->time_base.num, enc->time_base.den);
                 }
 
-                do_video_out(of->ctx, ost, filtered_frame, float_pts);
+                do_video_out(of, ost, filtered_frame, float_pts);
                 break;
             case AVMEDIA_TYPE_AUDIO:
                 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
@@ -1428,7 +1457,7 @@ static int reap_filters(int flush)
                            "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                     break;
                 }
-                do_audio_out(of->ctx, ost, filtered_frame);
+                do_audio_out(of, ost, filtered_frame);
                 break;
             default:
                 // TODO support subtitle filters
@@ -1758,7 +1787,7 @@ static void flush_encoders(void)
     for (i = 0; i < nb_output_streams; i++) {
         OutputStream *ost = output_streams[i];
         AVCodecContext *enc = ost->enc_ctx;
-        AVFormatContext *os = output_files[ost->file_index]->ctx;
+        OutputFile *of = output_files[ost->file_index];
         int stop_encoding = 0;
 
         if (!ost->encoding_needed)
@@ -1767,7 +1796,7 @@ static void flush_encoders(void)
         if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
             continue;
 #if FF_API_LAVF_FMT_RAWPICTURE
-        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
+        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
             continue;
 #endif
 
@@ -1819,7 +1848,7 @@ static void flush_encoders(void)
             }
             av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
             pkt_size = pkt.size;
-            output_packet(os, &pkt, ost);
+            output_packet(of, &pkt, ost);
             if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
                 do_video_stats(ost, pkt_size);
             }
@@ -1961,7 +1990,7 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
     }
 #endif
 
-    output_packet(of->ctx, &opkt, ost);
+    output_packet(of, &opkt, ost);
 }
 
 int guess_input_channel_layout(InputStream *ist)
@@ -2367,7 +2396,7 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
             || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
             continue;
 
-        do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
+        do_subtitle_out(output_files[ost->file_index], ost, ist, &subtitle);
     }
 
 out:
@@ -2756,6 +2785,17 @@ static int check_init_output_file(OutputFile *of, int file_index)
     if (sdp_filename || want_sdp)
         print_sdp();
 
+    /* flush the muxing queues */
+    for (i = 0; i < of->ctx->nb_streams; i++) {
+        OutputStream *ost = output_streams[of->ost_index + i];
+
+        while (av_fifo_size(ost->muxing_queue)) {
+            AVPacket pkt;
+            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
+            write_packet(of, &pkt, ost);
+        }
+    }
+
     return 0;
 }
 