@@ -550,6 +550,7 @@ static void ffmpeg_cleanup(int ret)
         av_frame_free(&ist->sub2video.frame);
         av_freep(&ist->filters);
         av_freep(&ist->hwaccel_device);
+        av_freep(&ist->dts_buffer);
 
         avcodec_free_context(&ist->dec_ctx);
 
@@ -1976,6 +1977,33 @@ static void check_decode_result(InputStream *ist, int *got_output, int ret)
     }
 }
 
+// This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
+// There is the following difference: if you got a frame, you must call
+// it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
+// (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
+static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
+{
+    int ret;
+
+    *got_frame = 0;
+
+    if (pkt) {
+        ret = avcodec_send_packet(avctx, pkt);
+        // In particular, we don't expect AVERROR(EAGAIN), because we read all
+        // decoded frames with avcodec_receive_frame() until done.
+        if (ret < 0 && ret != AVERROR_EOF)
+            return ret;
+    }
+
+    ret = avcodec_receive_frame(avctx, frame);
+    if (ret < 0 && ret != AVERROR(EAGAIN))
+        return ret;
+    if (ret >= 0)
+        *got_frame = 1;
+
+    return 0;
+}
+
 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
 {
     AVFrame *decoded_frame, *f;
@@ -1990,7 +2018,7 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
     decoded_frame = ist->decoded_frame;
 
     update_benchmark(NULL);
-    ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
+    ret = decode(avctx, decoded_frame, got_output, pkt);
     update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
 
     if (ret >= 0 && avctx->sample_rate <= 0) {
@@ -1998,7 +2026,8 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
         ret = AVERROR_INVALIDDATA;
     }
 
-    check_decode_result(ist, got_output, ret);
+    if (ret != AVERROR_EOF)
+        check_decode_result(ist, got_output, ret);
 
     if (!*got_output || ret < 0)
         return ret;
@@ -2066,14 +2095,13 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
     } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
         decoded_frame->pts = decoded_frame->pkt_pts;
         decoded_frame_tb = ist->st->time_base;
-    } else if (pkt->pts != AV_NOPTS_VALUE) {
+    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
         decoded_frame->pts = pkt->pts;
         decoded_frame_tb = ist->st->time_base;
     }else {
         decoded_frame->pts = ist->dts;
         decoded_frame_tb = AV_TIME_BASE_Q;
     }
-    pkt->pts = AV_NOPTS_VALUE;
     if (decoded_frame->pts != AV_NOPTS_VALUE)
         decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                               (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
@@ -2101,23 +2129,45 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
     return err < 0 ? err : ret;
 }
 
-static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
+static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
 {
     AVFrame *decoded_frame, *f;
     int i, ret = 0, err = 0, resample_changed;
     int64_t best_effort_timestamp;
+    int64_t dts = AV_NOPTS_VALUE;
     AVRational *frame_sample_aspect;
+    AVPacket avpkt;
+
+    // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
+    // reason. This seems like a semi-critical bug. Don't trigger EOF, and
+    // skip the packet.
+    if (!eof && pkt && pkt->size == 0)
+        return 0;
 
     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
         return AVERROR(ENOMEM);
     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
         return AVERROR(ENOMEM);
     decoded_frame = ist->decoded_frame;
-    pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
+    if (ist->dts != AV_NOPTS_VALUE)
+        dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
+    if (pkt) {
+        avpkt = *pkt;
+        avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
+    }
+
+    // The old code used to set dts on the drain packet, which does not work
+    // with the new API anymore.
+    if (eof) {
+        void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
+        if (!new)
+            return AVERROR(ENOMEM);
+        ist->dts_buffer = new;
+        ist->dts_buffer[ist->nb_dts_buffer++] = dts;
+    }
 
     update_benchmark(NULL);
-    ret = avcodec_decode_video2(ist->dec_ctx,
-                                decoded_frame, got_output, pkt);
+    ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
     update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
 
     // The following line may be required in some cases where there is no parser
@@ -2135,7 +2185,8 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
                ist->st->codecpar->video_delay);
     }
 
-    check_decode_result(ist, got_output, ret);
+    if (ret != AVERROR_EOF)
+        check_decode_result(ist, got_output, ret);
 
     if (*got_output && ret >= 0) {
         if (ist->dec_ctx->width != decoded_frame->width ||
@@ -2167,6 +2218,15 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
     ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
 
     best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
+
+    if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
+        best_effort_timestamp = ist->dts_buffer[0];
+
+        for (i = 0; i < ist->nb_dts_buffer - 1; i++)
+            ist->dts_buffer[i] = ist->dts_buffer[i + 1];
+        ist->nb_dts_buffer--;
+    }
+
     if(best_effort_timestamp != AV_NOPTS_VALUE) {
         int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
 
@@ -2185,8 +2245,6 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
                ist->st->time_base.num, ist->st->time_base.den);
     }
 
-    pkt->size = 0;
-
     if (ist->st->sample_aspect_ratio.num)
         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
 
@@ -2225,12 +2283,12 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
                 break;
         } else
             f = decoded_frame;
-        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
-        if (ret == AVERROR_EOF) {
-            ret = 0; /* ignore */
-        } else if (ret < 0) {
+        err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
+        if (err == AVERROR_EOF) {
+            err = 0; /* ignore */
+        } else if (err < 0) {
             av_log(NULL, AV_LOG_FATAL,
-                   "Failed to inject frame into filter network: %s\n", av_err2str(ret));
+                   "Failed to inject frame into filter network: %s\n", av_err2str(err));
             exit_program(1);
         }
     }
@@ -2315,7 +2373,8 @@ static int send_filter_eof(InputStream *ist)
 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
 {
     int ret = 0, i;
-    int got_output = 0;
+    int repeating = 0;
+    int eof_reached = 0;
 
     AVPacket avpkt;
     if (!ist->saw_first_ts) {
@@ -2338,84 +2397,99 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
         av_init_packet(&avpkt);
         avpkt.data = NULL;
         avpkt.size = 0;
-        goto handle_eof;
     } else {
         avpkt = *pkt;
     }
 
-    if (pkt->dts != AV_NOPTS_VALUE) {
+    if (pkt && pkt->dts != AV_NOPTS_VALUE) {
         ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
         if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
             ist->next_pts = ist->pts = ist->dts;
     }
 
     // while we have more to decode or while the decoder did output something on EOF
-    while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
-        int duration;
-    handle_eof:
+    while (ist->decoding_needed) {
+        int duration = 0;
+        int got_output = 0;
 
         ist->pts = ist->next_pts;
         ist->dts = ist->next_dts;
 
         switch (ist->dec_ctx->codec_type) {
         case AVMEDIA_TYPE_AUDIO:
-            ret = decode_audio (ist, &avpkt, &got_output);
+            ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
            break;
         case AVMEDIA_TYPE_VIDEO:
-            ret = decode_video (ist, &avpkt, &got_output);
-            if (avpkt.duration) {
-                duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
-            } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
-                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
-                duration = ((int64_t)AV_TIME_BASE *
-                                ist->dec_ctx->framerate.den * ticks) /
-                                ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
-            } else
-                duration = 0;
+            ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
+            if (!repeating || !pkt || got_output) {
+                if (pkt && pkt->duration) {
+                    duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
+                } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
+                    int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
+                    duration = ((int64_t)AV_TIME_BASE *
+                                    ist->dec_ctx->framerate.den * ticks) /
+                                    ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
+                }
 
-            if(ist->dts != AV_NOPTS_VALUE && duration) {
-                ist->next_dts += duration;
-            }else
-                ist->next_dts = AV_NOPTS_VALUE;
+                if(ist->dts != AV_NOPTS_VALUE && duration) {
+                    ist->next_dts += duration;
+                }else
+                    ist->next_dts = AV_NOPTS_VALUE;
+            }
 
             if (got_output)
                 ist->next_pts += duration; //FIXME the duration is not correct in some cases
             break;
         case AVMEDIA_TYPE_SUBTITLE:
+            if (repeating)
+                break;
             ret = transcode_subtitles(ist, &avpkt, &got_output);
+            if (!pkt && ret >= 0)
+                ret = AVERROR_EOF;
             break;
         default:
             return -1;
         }
 
+        if (ret == AVERROR_EOF) {
+            eof_reached = 1;
+            break;
+        }
+
         if (ret < 0) {
             av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                    ist->file_index, ist->st->index, av_err2str(ret));
             if (exit_on_error)
                 exit_program(1);
+            // Decoding might not terminate if we're draining the decoder, and
+            // the decoder keeps returning an error.
+            // This should probably be considered a libavcodec issue.
+            // Sample: fate-vsynth1-dnxhd-720p-hr-lb
+            if (!pkt)
+                eof_reached = 1;
             break;
         }
 
-        avpkt.dts=
-        avpkt.pts= AV_NOPTS_VALUE;
+        if (!got_output)
+            break;
 
-        // touch data and size only if not EOF
-        if (pkt) {
-            if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
-                ret = avpkt.size;
-            avpkt.data += ret;
-            avpkt.size -= ret;
-        }
-        if (!got_output) {
-            continue;
-        }
-        if (got_output && !pkt)
+        // During draining, we might get multiple output frames in this loop.
+        // ffmpeg.c does not drain the filter chain on configuration changes,
+        // which means if we send multiple frames at once to the filters, and
+        // one of those frames changes configuration, the buffered frames will
+        // be lost. This can upset certain FATE tests.
+        // Decode only 1 frame per call on EOF to appease these FATE tests.
+        // The ideal solution would be to rewrite decoding to use the new
+        // decoding API in a better way.
+        if (!pkt)
            break;
+
+        repeating = 1;
     }
 
     /* after flushing, send an EOF on all the filter inputs attached to the stream */
     /* except when looping we need to flush but not to send an EOF */
-    if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
+    if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
        int ret = send_filter_eof(ist);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
@@ -2459,7 +2533,7 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
             do_streamcopy(ist, ost, pkt);
     }
 
-    return got_output;
+    return !eof_reached;
 }
 
 static void print_sdp(void)
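
For reference, the pattern this patch converts ffmpeg.c to is libavcodec's send/receive decoding API: packets go in through avcodec_send_packet(), every pending frame is pulled out with avcodec_receive_frame(), and end of stream is signalled by sending a NULL packet and receiving until AVERROR_EOF. The sketch below is a minimal standalone illustration of that loop, not part of the patch; decoder_ctx and the handle_frame() callback are hypothetical stand-ins for whatever state the caller keeps.

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

/* Minimal sketch of the send/receive decode loop (illustration only, not
 * patch code). decoder_ctx is an opened AVCodecContext; handle_frame() is a
 * hypothetical caller-supplied callback that consumes each decoded frame. */
static int decode_all(AVCodecContext *decoder_ctx, const AVPacket *pkt,
                      int (*handle_frame)(AVFrame *frame))
{
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);

    // pkt == NULL puts the decoder into draining mode; it will then return
    // its remaining buffered frames and finally AVERROR_EOF.
    ret = avcodec_send_packet(decoder_ctx, pkt);
    if (ret < 0)
        goto end;

    // One packet can yield zero, one or several frames, so receive in a loop.
    while (ret >= 0) {
        ret = avcodec_receive_frame(decoder_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            // EAGAIN: the decoder wants another packet; EOF: fully drained.
            ret = 0;
            break;
        }
        if (ret < 0)
            break;

        ret = handle_frame(frame);
        av_frame_unref(frame);
    }

end:
    av_frame_free(&frame);
    return ret;
}

A caller would run decode_all() once per demuxed packet and once more with pkt set to NULL at end of stream, which mirrors what process_input_packet() now does through the decode() helper and its repeating/eof_reached bookkeeping.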