dts/pts fixed for streamcopy
dont use coded_frame->key_frame hack in muxers, use AVPacket.flags instead

Originally committed as revision 3171 to svn://svn.ffmpeg.org/ffmpeg/trunk
@@ -458,7 +458,7 @@ static void do_audio_out(AVFormatContext *s,
  if(enc->coded_frame)
  pkt.pts= enc->coded_frame->pts;
  pkt.flags |= PKT_FLAG_KEY;
- av_write_frame(s, &pkt);
+ av_interleaved_write_frame(s, &pkt);
  }
  } else {
  AVPacket pkt;
@@ -484,7 +484,7 @@ static void do_audio_out(AVFormatContext *s,
  if(enc->coded_frame)
  pkt.pts= enc->coded_frame->pts;
  pkt.flags |= PKT_FLAG_KEY;
- av_write_frame(s, &pkt);
+ av_interleaved_write_frame(s, &pkt);
  }
  }
@@ -771,7 +771,7 @@ static void do_video_out(AVFormatContext *s,
  if(dec->coded_frame && dec->coded_frame->key_frame)
  pkt.flags |= PKT_FLAG_KEY;
- av_write_frame(s, &pkt);
+ av_interleaved_write_frame(s, &pkt);
  enc->coded_frame = old_frame;
  } else {
  AVFrame big_picture;
@@ -807,7 +807,7 @@ static void do_video_out(AVFormatContext *s,
  pkt.pts= enc->coded_frame->pts;
  if(enc->coded_frame && enc->coded_frame->key_frame)
  pkt.flags |= PKT_FLAG_KEY;
- av_write_frame(s, &pkt);
+ av_interleaved_write_frame(s, &pkt);
  *frame_size = ret;
  //fprintf(stderr,"\nFrame: %3d %3d size: %5d type: %d",
  // enc->frame_number-1, enc->real_pict_num, ret,
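
The common thread in the ffmpeg.c, asf.c and rm.c hunks of this commit is that the encoder's keyframe decision is stamped onto the packet once via PKT_FLAG_KEY, and the muxers then consult only the packet instead of reaching back into the codec context's coded_frame. A self-contained toy model of that flow; the Frame/Packet structs and the flag value are stand-ins for the sketch, not the real libavformat definitions:

    #include <stdio.h>

    #define PKT_FLAG_KEY 0x0001          /* stand-in for the libavformat flag */

    /* stand-ins for AVFrame / AVPacket, reduced to the fields used here */
    typedef struct { int key_frame; } Frame;
    typedef struct { int flags; int size; } Packet;

    /* encode side: tag the packet once, from the encoder's own coded frame */
    static void tag_packet(const Frame *coded, Packet *pkt)
    {
        if (coded && coded->key_frame)
            pkt->flags |= PKT_FLAG_KEY;
    }

    /* mux side: no reaching into the codec context, only the packet */
    static void mux_packet(const Packet *pkt)
    {
        printf("writing %d bytes, key=%d\n",
               pkt->size, !!(pkt->flags & PKT_FLAG_KEY));
    }

    int main(void)
    {
        Frame  coded = { .key_frame = 1 };
        Packet pkt   = { .flags = 0, .size = 4096 };

        tag_packet(&coded, &pkt);
        mux_packet(&pkt);
        return 0;
    }

One practical payoff is stream copy, where no encoder exists on the output side, so there is never a coded_frame for the muxer to inspect.
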
@@ -1165,10 +1165,11 @@ static int output_packet(AVInputStream *ist, int ist_index,
  opkt.stream_index= ost->index;
  opkt.data= data_buf;
  opkt.size= data_size;
- opkt.pts= ist->pts; //FIXME dts vs. pts
+ opkt.pts= pkt->pts; //FIXME ist->pts?
+ opkt.dts= pkt->dts;
  opkt.flags= pkt->flags;
- av_write_frame(os, &opkt);
+ av_interleaved_write_frame(os, &opkt);
  ost->st->codec.frame_number++;
  ost->frame_number++;
  }
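
For stream copy the output packet now takes both timestamps straight from the demuxed packet instead of deriving pts from the input stream clock, so the pts/dts offset that encodes B-frame reordering survives the copy. A minimal sketch with a stand-in Packet struct and made-up 90 kHz timestamps:

    #include <stdio.h>
    #include <stdint.h>

    typedef struct { int64_t pts, dts; int flags; } Packet;  /* stand-in for AVPacket */

    int main(void)
    {
        /* demuxed packet of a reordered (B-frame) stream: pts leads dts */
        Packet in  = { .pts = 7200, .dts = 3600, .flags = 1 /* key, stand-in */ };
        Packet out = { 0 };

        /* copy, don't recompute: keeps the decoder-side reordering intact */
        out.pts   = in.pts;
        out.dts   = in.dts;
        out.flags = in.flags;

        printf("out pts=%lld dts=%lld flags=%d\n",
               (long long)out.pts, (long long)out.dts, out.flags);
        return 0;
    }
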
@@ -1633,7 +1634,10 @@ static int av_encode(AVFormatContext **output_files,
  ost = ost_table[i];
  os = output_files[ost->file_index];
  ist = ist_table[ost->source_index];
- pts = (double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den;
+ if(ost->st->codec.codec_type == CODEC_TYPE_VIDEO)
+ pts = (double)ost->sync_opts * ost->st->codec.frame_rate_base / ost->st->codec.frame_rate;
+ else
+ pts = (double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den;
  if (!file_table[ist->file_index].eof_reached &&
  pts < pts_min) {
  pts_min = pts;
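
For video streams the per-stream progress estimate in av_encode() is now derived from the encoder-side frame counter: seconds = sync_opts * frame_rate_base / frame_rate. A worked example with assumed numbers (250 frames encoded at 25 fps):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* frame rate expressed as a rational, as the old AVCodecContext did */
        int     frame_rate      = 25;   /* numerator   */
        int     frame_rate_base = 1;    /* denominator */
        int64_t sync_opts       = 250;  /* frames handed to the encoder so far */

        /* same formula as the hunk above, evaluated in floating point */
        double pts = (double)sync_opts * frame_rate_base / frame_rate;

        printf("%lld frames at %d/%d fps -> %.2f s\n",
               (long long)sync_opts, frame_rate, frame_rate_base, pts);  /* 10.00 s */
        return 0;
    }
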
@@ -586,7 +586,8 @@ static void put_payload_header(
  int presentation_time,
  int m_obj_size,
  int m_obj_offset,
- int payload_len
+ int payload_len,
+ int flags
  )
  {
  ASFContext *asf = s->priv_data;
@@ -594,7 +595,7 @@ static void put_payload_header(
  int val;
  val = stream->num;
- if (s->streams[val - 1]->codec.coded_frame->key_frame)
+ if (flags & PKT_FLAG_KEY)
  val |= ASF_PL_FLAG_KEY_FRAME;
  put_byte(pb, val);
@@ -621,7 +622,8 @@ static void put_frame(
  ASFStream *stream,
  int timestamp,
  const uint8_t *buf,
- int m_obj_size
+ int m_obj_size,
+ int flags
  )
  {
  ASFContext *asf = s->priv_data;
@@ -662,7 +664,7 @@ static void put_frame(
  else if (payload_len == (frag_len1 - 1))
  payload_len = frag_len1 - 2; //additional byte need to put padding length
- put_payload_header(s, stream, timestamp+preroll_time, m_obj_size, m_obj_offset, payload_len);
+ put_payload_header(s, stream, timestamp+preroll_time, m_obj_size, m_obj_offset, payload_len, flags);
  put_buffer(&asf->pb, buf, payload_len);
  if (asf->multi_payloads_present)
@@ -706,7 +708,7 @@ static int asf_write_packet(AVFormatContext *s, AVPacket *pkt)
  if (duration > asf->duration)
  asf->duration = duration;
- put_frame(s, stream, pkt->pts, pkt->data, pkt->size);
+ put_frame(s, stream, pkt->pts, pkt->data, pkt->size, pkt->flags);
  return 0;
  }
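
In the ASF payload header the stream number and the key-frame marker share a single byte, which is why put_payload_header() only needs to OR one bit into val. A small sketch of that packing; the concrete value used here for ASF_PL_FLAG_KEY_FRAME (0x80) is an assumption of the sketch, not taken from the patch:

    #include <stdio.h>

    #define PKT_FLAG_KEY          0x0001  /* stand-in for the libavformat flag   */
    #define ASF_PL_FLAG_KEY_FRAME 0x80    /* assumed bit position for the sketch */

    /* build the first byte of an ASF payload header from stream number + flags */
    static unsigned char payload_byte(int stream_num, int pkt_flags)
    {
        unsigned char val = (unsigned char)stream_num;  /* low bits: stream number */
        if (pkt_flags & PKT_FLAG_KEY)
            val |= ASF_PL_FLAG_KEY_FRAME;               /* high bit: key frame     */
        return val;
    }

    int main(void)
    {
        printf("stream 1, key   -> 0x%02x\n", payload_byte(1, PKT_FLAG_KEY));
        printf("stream 2, inter -> 0x%02x\n", payload_byte(2, 0));
        return 0;
    }
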
@@ -5,7 +5,7 @@
  extern "C" {
  #endif
- #define LIBAVFORMAT_BUILD 4615
+ #define LIBAVFORMAT_BUILD 4616
  #define LIBAVFORMAT_VERSION_INT FFMPEG_VERSION_INT
  #define LIBAVFORMAT_VERSION FFMPEG_VERSION
@@ -557,6 +557,7 @@ int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts
  int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap);
  int av_write_header(AVFormatContext *s);
  int av_write_frame(AVFormatContext *s, AVPacket *pkt);
+ int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt);
  int av_write_trailer(AVFormatContext *s);
@@ -803,51 +803,6 @@ static void put_vcd_padding_sector(AVFormatContext *ctx)
  s->packet_number++;
  }
- /* XXX: move that to upper layer */
- /* XXX: we assume that there are always 'max_b_frames' between
- reference frames. A better solution would be to use the AVFrame pts
- field */
- static void compute_pts_dts(AVStream *st, int64_t *ppts, int64_t *pdts,
- int64_t timestamp)
- {
- int frame_delay;
- int64_t pts, dts;
- if (st->codec.codec_type == CODEC_TYPE_VIDEO &&
- st->codec.max_b_frames != 0) {
- frame_delay = (st->codec.frame_rate_base * 90000LL) /
- st->codec.frame_rate;
- if (timestamp == 0) {
- /* specific case for first frame : DTS just before */
- pts = timestamp;
- dts = timestamp - frame_delay;
- } else {
- timestamp -= frame_delay;
- if (st->codec.coded_frame->pict_type == FF_B_TYPE) {
- /* B frames has identical pts/dts */
- pts = timestamp;
- dts = timestamp;
- } else {
- /* a reference frame has a pts equal to the dts of the
- _next_ one */
- dts = timestamp;
- pts = timestamp + (st->codec.max_b_frames + 1) * frame_delay;
- }
- }
- #if 1
- av_log(&st->codec, AV_LOG_DEBUG, "pts=%0.3f dts=%0.3f pict_type=%c\n",
- pts / 90000.0, dts / 90000.0,
- av_get_pict_type_char(st->codec.coded_frame->pict_type));
- #endif
- } else {
- pts = timestamp;
- dts = timestamp;
- }
- *ppts = pts & ((1LL << 33) - 1);
- *pdts = dts & ((1LL << 33) - 1);
- }
  static int64_t update_scr(AVFormatContext *ctx,int stream_index,int64_t pts)
  {
  MpegMuxContext *s = ctx->priv_data;
@@ -923,9 +878,6 @@ static int mpeg_mux_write_packet(AVFormatContext *ctx, AVPacket *pkt)
  int64_t pts, dts, new_start_pts, new_start_dts;
  int len, avail_size;
- //XXX/FIXME this is and always was broken
- // compute_pts_dts(st, &pts, &dts, pkt->pts);
  pts= pkt->pts;
  dts= pkt->dts;
@@ -1395,7 +1347,7 @@ static int mpegps_read_packet(AVFormatContext *s,
  pkt->dts = dts;
  pkt->stream_index = st->index;
  #if 0
- printf("%d: pts=%0.3f dts=%0.3f\n",
+ av_log(s, AV_LOG_DEBUG, "%d: pts=%0.3f dts=%0.3f\n",
  pkt->stream_index, pkt->pts / 90000.0, pkt->dts / 90000.0);
  #endif
  return 0;
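
The compute_pts_dts() helper deleted from the MPEG muxer above guessed timestamps from a fixed GOP shape: a reference frame keeps its timestamp as dts and has its pts pushed ahead by (max_b_frames + 1) frame delays, while B-frames get pts == dts. The standalone model below reproduces that arithmetic on a 90 kHz clock, mainly to show why it had to go — as its own comment admitted, it assumed a constant number of B-frames between references, whereas the muxer can now simply trust the pts/dts carried in the packet:

    #include <stdio.h>
    #include <stdint.h>

    /* Model of the deleted compute_pts_dts(): 90 kHz clock, fixed GOP shape. */
    /* All values are illustrative; the timestamp==0 special case is omitted. */
    static void guess_pts_dts(int is_b_frame, int max_b_frames,
                              int64_t frame_delay, int64_t timestamp,
                              int64_t *pts, int64_t *dts)
    {
        timestamp -= frame_delay;
        if (is_b_frame) {
            *pts = timestamp;               /* B frames: pts == dts           */
            *dts = timestamp;
        } else {
            *dts = timestamp;               /* reference frame keeps its slot */
            *pts = timestamp + (max_b_frames + 1) * frame_delay;
        }
    }

    int main(void)
    {
        int64_t frame_delay = 90000 / 25;   /* one frame at 25 fps in 90 kHz  */
        int64_t pts, dts;

        guess_pts_dts(0, 2, frame_delay, 3 * frame_delay, &pts, &dts);  /* P  */
        printf("P: pts=%lld dts=%lld\n", (long long)pts, (long long)dts);

        guess_pts_dts(1, 2, frame_delay, 1 * frame_delay, &pts, &dts);  /* B  */
        printf("B: pts=%lld dts=%lld\n", (long long)pts, (long long)dts);
        return 0;
    }
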
@@ -324,7 +324,7 @@ static int rm_write_header(AVFormatContext *s)
  return 0;
  }
- static int rm_write_audio(AVFormatContext *s, const uint8_t *buf, int size)
+ static int rm_write_audio(AVFormatContext *s, const uint8_t *buf, int size, int flags)
  {
  uint8_t *buf1;
  RMContext *rm = s->priv_data;
@@ -335,7 +335,7 @@ static int rm_write_audio(AVFormatContext *s, const uint8_t *buf, int size)
  /* XXX: suppress this malloc */
  buf1= (uint8_t*) av_malloc( size * sizeof(uint8_t) );
- write_packet_header(s, stream, size, stream->enc->coded_frame->key_frame);
+ write_packet_header(s, stream, size, !!(flags & PKT_FLAG_KEY));
  /* for AC3, the words seems to be reversed */
  for(i=0;i<size;i+=2) {
@@ -349,12 +349,12 @@ static int rm_write_audio(AVFormatContext *s, const uint8_t *buf, int size)
  return 0;
  }
- static int rm_write_video(AVFormatContext *s, const uint8_t *buf, int size)
+ static int rm_write_video(AVFormatContext *s, const uint8_t *buf, int size, int flags)
  {
  RMContext *rm = s->priv_data;
  ByteIOContext *pb = &s->pb;
  StreamInfo *stream = rm->video_stream;
- int key_frame = stream->enc->coded_frame->key_frame;
+ int key_frame = !!(flags & PKT_FLAG_KEY);
  /* XXX: this is incorrect: should be a parameter */
@@ -393,9 +393,9 @@ static int rm_write_packet(AVFormatContext *s, AVPacket *pkt)
  {
  if (s->streams[pkt->stream_index]->codec.codec_type ==
  CODEC_TYPE_AUDIO)
- return rm_write_audio(s, pkt->data, pkt->size);
+ return rm_write_audio(s, pkt->data, pkt->size, pkt->flags);
  else
- return rm_write_video(s, pkt->data, pkt->size);
+ return rm_write_video(s, pkt->data, pkt->size, pkt->flags);
  }
  static int rm_write_trailer(AVFormatContext *s)
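
write_packet_header() in rm.c takes the key-frame indication as a plain integer, so the RealMedia hunks normalize the flag test with a double negation: !!(flags & PKT_FLAG_KEY) is exactly 0 or 1 regardless of which bit the flag occupies. A tiny illustration (flag value is a stand-in):

    #include <stdio.h>

    #define PKT_FLAG_KEY 0x0001   /* stand-in for the libavformat flag */

    int main(void)
    {
        int flags = PKT_FLAG_KEY | 0x0010;         /* some unrelated bit set too */
        int key_frame = !!(flags & PKT_FLAG_KEY);  /* collapse the mask to 0 / 1 */
        printf("key_frame=%d\n", key_frame);       /* prints 1 */
        return 0;
    }
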
@@ -528,8 +528,7 @@ static int get_audio_frame_size(AVCodecContext *enc, int size)
  /* return the frame duration in seconds, return 0 if not available */
- static void compute_frame_duration(int *pnum, int *pden,
- AVFormatContext *s, AVStream *st,
+ static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
  AVCodecParserContext *pc, AVPacket *pkt)
  {
  int frame_size;
@@ -577,7 +576,7 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
  }
  if (pkt->duration == 0) {
- compute_frame_duration(&num, &den, s, st, pc, pkt);
+ compute_frame_duration(&num, &den, st, pc, pkt);
  if (den && num) {
  pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
  }
@@ -604,7 +603,7 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
  else st->cur_dts = 0;
  }
- // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
+ // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%lld, dts:%lld cur_dts:%lld st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
  /* interpolate PTS and DTS if they are not present */
  if (presentation_delayed) {
  /* DTS = decompression time stamp */
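
compute_frame_duration() reports a duration as the rational num/den in seconds, and the caller converts it into stream time_base units with av_rescale(1, num * time_base.den, den * time_base.num). A worked example under assumed numbers — one frame of 25 fps video expressed in a 1/90000 time base:

    #include <stdio.h>
    #include <stdint.h>

    /* minimal stand-in for av_rescale(a, b, c) = a * b / c in 64-bit */
    static int64_t rescale(int64_t a, int64_t b, int64_t c)
    {
        return a * b / c;
    }

    int main(void)
    {
        int num = 1,    den = 25;       /* one frame lasts 1/25 of a second */
        int tb_num = 1, tb_den = 90000; /* stream time base: 1/90000 s      */

        int64_t duration = rescale(1, (int64_t)num * tb_den, (int64_t)den * tb_num);
        printf("duration = %lld time_base units\n", (long long)duration);  /* 3600 */
        return 0;
    }
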
@@ -1865,28 +1864,12 @@ int av_write_header(AVFormatContext *s)
  return 0;
  }
- /**
- * Write a packet to an output media file. The packet shall contain
- * one audio or video frame.
- *
- * @param s media file handle
- * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
- * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
- */
- int av_write_frame(AVFormatContext *s, AVPacket *pkt)
- {
- AVStream *st;
- int64_t pts_mask;
- int ret, frame_size;
- int b_frames;
- if(pkt->stream_index<0)
- return -1;
- st = s->streams[pkt->stream_index];
+ //FIXME merge with compute_pkt_fields
+ static void compute_pkt_fields2(AVStream *st, AVPacket *pkt){
+ int b_frames = FFMAX(st->codec.has_b_frames, st->codec.max_b_frames);
+ int num, den, frame_size;
- b_frames = FFMAX(st->codec.has_b_frames, st->codec.max_b_frames);
- // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size);
+ // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);
  /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
  return -1;*/
@@ -1898,6 +1881,12 @@ int av_write_frame(AVFormatContext *s, AVPacket *pkt)
  /* duration field */
  pkt->duration = av_rescale(pkt->duration, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
+ if (pkt->duration == 0) {
+ compute_frame_duration(&num, &den, st, NULL, pkt);
+ if (den && num) {
+ pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
+ }
+ }
  //XXX/FIXME this is a temporary hack until all encoders output pts
  if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
@@ -1910,9 +1899,7 @@ int av_write_frame(AVFormatContext *s, AVPacket *pkt)
  if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
  if(b_frames){
  if(st->last_IP_pts == AV_NOPTS_VALUE){
- st->last_IP_pts= -av_rescale(1,
- st->codec.frame_rate_base*(int64_t)st->time_base.den,
- st->codec.frame_rate *(int64_t)st->time_base.num);
+ st->last_IP_pts= -pkt->duration;
  }
  if(st->last_IP_pts < pkt->pts){
  pkt->dts= st->last_IP_pts;
@@ -1923,19 +1910,10 @@ int av_write_frame(AVFormatContext *s, AVPacket *pkt)
  pkt->dts= pkt->pts;
  }
- // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
+ // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
  st->cur_dts= pkt->dts;
  st->pts.val= pkt->dts;
- pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
- pkt->pts &= pts_mask;
- pkt->dts &= pts_mask;
- ret = s->oformat->write_packet(s, pkt);
- if (ret < 0)
- return ret;
  /* update pts */
  switch (st->codec.codec_type) {
  case CODEC_TYPE_AUDIO:
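
When a packet arrives with a pts but no dts and the stream uses B-frames, compute_pkt_fields2() above falls back on the previous reference frame: the dts of the current I/P packet becomes the pts of the previous one (the one-frame delay a single reorder buffer introduces), while B-frames, whose pts does not exceed the stored value, keep dts == pts. A standalone model of that bookkeeping with invented 90 kHz timestamps; the exact update of last_IP_pts sits in lines elided from the hunk above, so treat this as a reconstruction:

    #include <stdio.h>
    #include <stdint.h>

    #define NOPTS INT64_MIN     /* stand-in for AV_NOPTS_VALUE */

    int main(void)
    {
        /* pts of an I P B B P B B ... stream, listed in decode order */
        int64_t pts_in[] = { 0, 10800, 3600, 7200, 21600, 14400, 18000 };
        int64_t last_IP_pts = NOPTS;
        int64_t duration = 3600;                 /* one frame: 90000 / 25     */

        for (int i = 0; i < 7; i++) {
            int64_t pts = pts_in[i], dts;
            if (last_IP_pts == NOPTS)
                last_IP_pts = -duration;         /* seed before first frame   */
            if (last_IP_pts < pts) {             /* I/P frame: delayed by one */
                dts = last_IP_pts;
                last_IP_pts = pts;
            } else {                             /* B frame: no extra delay   */
                dts = pts;
            }
            printf("pts=%6lld -> dts=%6lld\n", (long long)pts, (long long)dts);
        }
        return 0;
    }

The very first dts comes out negative (minus one frame duration), which is exactly the case the new truncate_ts() clamps to zero.
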
@@ -1953,7 +1931,106 @@ int av_write_frame(AVFormatContext *s, AVPacket *pkt)
  default:
  break;
  }
- return ret;
  }
+ static void truncate_ts(AVStream *st, AVPacket *pkt){
+ int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
+ if(pkt->dts < 0)
+ pkt->dts= 0; //this happens for low_delay=0 and b frames, FIXME, needs further invstigation about what we should do here
+ pkt->pts &= pts_mask;
+ pkt->dts &= pts_mask;
+ }
+ /**
+ * Write a packet to an output media file. The packet shall contain
+ * one audio or video frame.
+ *
+ * @param s media file handle
+ * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
+ * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
+ */
+ int av_write_frame(AVFormatContext *s, AVPacket *pkt)
+ {
+ compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
+ truncate_ts(s->streams[pkt->stream_index], pkt);
+ return s->oformat->write_packet(s, pkt);
+ }
+ /**
+ * Writes a packet to an output media file ensuring correct interleaving.
+ * The packet shall contain one audio or video frame.
+ * If the packets are already correctly interleaved the application should
+ * call av_write_frame() instead as its slightly faster, its also important
+ * to keep in mind that non interlaved input will need huge amounts
+ * of memory to interleave with this, so its prefereable to interleave at the
+ * demuxer level
+ *
+ * @param s media file handle
+ * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
+ * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
+ */
+ int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
+ AVPacketList *pktl, **next_point, *this_pktl;
+ int stream_count=0;
+ int streams[MAX_STREAMS];
+ AVStream *st= s->streams[ pkt->stream_index];
+ compute_pkt_fields2(st, pkt);
+ if(pkt->dts == AV_NOPTS_VALUE)
+ return -1;
+ assert(pkt->destruct != av_destruct_packet); //FIXME
+ this_pktl = av_mallocz(sizeof(AVPacketList));
+ this_pktl->pkt= *pkt;
+ av_dup_packet(&this_pktl->pkt);
+ next_point = &s->packet_buffer;
+ while(*next_point){
+ AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
+ int64_t left= st2->time_base.num * st ->time_base.den;
+ int64_t right= st ->time_base.num * st2->time_base.den;
+ if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
+ break;
+ next_point= &(*next_point)->next;
+ }
+ this_pktl->next= *next_point;
+ *next_point= this_pktl;
+ memset(streams, 0, sizeof(streams));
+ pktl= s->packet_buffer;
+ while(pktl){
+ //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
+ if(streams[ pktl->pkt.stream_index ] == 0)
+ stream_count++;
+ streams[ pktl->pkt.stream_index ]++;
+ pktl= pktl->next;
+ }
+ while(s->nb_streams == stream_count){
+ int ret;
+ pktl= s->packet_buffer;
+ //av_log(s, AV_LOG_DEBUG, "write st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
+ truncate_ts(s->streams[pktl->pkt.stream_index], &pktl->pkt);
+ ret= s->oformat->write_packet(s, &pktl->pkt);
+ s->packet_buffer= pktl->next;
+ if((--streams[ pktl->pkt.stream_index ]) == 0)
+ stream_count--;
+ av_free_packet(&pktl->pkt);
+ av_freep(&pktl);
+ if(ret<0)
+ return ret;
+ }
+ return 0;
+ }
  /**
@@ -1965,6 +2042,24 @@ int av_write_frame(AVFormatContext *s, AVPacket *pkt)
  int av_write_trailer(AVFormatContext *s)
  {
  int ret;
+ while(s->packet_buffer){
+ int ret;
+ AVPacketList *pktl= s->packet_buffer;
+ //av_log(s, AV_LOG_DEBUG, "write_trailer st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
+ truncate_ts(s->streams[pktl->pkt.stream_index], &pktl->pkt);
+ ret= s->oformat->write_packet(s, &pktl->pkt);
+ s->packet_buffer= pktl->next;
+ av_free_packet(&pktl->pkt);
+ av_freep(&pktl);
+ if(ret<0)
+ return ret;
+ }
  ret = s->oformat->write_trailer(s);
  av_freep(&s->priv_data);
  return ret;
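
av_interleaved_write_frame() keeps one dts-ordered buffer of pending packets, compares dts values from different streams by cross-multiplying their time bases, and flushes from the head only while every stream has at least one packet queued; av_write_trailer() then drains whatever remains. The self-contained model below mirrors that policy with plain arrays instead of AVPacketList — the two demo streams and their time bases are invented:

    #include <stdio.h>
    #include <stdint.h>

    /* toy packet: owning stream, its dts, and that stream's time base */
    typedef struct { int stream; int64_t dts; int tb_num, tb_den; } Pkt;

    #define NB_STREAMS 2
    #define MAX_BUF    32

    static Pkt buffer[MAX_BUF];
    static int nbuf = 0;

    /* a comes strictly before b: dts_a/tb_a < dts_b/tb_b by cross-multiplication
     * (which can overflow for huge values, as the FIXME in the real code notes) */
    static int dts_before(const Pkt *a, const Pkt *b)
    {
        return a->dts * a->tb_num * b->tb_den < b->dts * b->tb_num * a->tb_den;
    }

    static void interleaved_write(Pkt pkt)
    {
        int counts[NB_STREAMS] = { 0 };
        int pos, i, stream_count = 0;

        /* insert in dts order, after everything that is not strictly later */
        for (pos = 0; pos < nbuf && !dts_before(&pkt, &buffer[pos]); pos++)
            ;
        for (i = nbuf; i > pos; i--)
            buffer[i] = buffer[i - 1];
        buffer[pos] = pkt;
        nbuf++;

        /* how many distinct streams currently have something buffered? */
        for (i = 0; i < nbuf; i++)
            if (counts[buffer[i].stream]++ == 0)
                stream_count++;

        /* flush from the head while every stream still has a packet queued */
        while (stream_count == NB_STREAMS) {
            Pkt head = buffer[0];
            printf("write stream %d dts %lld\n", head.stream, (long long)head.dts);
            nbuf--;
            for (i = 0; i < nbuf; i++)
                buffer[i] = buffer[i + 1];
            if (--counts[head.stream] == 0)
                stream_count--;
        }
    }

    int main(void)
    {
        /* stream 0: video in 1/25 s units; stream 1: audio in 1/44100 s units */
        Pkt in[] = {
            { 0, 0,    1, 25    }, { 0, 1, 1, 25 }, { 1, 0,    1, 44100 },
            { 1, 1764, 1, 44100 }, { 0, 2, 1, 25 }, { 1, 3528, 1, 44100 },
        };
        for (int i = 0; i < 6; i++)
            interleaved_write(in[i]);
        printf("%d packet(s) still buffered\n", nbuf);
        return 0;
    }

Assuming each stream delivers its packets in dts order, the flush condition guarantees that nothing still to come can precede the head packet, which is also why poorly interleaved input makes the buffer grow, as the new doc comment warns.
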
@@ -7,9 +7,9 @@ a09d8460b207c4a67a26842c70fbb060 *./data/b-libav.asf
  ./data/b-libav.asf CRC=4b9f25a1
  be8eb1b5705c8105e4727258e448cb24 *./data/b-libav.rm
  356950 ./data/b-libav.rm
- e826aa1637ff15144ab484c1efca7fe7 *./data/b-libav.mpg
- 382976 ./data/b-libav.mpg
- ./data/b-libav.mpg CRC=eda0e29e
+ 4edcd572ffc30b7a7b95b6a38f157b20 *./data/b-libav.mpg
+ 385024 ./data/b-libav.mpg
+ ./data/b-libav.mpg CRC=8b9ae29e
  01a4130e776b8955fa99e477113e94fd *./data/b-libav.swf
  41743 ./data/b-libav.swf
  ./data/b-libav.swf CRC=eaaf4640