Replace all occurrences of PKT_FLAG_KEY with AV_PKT_FLAG_KEY.

Patch by Jean-Daniel Dupas, devlists shadowlab org

Originally committed as revision 22744 to svn://svn.ffmpeg.org/ffmpeg/trunk
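For context (not part of the patch): the change is a pure rename of the keyframe flag, so code that sets or tests it on an AVPacket only needs to switch to the AV_-prefixed name. A minimal sketch, assuming an already-initialized packet; the helper name is illustrative:

#include <libavcodec/avcodec.h>

/* Hypothetical helper: mark or clear the keyframe bit on a packet. */
static void mark_key(AVPacket *pkt, int keyframe)
{
    if (keyframe)
        pkt->flags |= AV_PKT_FLAG_KEY;   /* previously spelled PKT_FLAG_KEY */
    else
        pkt->flags &= ~AV_PKT_FLAG_KEY;
}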
@@ -563,7 +563,7 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx
         int a= av_bitstream_filter_filter(bsfc, avctx, NULL,
                                           &new_pkt.data, &new_pkt.size,
                                           pkt->data, pkt->size,
-                                          pkt->flags & PKT_FLAG_KEY);
+                                          pkt->flags & AV_PKT_FLAG_KEY);
         if(a>0){
             av_free_packet(pkt);
             new_pkt.destruct= av_destruct_packet;
@@ -768,7 +768,7 @@ need_realloc:
             pkt.size= ret;
             if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
                 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
-            pkt.flags |= PKT_FLAG_KEY;
+            pkt.flags |= AV_PKT_FLAG_KEY;
             write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
             ost->sync_opts += enc->frame_size;
@@ -803,7 +803,7 @@ need_realloc:
         pkt.size= ret;
         if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
             pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
-        pkt.flags |= PKT_FLAG_KEY;
+        pkt.flags |= AV_PKT_FLAG_KEY;
         write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
     }
 }
@@ -1077,7 +1077,7 @@ static void do_video_out(AVFormatContext *s,
             pkt.data= (uint8_t *)final_picture;
             pkt.size= sizeof(AVPicture);
             pkt.pts= av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
-            pkt.flags |= PKT_FLAG_KEY;
+            pkt.flags |= AV_PKT_FLAG_KEY;
             write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
             enc->coded_frame = old_frame;
@@ -1125,7 +1125,7 @@ static void do_video_out(AVFormatContext *s,
             pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/
             if(enc->coded_frame->key_frame)
-                pkt.flags |= PKT_FLAG_KEY;
+                pkt.flags |= AV_PKT_FLAG_KEY;
             write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
             *frame_size = ret;
             video_size += ret;
@@ -1507,7 +1507,7 @@ static int output_packet(AVInputStream *ist, int ist_index,
                 av_init_packet(&opkt);
-                if ((!ost->frame_number && !(pkt->flags & PKT_FLAG_KEY)) && !copy_initial_nonkeyframes)
+                if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) && !copy_initial_nonkeyframes)
                     continue;
                 /* no reencoding needed : output the packet directly */
@@ -1515,7 +1515,7 @@ static int output_packet(AVInputStream *ist, int ist_index,
                 avcodec_get_frame_defaults(&avframe);
                 ost->st->codec->coded_frame= &avframe;
-                avframe.key_frame = pkt->flags & PKT_FLAG_KEY;
+                avframe.key_frame = pkt->flags & AV_PKT_FLAG_KEY;
                 if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
                     audio_size += data_size;
@@ -1544,7 +1544,7 @@ static int output_packet(AVInputStream *ist, int ist_index,
                    && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
                    && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
                    ) {
-                    if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & PKT_FLAG_KEY))
+                    if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & AV_PKT_FLAG_KEY))
                         opkt.destruct= av_destruct_packet;
                 } else {
                     opkt.data = data_buf;
@@ -1627,7 +1627,7 @@ static int output_packet(AVInputStream *ist, int ist_index,
                         av_exit(1);
                     }
                     audio_size += ret;
-                    pkt.flags |= PKT_FLAG_KEY;
+                    pkt.flags |= AV_PKT_FLAG_KEY;
                     break;
                 case AVMEDIA_TYPE_VIDEO:
                     ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
@@ -1637,7 +1637,7 @@ static int output_packet(AVInputStream *ist, int ist_index,
                     }
                     video_size += ret;
                     if(enc->coded_frame && enc->coded_frame->key_frame)
-                        pkt.flags |= PKT_FLAG_KEY;
+                        pkt.flags |= AV_PKT_FLAG_KEY;
                     if (ost->logfile && enc->stats_out) {
                         fprintf(ost->logfile, "%s", enc->stats_out);
                     }
@@ -2199,7 +2199,7 @@ static int http_prepare_data(HTTPContext *c)
                 c->switch_pending = 0;
                 for(i=0;i<c->stream->nb_streams;i++) {
                     if (c->switch_feed_streams[i] == pkt.stream_index)
-                        if (pkt.flags & PKT_FLAG_KEY)
+                        if (pkt.flags & AV_PKT_FLAG_KEY)
                             do_switch_stream(c, i);
                     if (c->switch_feed_streams[i] >= 0)
                         c->switch_pending = 1;
@@ -2209,7 +2209,7 @@ static int http_prepare_data(HTTPContext *c)
                     if (c->feed_streams[i] == pkt.stream_index) {
                         AVStream *st = c->fmt_in->streams[source_index];
                         pkt.stream_index = i;
-                        if (pkt.flags & PKT_FLAG_KEY &&
+                        if (pkt.flags & AV_PKT_FLAG_KEY &&
                             (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
                              c->stream->nb_streams == 1))
                             c->got_key_frame = 1;
@@ -3445,7 +3445,7 @@ attribute_deprecated int avcodec_decode_video(AVCodecContext *avctx, AVFrame *pi
  * @param[in] avpkt The input AVpacket containing the input buffer.
  *            You can create such packet with av_init_packet() and by then setting
  *            data and size, some decoders might in addition need other fields like
- *            flags&PKT_FLAG_KEY. All decoders are designed to use the least
+ *            flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least
  *            fields possible.
  * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero.
  * @return On error a negative value is returned, otherwise the number of bytes
@@ -599,7 +599,7 @@ static int decode_frame(AVCodecContext *avctx,
 exit_loop:
     /* handle p-frames only if a predecessor frame is available */
     if(s->last_picture->data[0] != NULL) {
-        if(!(avpkt->flags & PKT_FLAG_KEY)) {
+        if(!(avpkt->flags & AV_PKT_FLAG_KEY)) {
             int i, j;
             uint8_t *pd = s->current_picture->data[0];
             uint8_t *pd_last = s->last_picture->data[0];
@@ -129,7 +129,7 @@ static inline int dc1394_read_common(AVFormatContext *c, AVFormatParameters *ap,
     av_init_packet(&dc1394->packet);
     dc1394->packet.size = avpicture_get_size(fmt->pix_fmt, fmt->width, fmt->height);
     dc1394->packet.stream_index = vst->index;
-    dc1394->packet.flags |= PKT_FLAG_KEY;
+    dc1394->packet.flags |= AV_PKT_FLAG_KEY;
     dc1394->current_frame = 0;
     dc1394->fps = fps->frame_rate;
@@ -219,7 +219,7 @@ repeat:
     if (pkt->size < 0)
         return pkt->size;
     if (p->base_record + anm->record == 0)
-        pkt->flags |= PKT_FLAG_KEY;
+        pkt->flags |= AV_PKT_FLAG_KEY;
     anm->record++;
     return 0;
@@ -888,12 +888,12 @@ static int ff_asf_parse_packet(AVFormatContext *s, ByteIOContext *pb, AVPacket *
             asf_st->pkt.pos =
             asf_st->packet_pos= asf->packet_pos;
 //printf("new packet: stream:%d key:%d packet_key:%d audio:%d size:%d\n",
-//asf->stream_index, asf->packet_key_frame, asf_st->pkt.flags & PKT_FLAG_KEY,
+//asf->stream_index, asf->packet_key_frame, asf_st->pkt.flags & AV_PKT_FLAG_KEY,
 //s->streams[asf->stream_index]->codec->codec_type == AVMEDIA_TYPE_AUDIO, asf->packet_obj_size);
             if (s->streams[asf->stream_index]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
                 asf->packet_key_frame = 1;
             if (asf->packet_key_frame)
-                asf_st->pkt.flags |= PKT_FLAG_KEY;
+                asf_st->pkt.flags |= AV_PKT_FLAG_KEY;
         }
         /* read data */
@@ -1068,7 +1068,7 @@ static int64_t asf_read_pts(AVFormatContext *s, int stream_index, int64_t *ppos,
         pts= pkt->pts;
         av_free_packet(pkt);
-        if(pkt->flags&PKT_FLAG_KEY){
+        if(pkt->flags&AV_PKT_FLAG_KEY){
             i= pkt->stream_index;
             asf_st= s->streams[i]->priv_data;
@@ -668,7 +668,7 @@ static void put_payload_header(
     int val;
     val = stream->num;
-    if (flags & PKT_FLAG_KEY)
+    if (flags & AV_PKT_FLAG_KEY)
         val |= ASF_PL_FLAG_KEY_FRAME;
     put_byte(pb, val);
@@ -771,7 +771,7 @@ static int asf_write_packet(AVFormatContext *s, AVPacket *pkt)
     stream = &asf->streams[pkt->stream_index];
     if(codec->codec_type == AVMEDIA_TYPE_AUDIO)
-        flags &= ~PKT_FLAG_KEY;
+        flags &= ~AV_PKT_FLAG_KEY;
     pts = (pkt->pts != AV_NOPTS_VALUE) ? pkt->pts : pkt->dts;
     assert(pts != AV_NOPTS_VALUE);
@@ -782,7 +782,7 @@ static int asf_write_packet(AVFormatContext *s, AVPacket *pkt)
     put_frame(s, stream, s->streams[pkt->stream_index], pkt->dts, pkt->data, pkt->size, flags);
     /* check index */
-    if ((!asf->is_streamed) && (flags & PKT_FLAG_KEY)) {
+    if ((!asf->is_streamed) && (flags & AV_PKT_FLAG_KEY)) {
         start_sec = (int)(duration / INT64_C(10000000));
         if (start_sec != (int)(asf->last_indexed_pts / INT64_C(10000000))) {
             for(i=asf->nb_index_count;i<start_sec;i++) {
@@ -165,7 +165,7 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt)
     end= strchr(p, '\n');
     av_new_packet(pkt, end ? end-p+1 : strlen(p));
-    pkt->flags |= PKT_FLAG_KEY;
+    pkt->flags |= AV_PKT_FLAG_KEY;
     pkt->pos= p - ass->event_buffer + s->streams[0]->codec->extradata_size;
     pkt->pts= pkt->dts= get_pts(p);
     memcpy(pkt->data, p, pkt->size);
@@ -777,7 +777,7 @@ resync:
             size = dv_produce_packet(avi->dv_demux, pkt,
                                      pkt->data, pkt->size);
             pkt->destruct = dstr;
-            pkt->flags |= PKT_FLAG_KEY;
+            pkt->flags |= AV_PKT_FLAG_KEY;
         } else {
             /* XXX: How to handle B-frames in AVI? */
             pkt->dts = ast->frame_offset;
@@ -797,10 +797,10 @@ resync:
                 if(index >= 0 && e->timestamp == ast->frame_offset){
                     if (e->flags & AVINDEX_KEYFRAME)
-                        pkt->flags |= PKT_FLAG_KEY;
+                        pkt->flags |= AV_PKT_FLAG_KEY;
                 }
             } else {
-                pkt->flags |= PKT_FLAG_KEY;
+                pkt->flags |= AV_PKT_FLAG_KEY;
             }
             if(ast->sample_size)
                 ast->frame_offset += pkt->size;
@@ -885,7 +885,7 @@ resync:
         if( (st->discard >= AVDISCARD_DEFAULT && size==0)
-            /*|| (st->discard >= AVDISCARD_NONKEY && !(pkt->flags & PKT_FLAG_KEY))*/ //FIXME needs a little reordering
+            /*|| (st->discard >= AVDISCARD_NONKEY && !(pkt->flags & AV_PKT_FLAG_KEY))*/ //FIXME needs a little reordering
             || st->discard >= AVDISCARD_ALL){
             if(ast->sample_size) ast->frame_offset += pkt->size;
             else ast->frame_offset++;
@@ -540,7 +540,7 @@ static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
     }
     avi_stream2fourcc(&tag[0], stream_index, enc->codec_type);
-    if(pkt->flags&PKT_FLAG_KEY)
+    if(pkt->flags&AV_PKT_FLAG_KEY)
         flags = 0x10;
     if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
         avist->audio_strm_length += size;
@@ -113,7 +113,7 @@ avs_read_video_packet(AVFormatContext * s, AVPacket * pkt,
     pkt->size = ret + palette_size;
     pkt->stream_index = avs->st_video->index;
     if (sub_type == 0)
-        pkt->flags |= PKT_FLAG_KEY;
+        pkt->flags |= AV_PKT_FLAG_KEY;
     return 0;
 }
@@ -134,7 +134,7 @@ static int avs_read_audio_packet(AVFormatContext * s, AVPacket * pkt)
         return ret;
     pkt->stream_index = avs->st_audio->index;
-    pkt->flags |= PKT_FLAG_KEY;
+    pkt->flags |= AV_PKT_FLAG_KEY;
     return size;
 }
@@ -233,7 +233,7 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt)
             return ret;
         pkt->stream_index = 0;
         pkt->pts = bink->video_pts++;
-        pkt->flags |= PKT_FLAG_KEY;
+        pkt->flags |= AV_PKT_FLAG_KEY;
         /* -1 instructs the next call to read_packet() to read the next frame */
         bink->current_track = -1;
@@ -126,7 +126,7 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt)
             ret = voc_get_packet(s, pkt, c93->audio, datasize - 26);
             if (ret > 0) {
                 pkt->stream_index = 1;
-                pkt->flags |= PKT_FLAG_KEY;
+                pkt->flags |= AV_PKT_FLAG_KEY;
                 return ret;
             }
         }
@@ -182,7 +182,7 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt)
     /* only the first frame is guaranteed to not reference previous frames */
     if (c93->current_block == 0 && c93->current_frame == 0) {
-        pkt->flags |= PKT_FLAG_KEY;
+        pkt->flags |= AV_PKT_FLAG_KEY;
         pkt->data[0] |= C93_FIRST_FRAME;
     }
     return 0;
@@ -221,7 +221,7 @@ static int dv_extract_audio_info(DVDemuxContext* c, uint8_t* frame)
             c->audio_pkt[i].size = 0;
             c->audio_pkt[i].data = c->audio_buf[i];
             c->audio_pkt[i].stream_index = c->ast[i]->index;
-            c->audio_pkt[i].flags |= PKT_FLAG_KEY;
+            c->audio_pkt[i].flags |= AV_PKT_FLAG_KEY;
         }
         c->ast[i]->codec->sample_rate = dv_audio_frequency[freq];
         c->ast[i]->codec->channels = 2;
@@ -355,7 +355,7 @@ int dv_produce_packet(DVDemuxContext *c, AVPacket *pkt,
         av_init_packet(pkt);
         pkt->data = buf;
         pkt->size = size;
-        pkt->flags |= PKT_FLAG_KEY;
+        pkt->flags |= AV_PKT_FLAG_KEY;
         pkt->stream_index = c->vst->id;
         pkt->pts = c->frames;
@@ -520,7 +520,7 @@ static int ea_read_packet(AVFormatContext *s,
         case pQGT_TAG:
         case TGQs_TAG:
         case MADk_TAG:
-            key = PKT_FLAG_KEY;
+            key = AV_PKT_FLAG_KEY;
         case MVIf_TAG:
         case fVGT_TAG:
         case MADm_TAG:
@@ -537,7 +537,7 @@ static int ea_read_packet(AVFormatContext *s,
         case MV0K_TAG:
         case MPCh_TAG:
         case pIQT_TAG:
-            key = PKT_FLAG_KEY;
+            key = AV_PKT_FLAG_KEY;
         case MV0F_TAG:
 get_video_packet:
             ret = av_get_packet(pb, pkt, chunk_size);
@@ -432,7 +432,7 @@ static int ffm_read_packet(AVFormatContext *s, AVPacket *pkt)
         }
         pkt->pos = url_ftell(s->pb);
         if (ffm->header[1] & FLAG_KEY_FRAME)
-            pkt->flags |= PKT_FLAG_KEY;
+            pkt->flags |= AV_PKT_FLAG_KEY;
         ffm->read_state = READ_HEADER;
         if (ffm_read_data(s, pkt->data, size, 0) != size) {
@@ -210,7 +210,7 @@ static int ffm_write_packet(AVFormatContext *s, AVPacket *pkt)
     /* packet size & key_frame */
     header[0] = pkt->stream_index;
     header[1] = 0;
-    if (pkt->flags & PKT_FLAG_KEY)
+    if (pkt->flags & AV_PKT_FLAG_KEY)
         header[1] |= FLAG_KEY_FRAME;
     AV_WB24(header+2, pkt->size);
     AV_WB24(header+5, pkt->duration);
@@ -87,7 +87,7 @@ static int read_packet(AVFormatContext *s,
     url_fskip(s->pb, st->codec->width * film->leading * 4);
     if (pkt->size < 0)
         return pkt->size;
-    pkt->flags |= PKT_FLAG_KEY;
+    pkt->flags |= AV_PKT_FLAG_KEY;
     return 0;
 }
@@ -437,7 +437,7 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
     pkt->stream_index = st->index;
     if (is_audio || ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY))
-        pkt->flags |= PKT_FLAG_KEY;
+        pkt->flags |= AV_PKT_FLAG_KEY;
     return ret;
 }
@@ -351,7 +351,7 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
             return -1;
         }
-        flags |= pkt->flags & PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER;
+        flags |= pkt->flags & AV_PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER;
     } else {
         assert(enc->codec_type == AVMEDIA_TYPE_AUDIO);
         flags = get_audio_flags(enc);
@@ -263,7 +263,7 @@ static int iff_read_packet(AVFormatContext *s,
     }
     if(iff->sent_bytes == 0)
-        pkt->flags |= PKT_FLAG_KEY;
+        pkt->flags |= AV_PKT_FLAG_KEY;
     if(s->streams[0]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
         iff->sent_bytes += PACKET_SIZE;
@@ -292,7 +292,7 @@ static int img_read_packet(AVFormatContext *s1, AVPacket *pkt)
     av_new_packet(pkt, size[0] + size[1] + size[2]);
     pkt->stream_index = 0;
-    pkt->flags |= PKT_FLAG_KEY;
+    pkt->flags |= AV_PKT_FLAG_KEY;
     pkt->size= 0;
     for(i=0; i<3; i++){
@@ -128,7 +128,7 @@ static int nut_write_packet(AVFormatContext * avf, AVPacket * pkt) {
     p.len = pkt->size;
     p.stream = pkt->stream_index;
     p.pts = pkt->pts;
-    p.flags = pkt->flags & PKT_FLAG_KEY ? NUT_FLAG_KEY : 0;
+    p.flags = pkt->flags & AV_PKT_FLAG_KEY ? NUT_FLAG_KEY : 0;
     p.next_pts = 0;
     nut_write_frame_reorder(priv->nut, &p, pkt->data);
@@ -269,7 +269,7 @@ static int nut_read_packet(AVFormatContext * avf, AVPacket * pkt) {
         return -1;
     }
-    if (pd.flags & NUT_FLAG_KEY) pkt->flags |= PKT_FLAG_KEY;
+    if (pd.flags & NUT_FLAG_KEY) pkt->flags |= AV_PKT_FLAG_KEY;
     pkt->pts = pd.pts;
     pkt->stream_index = pd.stream;
     pkt->pos = url_ftell(avf->pb);
@@ -104,7 +104,7 @@ static int lmlm4_read_packet(AVFormatContext *s, AVPacket *pkt) {
     switch (frame_type) {
         case LMLM4_I_FRAME:
-            pkt->flags = PKT_FLAG_KEY;
+            pkt->flags = AV_PKT_FLAG_KEY;
         case LMLM4_P_FRAME:
         case LMLM4_B_FRAME:
             pkt->stream_index = 0;
@@ -1547,7 +1547,7 @@ static int matroska_parse_block(MatroskaDemuxContext *matroska, uint8_t *data,
     flags = *data++;
     size -= 3;
     if (is_keyframe == -1)
-        is_keyframe = flags & 0x80 ? PKT_FLAG_KEY : 0;
+        is_keyframe = flags & 0x80 ? AV_PKT_FLAG_KEY : 0;
     if (cluster_time != (uint64_t)-1
         && (block_time >= 0 || cluster_time >= -block_time)) {
@@ -866,7 +866,7 @@ static int mkv_write_packet(AVFormatContext *s, AVPacket *pkt)
     MatroskaMuxContext *mkv = s->priv_data;
     ByteIOContext *pb = s->pb;
     AVCodecContext *codec = s->streams[pkt->stream_index]->codec;
-    int keyframe = !!(pkt->flags & PKT_FLAG_KEY);
+    int keyframe = !!(pkt->flags & AV_PKT_FLAG_KEY);
     int duration = pkt->duration;
     int ret;
     int64_t ts = mkv->tracks[pkt->stream_index].write_dts ? pkt->dts : pkt->pts;
@@ -2344,7 +2344,7 @@ static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
     }
     if (st->discard == AVDISCARD_ALL)
         goto retry;
-    pkt->flags |= sample->flags & AVINDEX_KEYFRAME ? PKT_FLAG_KEY : 0;
+    pkt->flags |= sample->flags & AVINDEX_KEYFRAME ? AV_PKT_FLAG_KEY : 0;
     pkt->pos = sample->pos;
     dprintf(s, "stream %d, pts %"PRId64", dts %"PRId64", pos 0x%"PRIx64", duration %d\n",
             pkt->stream_index, pkt->pts, pkt->dts, pkt->pos, pkt->duration);
@@ -1987,7 +1987,7 @@ static int mov_write_packet(AVFormatContext *s, AVPacket *pkt)
         trk->flags |= MOV_TRACK_CTTS;
     trk->cluster[trk->entry].cts = pkt->pts - pkt->dts;
     trk->cluster[trk->entry].flags = 0;
-    if (pkt->flags & PKT_FLAG_KEY) {
+    if (pkt->flags & AV_PKT_FLAG_KEY) {
         if (mov->mode == MODE_MOV && enc->codec_id == CODEC_ID_MPEG2VIDEO) {
             mov_parse_mpeg2_frame(pkt, &trk->cluster[trk->entry].flags);
             if (trk->cluster[trk->entry].flags & MOV_PARTIAL_SYNC_SAMPLE)
@@ -1156,7 +1156,7 @@ static int mpeg_mux_write_packet(AVFormatContext *ctx, AVPacket *pkt)
     int64_t pts, dts;
     PacketDesc *pkt_desc;
     const int preload= av_rescale(ctx->preload, 90000, AV_TIME_BASE);
-    const int is_iframe = st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (pkt->flags & PKT_FLAG_KEY);
+    const int is_iframe = st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (pkt->flags & AV_PKT_FLAG_KEY);
     pts= pkt->pts;
     dts= pkt->dts;
@@ -125,7 +125,7 @@ static int msnwc_tcp_read_packet(AVFormatContext *ctx, AVPacket *pkt)
     /* Some aMsn generated videos (or was it Mercury Messenger?) don't set
      * this bit and rely on the codec to get keyframe information */
     if(keyframe&1)
-        pkt->flags |= PKT_FLAG_KEY;
+        pkt->flags |= AV_PKT_FLAG_KEY;
     return HEADER_SIZE + size;
 }
@@ -620,7 +620,7 @@ null_chunk_retry:
         av_get_packet(pb, pkt, vsize);
         pkt->stream_index = st[NSV_ST_VIDEO]->index;//NSV_ST_VIDEO;
         pkt->dts = nst->frame_offset;
-        pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */
+        pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */
 /*
         for (i = 0; i < MIN(8, vsize); i++)
             PRINT(("NSV video: [%d] = %02x\n", i, pkt->data[i]));
@@ -660,7 +660,7 @@ null_chunk_retry:
         }
         av_get_packet(pb, pkt, asize);
         pkt->stream_index = st[NSV_ST_AUDIO]->index;//NSV_ST_AUDIO;
-        pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */
+        pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */
         if( nsv->state == NSV_HAS_READ_NSVS && st[NSV_ST_VIDEO] ) {
             /* on a nsvs frame we have new information on a/v sync */
             pkt->dts = (((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset-1);
@@ -754,7 +754,7 @@ static int decode_frame(NUTContext *nut, AVPacket *pkt, int frame_code){
     pkt->stream_index = stream_id;
     if (stc->last_flags & FLAG_KEY)
-        pkt->flags |= PKT_FLAG_KEY;
+        pkt->flags |= AV_PKT_FLAG_KEY;
     pkt->pts = pts;
     return 0;
@@ -603,7 +603,7 @@ static int write_header(AVFormatContext *s){
 static int get_needed_flags(NUTContext *nut, StreamContext *nus, FrameCode *fc, AVPacket *pkt){
     int flags= 0;
-    if(pkt->flags & PKT_FLAG_KEY ) flags |= FLAG_KEY;
+    if(pkt->flags & AV_PKT_FLAG_KEY ) flags |= FLAG_KEY;
     if(pkt->stream_index != fc->stream_id ) flags |= FLAG_STREAM_ID;
     if(pkt->size / fc->size_mul ) flags |= FLAG_SIZE_MSB;
     if(pkt->pts - nus->last_pts != fc->pts_delta) flags |= FLAG_CODED_PTS;
@@ -644,7 +644,7 @@ static int write_packet(AVFormatContext *s, AVPacket *pkt){
     FrameCode *fc;
     int64_t coded_pts;
     int best_length, frame_code, flags, needed_flags, i, header_idx, best_header_idx;
-    int key_frame = !!(pkt->flags & PKT_FLAG_KEY);
+    int key_frame = !!(pkt->flags & AV_PKT_FLAG_KEY);
     int store_sp=0;
     int ret;
@@ -220,7 +220,7 @@ static int nuv_packet(AVFormatContext *s, AVPacket *pkt) {
                 return ret;
             // HACK: we have no idea if it is a keyframe,
             // but if we mark none seeking will not work at all.
-            pkt->flags |= PKT_FLAG_KEY;
+            pkt->flags |= AV_PKT_FLAG_KEY;
             pkt->pos = pos;
             pkt->pts = AV_RL32(&hdr[4]);
             pkt->stream_index = ctx->v_id;
@@ -240,7 +240,7 @@ static int nuv_packet(AVFormatContext *s, AVPacket *pkt) {
                 break;
             }
             ret = av_get_packet(pb, pkt, size);
-            pkt->flags |= PKT_FLAG_KEY;
+            pkt->flags |= AV_PKT_FLAG_KEY;
             pkt->pos = pos;
             pkt->pts = AV_RL32(&hdr[4]);
             pkt->stream_index = ctx->a_id;
@@ -544,7 +544,7 @@ retry:
     // pflags might not be set until after this
     pts = ogg_calc_pts(s, idx, &dts);
-    if (os->keyframe_seek && !(os->pflags & PKT_FLAG_KEY))
+    if (os->keyframe_seek && !(os->pflags & AV_PKT_FLAG_KEY))
         goto retry;
     os->keyframe_seek = 0;
@@ -594,7 +594,7 @@ ogg_read_timestamp (AVFormatContext * s, int stream_index, int64_t * pos_arg,
     while (url_ftell(bc) < pos_limit && !ogg_packet(s, &i, NULL, NULL, pos_arg)) {
         if (i == stream_index) {
             pts = ogg_calc_pts(s, i, NULL);
-            if (os->keyframe_seek && !(os->pflags & PKT_FLAG_KEY))
+            if (os->keyframe_seek && !(os->pflags & AV_PKT_FLAG_KEY))
                 pts = AV_NOPTS_VALUE;
         }
         if (pts != AV_NOPTS_VALUE)
@@ -256,7 +256,7 @@ static int ogg_write_packet(AVFormatContext *s, AVPacket *pkt)
     if (st->codec->codec_id == CODEC_ID_THEORA) {
         int64_t pts = oggstream->vrev < 1 ? pkt->pts : pkt->pts + pkt->duration;
         int pframe_count;
-        if (pkt->flags & PKT_FLAG_KEY)
+        if (pkt->flags & AV_PKT_FLAG_KEY)
             oggstream->last_kf_pts = pts;
         pframe_count = pts - oggstream->last_kf_pts;
         // prevent frame count from overflow if key frame flag is not set
@@ -59,7 +59,7 @@ static uint64_t dirac_gptopts(AVFormatContext *s, int idx, uint64_t granule,
     int64_t pts = dts + ((gp >> 9) & 0x1fff);
     if (!dist)
-        os->pflags |= PKT_FLAG_KEY;
+        os->pflags |= AV_PKT_FLAG_KEY;
     if (dts_out)
         *dts_out = dts;
@@ -93,7 +93,7 @@ static uint64_t old_dirac_gptopts(AVFormatContext *s, int idx, uint64_t gp,
     uint64_t pframe = gp & 0x3fffffff;
     if (!pframe)
-        os->pflags |= PKT_FLAG_KEY;
+        os->pflags |= AV_PKT_FLAG_KEY;
     return iframe + pframe;
 }
@@ -143,7 +143,7 @@ ogm_packet(AVFormatContext *s, int idx)
     int lb;
     if(*p & 8)
-        os->pflags |= PKT_FLAG_KEY;
+        os->pflags |= AV_PKT_FLAG_KEY;
     lb = ((*p & 2) << 1) | ((*p >> 6) & 3);
     os->pstart += lb + 1;
@@ -136,7 +136,7 @@ theora_gptopts(AVFormatContext *ctx, int idx, uint64_t gp, int64_t *dts)
         iframe++;
     if(!pframe)
-        os->pflags |= PKT_FLAG_KEY;
+        os->pflags |= AV_PKT_FLAG_KEY;
     if (dts)
         *dts = iframe + pframe;
@@ -159,7 +159,7 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
     if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
         pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
-    pkt.flags |= PKT_FLAG_KEY;
+    pkt.flags |= AV_PKT_FLAG_KEY;
     pkt.stream_index= st->index;
     pkt.data= audio_outbuf;
@@ -368,7 +368,7 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
         AVPacket pkt;
         av_init_packet(&pkt);
-        pkt.flags |= PKT_FLAG_KEY;
+        pkt.flags |= AV_PKT_FLAG_KEY;
         pkt.stream_index= st->index;
         pkt.data= (uint8_t *)picture;
         pkt.size= sizeof(AVPicture);
@@ -385,7 +385,7 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
         if (c->coded_frame->pts != AV_NOPTS_VALUE)
             pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
         if(c->coded_frame->key_frame)
-            pkt.flags |= PKT_FLAG_KEY;
+            pkt.flags |= AV_PKT_FLAG_KEY;
         pkt.stream_index= st->index;
         pkt.data= video_outbuf;
         pkt.size= out_size;
@@ -804,7 +804,7 @@ ff_rm_parse_packet (AVFormatContext *s, ByteIOContext *pb,
     pkt->pts= timestamp;
     if (flags & 2)
-        pkt->flags |= PKT_FLAG_KEY;
+        pkt->flags |= AV_PKT_FLAG_KEY;
     return st->codec->codec_type == AVMEDIA_TYPE_AUDIO ? rm->audio_pkt_cnt : 0;
 }
@@ -828,7 +828,7 @@ ff_rm_retrieve_cache (AVFormatContext *s, ByteIOContext *pb,
     rm->audio_pkt_cnt--;
     if ((pkt->pts = ast->audiotimestamp) != AV_NOPTS_VALUE) {
         ast->audiotimestamp = AV_NOPTS_VALUE;
-        pkt->flags = PKT_FLAG_KEY;
+        pkt->flags = AV_PKT_FLAG_KEY;
     } else
         pkt->flags = 0;
     pkt->stream_index = st->index;
@@ -346,7 +346,7 @@ static int rm_write_audio(AVFormatContext *s, const uint8_t *buf, int size, int
     /* XXX: suppress this malloc */
     buf1= (uint8_t*) av_malloc( size * sizeof(uint8_t) );
-    write_packet_header(s, stream, size, !!(flags & PKT_FLAG_KEY));
+    write_packet_header(s, stream, size, !!(flags & AV_PKT_FLAG_KEY));
     /* for AC-3, the words seem to be reversed */
     for(i=0;i<size;i+=2) {
@@ -365,7 +365,7 @@ static int rm_write_video(AVFormatContext *s, const uint8_t *buf, int size, int
     RMMuxContext *rm = s->priv_data;
     ByteIOContext *pb = s->pb;
     StreamInfo *stream = rm->video_stream;
-    int key_frame = !!(flags & PKT_FLAG_KEY);
+    int key_frame = !!(flags & AV_PKT_FLAG_KEY);
     /* XXX: this is incorrect: should be a parameter */
@@ -344,7 +344,7 @@ static int rpl_read_packet(AVFormatContext *s, AVPacket *pkt)
     // None of the Escape formats have keyframes, and the ADPCM
     // format used doesn't have keyframes.
     if (rpl->chunk_number == 0 && rpl->frame_in_part == 0)
-        pkt->flags |= PKT_FLAG_KEY;
+        pkt->flags |= AV_PKT_FLAG_KEY;
     return ret;
 }
@@ -162,7 +162,7 @@ static void search_hi_lo_keyframes(AVFormatContext *s,
         // Evaluate key frames with known TS (or any frames, if AVSEEK_FLAG_ANY set).
         if (pts != AV_NOPTS_VALUE &&
-            ((flg & PKT_FLAG_KEY) || (flags & AVSEEK_FLAG_ANY))) {
+            ((flg & AV_PKT_FLAG_KEY) || (flags & AVSEEK_FLAG_ANY))) {
             if (flags & AVSEEK_FLAG_BYTE) {
                 // for byte seeking, use position as timestamp
                 ts = pos;
@@ -216,7 +216,7 @@ static int siff_read_packet(AVFormatContext *s, AVPacket *pkt)
             c->curstrm = 0;
         }
         if(!c->cur_frame || c->curstrm)
-            pkt->flags |= PKT_FLAG_KEY;
+            pkt->flags |= AV_PKT_FLAG_KEY;
         if (c->curstrm == -1)
             c->cur_frame++;
     }else{
@@ -156,7 +156,7 @@ static int tmv_read_packet(AVFormatContext *s, AVPacket *pkt)
     pkt->stream_index = tmv->stream_index;
     tmv->stream_index ^= 1;
-    pkt->flags |= PKT_FLAG_KEY;
+    pkt->flags |= AV_PKT_FLAG_KEY;
     return ret;
 }
@@ -986,14 +986,14 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
     /* update flags */
     if(is_intra_only(st->codec))
-        pkt->flags |= PKT_FLAG_KEY;
+        pkt->flags |= AV_PKT_FLAG_KEY;
     else if (pc) {
         pkt->flags = 0;
         /* keyframe computation */
         if (pc->key_frame == 1)
-            pkt->flags |= PKT_FLAG_KEY;
+            pkt->flags |= AV_PKT_FLAG_KEY;
         else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
-            pkt->flags |= PKT_FLAG_KEY;
+            pkt->flags |= AV_PKT_FLAG_KEY;
     }
     if (pc)
         pkt->convergence_duration = pc->convergence_duration;
@@ -1018,7 +1018,7 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
             compute_pkt_fields(s, st, NULL, pkt);
             s->cur_st = NULL;
             if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
-                (pkt->flags & PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
+                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                 ff_reduce_index(s, st->index);
                 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
             }
@@ -1045,7 +1045,7 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
             pkt->destruct = NULL;
             compute_pkt_fields(s, st, st->parser, pkt);
-            if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
+            if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
                 ff_reduce_index(s, st->index);
                 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
                                    0, 0, AVINDEX_KEYFRAME);
@@ -1621,7 +1621,7 @@ static int av_seek_frame_generic(AVFormatContext *s,
                 break;
             av_free_packet(&pkt);
             if(stream_index == pkt.stream_index){
-                if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
+                if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
                     break;
             }
         }
@@ -3381,7 +3381,7 @@ static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int
 #undef fprintf
 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
     PRINT("stream #%d:\n", pkt->stream_index);
-    PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
+    PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
     PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
     /* DTS is _always_ valid after av_read_frame() */
     PRINT(" dts=");
@@ -103,7 +103,7 @@ static int vc1t_read_packet(AVFormatContext *s,
         return AVERROR(EIO);
     if(s->streams[0]->time_base.den == 1000)
         pkt->pts = pts;
-    pkt->flags |= keyframe ? PKT_FLAG_KEY : 0;
+    pkt->flags |= keyframe ? AV_PKT_FLAG_KEY : 0;
     pkt->pos -= 8;
     return pkt->size;
@@ -58,7 +58,7 @@ static int vc1test_write_packet(AVFormatContext *s, AVPacket *pkt)
     if (!pkt->size)
         return 0;
-    put_le32(pb, pkt->size | ((pkt->flags & PKT_FLAG_KEY) ? 0x80000000 : 0));
+    put_le32(pb, pkt->size | ((pkt->flags & AV_PKT_FLAG_KEY) ? 0x80000000 : 0));
     put_le32(pb, pkt->pts);
     put_buffer(pb, pkt->data, pkt->size);
     put_flush_packet(pb);
@@ -127,7 +127,7 @@ static int yop_read_packet(AVFormatContext *s, AVPacket *pkt)
     yop->video_packet.data = NULL;
     yop->video_packet.size = 0;
     pkt->data[0] = yop->odd_frame;
-    pkt->flags |= PKT_FLAG_KEY;
+    pkt->flags |= AV_PKT_FLAG_KEY;
     yop->odd_frame ^= 1;
     return pkt->size;
 }
@@ -99,8 +99,8 @@ int main(int argc, char **argv)
     while ((err = av_read_frame(fctx, &pkt)) >= 0) {
         int fd;
-        snprintf(pktfilename, PATH_MAX-1, fntemplate, pktnum, pkt.stream_index, pkt.pts, pkt.size, (pkt.flags & PKT_FLAG_KEY)?'K':'_');
-        printf(PKTFILESUFF"\n", pktnum, pkt.stream_index, pkt.pts, pkt.size, (pkt.flags & PKT_FLAG_KEY)?'K':'_');
+        snprintf(pktfilename, PATH_MAX-1, fntemplate, pktnum, pkt.stream_index, pkt.pts, pkt.size, (pkt.flags & AV_PKT_FLAG_KEY)?'K':'_');
+        printf(PKTFILESUFF"\n", pktnum, pkt.stream_index, pkt.pts, pkt.size, (pkt.flags & AV_PKT_FLAG_KEY)?'K':'_');
         //printf("open(\"%s\")\n", pktfilename);
         if (!nowrite) {
             fd = open(pktfilename, O_WRONLY|O_CREAT, 0644);