The sample aspect ratio is a per-frame property, so it makes sense to define it in AVFrame rather than in the codec/stream context. Simplify application-level sample aspect ratio information extraction, and allow further simplifications.

Tag: n0.8
| @@ -912,6 +912,7 @@ int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame, | |||||
| frame->top_field_first = picref->video->top_field_first; | frame->top_field_first = picref->video->top_field_first; | ||||
| frame->key_frame = picref->video->key_frame; | frame->key_frame = picref->video->key_frame; | ||||
| frame->pict_type = picref->video->pict_type; | frame->pict_type = picref->video->pict_type; | ||||
| frame->sample_aspect_ratio = picref->video->pixel_aspect; | |||||
| return 1; | return 1; | ||||
| } | } | ||||
| @@ -13,6 +13,9 @@ libavutil: 2011-04-18 | |||||
| API changes, most recent first: | API changes, most recent first: | ||||
| 2011-05-01 - xxxxxxx - lavc 53.3.0 - AVFrame | |||||
| Add a sample_aspect_ratio field to AVFrame. | |||||
| 2011-05-01 - xxxxxxx - lavc 53.2.0 - AVFrame | 2011-05-01 - xxxxxxx - lavc 53.2.0 - AVFrame | ||||
| Add a pkt_pos field to AVFrame. | Add a pkt_pos field to AVFrame. | ||||
| @@ -1631,13 +1631,12 @@ static int output_packet(AVInputStream *ist, int ist_index, | |||||
| for(i=0;i<nb_ostreams;i++) { | for(i=0;i<nb_ostreams;i++) { | ||||
| ost = ost_table[i]; | ost = ost_table[i]; | ||||
| if (ost->input_video_filter && ost->source_index == ist_index) { | if (ost->input_video_filter && ost->source_index == ist_index) { | ||||
| AVRational sar; | |||||
| if (ist->st->sample_aspect_ratio.num) sar = ist->st->sample_aspect_ratio; | |||||
| else sar = ist->st->codec->sample_aspect_ratio; | |||||
| if (!picture.sample_aspect_ratio.num) | |||||
| picture.sample_aspect_ratio = ist->st->sample_aspect_ratio; | |||||
| // add it to be filtered | // add it to be filtered | ||||
| av_vsrc_buffer_add_frame2(ost->input_video_filter, &picture, | av_vsrc_buffer_add_frame2(ost->input_video_filter, &picture, | ||||
| ist->pts, | ist->pts, | ||||
| sar, ist->st->codec->width, ist->st->codec->height, | |||||
| ist->st->codec->width, ist->st->codec->height, | |||||
| ist->st->codec->pix_fmt, ""); //TODO user setable params | ist->st->codec->pix_fmt, ""); //TODO user setable params | ||||
| } | } | ||||
| } | } | ||||
| @@ -1688,7 +1688,7 @@ static int input_request_frame(AVFilterLink *link) | |||||
| picref->pts = pts; | picref->pts = pts; | ||||
| picref->pos = priv->frame->pkt_pos; | picref->pos = priv->frame->pkt_pos; | ||||
| picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio; | |||||
| picref->video->pixel_aspect = priv->frame->sample_aspect_ratio; | |||||
| avfilter_start_frame(link, picref); | avfilter_start_frame(link, picref); | ||||
| avfilter_draw_slice(link, 0, link->h, 1); | avfilter_draw_slice(link, 0, link->h, 1); | ||||
| avfilter_end_frame(link); | avfilter_end_frame(link); | ||||
| @@ -1011,6 +1011,13 @@ typedef struct AVPanScan{ | |||||
| * - decoding: Read by user.\ | * - decoding: Read by user.\ | ||||
| */\ | */\ | ||||
| int64_t pkt_pos;\ | int64_t pkt_pos;\ | ||||
| \ | |||||
| /**\ | |||||
| * reordered sample aspect ratio for the video frame, 0/1 if unknown\unspecified | |||||
| * - encoding: unused\ | |||||
| * - decoding: Read by user.\ | |||||
| */\ | |||||
| AVRational sample_aspect_ratio;\ | |||||
| #define FF_QSCALE_TYPE_MPEG1 0 | #define FF_QSCALE_TYPE_MPEG1 0 | ||||
| @@ -455,6 +455,7 @@ void avcodec_get_frame_defaults(AVFrame *pic){ | |||||
| pic->pts = pic->best_effort_timestamp = AV_NOPTS_VALUE; | pic->pts = pic->best_effort_timestamp = AV_NOPTS_VALUE; | ||||
| pic->pkt_pos = -1; | pic->pkt_pos = -1; | ||||
| pic->key_frame= 1; | pic->key_frame= 1; | ||||
| pic->sample_aspect_ratio = (AVRational){0, 1}; | |||||
| } | } | ||||
| AVFrame *avcodec_alloc_frame(void){ | AVFrame *avcodec_alloc_frame(void){ | ||||
| @@ -737,6 +738,8 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi | |||||
| avpkt); | avpkt); | ||||
| picture->pkt_dts= avpkt->dts; | picture->pkt_dts= avpkt->dts; | ||||
| picture->pkt_pos= avpkt->pos; | picture->pkt_pos= avpkt->pos; | ||||
| if (!picture->sample_aspect_ratio.num) | |||||
| picture->sample_aspect_ratio = avctx->sample_aspect_ratio; | |||||
| } | } | ||||
| emms_c(); //needed to avoid an emms_c() call before every return; | emms_c(); //needed to avoid an emms_c() call before every return; | ||||
| @@ -21,7 +21,7 @@ | |||||
| #define AVCODEC_VERSION_H | #define AVCODEC_VERSION_H | ||||
| #define LIBAVCODEC_VERSION_MAJOR 53 | #define LIBAVCODEC_VERSION_MAJOR 53 | ||||
| #define LIBAVCODEC_VERSION_MINOR 2 | |||||
| #define LIBAVCODEC_VERSION_MINOR 3 | |||||
| #define LIBAVCODEC_VERSION_MICRO 0 | #define LIBAVCODEC_VERSION_MICRO 0 | ||||
| #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ | #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ | ||||
| @@ -39,7 +39,7 @@ typedef struct { | |||||
| } BufferSourceContext; | } BufferSourceContext; | ||||
| int av_vsrc_buffer_add_frame2(AVFilterContext *buffer_filter, AVFrame *frame, | int av_vsrc_buffer_add_frame2(AVFilterContext *buffer_filter, AVFrame *frame, | ||||
| int64_t pts, AVRational pixel_aspect, int width, | |||||
| int64_t pts, int width, | |||||
| int height, enum PixelFormat pix_fmt, | int height, enum PixelFormat pix_fmt, | ||||
| const char *sws_param) | const char *sws_param) | ||||
| { | { | ||||
| @@ -104,20 +104,20 @@ int av_vsrc_buffer_add_frame2(AVFilterContext *buffer_filter, AVFrame *frame, | |||||
| c->frame.top_field_first = frame->top_field_first; | c->frame.top_field_first = frame->top_field_first; | ||||
| c->frame.key_frame = frame->key_frame; | c->frame.key_frame = frame->key_frame; | ||||
| c->frame.pict_type = frame->pict_type; | c->frame.pict_type = frame->pict_type; | ||||
| c->frame.sample_aspect_ratio = frame->sample_aspect_ratio; | |||||
| c->pts = pts; | c->pts = pts; | ||||
| c->pixel_aspect = pixel_aspect; | |||||
| c->has_frame = 1; | c->has_frame = 1; | ||||
| return 0; | return 0; | ||||
| } | } | ||||
| int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame, | int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame, | ||||
| int64_t pts, AVRational pixel_aspect) | |||||
| int64_t pts) | |||||
| { | { | ||||
| BufferSourceContext *c = buffer_filter->priv; | BufferSourceContext *c = buffer_filter->priv; | ||||
| return av_vsrc_buffer_add_frame2(buffer_filter, frame, | return av_vsrc_buffer_add_frame2(buffer_filter, frame, | ||||
| pts, pixel_aspect, c->w, | |||||
| pts, c->w, | |||||
| c->h, c->pix_fmt, ""); | c->h, c->pix_fmt, ""); | ||||
| } | } | ||||
| @@ -190,7 +190,7 @@ static int request_frame(AVFilterLink *link) | |||||
| picref->format, link->w, link->h); | picref->format, link->w, link->h); | ||||
| picref->pts = c->pts; | picref->pts = c->pts; | ||||
| picref->video->pixel_aspect = c->pixel_aspect; | |||||
| picref->video->pixel_aspect = c->frame.sample_aspect_ratio; | |||||
| picref->video->interlaced = c->frame.interlaced_frame; | picref->video->interlaced = c->frame.interlaced_frame; | ||||
| picref->video->top_field_first = c->frame.top_field_first; | picref->video->top_field_first = c->frame.top_field_first; | ||||
| picref->video->key_frame = c->frame.key_frame; | picref->video->key_frame = c->frame.key_frame; | ||||
| @@ -31,10 +31,10 @@ | |||||
| #include "avfilter.h" | #include "avfilter.h" | ||||
| int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame, | int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame, | ||||
| int64_t pts, AVRational pixel_aspect); | |||||
| int64_t pts); | |||||
| int av_vsrc_buffer_add_frame2(AVFilterContext *buffer_filter, AVFrame *frame, | int av_vsrc_buffer_add_frame2(AVFilterContext *buffer_filter, AVFrame *frame, | ||||
| int64_t pts, AVRational pixel_aspect, int width, | |||||
| int64_t pts, int width, | |||||
| int height, enum PixelFormat pix_fmt, | int height, enum PixelFormat pix_fmt, | ||||
| const char *sws_param); | const char *sws_param); | ||||
| @@ -247,8 +247,8 @@ static int movie_get_frame(AVFilterLink *outlink) | |||||
| movie->frame->pkt_dts : movie->frame->pkt_pts; | movie->frame->pkt_dts : movie->frame->pkt_pts; | ||||
| movie->picref->pos = movie->frame->pkt_pos; | movie->picref->pos = movie->frame->pkt_pos; | ||||
| movie->picref->video->pixel_aspect = st->sample_aspect_ratio.num ? | |||||
| st->sample_aspect_ratio : movie->codec_ctx->sample_aspect_ratio; | |||||
| if (!movie->frame->sample_aspect_ratio.num) | |||||
| movie->picref->video->pixel_aspect = st->sample_aspect_ratio; | |||||
| movie->picref->video->interlaced = movie->frame->interlaced_frame; | movie->picref->video->interlaced = movie->frame->interlaced_frame; | ||||
| movie->picref->video->top_field_first = movie->frame->top_field_first; | movie->picref->video->top_field_first = movie->frame->top_field_first; | ||||
| movie->picref->video->key_frame = movie->frame->key_frame; | movie->picref->video->key_frame = movie->frame->key_frame; | ||||