Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
@@ -49,8 +49,8 @@ av_cold int ffv1_common_init(AVCodecContext *avctx)
     s->avctx = avctx;
     s->flags = avctx->flags;
 
-    avcodec_get_frame_defaults(&s->picture);
+    s->picture.f      = avcodec_alloc_frame();
+    s->last_picture.f = av_frame_alloc();
 
     ff_dsputil_init(&s->dsp, avctx);
 
     s->width = avctx->width;
@@ -192,7 +192,13 @@ av_cold int ffv1_close(AVCodecContext *avctx)
     FFV1Context *s = avctx->priv_data;
     int i, j;
 
-    av_frame_unref(&s->last_picture);
+    if (s->picture.f)
+        ff_thread_release_buffer(avctx, &s->picture);
+    av_frame_free(&s->picture.f);
+
+    if (s->last_picture.f)
+        ff_thread_release_buffer(avctx, &s->last_picture);
+    av_frame_free(&s->last_picture.f);
 
     for (j = 0; j < s->slice_count; j++) {
         FFV1Context *fs = s->slice_context[j];
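Note on the init/close pairing above: ffv1_common_init now heap-allocates the AVFrame behind each ThreadFrame, and ffv1_close releases any buffer still held before freeing the frame structs. A minimal standalone sketch of that ownership pattern using only the public AVFrame API; ThreadFrame and ff_thread_release_buffer() are libavcodec-internal, so av_frame_unref() stands in for the release here, and the variable names are illustrative rather than taken from the patch:

    #include <libavutil/frame.h>

    int main(void)
    {
        /* init: allocate the frame structs; no pixel buffers are attached yet */
        AVFrame *picture      = av_frame_alloc();
        AVFrame *last_picture = av_frame_alloc();
        if (!picture || !last_picture) {
            av_frame_free(&picture);
            av_frame_free(&last_picture);
            return 1;
        }

        /* ... decoding/encoding would attach and swap buffers here ... */

        /* close: drop any remaining references, then free the structs;
         * av_frame_free() also unrefs, the explicit unref just mirrors the
         * ff_thread_release_buffer() calls in the patch */
        av_frame_unref(picture);
        av_frame_unref(last_picture);
        av_frame_free(&picture);
        av_frame_free(&last_picture);
        return 0;
    }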
@@ -41,6 +41,7 @@
 #include "mathops.h"
 #include "put_bits.h"
 #include "rangecoder.h"
+#include "thread.h"
 
 #ifdef __INTEL_COMPILER
 #undef av_flatten
@@ -89,7 +90,7 @@ typedef struct FFV1Context {
     int transparency;
     int flags;
     int picture_number;
-    AVFrame picture, last_picture;
+    ThreadFrame picture, last_picture;
 
     AVFrame *cur;
     int plane_count;
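For readers unfamiliar with ThreadFrame: it is an internal libavcodec wrapper (declared in thread.h, hence the include added above) around a heap-allocated AVFrame reached through its f member, which is why the rest of the patch turns every picture.field access into picture.f->field. The sketch below uses a hypothetical stand-in struct to show that access pattern; it is not FFmpeg's actual ThreadFrame definition:

    #include <libavutil/error.h>
    #include <libavutil/frame.h>

    /* Illustrative wrapper only; the real ThreadFrame also carries
     * frame-threading synchronization state. */
    typedef struct FakeThreadFrame {
        AVFrame *f;
    } FakeThreadFrame;

    static int fake_thread_frame_init(FakeThreadFrame *tf)
    {
        tf->f = av_frame_alloc();          /* what ffv1_common_init now does */
        return tf->f ? 0 : AVERROR(ENOMEM);
    }

    static void fake_thread_frame_free(FakeThreadFrame *tf)
    {
        av_frame_free(&tf->f);             /* what ffv1_close now does */
    }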
@@ -38,7 +38,6 @@
 #include "golomb.h"
 #include "mathops.h"
 #include "ffv1.h"
-#include "thread.h"
 
 static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state,
                                                int is_signed)
@@ -734,14 +733,16 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
     int buf_size = avpkt->size;
     FFV1Context *f = avctx->priv_data;
     RangeCoder *const c = &f->slice_context[0]->c;
-    ThreadFrame frame = { .f = data };
     int i, ret;
     uint8_t keystate = 128;
     const uint8_t *buf_p;
-    AVFrame *const p = data;
+    AVFrame *p;
 
-    f->cur = p;
-    f->avctx = avctx;
+    if (f->last_picture.f)
+        ff_thread_release_buffer(avctx, &f->last_picture);
+    FFSWAP(ThreadFrame, f->picture, f->last_picture);
+
+    f->cur = p = f->picture.f;
 
     ff_init_range_decoder(c, buf, buf_size);
     ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
@@ -762,7 +763,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
         p->key_frame = 0;
     }
 
-    if ((ret = ff_thread_get_buffer(avctx, &frame, AV_GET_BUFFER_FLAG_REF)) < 0)
+    if ((ret = ff_thread_get_buffer(avctx, &f->picture, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;
 
     if (avctx->debug & FF_DEBUG_PICT_INFO)
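The decoder no longer writes into the caller-supplied frame directly: it releases whatever last_picture still references, swaps picture and last_picture so the previous output becomes the concealment reference, and then requests a fresh buffer into f->picture via ff_thread_get_buffer(). A standalone approximation of that rotation with the public API only; av_frame_unref() and av_frame_get_buffer() stand in for the internal ff_thread_release_buffer()/ff_thread_get_buffer(), and the function name is made up for the example:

    #include <libavutil/frame.h>

    /* Drop the oldest reference, promote the previous output to "last",
     * then allocate a writable buffer for the next output frame. */
    static int rotate_and_get_buffer(AVFrame **picture, AVFrame **last_picture,
                                     int width, int height,
                                     enum AVPixelFormat pix_fmt)
    {
        AVFrame *tmp;

        av_frame_unref(*last_picture);            /* release the old reference */
        tmp = *picture;                           /* FFSWAP() in the patch     */
        *picture = *last_picture;
        *last_picture = tmp;

        (*picture)->width  = width;
        (*picture)->height = height;
        (*picture)->format = pix_fmt;
        return av_frame_get_buffer(*picture, 32); /* new output buffer         */
    }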
@@ -818,7 +819,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
     for (i = f->slice_count - 1; i >= 0; i--) {
         FFV1Context *fs = f->slice_context[i];
         int j;
-        if (fs->slice_damaged && f->last_picture.data[0]) {
+        if (fs->slice_damaged && f->last_picture.f->data[0]) {
             const uint8_t *src[4];
             uint8_t *dst[4];
             for (j = 0; j < 4; j++) {
@@ -826,11 +827,11 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
                 int sv = (j==1 || j==2) ? f->chroma_v_shift : 0;
                 dst[j] = p->data[j] + p->linesize[j]*
                          (fs->slice_y>>sv) + (fs->slice_x>>sh);
-                src[j] = f->last_picture.data[j] + f->last_picture.linesize[j]*
+                src[j] = f->last_picture.f->data[j] + f->last_picture.f->linesize[j]*
                          (fs->slice_y>>sv) + (fs->slice_x>>sh);
             }
             av_image_copy(dst, p->linesize, (const uint8_t **)src,
-                          f->last_picture.linesize,
+                          f->last_picture.f->linesize,
                           avctx->pix_fmt,
                           fs->slice_width,
                           fs->slice_height);
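The error-concealment path above copies the damaged slice's rectangle from the previous frame: the data[] pointers are offset by the slice position (shifted by the chroma subsampling for the chroma planes) and av_image_copy() does the per-plane copy. A hedged, standalone sketch of the same call copying a whole frame instead of a slice; frame setup is omitted and both frames are assumed to already hold buffers of identical size and format:

    #include <libavutil/frame.h>
    #include <libavutil/imgutils.h>

    /* Copy every plane of src into dst; av_image_copy() handles the plane
     * count and chroma subsampling implied by the pixel format. */
    static void copy_whole_picture(AVFrame *dst, const AVFrame *src)
    {
        av_image_copy(dst->data, dst->linesize,
                      (const uint8_t **)src->data, src->linesize,
                      src->format, src->width, src->height);
    }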
@@ -839,10 +840,11 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
     f->picture_number++;
 
-    av_frame_unref(&f->last_picture);
-    if ((ret = av_frame_ref(&f->last_picture, p)) < 0)
-        return ret;
+    if (f->last_picture.f)
+        ff_thread_release_buffer(avctx, &f->last_picture);
 
     f->cur = NULL;
+    if ((ret = av_frame_ref(data, f->picture.f)) < 0)
+        return ret;
 
     *got_frame = 1;
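Because the decoder now keeps f->picture alive across calls (it becomes last_picture for the next frame), the frame handed to the caller is a fresh reference created with av_frame_ref() rather than the decoder's own struct. A minimal sketch of that hand-off with the public API; the function name is illustrative:

    #include <libavutil/frame.h>

    /* Give the caller its own reference to the decoder's frame; both ends
     * then share the same refcounted data buffers. */
    static int output_decoded_frame(AVFrame *caller_frame, const AVFrame *decoded)
    {
        av_frame_unref(caller_frame);      /* av_frame_ref() needs a clean dst */
        return av_frame_ref(caller_frame, decoded);
    }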
@@ -794,7 +794,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
     if ((ret = ffv1_allocate_initial_states(s)) < 0)
         return ret;
 
-    avctx->coded_frame = &s->picture;
+    avctx->coded_frame = s->picture.f;
 
     if (!s->transparency)
         s->plane_count = 2;
 
     avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);
@@ -939,12 +939,12 @@ static void encode_slice_header(FFV1Context *f, FFV1Context *fs)
         put_symbol(c, state, f->plane[j].quant_table_index, 0);
         av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
     }
-    if (!f->picture.interlaced_frame)
+    if (!f->picture.f->interlaced_frame)
         put_symbol(c, state, 3, 0);
     else
-        put_symbol(c, state, 1 + !f->picture.top_field_first, 0);
-    put_symbol(c, state, f->picture.sample_aspect_ratio.num, 0);
-    put_symbol(c, state, f->picture.sample_aspect_ratio.den, 0);
+        put_symbol(c, state, 1 + !f->picture.f->top_field_first, 0);
+    put_symbol(c, state, f->picture.f->sample_aspect_ratio.num, 0);
+    put_symbol(c, state, f->picture.f->sample_aspect_ratio.den, 0);
 }
 
 static int encode_slice(AVCodecContext *c, void *arg)
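The slice header fields above are now read through f->picture.f; they are ordinary AVFrame metadata. A small standalone sketch that prints the same fields from a plain AVFrame (the values 3 and 1 + !top_field_first follow the patch's encoding of the picture structure):

    #include <stdio.h>
    #include <libavutil/frame.h>

    static void print_slice_header_inputs(const AVFrame *pic)
    {
        if (!pic->interlaced_frame)
            printf("picture structure: progressive (coded as symbol 3)\n");
        else
            printf("picture structure: interlaced, %s field first\n",
                   pic->top_field_first ? "top" : "bottom");
        printf("sample aspect ratio: %d:%d\n",
               pic->sample_aspect_ratio.num, pic->sample_aspect_ratio.den);
    }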
@@ -955,7 +955,7 @@ static int encode_slice(AVCodecContext *c, void *arg)
     int height = fs->slice_height;
     int x = fs->slice_x;
     int y = fs->slice_y;
-    AVFrame *const p = &f->picture;
+    AVFrame *const p = f->picture.f;
     const int ps = av_pix_fmt_desc_get(c->pix_fmt)->comp[0].step_minus1 + 1;
 
     if (p->key_frame)
@@ -1002,7 +1002,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 {
     FFV1Context *f = avctx->priv_data;
     RangeCoder *const c = &f->slice_context[0]->c;
-    AVFrame *const p = &f->picture;
+    AVFrame *const p = f->picture.f;
     int used_count = 0;
     uint8_t keystate = 128;
     uint8_t *buf_p;
@@ -1015,7 +1015,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     ff_init_range_encoder(c, pkt->data, pkt->size);
     ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
 
-    *p = *pict;
+    av_frame_ref(p, pict);
     p->pict_type = AV_PICTURE_TYPE_I;
 
     if (avctx->gop_size == 0 || f->picture_number % avctx->gop_size == 0) {
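Finally, the encoder replaces the shallow struct copy *p = *pict with av_frame_ref(). With refcounted AVFrames a struct copy duplicates the data pointers without taking a reference, so the owner of pict could free the buffers while the encoder still points at them; av_frame_ref() shares ownership safely. A hedged sketch of the difference; the function is illustrative and, unlike the patch, it checks the return value:

    #include <libavutil/frame.h>

    static int keep_input_picture(AVFrame *mine, const AVFrame *user_pict)
    {
        /* *mine = *user_pict;  <-- old pattern: copies pointers, not ownership */
        av_frame_unref(mine);                   /* dst must be unreferenced     */
        return av_frame_ref(mine, user_pict);   /* share the refcounted buffers */
    }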