
flashsv: use the AVFrame API properly.

tags/n2.2-rc1
Anton Khirnov 12 years ago
commit 042aec41ae
1 changed file with 33 additions and 28 deletions
libavcodec/flashsv.c  +33 -28
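
The patch switches FlashSVContext from embedding an AVFrame by value to owning a heap-allocated AVFrame pointer: allocate once with av_frame_alloc() in init, reuse it with ff_reget_buffer() on every packet, hand the caller its own reference via av_frame_ref(), and release it with av_frame_free() in the close callback. A minimal sketch of that lifecycle follows; the DummyContext type and the dummy_* callback names are hypothetical, and ff_reget_buffer() is libavcodec-internal ("internal.h"), so this assumes an in-tree decoder of the same era (two-argument ff_reget_buffer(), void *data decode callback), not a drop-in implementation.

#include "avcodec.h"
#include "internal.h"
#include "libavutil/frame.h"

/* Hypothetical private context: the frame is a pointer the decoder owns. */
typedef struct DummyContext {
    AVFrame *frame;
} DummyContext;

static av_cold int dummy_init(AVCodecContext *avctx)
{
    DummyContext *s = avctx->priv_data;

    s->frame = av_frame_alloc();   /* allocate the frame once, at init */
    if (!s->frame)
        return AVERROR(ENOMEM);
    return 0;
}

static int dummy_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame, AVPacket *avpkt)
{
    DummyContext *s = avctx->priv_data;
    int ret;

    /* reuse the same frame on each call; reget preserves previous contents */
    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
        return ret;

    /* ... decode avpkt into s->frame->data[] here ... */

    /* give the caller its own reference; the decoder keeps s->frame */
    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;
    *got_frame = 1;
    return avpkt->size;
}

static av_cold int dummy_close(AVCodecContext *avctx)
{
    DummyContext *s = avctx->priv_data;

    av_frame_free(&s->frame);      /* unreferences and frees the frame */
    return 0;
}

On a mid-frame error the decoder can still call av_frame_unref(s->frame) and keep the allocation for the next packet, which is what the hunk around av_frame_unref() in the diff below does.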

@@ -50,7 +50,7 @@ typedef struct BlockInfo {
 
 typedef struct FlashSVContext {
     AVCodecContext *avctx;
-    AVFrame         frame;
+    AVFrame        *frame;
     int             image_width, image_height;
    int             block_width, block_height;
    uint8_t        *tmpblock;
@@ -99,6 +99,19 @@ static int decode_hybrid(const uint8_t *sptr, uint8_t *dptr, int dx, int dy,
     return sptr - orig_src;
 }
 
+static av_cold int flashsv_decode_end(AVCodecContext *avctx)
+{
+    FlashSVContext *s = avctx->priv_data;
+    inflateEnd(&s->zstream);
+    /* release the frame if needed */
+    av_frame_free(&s->frame);
+
+    /* free the tmpblock */
+    av_free(s->tmpblock);
+
+    return 0;
+}
+
 static av_cold int flashsv_decode_init(AVCodecContext *avctx)
 {
     FlashSVContext *s = avctx->priv_data;
@@ -114,7 +127,12 @@ static av_cold int flashsv_decode_init(AVCodecContext *avctx)
         return 1;
     }
     avctx->pix_fmt = AV_PIX_FMT_BGR24;
-    avcodec_get_frame_defaults(&s->frame);
+
+    s->frame = av_frame_alloc();
+    if (!s->frame) {
+        flashsv_decode_end(avctx);
+        return AVERROR(ENOMEM);
+    }
 
     return 0;
 }
@@ -202,18 +220,18 @@ static int flashsv_decode_block(AVCodecContext *avctx, AVPacket *avpkt,
         /* Flash Screen Video stores the image upside down, so copy
          * lines to destination in reverse order. */
         for (k = 1; k <= s->diff_height; k++) {
-            memcpy(s->frame.data[0] + x_pos * 3 +
-                   (s->image_height - y_pos - k) * s->frame.linesize[0],
+            memcpy(s->frame->data[0] + x_pos * 3 +
+                   (s->image_height - y_pos - k) * s->frame->linesize[0],
                    line, width * 3);
             /* advance source pointer to next line */
             line += width * 3;
         }
     } else {
         /* hybrid 15-bit/palette mode */
-        decode_hybrid(s->tmpblock, s->frame.data[0],
+        decode_hybrid(s->tmpblock, s->frame->data[0],
                       s->image_height - (y_pos + 1 + s->diff_height),
                       x_pos, s->diff_height, width,
-                      s->frame.linesize[0], s->pal);
+                      s->frame->linesize[0], s->pal);
     }
     skip_bits_long(gb, 8 * block_size); /* skip the consumed bits */
     return 0;
@@ -333,7 +351,7 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
             s->image_width, s->image_height, s->block_width, s->block_height,
             h_blocks, v_blocks, h_part, v_part);
 
-    if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
@@ -360,7 +378,7 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
             s->diff_height = cur_blk_height;
 
             if (8 * size > get_bits_left(&gb)) {
-                av_frame_unref(&s->frame);
+                av_frame_unref(s->frame);
                 return AVERROR_INVALIDDATA;
             }
 
@@ -420,11 +438,11 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
 
             if (has_diff) {
                 int k;
-                int off = (s->image_height - y_pos - 1) * s->frame.linesize[0];
+                int off = (s->image_height - y_pos - 1) * s->frame->linesize[0];
 
                 for (k = 0; k < cur_blk_height; k++) {
-                    int x = off - k * s->frame.linesize[0] + x_pos * 3;
-                    memcpy(s->frame.data[0] + x, s->keyframe + x,
+                    int x = off - k * s->frame->linesize[0] + x_pos * 3;
+                    memcpy(s->frame->data[0] + x, s->keyframe + x,
                            cur_blk_width * 3);
                 }
             }
@@ -442,17 +460,17 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
     }
     if (s->is_keyframe && s->ver == 2) {
         if (!s->keyframe) {
-            s->keyframe = av_malloc(s->frame.linesize[0] * avctx->height);
+            s->keyframe = av_malloc(s->frame->linesize[0] * avctx->height);
             if (!s->keyframe) {
                 av_log(avctx, AV_LOG_ERROR, "Cannot allocate image data\n");
                 return AVERROR(ENOMEM);
             }
         }
-        memcpy(s->keyframe, s->frame.data[0],
-               s->frame.linesize[0] * avctx->height);
+        memcpy(s->keyframe, s->frame->data[0],
+               s->frame->linesize[0] * avctx->height);
     }
 
-    if ((ret = av_frame_ref(data, &s->frame)) < 0)
+    if ((ret = av_frame_ref(data, s->frame)) < 0)
         return ret;
 
     *got_frame = 1;
@@ -465,19 +483,6 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
     return buf_size;
 }
 
-static av_cold int flashsv_decode_end(AVCodecContext *avctx)
-{
-    FlashSVContext *s = avctx->priv_data;
-    inflateEnd(&s->zstream);
-    /* release the frame if needed */
-    av_frame_unref(&s->frame);
-
-    /* free the tmpblock */
-    av_free(s->tmpblock);
-
-    return 0;
-}
-
 #if CONFIG_FLASHSV_DECODER
 AVCodec ff_flashsv_decoder = {
     .name           = "flashsv",

