diff --git a/libavcodec/aacps.c b/libavcodec/aacps.c
index 3b5aa58109..c2afec0af5 100644
--- a/libavcodec/aacps.c
+++ b/libavcodec/aacps.c
@@ -275,6 +275,10 @@ int ff_ps_read_data(AVCodecContext *avctx, GetBitContext *gb_host, PSContext *ps
 err:
     ps->start = 0;
     skip_bits_long(gb_host, bits_left);
+    memset(ps->iid_par, 0, sizeof(ps->iid_par));
+    memset(ps->icc_par, 0, sizeof(ps->icc_par));
+    memset(ps->ipd_par, 0, sizeof(ps->ipd_par));
+    memset(ps->opd_par, 0, sizeof(ps->opd_par));
     return bits_left;
 }
 
diff --git a/libavcodec/apedec.c b/libavcodec/apedec.c
index 73fa030e6e..46743e7bbd 100644
--- a/libavcodec/apedec.c
+++ b/libavcodec/apedec.c
@@ -404,9 +404,12 @@ static inline int ape_decode_value(APEContext *ctx, APERice *rice)
 
         if (tmpk <= 16)
             x = range_decode_bits(ctx, tmpk);
-        else {
+        else if (tmpk <= 32) {
             x = range_decode_bits(ctx, 16);
             x |= (range_decode_bits(ctx, tmpk - 16) << 16);
+        } else {
+            av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk);
+            return AVERROR_INVALIDDATA;
         }
         x += overflow << tmpk;
     } else {
diff --git a/libavcodec/bytestream.h b/libavcodec/bytestream.h
index 71c70aac84..2d9ca47508 100644
--- a/libavcodec/bytestream.h
+++ b/libavcodec/bytestream.h
@@ -1,6 +1,7 @@
 /*
  * Bytestream functions
  * copyright (c) 2006 Baptiste Coudurier
+ * Copyright (c) 2012 Aneesh Dogra (lionaneesh)
  *
  * This file is part of FFmpeg.
  *
@@ -23,6 +24,7 @@
 #define AVCODEC_BYTESTREAM_H
 
 #include <string.h>
+
 #include "libavutil/common.h"
 #include "libavutil/intreadwrite.h"
 
@@ -30,35 +32,57 @@ typedef struct {
     const uint8_t *buffer, *buffer_end, *buffer_start;
 } GetByteContext;
 
-#define DEF_T(type, name, bytes, read, write) \
-static av_always_inline type bytestream_get_ ## name(const uint8_t **b){\
-    (*b) += bytes;\
-    return read(*b - bytes);\
-}\
-static av_always_inline void bytestream_put_ ##name(uint8_t **b, const type value){\
-    write(*b, value);\
-    (*b) += bytes;\
-}\
-static av_always_inline type bytestream2_get_ ## name ## u(GetByteContext *g)\
-{\
-    return bytestream_get_ ## name(&g->buffer);\
-}\
-static av_always_inline type bytestream2_get_ ## name(GetByteContext *g)\
-{\
-    if (g->buffer_end - g->buffer < bytes)\
-        return 0;\
-    return bytestream2_get_ ## name ## u(g);\
-}\
-static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g)\
-{\
-    if (g->buffer_end - g->buffer < bytes)\
-        return 0;\
-    return read(g->buffer);\
-}
-
-#define DEF(name, bytes, read, write) \
+typedef struct {
+    uint8_t *buffer, *buffer_end, *buffer_start;
+    int eof;
+} PutByteContext;
+
+#define DEF_T(type, name, bytes, read, write) \
+static av_always_inline type bytestream_get_ ## name(const uint8_t **b) \
+{ \
+    (*b) += bytes; \
+    return read(*b - bytes); \
+} \
+static av_always_inline void bytestream_put_ ## name(uint8_t **b, \
+                                                     const type value) \
+{ \
+    write(*b, value); \
+    (*b) += bytes; \
+} \
+static av_always_inline void bytestream2_put_ ## name ## u(PutByteContext *p, \
+                                                           const type value) \
+{ \
+    bytestream_put_ ## name(&p->buffer, value); \
+} \
+static av_always_inline void bytestream2_put_ ## name(PutByteContext *p, \
+                                                      const type value) \
+{ \
+    if (!p->eof && (p->buffer_end - p->buffer >= bytes)) { \
+        write(p->buffer, value); \
+        p->buffer += bytes; \
+    } else \
+        p->eof = 1; \
+} \
+static av_always_inline type bytestream2_get_ ## name ## u(GetByteContext *g) \
+{ \
+    return bytestream_get_ ## name(&g->buffer); \
+} \
+static av_always_inline type bytestream2_get_ ## name(GetByteContext *g) \
+{ \
+    if (g->buffer_end - g->buffer < bytes) \
+        return 0; \
+    return bytestream2_get_
## name ## u(g); \ +} \ +static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g) \ +{ \ + if (g->buffer_end - g->buffer < bytes) \ + return 0; \ + return read(g->buffer); \ +} + +#define DEF(name, bytes, read, write) \ DEF_T(unsigned int, name, bytes, read, write) -#define DEF64(name, bytes, read, write) \ +#define DEF64(name, bytes, read, write) \ DEF_T(uint64_t, name, bytes, read, write) DEF64(le64, 8, AV_RL64, AV_WL64) @@ -112,11 +136,22 @@ DEF (byte, 1, AV_RB8 , AV_WB8 ) #endif static av_always_inline void bytestream2_init(GetByteContext *g, - const uint8_t *buf, int buf_size) + const uint8_t *buf, + int buf_size) { - g->buffer = buf; + g->buffer = buf; g->buffer_start = buf; - g->buffer_end = buf + buf_size; + g->buffer_end = buf + buf_size; +} + +static av_always_inline void bytestream2_init_writer(PutByteContext *p, + uint8_t *buf, + int buf_size) +{ + p->buffer = buf; + p->buffer_start = buf; + p->buffer_end = buf + buf_size; + p->eof = 0; } static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g) @@ -124,32 +159,61 @@ static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext * return g->buffer_end - g->buffer; } +static av_always_inline unsigned int bytestream2_get_bytes_left_p(PutByteContext *p) +{ + return p->buffer_end - p->buffer; +} + static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size) { g->buffer += FFMIN(g->buffer_end - g->buffer, size); } +static av_always_inline void bytestream2_skipu(GetByteContext *g, + unsigned int size) +{ + g->buffer += size; +} + +static av_always_inline void bytestream2_skip_p(PutByteContext *p, + unsigned int size) +{ + int size2; + if (p->eof) + return; + size2 = FFMIN(p->buffer_end - p->buffer, size); + if (size2 != size) + p->eof = 1; + p->buffer += size2; +} + static av_always_inline int bytestream2_tell(GetByteContext *g) { return (int)(g->buffer - g->buffer_start); } -static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, +static av_always_inline int bytestream2_tell_p(PutByteContext *p) +{ + return (int)(p->buffer - p->buffer_start); +} + +static av_always_inline int bytestream2_seek(GetByteContext *g, + int offset, int whence) { switch (whence) { case SEEK_CUR: - offset = av_clip(offset, -(g->buffer - g->buffer_start), - g->buffer_end - g->buffer); + offset = av_clip(offset, -(g->buffer - g->buffer_start), + g->buffer_end - g->buffer); g->buffer += offset; break; case SEEK_END: - offset = av_clip(offset, -(g->buffer_end - g->buffer_start), 0); + offset = av_clip(offset, -(g->buffer_end - g->buffer_start), 0); g->buffer = g->buffer_end + offset; break; case SEEK_SET: - offset = av_clip(offset, 0, g->buffer_end - g->buffer_start); + offset = av_clip(offset, 0, g->buffer_end - g->buffer_start); g->buffer = g->buffer_start + offset; break; default: @@ -158,6 +222,37 @@ static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, return bytestream2_tell(g); } +static av_always_inline int bytestream2_seek_p(PutByteContext *p, + int offset, + int whence) +{ + p->eof = 0; + switch (whence) { + case SEEK_CUR: + if (p->buffer_end - p->buffer < offset) + p->eof = 1; + offset = av_clip(offset, -(p->buffer - p->buffer_start), + p->buffer_end - p->buffer); + p->buffer += offset; + break; + case SEEK_END: + if (offset > 0) + p->eof = 1; + offset = av_clip(offset, -(p->buffer_end - p->buffer_start), 0); + p->buffer = p->buffer_end + offset; + break; + case SEEK_SET: + if (p->buffer_end - p->buffer_start < offset) + p->eof = 
1; + offset = av_clip(offset, 0, p->buffer_end - p->buffer_start); + p->buffer = p->buffer_start + offset; + break; + default: + return AVERROR(EINVAL); + } + return bytestream2_tell_p(p); +} + static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, uint8_t *dst, unsigned int size) @@ -168,14 +263,78 @@ static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, return size2; } -static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b, uint8_t *dst, unsigned int size) +static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, + uint8_t *dst, + unsigned int size) +{ + memcpy(dst, g->buffer, size); + g->buffer += size; + return size; +} + +static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, + const uint8_t *src, + unsigned int size) +{ + int size2; + if (p->eof) + return 0; + size2 = FFMIN(p->buffer_end - p->buffer, size); + if (size2 != size) + p->eof = 1; + memcpy(p->buffer, src, size2); + p->buffer += size2; + return size2; +} + +static av_always_inline unsigned int bytestream2_put_bufferu(PutByteContext *p, + const uint8_t *src, + unsigned int size) +{ + memcpy(p->buffer, src, size); + p->buffer += size; + return size; +} + +static av_always_inline void bytestream2_set_buffer(PutByteContext *p, + const uint8_t c, + unsigned int size) +{ + int size2; + if (p->eof) + return; + size2 = FFMIN(p->buffer_end - p->buffer, size); + if (size2 != size) + p->eof = 1; + memset(p->buffer, c, size2); + p->buffer += size2; +} + +static av_always_inline void bytestream2_set_bufferu(PutByteContext *p, + const uint8_t c, + unsigned int size) +{ + memset(p->buffer, c, size); + p->buffer += size; +} + +static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p) +{ + return p->eof; +} + +static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b, + uint8_t *dst, + unsigned int size) { memcpy(dst, *b, size); (*b) += size; return size; } -static av_always_inline void bytestream_put_buffer(uint8_t **b, const uint8_t *src, unsigned int size) +static av_always_inline void bytestream_put_buffer(uint8_t **b, + const uint8_t *src, + unsigned int size) { memcpy(*b, src, size); (*b) += size; diff --git a/libavcodec/indeo4.c b/libavcodec/indeo4.c index 573718e374..3e8a3988d6 100644 --- a/libavcodec/indeo4.c +++ b/libavcodec/indeo4.c @@ -372,7 +372,8 @@ static int decode_band_hdr(IVI4DecContext *ctx, IVIBandDesc *band, if (!get_bits1(&ctx->gb) || ctx->frame_type == FRAMETYPE_INTRA) { transform_id = get_bits(&ctx->gb, 5); - if (!transforms[transform_id].inv_trans) { + if (transform_id >= FF_ARRAY_ELEMS(transforms) || + !transforms[transform_id].inv_trans) { av_log_ask_for_sample(avctx, "Unimplemented transform: %d!\n", transform_id); return AVERROR_PATCHWELCOME; } diff --git a/libavcodec/lagarith.c b/libavcodec/lagarith.c index 43027cff4e..729b9d0f88 100644 --- a/libavcodec/lagarith.c +++ b/libavcodec/lagarith.c @@ -247,24 +247,26 @@ static void lag_pred_line(LagarithContext *l, uint8_t *buf, { int L, TL; - /* Left pixel is actually prev_row[width] */ - L = buf[width - stride - 1]; if (!line) { /* Left prediction only for first line */ L = l->dsp.add_hfyu_left_prediction(buf + 1, buf + 1, width - 1, buf[0]); - return; - } else if (line == 1) { - /* Second line, left predict first pixel, the rest of the line is median predicted - * NOTE: In the case of RGB this pixel is top predicted */ - TL = l->avctx->pix_fmt == PIX_FMT_YUV420P ? 
buf[-stride] : L; } else { - /* Top left is 2 rows back, last pixel */ - TL = buf[width - (2 * stride) - 1]; - } + /* Left pixel is actually prev_row[width] */ + L = buf[width - stride - 1]; + + if (line == 1) { + /* Second line, left predict first pixel, the rest of the line is median predicted + * NOTE: In the case of RGB this pixel is top predicted */ + TL = l->avctx->pix_fmt == PIX_FMT_YUV420P ? buf[-stride] : L; + } else { + /* Top left is 2 rows back, last pixel */ + TL = buf[width - (2 * stride) - 1]; + } - add_lag_median_prediction(buf, buf - stride, buf, - width, &L, &TL); + add_lag_median_prediction(buf, buf - stride, buf, + width, &L, &TL); + } } static int lag_decode_line(LagarithContext *l, lag_rac *rac, @@ -310,13 +312,13 @@ handle_zeros: } static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst, - const uint8_t *src, int width, - int esc_count) + const uint8_t *src, const uint8_t *src_end, + int width, int esc_count) { int i = 0; int count; uint8_t zero_run = 0; - const uint8_t *start = src; + const uint8_t *src_start = src; uint8_t mask1 = -(esc_count < 2); uint8_t mask2 = -(esc_count < 3); uint8_t *end = dst + (width - 2); @@ -333,6 +335,8 @@ output_zeros: i = 0; while (!zero_run && dst + i < end) { i++; + if (src + i >= src_end) + return AVERROR_INVALIDDATA; zero_run = !(src[i] | (src[i + 1] & mask1) | (src[i + 2] & mask2)); } @@ -348,9 +352,10 @@ output_zeros: } else { memcpy(dst, src, i); src += i; + dst += i; } } - return start - src; + return src_start - src; } @@ -366,6 +371,7 @@ static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst, int esc_count = src[0]; GetBitContext gb; lag_rac rac; + const uint8_t *src_end = src + src_size; rac.avctx = l->avctx; l->zeros = 0; @@ -396,10 +402,16 @@ static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst, esc_count -= 4; if (esc_count > 0) { /* Zero run coding only, no range coding. 
*/ - for (i = 0; i < height; i++) - src += lag_decode_zero_run_line(l, dst + (i * stride), src, - width, esc_count); + for (i = 0; i < height; i++) { + int res = lag_decode_zero_run_line(l, dst + (i * stride), src, + src_end, width, esc_count); + if (res < 0) + return res; + src += res; + } } else { + if (src_size < width * height) + return AVERROR_INVALIDDATA; // buffer not big enough /* Plane is stored uncompressed */ for (i = 0; i < height; i++) { memcpy(dst + (i * stride), src, width); @@ -500,11 +512,19 @@ static int lag_decode_frame(AVCodecContext *avctx, } for (i = 0; i < 4; i++) srcs[i] = l->rgb_planes + (i + 1) * l->rgb_stride * avctx->height - l->rgb_stride; + if (offset_ry >= buf_size || + offset_gu >= buf_size || + offset_bv >= buf_size || + offs[3] >= buf_size) { + av_log(avctx, AV_LOG_ERROR, + "Invalid frame offsets\n"); + return AVERROR_INVALIDDATA; + } for (i = 0; i < 4; i++) lag_decode_arith_plane(l, srcs[i], avctx->width, avctx->height, -l->rgb_stride, buf + offs[i], - buf_size); + buf_size - offs[i]); dst = p->data[0]; for (i = 0; i < 4; i++) srcs[i] = l->rgb_planes + i * l->rgb_stride * avctx->height; @@ -576,15 +596,23 @@ static int lag_decode_frame(AVCodecContext *avctx, return -1; } + if (offset_ry >= buf_size || + offset_gu >= buf_size || + offset_bv >= buf_size) { + av_log(avctx, AV_LOG_ERROR, + "Invalid frame offsets\n"); + return AVERROR_INVALIDDATA; + } + lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height, p->linesize[0], buf + offset_ry, - buf_size); + buf_size - offset_ry); lag_decode_arith_plane(l, p->data[2], avctx->width / 2, avctx->height / 2, p->linesize[2], - buf + offset_gu, buf_size); + buf + offset_gu, buf_size - offset_gu); lag_decode_arith_plane(l, p->data[1], avctx->width / 2, avctx->height / 2, p->linesize[1], - buf + offset_bv, buf_size); + buf + offset_bv, buf_size - offset_bv); break; default: av_log(avctx, AV_LOG_ERROR, diff --git a/libavcodec/lagarithrac.c b/libavcodec/lagarithrac.c index 56c1d0bcc0..4aba32335b 100644 --- a/libavcodec/lagarithrac.c +++ b/libavcodec/lagarithrac.c @@ -32,15 +32,16 @@ void lag_rac_init(lag_rac *l, GetBitContext *gb, int length) { - int i, j; + int i, j, left; /* According to reference decoder "1st byte is garbage", * however, it gets skipped by the call to align_get_bits() */ align_get_bits(gb); + left = get_bits_left(gb) >> 3; l->bytestream_start = l->bytestream = gb->buffer + get_bits_count(gb) / 8; - l->bytestream_end = l->bytestream_start + length; + l->bytestream_end = l->bytestream_start + FFMIN(length, left); l->range = 0x80; l->low = *l->bytestream >> 1; diff --git a/libavcodec/lzw.c b/libavcodec/lzw.c index 185a05d6ab..aed1a43fac 100644 --- a/libavcodec/lzw.c +++ b/libavcodec/lzw.c @@ -101,9 +101,14 @@ void ff_lzw_decode_tail(LZWState *p) struct LZWState *s = (struct LZWState *)p; if(s->mode == FF_LZW_GIF) { - while(s->pbuf < s->ebuf && s->bs>0){ - s->pbuf += s->bs; - s->bs = *s->pbuf++; + while (s->bs > 0) { + if (s->pbuf + s->bs >= s->ebuf) { + s->pbuf = s->ebuf; + break; + } else { + s->pbuf += s->bs; + s->bs = *s->pbuf++; + } } }else s->pbuf= s->ebuf; diff --git a/libavcodec/mimic.c b/libavcodec/mimic.c index 652dd1bb5d..c357ce600a 100644 --- a/libavcodec/mimic.c +++ b/libavcodec/mimic.c @@ -259,8 +259,8 @@ static int decode(MimicContext *ctx, int quality, int num_coeffs, int index = (ctx->cur_index+backref)&15; uint8_t *p = ctx->flipped_ptrs[index].data[0]; - ff_thread_await_progress(&ctx->buf_ptrs[index], cur_row, 0); - if(p) { + if (index != ctx->cur_index && p) { + 
ff_thread_await_progress(&ctx->buf_ptrs[index], cur_row, 0); p += src - ctx->flipped_ptrs[ctx->prev_index].data[plane]; ctx->dsp.put_pixels_tab[1][0](dst, p, stride, 8); @@ -310,6 +310,7 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data, int width, height; int quality, num_coeffs; int swap_buf_size = buf_size - MIMIC_HEADER_SIZE; + int res; if(buf_size < MIMIC_HEADER_SIZE) { av_log(avctx, AV_LOG_ERROR, "insufficient data\n"); @@ -376,10 +377,10 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data, swap_buf_size>>2); init_get_bits(&ctx->gb, ctx->swap_buf, swap_buf_size << 3); - if(!decode(ctx, quality, num_coeffs, !is_pframe)) { - if (avctx->active_thread_type&FF_THREAD_FRAME) - ff_thread_report_progress(&ctx->buf_ptrs[ctx->cur_index], INT_MAX, 0); - else { + res = decode(ctx, quality, num_coeffs, !is_pframe); + ff_thread_report_progress(&ctx->buf_ptrs[ctx->cur_index], INT_MAX, 0); + if (!res) { + if (!(avctx->active_thread_type & FF_THREAD_FRAME)) { ff_thread_release_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index]); return -1; } diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c index 04c149a99b..a02a77d16f 100644 --- a/libavcodec/mpegvideo.c +++ b/libavcodec/mpegvideo.c @@ -1394,8 +1394,7 @@ void MPV_frame_end(MpegEncContext *s) s->avctx->coded_frame = (AVFrame *) s->current_picture_ptr; if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) { - ff_thread_report_progress((AVFrame *) s->current_picture_ptr, - s->mb_height - 1, 0); + ff_thread_report_progress((AVFrame *) s->current_picture_ptr, INT_MAX, 0); } } diff --git a/libavcodec/rawdec.c b/libavcodec/rawdec.c index e015985d69..5229026ade 100644 --- a/libavcodec/rawdec.c +++ b/libavcodec/rawdec.c @@ -139,6 +139,7 @@ static int raw_decode(AVCodecContext *avctx, int buf_size = avpkt->size; int linesize_align = 4; RawVideoContext *context = avctx->priv_data; + int res; AVFrame * frame = (AVFrame *) data; AVPicture * picture = (AVPicture *) data; @@ -185,7 +186,9 @@ static int raw_decode(AVCodecContext *avctx, avctx->codec_tag == MKTAG('A', 'V', 'u', 'p')) buf += buf_size - context->length; - avpicture_fill(picture, buf, avctx->pix_fmt, avctx->width, avctx->height); + if ((res = avpicture_fill(picture, buf, avctx->pix_fmt, + avctx->width, avctx->height)) < 0) + return res; if((avctx->pix_fmt==PIX_FMT_PAL8 && buf_size < context->length) || (avctx->pix_fmt!=PIX_FMT_PAL8 && (av_pix_fmt_descriptors[avctx->pix_fmt].flags & PIX_FMT_PAL))){ diff --git a/libavcodec/truemotion2.c b/libavcodec/truemotion2.c index a241e9663f..0770ef4271 100644 --- a/libavcodec/truemotion2.c +++ b/libavcodec/truemotion2.c @@ -25,6 +25,7 @@ */ #include "avcodec.h" +#include "bytestream.h" #include "get_bits.h" #include "dsputil.h" @@ -59,7 +60,9 @@ typedef struct TM2Context{ int *clast; /* data for current and previous frame */ + int *Y1_base, *U1_base, *V1_base, *Y2_base, *U2_base, *V2_base; int *Y1, *U1, *V1, *Y2, *U2, *V2; + int y_stride, uv_stride; int cur; } TM2Context; @@ -130,7 +133,7 @@ static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code) /* check for correct codes parameters */ if((huff.val_bits < 1) || (huff.val_bits > 32) || - (huff.max_bits < 0) || (huff.max_bits > 32)) { + (huff.max_bits < 0) || (huff.max_bits > 25)) { av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect tree parameters - literal length: %i, max code length: %i\n", huff.val_bits, huff.max_bits); return -1; @@ -251,13 +254,14 @@ static int tm2_read_deltas(TM2Context *ctx, int stream_id) { static int tm2_read_stream(TM2Context *ctx, const 
uint8_t *buf, int stream_id, int buf_size) { int i; - int cur = 0; int skip = 0; - int len, toks; + int len, toks, pos; TM2Codes codes; + GetByteContext gb; /* get stream length in dwords */ - len = AV_RB32(buf); buf += 4; cur += 4; + bytestream2_init(&gb, buf, buf_size); + len = bytestream2_get_be32(&gb); skip = len * 4 + 4; if(len == 0) @@ -268,36 +272,37 @@ static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id, i return -1; } - toks = AV_RB32(buf); buf += 4; cur += 4; + toks = bytestream2_get_be32(&gb); if(toks & 1) { - len = AV_RB32(buf); buf += 4; cur += 4; + len = bytestream2_get_be32(&gb); if(len == TM2_ESCAPE) { - len = AV_RB32(buf); buf += 4; cur += 4; + len = bytestream2_get_be32(&gb); } if(len > 0) { - if (skip <= cur) + pos = bytestream2_tell(&gb); + if (skip <= pos) return -1; - init_get_bits(&ctx->gb, buf, (skip - cur) * 8); + init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8); if(tm2_read_deltas(ctx, stream_id) == -1) return -1; - buf += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2; - cur += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2; + bytestream2_skip(&gb, ((get_bits_count(&ctx->gb) + 31) >> 5) << 2); } } /* skip unused fields */ - if(AV_RB32(buf) == TM2_ESCAPE) { - buf += 4; cur += 4; /* some unknown length - could be escaped too */ + len = bytestream2_get_be32(&gb); + if(len == TM2_ESCAPE) { /* some unknown length - could be escaped too */ + bytestream2_skip(&gb, 8); /* unused by decoder */ + } else { + bytestream2_skip(&gb, 4); /* unused by decoder */ } - buf += 4; cur += 4; - buf += 4; cur += 4; /* unused by decoder */ - if (skip <= cur) + pos = bytestream2_tell(&gb); + if (skip <= pos) return -1; - init_get_bits(&ctx->gb, buf, (skip - cur) * 8); + init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8); if(tm2_build_huff_table(ctx, &codes) == -1) return -1; - buf += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2; - cur += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2; + bytestream2_skip(&gb, ((get_bits_count(&ctx->gb) + 31) >> 5) << 2); toks >>= 1; /* check if we have sane number of tokens */ @@ -308,21 +313,33 @@ static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id, i } ctx->tokens[stream_id] = av_realloc(ctx->tokens[stream_id], toks * sizeof(int)); ctx->tok_lens[stream_id] = toks; - len = AV_RB32(buf); buf += 4; cur += 4; + len = bytestream2_get_be32(&gb); if(len > 0) { - if (skip <= cur) + pos = bytestream2_tell(&gb); + if (skip <= pos) return -1; - init_get_bits(&ctx->gb, buf, (skip - cur) * 8); + init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8); for(i = 0; i < toks; i++) { if (get_bits_left(&ctx->gb) <= 0) { av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks); return -1; } ctx->tokens[stream_id][i] = tm2_get_token(&ctx->gb, &codes); + if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS) { + av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n", + ctx->tokens[stream_id][i], stream_id, i); + return AVERROR_INVALIDDATA; + } } } else { - for(i = 0; i < toks; i++) + for(i = 0; i < toks; i++) { ctx->tokens[stream_id][i] = codes.recode[0]; + if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS) { + av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n", + ctx->tokens[stream_id][i], stream_id, i); + return AVERROR_INVALIDDATA; + } + } } tm2_free_codes(&codes); @@ -347,9 +364,9 @@ static inline int GET_TOK(TM2Context *ctx,int type) { int *Y, *U, *V;\ int Ystride, Ustride, Vstride;\ \ - Ystride = ctx->avctx->width;\ 
- Vstride = (ctx->avctx->width + 1) >> 1;\ - Ustride = (ctx->avctx->width + 1) >> 1;\ + Ystride = ctx->y_stride;\ + Vstride = ctx->uv_stride;\ + Ustride = ctx->uv_stride;\ Y = (ctx->cur?ctx->Y2:ctx->Y1) + by * 4 * Ystride + bx * 4;\ V = (ctx->cur?ctx->V2:ctx->V1) + by * 2 * Vstride + bx * 2;\ U = (ctx->cur?ctx->U2:ctx->U1) + by * 2 * Ustride + bx * 2;\ @@ -637,6 +654,8 @@ static inline void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int b mx = GET_TOK(ctx, TM2_MOT); my = GET_TOK(ctx, TM2_MOT); + mx = av_clip(mx, -(bx * 4 + 4), ctx->avctx->width - bx * 4); + my = av_clip(my, -(by * 4 + 4), ctx->avctx->height - by * 4); Yo += my * oYstride + mx; Uo += (my >> 1) * oUstride + (mx >> 1); @@ -677,15 +696,12 @@ static inline void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int b static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p) { int i, j; - int bw, bh; + int w = ctx->avctx->width, h = ctx->avctx->height, bw = w >> 2, bh = h >> 2, cw = w >> 1; int type; int keyframe = 1; int *Y, *U, *V; uint8_t *dst; - bw = ctx->avctx->width >> 2; - bh = ctx->avctx->height >> 2; - for(i = 0; i < TM2_NUM_STREAMS; i++) ctx->tok_ptrs[i] = 0; @@ -738,17 +754,54 @@ static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p) U = (ctx->cur?ctx->U2:ctx->U1); V = (ctx->cur?ctx->V2:ctx->V1); dst = p->data[0]; - for(j = 0; j < ctx->avctx->height; j++){ - for(i = 0; i < ctx->avctx->width; i++){ + for(j = 0; j < h; j++){ + for(i = 0; i < w; i++){ int y = Y[i], u = U[i >> 1], v = V[i >> 1]; dst[3*i+0] = av_clip_uint8(y + v); dst[3*i+1] = av_clip_uint8(y); dst[3*i+2] = av_clip_uint8(y + u); } - Y += ctx->avctx->width; + + /* horizontal edge extension */ + Y[-4] = Y[-3] = Y[-2] = Y[-1] = Y[0]; + Y[w + 3] = Y[w + 2] = Y[w + 1] = Y[w] = Y[w - 1]; + + /* vertical edge extension */ + if (j == 0) { + memcpy(Y - 4 - 1 * ctx->y_stride, Y - 4, ctx->y_stride); + memcpy(Y - 4 - 2 * ctx->y_stride, Y - 4, ctx->y_stride); + memcpy(Y - 4 - 3 * ctx->y_stride, Y - 4, ctx->y_stride); + memcpy(Y - 4 - 4 * ctx->y_stride, Y - 4, ctx->y_stride); + } else if (j == h - 1) { + memcpy(Y - 4 + 1 * ctx->y_stride, Y - 4, ctx->y_stride); + memcpy(Y - 4 + 2 * ctx->y_stride, Y - 4, ctx->y_stride); + memcpy(Y - 4 + 3 * ctx->y_stride, Y - 4, ctx->y_stride); + memcpy(Y - 4 + 4 * ctx->y_stride, Y - 4, ctx->y_stride); + } + + Y += ctx->y_stride; if (j & 1) { - U += ctx->avctx->width >> 1; - V += ctx->avctx->width >> 1; + /* horizontal edge extension */ + U[-2] = U[-1] = U[0]; + V[-2] = V[-1] = V[0]; + U[cw + 1] = U[cw] = U[cw - 1]; + V[cw + 1] = V[cw] = V[cw - 1]; + + /* vertical edge extension */ + if (j == 1) { + memcpy(U - 2 - 1 * ctx->uv_stride, U - 2, ctx->uv_stride); + memcpy(V - 2 - 1 * ctx->uv_stride, V - 2, ctx->uv_stride); + memcpy(U - 2 - 2 * ctx->uv_stride, U - 2, ctx->uv_stride); + memcpy(V - 2 - 2 * ctx->uv_stride, V - 2, ctx->uv_stride); + } else if (j == h - 1) { + memcpy(U - 2 + 1 * ctx->uv_stride, U - 2, ctx->uv_stride); + memcpy(V - 2 + 1 * ctx->uv_stride, V - 2, ctx->uv_stride); + memcpy(U - 2 + 2 * ctx->uv_stride, U - 2, ctx->uv_stride); + memcpy(V - 2 + 2 * ctx->uv_stride, V - 2, ctx->uv_stride); + } + + U += ctx->uv_stride; + V += ctx->uv_stride; } dst += p->linesize[0]; } @@ -765,7 +818,7 @@ static int decode_frame(AVCodecContext *avctx, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; - int buf_size = avpkt->size; + int buf_size = avpkt->size & ~3; TM2Context * const l = avctx->priv_data; AVFrame * const p= (AVFrame*)&l->pic; int i, skip, t; @@ -790,9 +843,12 @@ static int decode_frame(AVCodecContext 
*avctx, } for(i = 0; i < TM2_NUM_STREAMS; i++){ - t = tm2_read_stream(l, l->buffer + skip, tm2_stream_order[i], buf_size); - if(t == -1){ - return -1; + if (skip >= buf_size) { + return AVERROR_INVALIDDATA; + } + t = tm2_read_stream(l, l->buffer + skip, tm2_stream_order[i], buf_size - skip); + if(t < 0){ + return t; } skip += t; } @@ -811,7 +867,7 @@ static int decode_frame(AVCodecContext *avctx, static av_cold int decode_init(AVCodecContext *avctx){ TM2Context * const l = avctx->priv_data; - int i; + int i, w = avctx->width, h = avctx->height; if((avctx->width & 3) || (avctx->height & 3)){ av_log(avctx, AV_LOG_ERROR, "Width and height must be multiple of 4\n"); @@ -825,21 +881,46 @@ static av_cold int decode_init(AVCodecContext *avctx){ dsputil_init(&l->dsp, avctx); - l->last = av_malloc(4 * sizeof(int) * (avctx->width >> 2)); - l->clast = av_malloc(4 * sizeof(int) * (avctx->width >> 2)); + l->last = av_malloc(4 * sizeof(*l->last) * (w >> 2)); + l->clast = av_malloc(4 * sizeof(*l->clast) * (w >> 2)); for(i = 0; i < TM2_NUM_STREAMS; i++) { l->tokens[i] = NULL; l->tok_lens[i] = 0; } - l->Y1 = av_malloc(sizeof(int) * avctx->width * avctx->height); - l->U1 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1)); - l->V1 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1)); - l->Y2 = av_malloc(sizeof(int) * avctx->width * avctx->height); - l->U2 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1)); - l->V2 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1)); + w += 8; + h += 8; + l->Y1_base = av_malloc(sizeof(*l->Y1_base) * w * h); + l->Y2_base = av_malloc(sizeof(*l->Y2_base) * w * h); + l->y_stride = w; + w = (w + 1) >> 1; + h = (h + 1) >> 1; + l->U1_base = av_malloc(sizeof(*l->U1_base) * w * h); + l->V1_base = av_malloc(sizeof(*l->V1_base) * w * h); + l->U2_base = av_malloc(sizeof(*l->U2_base) * w * h); + l->V2_base = av_malloc(sizeof(*l->V1_base) * w * h); + l->uv_stride = w; l->cur = 0; + if (!l->Y1_base || !l->Y2_base || !l->U1_base || + !l->V1_base || !l->U2_base || !l->V2_base || + !l->last || !l->clast) { + av_freep(l->Y1_base); + av_freep(l->Y2_base); + av_freep(l->U1_base); + av_freep(l->U2_base); + av_freep(l->V1_base); + av_freep(l->V2_base); + av_freep(l->last); + av_freep(l->clast); + return AVERROR(ENOMEM); + } + l->Y1 = l->Y1_base + l->y_stride * 4 + 4; + l->Y2 = l->Y2_base + l->y_stride * 4 + 4; + l->U1 = l->U1_base + l->uv_stride * 2 + 2; + l->U2 = l->U2_base + l->uv_stride * 2 + 2; + l->V1 = l->V1_base + l->uv_stride * 2 + 2; + l->V2 = l->V2_base + l->uv_stride * 2 + 2; return 0; } @@ -854,12 +935,12 @@ static av_cold int decode_end(AVCodecContext *avctx){ for(i = 0; i < TM2_NUM_STREAMS; i++) av_free(l->tokens[i]); if(l->Y1){ - av_free(l->Y1); - av_free(l->U1); - av_free(l->V1); - av_free(l->Y2); - av_free(l->U2); - av_free(l->V2); + av_free(l->Y1_base); + av_free(l->U1_base); + av_free(l->V1_base); + av_free(l->Y2_base); + av_free(l->U2_base); + av_free(l->V2_base); } av_freep(&l->buffer); l->buffer_size = 0; diff --git a/libavcodec/utvideo.c b/libavcodec/utvideo.c index a45f13fa54..dbedb1e8c6 100644 --- a/libavcodec/utvideo.c +++ b/libavcodec/utvideo.c @@ -358,13 +358,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; - const uint8_t *buf_end = buf + buf_size; UtvideoContext *c = avctx->priv_data; - const uint8_t *ptr; int i, j; const uint8_t *plane_start[5]; int 
plane_size, max_slice_size = 0, slice_start, slice_end, slice_size; int ret; + GetByteContext gb; if (c->pic.data[0]) ff_thread_release_buffer(avctx, &c->pic); @@ -379,20 +378,21 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac ff_thread_finish_setup(avctx); /* parse plane structure to retrieve frame flags and validate slice offsets */ - ptr = buf; + bytestream2_init(&gb, buf, buf_size); for (i = 0; i < c->planes; i++) { - plane_start[i] = ptr; - if (buf_end - ptr < 256 + 4 * c->slices) { + plane_start[i] = gb.buffer; + if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) { av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n"); return AVERROR_INVALIDDATA; } - ptr += 256; + bytestream2_skipu(&gb, 256); slice_start = 0; slice_end = 0; for (j = 0; j < c->slices; j++) { - slice_end = bytestream_get_le32(&ptr); + slice_end = bytestream2_get_le32u(&gb); slice_size = slice_end - slice_start; - if (slice_size < 0) { + if (slice_end <= 0 || slice_size <= 0 || + bytestream2_get_bytes_left(&gb) < slice_end) { av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n"); return AVERROR_INVALIDDATA; } @@ -400,18 +400,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac max_slice_size = FFMAX(max_slice_size, slice_size); } plane_size = slice_end; - if (buf_end - ptr < plane_size) { - av_log(avctx, AV_LOG_ERROR, "Plane size is bigger than available data\n"); - return AVERROR_INVALIDDATA; - } - ptr += plane_size; + bytestream2_skipu(&gb, plane_size); } - plane_start[c->planes] = ptr; - if (buf_end - ptr < c->frame_info_size) { + plane_start[c->planes] = gb.buffer; + if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) { av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n"); return AVERROR_INVALIDDATA; } - c->frame_info = AV_RL32(ptr); + c->frame_info = bytestream2_get_le32u(&gb); av_log(avctx, AV_LOG_DEBUG, "frame information flags %X\n", c->frame_info); c->frame_pred = (c->frame_info >> 8) & 3; diff --git a/libavcodec/vc1.c b/libavcodec/vc1.c index 64884fcc50..c53e7a5095 100644 --- a/libavcodec/vc1.c +++ b/libavcodec/vc1.c @@ -493,7 +493,7 @@ static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb) int nr, dr; nr = get_bits(gb, 8); dr = get_bits(gb, 4); - if (nr && nr < 8 && dr && dr < 3) { + if (nr > 0 && nr < 8 && dr > 0 && dr < 3) { v->s.avctx->time_base.num = ff_vc1_fps_dr[dr - 1]; v->s.avctx->time_base.den = ff_vc1_fps_nr[nr - 1] * 1000; } diff --git a/libavcodec/vc1data.c b/libavcodec/vc1data.c index 7f979ba109..1fff617de6 100644 --- a/libavcodec/vc1data.c +++ b/libavcodec/vc1data.c @@ -84,7 +84,7 @@ const uint8_t ff_vc1_mbmode_intfrp[2][15][4] = { } }; -const int ff_vc1_fps_nr[5] = { 24, 25, 30, 50, 60 }, +const int ff_vc1_fps_nr[7] = { 24, 25, 30, 50, 60, 48, 72 }, ff_vc1_fps_dr[2] = { 1000, 1001 }; const uint8_t ff_vc1_pquant_table[3][32] = { /* Implicit quantizer */ diff --git a/libavcodec/vc1data.h b/libavcodec/vc1data.h index f11122504b..5511d60de4 100644 --- a/libavcodec/vc1data.h +++ b/libavcodec/vc1data.h @@ -41,7 +41,7 @@ extern const int ff_vc1_ttfrm_to_tt[4]; extern const uint8_t ff_vc1_mv_pmode_table[2][5]; extern const uint8_t ff_vc1_mv_pmode_table2[2][4]; -extern const int ff_vc1_fps_nr[5], ff_vc1_fps_dr[2]; +extern const int ff_vc1_fps_nr[7], ff_vc1_fps_dr[2]; extern const uint8_t ff_vc1_pquant_table[3][32]; /* MBMODE table for interlaced frame P-picture */ diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c index 548164ae5d..e1e82e1ae0 100644 --- 
a/libavcodec/vc1dec.c +++ b/libavcodec/vc1dec.c @@ -2512,6 +2512,7 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n, int16_t *dc_val; int mb_pos = s->mb_x + s->mb_y * s->mb_stride; int q1, q2 = 0; + int dqscale_index; wrap = s->block_wrap[n]; dc_val = s->dc_val[0] + s->block_index[n]; @@ -2524,15 +2525,18 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n, a = dc_val[ - wrap]; /* scale predictors if needed */ q1 = s->current_picture.f.qscale_table[mb_pos]; + dqscale_index = s->y_dc_scale_table[q1] - 1; + if (dqscale_index < 0) + return 0; if (c_avail && (n != 1 && n != 3)) { q2 = s->current_picture.f.qscale_table[mb_pos - 1]; if (q2 && q2 != q1) - c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18; + c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18; } if (a_avail && (n != 2 && n != 3)) { q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride]; if (q2 && q2 != q1) - a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18; + a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18; } if (a_avail && c_avail && (n != 3)) { int off = mb_pos; @@ -2542,7 +2546,7 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n, off -= s->mb_stride; q2 = s->current_picture.f.qscale_table[off]; if (q2 && q2 != q1) - b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18; + b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18; } if (a_avail && c_avail) { @@ -2959,6 +2963,8 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1; q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1; + if (q1 < 1) + return AVERROR_INVALIDDATA; if (dc_pred_dir) { // left for (k = 1; k < 8; k++) block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18; @@ -3001,6 +3007,8 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, if (q2 && q1 != q2) { q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1; q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1; + if (q1 < 1) + return AVERROR_INVALIDDATA; for (k = 1; k < 8; k++) ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18; } @@ -3011,6 +3019,8 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, if (q2 && q1 != q2) { q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1; q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1; + if (q1 < 1) + return AVERROR_INVALIDDATA; for (k = 1; k < 8; k++) ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18; } @@ -3169,6 +3179,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1; q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1; + if (q1 < 1) + return AVERROR_INVALIDDATA; if (dc_pred_dir) { // left for (k = 1; k < 8; k++) block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18; @@ -3211,6 +3223,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, if (q2 && q1 != q2) { q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1; q2 = q2 * 2 + ((q2 == v->pq) ? 
v->halfpq : 0) - 1; + if (q1 < 1) + return AVERROR_INVALIDDATA; for (k = 1; k < 8; k++) ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18; } @@ -3221,6 +3235,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, if (q2 && q1 != q2) { q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1; q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1; + if (q1 < 1) + return AVERROR_INVALIDDATA; for (k = 1; k < 8; k++) ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18; } diff --git a/libavcodec/vqavideo.c b/libavcodec/vqavideo.c index ae99c6d9c1..08d419dd73 100644 --- a/libavcodec/vqavideo.c +++ b/libavcodec/vqavideo.c @@ -70,10 +70,10 @@ #include "libavutil/intreadwrite.h" #include "libavutil/imgutils.h" #include "avcodec.h" +#include "bytestream.h" #define PALETTE_COUNT 256 #define VQA_HEADER_SIZE 0x2A -#define CHUNK_PREAMBLE_SIZE 8 /* allocate the maximum vector space, regardless of the file version: * (0xFF00 codebook vectors + 0x100 solid pixel vectors) * (4x4 pixels/block) */ @@ -94,9 +94,7 @@ typedef struct VqaContext { AVCodecContext *avctx; AVFrame frame; - - const unsigned char *buf; - int size; + GetByteContext gb; uint32_t palette[PALETTE_COUNT]; @@ -123,7 +121,6 @@ typedef struct VqaContext { static av_cold int vqa_decode_init(AVCodecContext *avctx) { VqaContext *s = avctx->priv_data; - unsigned char *vqa_header; int i, j, codebook_index; s->avctx = avctx; @@ -136,21 +133,20 @@ static av_cold int vqa_decode_init(AVCodecContext *avctx) } /* load up the VQA parameters from the header */ - vqa_header = (unsigned char *)s->avctx->extradata; - s->vqa_version = vqa_header[0]; + s->vqa_version = s->avctx->extradata[0]; if (s->vqa_version < 1 || s->vqa_version > 3) { av_log(s->avctx, AV_LOG_ERROR, " VQA video: unsupported version %d\n", s->vqa_version); return -1; } - s->width = AV_RL16(&vqa_header[6]); - s->height = AV_RL16(&vqa_header[8]); + s->width = AV_RL16(&s->avctx->extradata[6]); + s->height = AV_RL16(&s->avctx->extradata[8]); if(av_image_check_size(s->width, s->height, 0, avctx)){ s->width= s->height= 0; return -1; } - s->vector_width = vqa_header[10]; - s->vector_height = vqa_header[11]; - s->partial_count = s->partial_countdown = vqa_header[13]; + s->vector_width = s->avctx->extradata[10]; + s->vector_height = s->avctx->extradata[11]; + s->partial_count = s->partial_countdown = s->avctx->extradata[13]; /* the vector dimensions have to meet very stringent requirements */ if ((s->vector_width != 4) || @@ -194,90 +190,88 @@ static av_cold int vqa_decode_init(AVCodecContext *avctx) av_log(NULL, AV_LOG_ERROR, " VQA video: decode_format80 problem: next op would overflow dest_index\n"); \ av_log(NULL, AV_LOG_ERROR, " VQA video: current dest_index = %d, count = %d, dest_size = %d\n", \ dest_index, count, dest_size); \ - return; \ + return AVERROR_INVALIDDATA; \ + } + +#define CHECK_COPY(idx) \ + if (idx < 0 || idx + count > dest_size) { \ + av_log(NULL, AV_LOG_ERROR, " VQA video: decode_format80 problem: next op would overflow dest_index\n"); \ + av_log(NULL, AV_LOG_ERROR, " VQA video: current src_pos = %d, count = %d, dest_size = %d\n", \ + src_pos, count, dest_size); \ + return AVERROR_INVALIDDATA; \ } -static void decode_format80(const unsigned char *src, int src_size, + +static int decode_format80(GetByteContext *gb, int src_size, unsigned char *dest, int dest_size, int check_size) { - int src_index = 0; int dest_index = 0; - int count; + int count, opcode, start; int src_pos; unsigned char color; int i; - while 
(src_index < src_size) { - - av_dlog(NULL, " opcode %02X: ", src[src_index]); + start = bytestream2_tell(gb); + while (bytestream2_tell(gb) - start < src_size) { + opcode = bytestream2_get_byte(gb); + av_dlog(NULL, " opcode %02X: ", opcode); /* 0x80 means that frame is finished */ - if (src[src_index] == 0x80) - return; + if (opcode == 0x80) + return 0; if (dest_index >= dest_size) { av_log(NULL, AV_LOG_ERROR, " VQA video: decode_format80 problem: dest_index (%d) exceeded dest_size (%d)\n", dest_index, dest_size); - return; + return AVERROR_INVALIDDATA; } - if (src[src_index] == 0xFF) { + if (opcode == 0xFF) { - src_index++; - count = AV_RL16(&src[src_index]); - src_index += 2; - src_pos = AV_RL16(&src[src_index]); - src_index += 2; + count = bytestream2_get_le16(gb); + src_pos = bytestream2_get_le16(gb); av_dlog(NULL, "(1) copy %X bytes from absolute pos %X\n", count, src_pos); CHECK_COUNT(); - if (src_pos + count > dest_size) - return; + CHECK_COPY(src_pos); for (i = 0; i < count; i++) dest[dest_index + i] = dest[src_pos + i]; dest_index += count; - } else if (src[src_index] == 0xFE) { + } else if (opcode == 0xFE) { - src_index++; - count = AV_RL16(&src[src_index]); - src_index += 2; - color = src[src_index++]; + count = bytestream2_get_le16(gb); + color = bytestream2_get_byte(gb); av_dlog(NULL, "(2) set %X bytes to %02X\n", count, color); CHECK_COUNT(); memset(&dest[dest_index], color, count); dest_index += count; - } else if ((src[src_index] & 0xC0) == 0xC0) { + } else if ((opcode & 0xC0) == 0xC0) { - count = (src[src_index++] & 0x3F) + 3; - src_pos = AV_RL16(&src[src_index]); - src_index += 2; + count = (opcode & 0x3F) + 3; + src_pos = bytestream2_get_le16(gb); av_dlog(NULL, "(3) copy %X bytes from absolute pos %X\n", count, src_pos); CHECK_COUNT(); - if (src_pos + count > dest_size) - return; + CHECK_COPY(src_pos); for (i = 0; i < count; i++) dest[dest_index + i] = dest[src_pos + i]; dest_index += count; - } else if (src[src_index] > 0x80) { + } else if (opcode > 0x80) { - count = src[src_index++] & 0x3F; + count = opcode & 0x3F; av_dlog(NULL, "(4) copy %X bytes from source to dest\n", count); CHECK_COUNT(); - memcpy(&dest[dest_index], &src[src_index], count); - src_index += count; + bytestream2_get_buffer(gb, &dest[dest_index], count); dest_index += count; } else { - count = ((src[src_index] & 0x70) >> 4) + 3; - src_pos = AV_RB16(&src[src_index]) & 0x0FFF; - src_index += 2; + count = ((opcode & 0x70) >> 4) + 3; + src_pos = bytestream2_get_byte(gb) | ((opcode & 0x0F) << 8); av_dlog(NULL, "(5) copy %X bytes from relpos %X\n", count, src_pos); CHECK_COUNT(); - if (dest_index < src_pos) - return; + CHECK_COPY(dest_index - src_pos); for (i = 0; i < count; i++) dest[dest_index + i] = dest[dest_index - src_pos + i]; dest_index += count; @@ -292,9 +286,11 @@ static void decode_format80(const unsigned char *src, int src_size, if (dest_index < dest_size) av_log(NULL, AV_LOG_ERROR, " VQA video: decode_format80 problem: decode finished with dest_index (%d) < dest_size (%d)\n", dest_index, dest_size); + + return 0; // let's display what we decoded anyway } -static void vqa_decode_chunk(VqaContext *s) +static int vqa_decode_chunk(VqaContext *s) { unsigned int chunk_type; unsigned int chunk_size; @@ -303,6 +299,7 @@ static void vqa_decode_chunk(VqaContext *s) int i; unsigned char r, g, b; int index_shift; + int res; int cbf0_chunk = -1; int cbfz_chunk = -1; @@ -322,17 +319,11 @@ static void vqa_decode_chunk(VqaContext *s) int hibytes = s->decode_buffer_size / 2; /* first, traverse through the 
frame and find the subchunks */ - while (index + CHUNK_PREAMBLE_SIZE <= s->size) { - unsigned next_index; + while (bytestream2_get_bytes_left(&s->gb) >= 8) { - chunk_type = AV_RB32(&s->buf[index]); - chunk_size = AV_RB32(&s->buf[index + 4]); - byte_skip = chunk_size & 0x01; - next_index = index + CHUNK_PREAMBLE_SIZE + chunk_size + byte_skip; - if (next_index > s->size) { - av_log(s->avctx, AV_LOG_ERROR, "Dropping incomplete chunk\n"); - break; - } + chunk_type = bytestream2_get_be32u(&s->gb); + index = bytestream2_tell(&s->gb); + chunk_size = bytestream2_get_be32u(&s->gb); switch (chunk_type) { @@ -373,7 +364,9 @@ static void vqa_decode_chunk(VqaContext *s) chunk_type); break; } - index = next_index; + + byte_skip = chunk_size & 0x01; + bytestream2_skip(&s->gb, chunk_size + byte_skip); } /* next, deal with the palette */ @@ -381,7 +374,7 @@ static void vqa_decode_chunk(VqaContext *s) /* a chunk should not have both chunk types */ av_log(s->avctx, AV_LOG_ERROR, " VQA video: problem: found both CPL0 and CPLZ chunks\n"); - return; + return AVERROR_INVALIDDATA; } /* decompress the palette chunk */ @@ -394,19 +387,19 @@ static void vqa_decode_chunk(VqaContext *s) /* convert the RGB palette into the machine's endian format */ if (cpl0_chunk != -1) { - chunk_size = AV_RB32(&s->buf[cpl0_chunk + 4]); + bytestream2_seek(&s->gb, cpl0_chunk, SEEK_SET); + chunk_size = bytestream2_get_be32(&s->gb); /* sanity check the palette size */ - if (chunk_size / 3 > 256) { + if (chunk_size / 3 > 256 || chunk_size > bytestream2_get_bytes_left(&s->gb)) { av_log(s->avctx, AV_LOG_ERROR, " VQA video: problem: found a palette chunk with %d colors\n", chunk_size / 3); - return; + return AVERROR_INVALIDDATA; } - cpl0_chunk += CHUNK_PREAMBLE_SIZE; for (i = 0; i < chunk_size / 3; i++) { /* scale by 4 to transform 6-bit palette -> 8-bit */ - r = s->buf[cpl0_chunk++] * 4; - g = s->buf[cpl0_chunk++] * 4; - b = s->buf[cpl0_chunk++] * 4; + r = bytestream2_get_byteu(&s->gb) * 4; + g = bytestream2_get_byteu(&s->gb) * 4; + b = bytestream2_get_byteu(&s->gb) * 4; s->palette[i] = 0xFF << 24 | r << 16 | g << 8 | b; s->palette[i] |= s->palette[i] >> 6 & 0x30303; } @@ -417,31 +410,32 @@ static void vqa_decode_chunk(VqaContext *s) /* a chunk should not have both chunk types */ av_log(s->avctx, AV_LOG_ERROR, " VQA video: problem: found both CBF0 and CBFZ chunks\n"); - return; + return AVERROR_INVALIDDATA; } /* decompress the full codebook chunk */ if (cbfz_chunk != -1) { - chunk_size = AV_RB32(&s->buf[cbfz_chunk + 4]); - cbfz_chunk += CHUNK_PREAMBLE_SIZE; - decode_format80(&s->buf[cbfz_chunk], chunk_size, - s->codebook, s->codebook_size, 0); + bytestream2_seek(&s->gb, cbfz_chunk, SEEK_SET); + chunk_size = bytestream2_get_be32(&s->gb); + if ((res = decode_format80(&s->gb, chunk_size, s->codebook, + s->codebook_size, 0)) < 0) + return res; } /* copy a full codebook */ if (cbf0_chunk != -1) { - chunk_size = AV_RB32(&s->buf[cbf0_chunk + 4]); + bytestream2_seek(&s->gb, cbf0_chunk, SEEK_SET); + chunk_size = bytestream2_get_be32(&s->gb); /* sanity check the full codebook size */ if (chunk_size > MAX_CODEBOOK_SIZE) { av_log(s->avctx, AV_LOG_ERROR, " VQA video: problem: CBF0 chunk too large (0x%X bytes)\n", chunk_size); - return; + return AVERROR_INVALIDDATA; } - cbf0_chunk += CHUNK_PREAMBLE_SIZE; - memcpy(s->codebook, &s->buf[cbf0_chunk], chunk_size); + bytestream2_get_buffer(&s->gb, s->codebook, chunk_size); } /* decode the frame */ @@ -449,13 +443,14 @@ static void vqa_decode_chunk(VqaContext *s) /* something is wrong if there is no VPTZ chunk 
*/ av_log(s->avctx, AV_LOG_ERROR, " VQA video: problem: no VPTZ chunk found\n"); - return; + return AVERROR_INVALIDDATA; } - chunk_size = AV_RB32(&s->buf[vptz_chunk + 4]); - vptz_chunk += CHUNK_PREAMBLE_SIZE; - decode_format80(&s->buf[vptz_chunk], chunk_size, - s->decode_buffer, s->decode_buffer_size, 1); + bytestream2_seek(&s->gb, vptz_chunk, SEEK_SET); + chunk_size = bytestream2_get_be32(&s->gb); + if ((res = decode_format80(&s->gb, chunk_size, + s->decode_buffer, s->decode_buffer_size, 1)) < 0) + return res; /* render the final PAL8 frame */ if (s->vector_height == 4) @@ -519,17 +514,17 @@ static void vqa_decode_chunk(VqaContext *s) if ((cbp0_chunk != -1) && (cbpz_chunk != -1)) { /* a chunk should not have both chunk types */ av_log(s->avctx, AV_LOG_ERROR, " VQA video: problem: found both CBP0 and CBPZ chunks\n"); - return; + return AVERROR_INVALIDDATA; } if (cbp0_chunk != -1) { - chunk_size = AV_RB32(&s->buf[cbp0_chunk + 4]); - cbp0_chunk += CHUNK_PREAMBLE_SIZE; + bytestream2_seek(&s->gb, cbp0_chunk, SEEK_SET); + chunk_size = bytestream2_get_be32(&s->gb); /* accumulate partial codebook */ - memcpy(&s->next_codebook_buffer[s->next_codebook_buffer_index], - &s->buf[cbp0_chunk], chunk_size); + bytestream2_get_buffer(&s->gb, &s->next_codebook_buffer[s->next_codebook_buffer_index], + chunk_size); s->next_codebook_buffer_index += chunk_size; s->partial_countdown--; @@ -547,39 +542,39 @@ static void vqa_decode_chunk(VqaContext *s) if (cbpz_chunk != -1) { - chunk_size = AV_RB32(&s->buf[cbpz_chunk + 4]); - cbpz_chunk += CHUNK_PREAMBLE_SIZE; + bytestream2_seek(&s->gb, cbpz_chunk, SEEK_SET); + chunk_size = bytestream2_get_be32(&s->gb); /* accumulate partial codebook */ - memcpy(&s->next_codebook_buffer[s->next_codebook_buffer_index], - &s->buf[cbpz_chunk], chunk_size); + bytestream2_get_buffer(&s->gb, &s->next_codebook_buffer[s->next_codebook_buffer_index], + chunk_size); s->next_codebook_buffer_index += chunk_size; s->partial_countdown--; if (s->partial_countdown == 0) { + GetByteContext gb; + bytestream2_init(&gb, s->next_codebook_buffer, s->next_codebook_buffer_index); /* decompress codebook */ - decode_format80(s->next_codebook_buffer, - s->next_codebook_buffer_index, - s->codebook, s->codebook_size, 0); + if ((res = decode_format80(&gb, s->next_codebook_buffer_index, + s->codebook, s->codebook_size, 0)) < 0) + return res; /* reset accounting */ s->next_codebook_buffer_index = 0; s->partial_countdown = s->partial_count; } } + + return 0; } static int vqa_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { - const uint8_t *buf = avpkt->data; - int buf_size = avpkt->size; VqaContext *s = avctx->priv_data; - - s->buf = buf; - s->size = buf_size; + int res; if (s->frame.data[0]) avctx->release_buffer(avctx, &s->frame); @@ -589,7 +584,9 @@ static int vqa_decode_frame(AVCodecContext *avctx, return -1; } - vqa_decode_chunk(s); + bytestream2_init(&s->gb, avpkt->data, avpkt->size); + if ((res = vqa_decode_chunk(s)) < 0) + return res; /* make the palette available on the way out */ memcpy(s->frame.data[1], s->palette, PALETTE_COUNT * 4); @@ -599,7 +596,7 @@ static int vqa_decode_frame(AVCodecContext *avctx, *(AVFrame*)data = s->frame; /* report that the buffer was completely consumed */ - return buf_size; + return avpkt->size; } static av_cold int vqa_decode_end(AVCodecContext *avctx) diff --git a/libavcodec/wmavoice.c b/libavcodec/wmavoice.c index 2be0cf25f2..b35eaa7224 100644 --- a/libavcodec/wmavoice.c +++ b/libavcodec/wmavoice.c @@ -1440,8 +1440,7 @@ static int 
synth_frame(AVCodecContext *ctx, GetBitContext *gb, int frame_idx,
     int pitch[MAX_BLOCKS], last_block_pitch;
 
     /* Parse frame type ("frame header"), see frame_descs */
-    int bd_idx = s->vbm_tree[get_vlc2(gb, frame_type_vlc.table, 6, 3)],
-        block_nsamples = MAX_FRAMESIZE / frame_descs[bd_idx].n_blocks;
+    int bd_idx = s->vbm_tree[get_vlc2(gb, frame_type_vlc.table, 6, 3)], block_nsamples;
 
     if (bd_idx < 0) {
         av_log(ctx, AV_LOG_ERROR,
@@ -1449,6 +1448,8 @@ static int synth_frame(AVCodecContext *ctx, GetBitContext *gb, int frame_idx,
         return -1;
     }
 
+    block_nsamples = MAX_FRAMESIZE / frame_descs[bd_idx].n_blocks;
+
     /* Pitch calculation for ACB_TYPE_ASYMMETRIC ("pitch-per-frame") */
     if (frame_descs[bd_idx].acb_type == ACB_TYPE_ASYMMETRIC) {
         /* Pitch is provided per frame, which is interpreted as the pitch of
diff --git a/libavformat/asfdec.c b/libavformat/asfdec.c
index 0deafc6e3e..27685ee218 100644
--- a/libavformat/asfdec.c
+++ b/libavformat/asfdec.c
@@ -1100,6 +1100,8 @@ static int ff_asf_parse_packet(AVFormatContext *s, AVIOContext *pb, AVPacket *pk
             //printf("packet %d %d\n", asf_st->pkt.size, asf->packet_frag_size);
             asf_st->pkt.size = 0;
             asf_st->pkt.data = 0;
+            asf_st->pkt.side_data_elems = 0;
+            asf_st->pkt.side_data = NULL;
             break; // packet completed
         }
     }
diff --git a/libavformat/mov.c b/libavformat/mov.c
index 9da8eab84f..95bc3dee40 100644
--- a/libavformat/mov.c
+++ b/libavformat/mov.c
@@ -1767,6 +1767,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
     unsigned int stps_index = 0;
     unsigned int i, j;
     uint64_t stream_size = 0;
+    AVIndexEntry *mem;
 
     /* adjust first dts according to edit list */
     if ((sc->empty_duration || sc->start_time) && mov->time_scale > 0) {
@@ -1796,12 +1797,13 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
         if (!sc->sample_count)
             return;
 
-        if (sc->sample_count >= UINT_MAX / sizeof(*st->index_entries))
+        if (sc->sample_count >= UINT_MAX / sizeof(*st->index_entries) - st->nb_index_entries)
             return;
-        st->index_entries = av_malloc(sc->sample_count*sizeof(*st->index_entries));
-        if (!st->index_entries)
+        mem = av_realloc(st->index_entries, (st->nb_index_entries + sc->sample_count) * sizeof(*st->index_entries));
+        if (!mem)
             return;
-        st->index_entries_allocated_size = sc->sample_count*sizeof(*st->index_entries);
+        st->index_entries = mem;
+        st->index_entries_allocated_size = (st->nb_index_entries + sc->sample_count) * sizeof(*st->index_entries);
 
         for (i = 0; i < sc->chunk_count; i++) {
             current_offset = sc->chunk_offsets[i];
@@ -1885,12 +1887,13 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
         }
 
         av_dlog(mov->fc, "chunk count %d\n", total);
-        if (total >= UINT_MAX / sizeof(*st->index_entries))
+        if (total >= UINT_MAX / sizeof(*st->index_entries) - st->nb_index_entries)
            return;
-        st->index_entries = av_malloc(total*sizeof(*st->index_entries));
-        if (!st->index_entries)
+        mem = av_realloc(st->index_entries, (st->nb_index_entries + total) * sizeof(*st->index_entries));
+        if (!mem)
             return;
-        st->index_entries_allocated_size = total*sizeof(*st->index_entries);
+        st->index_entries = mem;
+        st->index_entries_allocated_size = (st->nb_index_entries + total) * sizeof(*st->index_entries);
 
         // populate index
         for (i = 0; i < sc->chunk_count; i++) {
@@ -2831,7 +2834,7 @@ static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
 
     pkt->stream_index = sc->ffindex;
     pkt->dts = sample->timestamp;
-    if (sc->ctts_data) {
+    if (sc->ctts_data && sc->ctts_index < sc->ctts_count) {
         pkt->pts = pkt->dts + sc->dts_shift + sc->ctts_data[sc->ctts_index].duration;
         /* update ctts context */
         sc->ctts_sample++;
diff --git a/libavformat/xwma.c b/libavformat/xwma.c
index a4e53fa2d2..44de49a159 100644
--- a/libavformat/xwma.c
+++ b/libavformat/xwma.c
@@ -115,6 +115,17 @@ static int xwma_read_header(AVFormatContext *s, AVFormatParameters *ap)
         }
     }
 
+    if (!st->codec->channels) {
+        av_log(s, AV_LOG_WARNING, "Invalid channel count: %d\n",
+               st->codec->channels);
+        return AVERROR_INVALIDDATA;
+    }
+    if (!st->codec->bits_per_coded_sample) {
+        av_log(s, AV_LOG_WARNING, "Invalid bits_per_coded_sample: %d\n",
+               st->codec->bits_per_coded_sample);
+        return AVERROR_INVALIDDATA;
+    }
+
     /* set the sample rate */
     avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
 
diff --git a/tests/ref/fate/vqa-cc b/tests/ref/fate/vqa-cc
index fdc7e72eeb..e15e727fa1 100644
--- a/tests/ref/fate/vqa-cc
+++ b/tests/ref/fate/vqa-cc
@@ -68,7 +68,6 @@
 1, 218996, 2940, 0xac8bb6c8
 0, 222000, 192000, 0xb58c1566
 1, 224996, 2940, 0xa503c73b
-0, 228000, 192000, 0xb58c1566
 1, 230996, 2940, 0x7cd588a3
 1, 236996, 2940, 0xa6974b04
 1, 242996, 2940, 0xbf448241
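
The bytestream.h hunks above add a bounds-checked writer, PutByteContext, next to the existing GetByteContext reader: a write that would run past the end of the buffer is dropped and only raises the context's eof flag, so a caller can emit a whole record and test for truncation once at the end. Below is a minimal sketch of how that writer API could be driven; the record layout and the helper name write_demo_header are made up for illustration and are not part of the patch.

    #include <stdint.h>

    #include "libavcodec/bytestream.h"

    /* Hypothetical helper: serialize a small header into 'dst' with the
     * PutByteContext writer introduced by this patch.  Oversized writes do
     * not overflow; they just set the eof flag, which is checked once at
     * the end instead of after every individual put. */
    static int write_demo_header(uint8_t *dst, int dst_size,
                                 uint32_t magic, uint16_t width, uint16_t height)
    {
        PutByteContext pb;

        bytestream2_init_writer(&pb, dst, dst_size);
        bytestream2_put_be32(&pb, magic);   /* 4 bytes, big-endian        */
        bytestream2_put_le16(&pb, width);   /* 2 bytes, little-endian     */
        bytestream2_put_le16(&pb, height);
        bytestream2_set_buffer(&pb, 0, 8);  /* zero-filled reserved area  */

        if (bytestream2_get_eof(&pb))       /* did any write fall off the end? */
            return -1;
        return bytestream2_tell_p(&pb);     /* number of bytes written    */
    }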
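Several of the decoders touched above (truemotion2, utvideo, vqavideo) replace raw pointer arithmetic with GetByteContext so that truncated input is rejected instead of being read past the end. The sketch below shows that pattern on a made-up chunk layout (tag, 32-bit size, payload); the format and the helper name parse_demo_chunk are illustrative only and are not one of the real bitstreams handled by the patch.

    #include <stdint.h>

    #include "libavcodec/bytestream.h"
    #include "libavutil/error.h"

    /* Hypothetical parser for a "tag + 32-bit size + payload" chunk: make one
     * cheap bytes-left check for the fixed-size header, use the unchecked *_u
     * variants for it, then re-check before touching the payload. */
    static int parse_demo_chunk(const uint8_t *buf, int buf_size,
                                uint32_t *tag, const uint8_t **payload,
                                unsigned *payload_size)
    {
        GetByteContext gb;

        bytestream2_init(&gb, buf, buf_size);
        if (bytestream2_get_bytes_left(&gb) < 8)
            return AVERROR_INVALIDDATA;         /* header truncated        */

        *tag          = bytestream2_get_be32u(&gb); /* safe: 8 bytes verified */
        *payload_size = bytestream2_get_be32u(&gb);

        if (bytestream2_get_bytes_left(&gb) < *payload_size)
            return AVERROR_INVALIDDATA;         /* payload truncated       */

        *payload = gb.buffer;                   /* borrow, no copy         */
        bytestream2_skipu(&gb, *payload_size);
        return bytestream2_tell(&gb);           /* bytes consumed          */
    }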