* commit '9dbbda235d93d628777b986e502213f1ed390973' (tags/n1.2):
  vb: return meaningful error codes.
  ptx: return meaningful error codes.
  tiff: return meaningful error codes.
  vqavideo: return meaningful error codes.
  mss2: return meaningful error codes.
  v210dec: return meaningful error codes
  indeo2: cosmetics, reformat

Conflicts:
        libavcodec/indeo2.c
        libavcodec/tiff.c
        libavcodec/v210dec.c
        libavcodec/vb.c
        libavcodec/vqavideo.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
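The recurring change across these files is a two-step convention: replace bare "return -1" with a descriptive AVERROR value, and capture the return value of helper calls so the callee's error code is forwarded rather than flattened to -1. Below is a minimal illustrative sketch of that pattern, not code from the diffs that follow; decode_header() and parse_header() are hypothetical names, and it assumes the libavutil headers are on the include path.

/* Illustrative sketch only: decode_header()/parse_header() are hypothetical. */
#include <stdint.h>
#include <libavutil/error.h>   /* AVERROR(), AVERROR_INVALIDDATA */

/* Hypothetical helper: 0 on success, a negative AVERROR code on failure. */
static int parse_header(const uint8_t *buf, int size)
{
    return (size >= 4 && buf[0] == 0x42) ? 0 : AVERROR_INVALIDDATA;
}

static int decode_header(const uint8_t *buf, int size)
{
    int ret;

    if (size < 4)                    /* descriptive error, not a bare -1 */
        return AVERROR_INVALIDDATA;

    if ((ret = parse_header(buf, size)) < 0)  /* keep and forward the */
        return ret;                           /* callee's error code  */

    return 0;                        /* >= 0 signals success */
}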
--- a/libavcodec/indeo2.c
+++ b/libavcodec/indeo2.c
@@ -47,8 +47,8 @@ static inline int ir2_get_code(GetBitContext *gb)
     return get_vlc2(gb, ir2_vlc.table, CODE_VLC_BITS, 1) + 1;
 }
 
-static int ir2_decode_plane(Ir2Context *ctx, int width, int height, uint8_t *dst, int stride,
-                            const uint8_t *table)
+static int ir2_decode_plane(Ir2Context *ctx, int width, int height, uint8_t *dst,
+                            int stride, const uint8_t *table)
 {
     int i;
     int j;
@@ -56,15 +56,15 @@ static int ir2_decode_plane(Ir2Context *ctx, int width, int height, uint8_t *dst
     int c;
     int t;
 
-    if(width&1)
+    if (width & 1)
         return AVERROR_INVALIDDATA;
 
     /* first line contain absolute values, other lines contain deltas */
-    while (out < width){
+    while (out < width) {
         c = ir2_get_code(&ctx->gb);
-        if(c >= 0x80) { /* we have a run */
+        if (c >= 0x80) { /* we have a run */
             c -= 0x7F;
-            if(out + c*2 > width)
+            if (out + c*2 > width)
                 return AVERROR_INVALIDDATA;
             for (i = 0; i < c * 2; i++)
                 dst[out++] = 0x80;
@@ -75,25 +75,25 @@ static int ir2_decode_plane(Ir2Context *ctx, int width, int height, uint8_t *dst
         }
     }
     dst += stride;
 
-    for (j = 1; j < height; j++){
+    for (j = 1; j < height; j++) {
         out = 0;
-        while (out < width){
+        while (out < width) {
             c = ir2_get_code(&ctx->gb);
-            if(c >= 0x80) { /* we have a skip */
+            if (c >= 0x80) { /* we have a skip */
                 c -= 0x7F;
-                if(out + c*2 > width)
+                if (out + c*2 > width)
                     return AVERROR_INVALIDDATA;
                 for (i = 0; i < c * 2; i++) {
                     dst[out] = dst[out - stride];
                     out++;
                 }
             } else { /* add two deltas from table */
-                t = dst[out - stride] + (table[c * 2] - 128);
-                t= av_clip_uint8(t);
+                t        = dst[out - stride] + (table[c * 2] - 128);
+                t        = av_clip_uint8(t);
                 dst[out] = t;
                 out++;
-                t = dst[out - stride] + (table[(c * 2) + 1] - 128);
-                t= av_clip_uint8(t);
+                t        = dst[out - stride] + (table[(c * 2) + 1] - 128);
+                t        = av_clip_uint8(t);
                 dst[out] = t;
                 out++;
             }
@@ -103,31 +103,31 @@ static int ir2_decode_plane(Ir2Context *ctx, int width, int height, uint8_t *dst
     return 0;
 }
 
-static int ir2_decode_plane_inter(Ir2Context *ctx, int width, int height, uint8_t *dst, int stride,
-                                  const uint8_t *table)
+static int ir2_decode_plane_inter(Ir2Context *ctx, int width, int height, uint8_t *dst,
+                                  int stride, const uint8_t *table)
 {
     int j;
     int out = 0;
     int c;
     int t;
 
-    if(width&1)
+    if (width & 1)
         return AVERROR_INVALIDDATA;
 
-    for (j = 0; j < height; j++){
+    for (j = 0; j < height; j++) {
         out = 0;
-        while (out < width){
+        while (out < width) {
             c = ir2_get_code(&ctx->gb);
-            if(c >= 0x80) { /* we have a skip */
-                c -= 0x7F;
+            if (c >= 0x80) { /* we have a skip */
+                c -= 0x7F;
                 out += c * 2;
             } else { /* add two deltas from table */
-                t = dst[out] + (((table[c * 2] - 128)*3) >> 2);
-                t= av_clip_uint8(t);
+                t        = dst[out] + (((table[c * 2] - 128)*3) >> 2);
+                t        = av_clip_uint8(t);
                 dst[out] = t;
                 out++;
-                t = dst[out] + (((table[(c * 2) + 1] - 128)*3) >> 2);
-                t= av_clip_uint8(t);
+                t        = dst[out] + (((table[(c * 2) + 1] - 128)*3) >> 2);
+                t        = av_clip_uint8(t);
                 dst[out] = t;
                 out++;
             }
@@ -141,11 +141,11 @@ static int ir2_decode_frame(AVCodecContext *avctx,
                             void *data, int *got_frame,
                             AVPacket *avpkt)
 {
-    const uint8_t *buf = avpkt->data;
-    int buf_size = avpkt->size;
     Ir2Context * const s = avctx->priv_data;
-    AVFrame *picture = data;
-    AVFrame * const p = &s->picture;
+    const uint8_t *buf   = avpkt->data;
+    int buf_size         = avpkt->size;
+    AVFrame *picture     = data;
+    AVFrame * const p    = &s->picture;
     int start, ret;
 
     p->reference = 3;
@@ -209,7 +209,8 @@ static int ir2_decode_frame(AVCodecContext *avctx,
     return buf_size;
 }
 
-static av_cold int ir2_decode_init(AVCodecContext *avctx){
+static av_cold int ir2_decode_init(AVCodecContext *avctx)
+{
     Ir2Context * const ic = avctx->priv_data;
     static VLC_TYPE vlc_tables[1 << CODE_VLC_BITS][2];
@@ -233,7 +234,8 @@ static av_cold int ir2_decode_init(AVCodecContext *avctx){
     return 0;
 }
 
-static av_cold int ir2_decode_end(AVCodecContext *avctx){
+static av_cold int ir2_decode_end(AVCodecContext *avctx)
+{
     Ir2Context * const ic = avctx->priv_data;
     AVFrame *pic = &ic->picture;
--- a/libavcodec/mss2.c
+++ b/libavcodec/mss2.c
@@ -163,7 +163,7 @@ static int decode_pal_v2(MSS12Context *ctx, const uint8_t *buf, int buf_size)
     ncol = *buf++;
     if (ncol > ctx->free_colours || buf_size < 2 + ncol * 3)
-        return -1;
+        return AVERROR_INVALIDDATA;
 
     for (i = 0; i < ncol; i++)
         *pal++ = AV_RB24(buf + 3 * i);
@@ -189,7 +189,7 @@ static int decode_555(GetByteContext *gB, uint16_t *dst, int stride,
         READ_PAIR(y, endy)
 
         if (endx >= w || endy >= h || x > endx || y > endy)
-            return -1;
+            return AVERROR_INVALIDDATA;
         dst += x + stride * y;
         w = endx - x + 1;
         h = endy - y + 1;
@@ -373,13 +373,14 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
     VC1Context *v = avctx->priv_data;
     MpegEncContext *s = &v->s;
     AVFrame *f;
+    int ret;
 
     ff_mpeg_flush(avctx);
 
     if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
         int i = ff_find_unused_picture(s, 0);
         if (i < 0)
-            return -1;
+            return i;
         s->current_picture_ptr = &s->picture[i];
     }
@@ -399,10 +400,10 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
     avctx->pix_fmt = AV_PIX_FMT_YUV420P;
 
-    if (ff_MPV_frame_start(s, avctx) < 0) {
+    if ((ret = ff_MPV_frame_start(s, avctx)) < 0) {
         av_log(v->s.avctx, AV_LOG_ERROR, "ff_MPV_frame_start error\n");
         avctx->pix_fmt = AV_PIX_FMT_RGB24;
-        return -1;
+        return ret;
     }
 
     ff_er_frame_start(s);
@@ -617,7 +618,7 @@ static int mss2_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                                ctx->last_pic.linesize[0] * (avctx->height - 1);
         } else {
             av_log(avctx, AV_LOG_ERROR, "Missing keyframe\n");
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
     } else {
         if (ctx->last_pic.data[0])
@@ -753,6 +754,7 @@ static int mss2_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 static av_cold int wmv9_init(AVCodecContext *avctx)
 {
     VC1Context *v = avctx->priv_data;
+    int ret;
 
     v->s.avctx = avctx;
     avctx->flags |= CODEC_FLAG_EMU_EDGE;
@@ -761,8 +763,8 @@ static av_cold int wmv9_init(AVCodecContext *avctx)
     if (avctx->idct_algo == FF_IDCT_AUTO)
         avctx->idct_algo = FF_IDCT_WMV2;
 
-    if (ff_vc1_init_common(v) < 0)
-        return -1;
+    if ((ret = ff_vc1_init_common(v)) < 0)
+        return ret;
 
     ff_vc1dsp_init(&v->vc1dsp);
 
     v->profile = PROFILE_MAIN;
@@ -802,9 +804,9 @@ static av_cold int wmv9_init(AVCodecContext *avctx)
     ff_vc1_init_transposed_scantables(v);
 
-    if (ff_msmpeg4_decode_init(avctx) < 0 ||
-        ff_vc1_decode_init_alloc_tables(v) < 0)
-        return -1;
+    if ((ret = ff_msmpeg4_decode_init(avctx)) < 0 ||
+        (ret = ff_vc1_decode_init_alloc_tables(v)) < 0)
+        return ret;
 
     /* error concealment */
     v->s.me.qpel_put = v->s.dsp.put_qpel_pixels_tab;
--- a/libavcodec/ptx.c
+++ b/libavcodec/ptx.c
@@ -46,6 +46,7 @@ static int ptx_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     AVFrame *picture = data;
     AVFrame * const p = &s->picture;
     unsigned int offset, w, h, y, stride, bytes_per_pixel;
+    int ret;
     uint8_t *ptr;
 
     if (buf_end - buf < 14)
@@ -72,13 +73,13 @@ static int ptx_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     if (p->data[0])
         avctx->release_buffer(avctx, p);
 
-    if (av_image_check_size(w, h, 0, avctx))
-        return -1;
+    if ((ret = av_image_check_size(w, h, 0, avctx)) < 0)
+        return ret;
     if (w != avctx->width || h != avctx->height)
         avcodec_set_dimensions(avctx, w, h);
-    if (ff_get_buffer(avctx, p) < 0) {
+    if ((ret = ff_get_buffer(avctx, p)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-        return -1;
+        return ret;
     }
 
     p->pict_type = AV_PICTURE_TYPE_I;
--- a/libavcodec/tiff.c
+++ b/libavcodec/tiff.c
@@ -399,7 +399,7 @@ static void av_always_inline horizontal_fill(unsigned int bpp, uint8_t* dst,
 static int tiff_unpack_strip(TiffContext *s, uint8_t *dst, int stride,
                              const uint8_t *src, int size, int lines)
 {
-    int c, line, pixels, code;
+    int c, line, pixels, code, ret;
     const uint8_t *ssrc = src;
     int width = ((s->width * s->bpp) + 7) >> 3;
@@ -434,7 +434,7 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t *dst, int stride,
                    (unsigned long)width * lines, ret);
             av_free(src2);
             av_free(zbuf);
-            return -1;
+            return AVERROR_UNKNOWN;
         }
         src = zbuf;
         for (line = 0; line < lines; line++) {
@@ -465,9 +465,9 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t *dst, int stride,
         if (size > 1 && !src[0] && (src[1]&1)) {
             av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
         }
-        if (ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF) < 0) {
+        if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
-            return -1;
+            return ret;
         }
     }
     if (s->compr == TIFF_CCITT_RLE || s->compr == TIFF_G3
@@ -485,7 +485,7 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t *dst, int stride,
             av_log(s->avctx, AV_LOG_ERROR,
                    "Uncompressed fax mode is not supported (yet)\n");
             av_free(src2);
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
         if (!s->fill_order) {
             memcpy(src2, src, size);
@@ -513,7 +513,7 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t *dst, int stride,
     for (line = 0; line < lines; line++) {
         if (src - ssrc > size) {
             av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
         switch (s->compr) {
         case TIFF_RAW:
@@ -541,7 +541,7 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t *dst, int stride,
                 if (pixels + code > width) {
                     av_log(s->avctx, AV_LOG_ERROR,
                            "Copy went out of bounds\n");
-                    return -1;
+                    return AVERROR_INVALIDDATA;
                 }
                 if (ssrc + size - src < code) {
                     av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
@@ -556,7 +556,7 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t *dst, int stride,
                 if (pixels + code > width) {
                     av_log(s->avctx, AV_LOG_ERROR,
                            "Run went out of bounds\n");
-                    return -1;
+                    return AVERROR_INVALIDDATA;
                 }
                 c = *src++;
                 horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
@@ -570,7 +570,7 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t *dst, int stride,
         if (pixels < width) {
             av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
                    pixels, width);
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
         if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
             horizontal_fill(s->bpp, dst, 1, dst, 0, width, 0);
@@ -705,7 +705,7 @@ static int tiff_decode_tag(TiffContext *s)
             av_log(s->avctx, AV_LOG_ERROR,
                    "This format is not supported (bpp=%d, %d components)\n",
                    s->bpp, count);
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
         if (count == 1)
             s->bpp = value;
@@ -719,7 +719,7 @@ static int tiff_decode_tag(TiffContext *s)
         case TIFF_LONG:
             s->bpp = 0;
             if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
-                return -1;
+                return AVERROR_INVALIDDATA;
             for (i = 0; i < count; i++)
                 s->bpp += tget(&s->gb, type, s->le);
             break;
@@ -757,17 +757,17 @@ static int tiff_decode_tag(TiffContext *s)
             break;
 #else
             av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
-            return -1;
+            return AVERROR(ENOSYS);
 #endif
         case TIFF_JPEG:
         case TIFF_NEWJPEG:
             av_log(s->avctx, AV_LOG_ERROR,
                    "JPEG compression is not supported\n");
-            return -1;
+            return AVERROR_PATCHWELCOME;
         default:
             av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
                    s->compr);
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
         break;
     case TIFF_ROWSPERSTRIP:
@@ -776,7 +776,7 @@ static int tiff_decode_tag(TiffContext *s)
         if (value < 1) {
             av_log(s->avctx, AV_LOG_ERROR,
                    "Incorrect value of rows per strip\n");
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
         s->rps = value;
         break;
@@ -793,7 +793,7 @@ static int tiff_decode_tag(TiffContext *s)
         if (s->strippos > bytestream2_size(&s->gb)) {
             av_log(s->avctx, AV_LOG_ERROR,
                    "Tag referencing position outside the image\n");
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
         break;
     case TIFF_STRIP_SIZE:
@@ -809,7 +809,7 @@ static int tiff_decode_tag(TiffContext *s)
         if (s->stripsizesoff > bytestream2_size(&s->gb)) {
             av_log(s->avctx, AV_LOG_ERROR,
                    "Tag referencing position outside the image\n");
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
         break;
     case TIFF_TILE_BYTE_COUNTS:
@@ -836,7 +836,7 @@ static int tiff_decode_tag(TiffContext *s)
         default:
             av_log(s->avctx, AV_LOG_ERROR, "Color mode %d is not supported\n",
                    value);
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
         break;
     case TIFF_FILL_ORDER:
@@ -851,7 +851,7 @@ static int tiff_decode_tag(TiffContext *s)
         pal = (uint32_t *) s->palette;
         off = type_sizes[type];
         if (count / 3 > 256 || bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
-            return -1;
+            return AVERROR_INVALIDDATA;
         off = (type_sizes[type] - 1) << 3;
         for (k = 2; k >= 0; k--) {
             for (i = 0; i < count / 3; i++) {
@@ -866,7 +866,7 @@ static int tiff_decode_tag(TiffContext *s)
     case TIFF_PLANAR:
         if (value == 2) {
             av_log(s->avctx, AV_LOG_ERROR, "Planar format is not supported\n");
-            return -1;
+            return AVERROR_PATCHWELCOME;
         }
         break;
     case TIFF_T4OPTIONS:
@@ -959,7 +959,7 @@ static int tiff_decode_tag(TiffContext *s)
             bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
 
             if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
-                return -1;
+                return AVERROR_INVALIDDATA;
             ap = av_malloc(s->geotags[i].count);
             if (!ap) {
                 av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
@@ -1040,7 +1040,7 @@ static int decode_frame(AVCodecContext *avctx,
         le = 0;
     else {
         av_log(avctx, AV_LOG_ERROR, "TIFF header not found\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     s->le = le;
     // TIFF_BPP is not a required tag and defaults to 1
@@ -1058,7 +1058,7 @@ static int decode_frame(AVCodecContext *avctx,
     if (tget_short(&s->gb, le) != 42) {
         av_log(avctx, AV_LOG_ERROR,
                "The answer to life, universe and everything is not correct!\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     // Reset these offsets so we can tell if they were set this frame
     s->stripsizesoff = s->strippos = 0;
@@ -1073,8 +1073,8 @@ static int decode_frame(AVCodecContext *avctx,
     if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
         return AVERROR_INVALIDDATA;
     for (i = 0; i < entries; i++) {
-        if (tiff_decode_tag(s) < 0)
-            return -1;
+        if ((ret = tiff_decode_tag(s)) < 0)
+            return ret;
     }
 
     for (i = 0; i<s->geotag_count; i++) {
@@ -1096,7 +1096,7 @@ static int decode_frame(AVCodecContext *avctx,
     if (!s->strippos && !s->stripoff) {
         av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     /* now we have the data and may start decoding */
     if ((ret = init_image(s)) < 0)
@@ -1138,7 +1138,7 @@ static int decode_frame(AVCodecContext *avctx,
         if (soff > avpkt->size || ssize > avpkt->size - soff) {
             av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
         if (tiff_unpack_strip(s, dst, stride, avpkt->data + soff, ssize,
                               FFMIN(s->rps, s->height - i)) < 0)
--- a/libavcodec/v210dec.c
+++ b/libavcodec/v210dec.c
@@ -55,7 +55,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
     if (avctx->width & 1) {
         av_log(avctx, AV_LOG_ERROR, "v210 needs even width\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
     avctx->bits_per_raw_sample = 10;
@@ -77,7 +77,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 {
     V210DecContext *s = avctx->priv_data;
 
-    int h, w, stride, aligned_input;
+    int h, w, ret, stride, aligned_input;
     AVFrame *pic = avctx->coded_frame;
     const uint8_t *psrc = avpkt->data;
     uint16_t *y, *u, *v;
@@ -97,7 +97,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
             s->stride_warning_shown = 1;
         } else {
             av_log(avctx, AV_LOG_ERROR, "packet too small\n");
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
     }
@@ -112,8 +112,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         avctx->release_buffer(avctx, pic);
 
     pic->reference = 0;
-    if (ff_get_buffer(avctx, pic) < 0)
-        return -1;
+    if ((ret = ff_get_buffer(avctx, pic)) < 0)
+        return ret;
 
     y = (uint16_t*)pic->data[0];
     u = (uint16_t*)pic->data[1];
--- a/libavcodec/vb.c
+++ b/libavcodec/vb.c
@@ -123,7 +123,7 @@ static int vb_decode_framedata(VBDecContext *c, int offset)
             if(!t){ //raw block
                 if (bytestream2_get_bytes_left(&g) < 16) {
                     av_log(c->avctx, AV_LOG_ERROR, "Insufficient data\n");
-                    return -1;
+                    return AVERROR_INVALIDDATA;
                 }
                 for(y = 0; y < 4; y++)
                     bytestream2_get_buffer(&g, cur + y * width, 4);
@@ -168,7 +168,7 @@ static int vb_decode_framedata(VBDecContext *c, int offset)
                 break;
             case 3:
                 av_log(c->avctx, AV_LOG_ERROR, "Invalid opcode seen @%d\n",blk);
-                return -1;
+                return AVERROR_INVALIDDATA;
             }
             break;
         }
@@ -190,7 +190,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 {
     VBDecContext * const c = avctx->priv_data;
     uint8_t *outptr, *srcptr;
-    int i, j;
+    int i, j, ret;
     int flags;
     uint32_t size;
     int offset = 0;
@@ -200,9 +200,9 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     if(c->pic.data[0])
         avctx->release_buffer(avctx, &c->pic);
     c->pic.reference = 3;
-    if(ff_get_buffer(avctx, &c->pic) < 0){
+    if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-        return -1;
+        return ret;
     }
 
     flags = bytestream2_get_le16(&c->stream);
--- a/libavcodec/vqavideo.c
+++ b/libavcodec/vqavideo.c
@@ -122,7 +122,7 @@ typedef struct VqaContext {
 static av_cold int vqa_decode_init(AVCodecContext *avctx)
 {
     VqaContext *s = avctx->priv_data;
-    int i, j, codebook_index;
+    int i, j, codebook_index, ret;
 
     s->avctx = avctx;
     avctx->pix_fmt = AV_PIX_FMT_PAL8;
@@ -130,7 +130,7 @@ static av_cold int vqa_decode_init(AVCodecContext *avctx)
     /* make sure the extradata made it */
     if (s->avctx->extradata_size != VQA_HEADER_SIZE) {
         av_log(s->avctx, AV_LOG_ERROR, "expected extradata size of %d\n", VQA_HEADER_SIZE);
-        return AVERROR_INVALIDDATA;
+        return AVERROR(EINVAL);
     }
 
     /* load up the VQA parameters from the header */
@@ -141,9 +141,9 @@ static av_cold int vqa_decode_init(AVCodecContext *avctx)
     }
     s->width = AV_RL16(&s->avctx->extradata[6]);
     s->height = AV_RL16(&s->avctx->extradata[8]);
-    if(av_image_check_size(s->width, s->height, 0, avctx)){
+    if ((ret = av_image_check_size(s->width, s->height, 0, avctx)) < 0) {
         s->width= s->height= 0;
-        return AVERROR_INVALIDDATA;
+        return ret;
     }
     s->vector_width = s->avctx->extradata[10];
     s->vector_height = s->avctx->extradata[11];
@@ -592,7 +592,7 @@ static int vqa_decode_frame(AVCodecContext *avctx,
     if (s->frame.data[0])
         avctx->release_buffer(avctx, &s->frame);
 
-    if ((res = ff_get_buffer(avctx, &s->frame))) {
+    if ((res = ff_get_buffer(avctx, &s->frame)) < 0) {
         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return res;
     }