It's got_frame, not data_size
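The change below is mechanical across the video decoders: the third argument of each decode callback, previously an `int *data_size` set to `sizeof(AVFrame)` or `sizeof(AVPicture)` on success and `0` otherwise, is renamed to `int *got_frame` and used as a 0/1 flag. A minimal sketch of the before/after pattern, assuming a hypothetical decoder context (`MyDecContext`, `decode_frame`, and the decode step are illustrative, not taken from any file in this diff):

```c
#include <libavcodec/avcodec.h>

/* Hypothetical private context; real decoders keep their output frame here. */
typedef struct MyDecContext {
    AVFrame frame;
} MyDecContext;

/* Old-style callback reported a byte count through the third parameter:
 *
 *   static int decode_frame(AVCodecContext *avctx, void *data,
 *                           int *data_size, AVPacket *avpkt)
 *   {
 *       ...
 *       *data_size = sizeof(AVFrame);    // "a frame was produced"
 *       *(AVFrame *)data = s->frame;
 *       return avpkt->size;
 *   }
 */

/* New-style callback: the third parameter is a got-a-frame flag. */
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    MyDecContext *s = avctx->priv_data;

    /* ... decode avpkt->data into s->frame ... */

    *got_frame       = 1;           /* 1 if a frame is returned, 0 otherwise */
    *(AVFrame *)data = s->frame;

    return avpkt->size;             /* bytes consumed, unchanged by this patch */
}
```

The return value and the frame copy through `data` are untouched; only the semantics of the third pointer change, which is why every hunk in the diff is a one-line substitution.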
| @@ -753,7 +753,7 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length) | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, void *data, | |||
| int *data_size, AVPacket *avpkt) | |||
| int *got_frame, AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| @@ -870,7 +870,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, | |||
| p->key_frame = p->pict_type == AV_PICTURE_TYPE_I; | |||
| *picture = *p; | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| emms_c(); | |||
| @@ -64,7 +64,7 @@ typedef struct EightBpsContext { | |||
| * | |||
| */ | |||
| static int decode_frame(AVCodecContext *avctx, void *data, | |||
| int *data_size, AVPacket *avpkt) | |||
| int *got_frame, AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| @@ -149,7 +149,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, | |||
| memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE); | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = c->pic; | |||
| /* always report that the buffer was completely consumed */ | |||
| @@ -50,7 +50,7 @@ static av_cold int aasc_decode_init(AVCodecContext *avctx) | |||
| } | |||
| static int aasc_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -85,7 +85,7 @@ static int aasc_decode_frame(AVCodecContext *avctx, | |||
| return -1; | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| /* report that the buffer was completely consumed */ | |||
| @@ -105,7 +105,7 @@ exhausted: | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| AnmContext *s = avctx->priv_data; | |||
| @@ -170,7 +170,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
| memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| return buf_size; | |||
| } | |||
| @@ -311,7 +311,7 @@ static int execute_code(AVCodecContext * avctx, int c) | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| AnsiContext *s = avctx->priv_data; | |||
| @@ -411,7 +411,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
| buf++; | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| return buf_size; | |||
| } | |||
| @@ -181,7 +181,7 @@ static inline void idct_put(ASV1Context *a, int mb_x, int mb_y){ | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -247,7 +247,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
| } | |||
| *picture = a->picture; | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| emms_c(); | |||
| @@ -46,7 +46,7 @@ static av_cold int aura_decode_init(AVCodecContext *avctx) | |||
| } | |||
| static int aura_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *pkt) | |||
| { | |||
| AuraDecodeContext *s=avctx->priv_data; | |||
| @@ -108,7 +108,7 @@ static int aura_decode_frame(AVCodecContext *avctx, | |||
| V += s->frame.linesize[2] - (avctx->width >> 1); | |||
| } | |||
| *data_size=sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data= s->frame; | |||
| return pkt->size; | |||
| @@ -44,7 +44,7 @@ typedef enum { | |||
| static int | |||
| avs_decode_frame(AVCodecContext * avctx, | |||
| void *data, int *data_size, AVPacket *avpkt) | |||
| void *data, int *got_frame, AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| const uint8_t *buf_end = avpkt->data + avpkt->size; | |||
| @@ -150,7 +150,7 @@ avs_decode_frame(AVCodecContext * avctx, | |||
| } | |||
| *picture = avs->picture; | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| return buf_size; | |||
| } | |||
| @@ -63,7 +63,7 @@ static int set_palette(BethsoftvidContext *ctx) | |||
| } | |||
| static int bethsoftvid_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| BethsoftvidContext * vid = avctx->priv_data; | |||
| @@ -95,7 +95,7 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx, | |||
| switch(block_type = bytestream2_get_byte(&vid->g)){ | |||
| case PALETTE_BLOCK: { | |||
| *data_size = 0; | |||
| *got_frame = 0; | |||
| if ((ret = set_palette(vid)) < 0) { | |||
| av_log(avctx, AV_LOG_ERROR, "error reading palette\n"); | |||
| return ret; | |||
| @@ -136,7 +136,7 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx, | |||
| } | |||
| end: | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = vid->frame; | |||
| return avpkt->size; | |||
| @@ -46,7 +46,7 @@ static av_cold int bfi_decode_init(AVCodecContext *avctx) | |||
| } | |||
| static int bfi_decode_frame(AVCodecContext *avctx, void *data, | |||
| int *data_size, AVPacket *avpkt) | |||
| int *got_frame, AVPacket *avpkt) | |||
| { | |||
| GetByteContext g; | |||
| int buf_size = avpkt->size; | |||
| @@ -164,7 +164,7 @@ static int bfi_decode_frame(AVCodecContext *avctx, void *data, | |||
| src += avctx->width; | |||
| dst += bfi->frame.linesize[0]; | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame *)data = bfi->frame; | |||
| return buf_size; | |||
| } | |||
| @@ -1161,7 +1161,7 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx, | |||
| return 0; | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *pkt) | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt) | |||
| { | |||
| BinkContext * const c = avctx->priv_data; | |||
| GetBitContext gb; | |||
| @@ -1208,7 +1208,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac | |||
| } | |||
| emms_c(); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = c->pic; | |||
| if (c->version > 'b') | |||
| @@ -35,7 +35,7 @@ static av_cold int bmp_decode_init(AVCodecContext *avctx){ | |||
| } | |||
| static int bmp_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -347,7 +347,7 @@ static int bmp_decode_frame(AVCodecContext *avctx, | |||
| } | |||
| *picture = s->picture; | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| return buf_size; | |||
| } | |||
| @@ -194,7 +194,8 @@ static int decode_bmv_frame(const uint8_t *source, int src_len, uint8_t *frame, | |||
| return 0; | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *pkt) | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *pkt) | |||
| { | |||
| BMVDecContext * const c = avctx->priv_data; | |||
| int type, scr_off; | |||
| @@ -265,7 +266,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac | |||
| outptr += c->pic.linesize[0]; | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = c->pic; | |||
| /* always report that the buffer was completely consumed */ | |||
| @@ -113,7 +113,7 @@ static inline void draw_n_color(uint8_t *out, int stride, int width, | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, void *data, | |||
| int *data_size, AVPacket *avpkt) | |||
| int *got_frame, AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| @@ -239,7 +239,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, | |||
| } | |||
| *picture = *newpic; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| return buf_size; | |||
| } | |||
| @@ -1096,7 +1096,7 @@ static void cavs_flush(AVCodecContext * avctx) { | |||
| h->got_keyframe = 0; | |||
| } | |||
| static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size, | |||
| static int cavs_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| @@ -1112,7 +1112,7 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size, | |||
| if (buf_size == 0) { | |||
| if (!s->low_delay && h->DPB[0].f.data[0]) { | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| *picture = h->DPB[0].f; | |||
| memset(&h->DPB[0], 0, sizeof(h->DPB[0])); | |||
| } | |||
| @@ -1140,19 +1140,19 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size, | |||
| h->got_keyframe = 1; | |||
| } | |||
| case PIC_PB_START_CODE: | |||
| *data_size = 0; | |||
| *got_frame = 0; | |||
| if(!h->got_keyframe) | |||
| break; | |||
| init_get_bits(&s->gb, buf_ptr, input_size); | |||
| h->stc = stc; | |||
| if(decode_pic(h)) | |||
| break; | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| if(h->pic_type != AV_PICTURE_TYPE_B) { | |||
| if(h->DPB[1].f.data[0]) { | |||
| *picture = h->DPB[1].f; | |||
| } else { | |||
| *data_size = 0; | |||
| *got_frame = 0; | |||
| } | |||
| } else | |||
| *picture = h->picture.f; | |||
| @@ -267,7 +267,7 @@ static void cdg_scroll(CDGraphicsContext *cc, uint8_t *data, | |||
| } | |||
| static int cdg_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, AVPacket *avpkt) | |||
| void *data, int *got_frame, AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| @@ -348,9 +348,9 @@ static int cdg_decode_frame(AVCodecContext *avctx, | |||
| break; | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| } else { | |||
| *data_size = 0; | |||
| *got_frame = 0; | |||
| buf_size = 0; | |||
| } | |||
| @@ -208,7 +208,7 @@ static void cdxl_decode_ham8(CDXLVideoContext *c) | |||
| } | |||
| static int cdxl_decode_frame(AVCodecContext *avctx, void *data, | |||
| int *data_size, AVPacket *pkt) | |||
| int *got_frame, AVPacket *pkt) | |||
| { | |||
| CDXLVideoContext *c = avctx->priv_data; | |||
| AVFrame * const p = &c->frame; | |||
| @@ -281,7 +281,7 @@ static int cdxl_decode_frame(AVCodecContext *avctx, void *data, | |||
| } else { | |||
| cdxl_decode_rgb(c); | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = c->frame; | |||
| return buf_size; | |||
| @@ -419,7 +419,7 @@ static av_cold int cinepak_decode_init(AVCodecContext *avctx) | |||
| } | |||
| static int cinepak_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -450,7 +450,7 @@ static int cinepak_decode_frame(AVCodecContext *avctx, | |||
| if (s->palette_video) | |||
| memcpy (s->frame.data[1], s->pal, AVPALETTE_SIZE); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| /* report that the buffer was completely consumed */ | |||
| @@ -44,7 +44,7 @@ static av_cold int common_init(AVCodecContext *avctx) | |||
| #if CONFIG_CLJR_DECODER | |||
| static int decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -95,7 +95,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
| } | |||
| *picture = a->picture; | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| return buf_size; | |||
| } | |||
| @@ -137,7 +137,7 @@ static void add_frame_32(AVFrame *f, const uint8_t *src, | |||
| } | |||
| #endif | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| @@ -215,7 +215,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| } | |||
| *picture = c->pic; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| return buf_size; | |||
| } | |||
| @@ -60,7 +60,7 @@ static av_cold int cyuv_decode_init(AVCodecContext *avctx) | |||
| } | |||
| static int cyuv_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -163,7 +163,7 @@ static int cyuv_decode_frame(AVCodecContext *avctx, | |||
| } | |||
| } | |||
| *data_size=sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data= s->frame; | |||
| return buf_size; | |||
| @@ -308,7 +308,7 @@ static const char* chunk_name[8] = { | |||
| }; | |||
| static int dfa_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| DfaContext *s = avctx->priv_data; | |||
| @@ -363,7 +363,7 @@ static int dfa_decode_frame(AVCodecContext *avctx, | |||
| } | |||
| memcpy(s->pic.data[1], s->pal, sizeof(s->pal)); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->pic; | |||
| return avpkt->size; | |||
| @@ -329,7 +329,7 @@ static int dnxhd_decode_macroblocks(DNXHDContext *ctx, const uint8_t *buf, int b | |||
| return 0; | |||
| } | |||
| static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -374,7 +374,7 @@ static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| } | |||
| *picture = ctx->picture; | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| return buf_size; | |||
| } | |||
| @@ -52,7 +52,7 @@ static inline unsigned make_16bit(unsigned value) | |||
| static int decode_frame(AVCodecContext *avctx, | |||
| void *data, | |||
| int *data_size, | |||
| int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -213,7 +213,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
| } | |||
| *picture = s->picture; | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| return buf_size; | |||
| } | |||
| @@ -201,7 +201,7 @@ static void cin_decode_rle(const unsigned char *src, int src_size, unsigned char | |||
| } | |||
| static int cinvideo_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -297,7 +297,7 @@ static int cinvideo_decode_frame(AVCodecContext *avctx, | |||
| FFSWAP(uint8_t *, cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_table[CIN_PRE_BMP]); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame *)data = cin->frame; | |||
| return buf_size; | |||
| @@ -311,7 +311,7 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg) | |||
| /* NOTE: exactly one frame must be given (120000 bytes for NTSC, | |||
| 144000 bytes for PAL - or twice those for 50Mbps) */ | |||
| static int dvvideo_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| uint8_t *buf = avpkt->data; | |||
| @@ -349,7 +349,7 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, | |||
| emms_c(); | |||
| /* return image */ | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->picture; | |||
| /* Determine the codec's sample_aspect ratio from the packet */ | |||
| @@ -189,7 +189,7 @@ static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst, uint | |||
| return 0; | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| @@ -282,7 +282,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac | |||
| if(c->pic.data[0]) | |||
| avctx->release_buffer(avctx, &c->pic); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = c->prev; | |||
| /* always report that the buffer was completely consumed */ | |||
| @@ -35,7 +35,7 @@ static av_cold int decode_init(AVCodecContext *avctx) | |||
| return 0; | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| int h, w; | |||
| @@ -83,7 +83,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| V += pic->linesize[2]; | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = *pic; | |||
| return avpkt->size; | |||
| @@ -147,7 +147,7 @@ static void cmv_process_header(CmvContext *s, const uint8_t *buf, const uint8_t | |||
| #define MVIh_TAG MKTAG('M', 'V', 'I', 'h') | |||
| static int cmv_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -192,7 +192,7 @@ static int cmv_decode_frame(AVCodecContext *avctx, | |||
| cmv_decode_intra(s, buf+2, buf_end); | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| return buf_size; | |||
| @@ -219,7 +219,7 @@ static void calc_quant_matrix(MadContext *s, int qscale) | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -232,7 +232,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
| if (buf_size < 17) { | |||
| av_log(avctx, AV_LOG_ERROR, "Input buffer too small\n"); | |||
| *data_size = 0; | |||
| *got_frame = 0; | |||
| return -1; | |||
| } | |||
| @@ -274,7 +274,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
| for (s->mb_x=0; s->mb_x < (avctx->width +15)/16; s->mb_x++) | |||
| decode_mb(s, inter); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| if (chunk_type != MADe_TAG) | |||
| @@ -184,7 +184,7 @@ static void tgq_calculate_qtable(TgqContext *s, int quant){ | |||
| } | |||
| static int tgq_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt){ | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| @@ -227,7 +227,7 @@ static int tgq_decode_frame(AVCodecContext *avctx, | |||
| for (x = 0; x < FFALIGN(avctx->width, 16) >> 4; x++) | |||
| tgq_decode_mb(s, y, x); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| return avpkt->size; | |||
| @@ -249,7 +249,7 @@ static void cond_release_buffer(AVFrame *pic) | |||
| } | |||
| static int tgv_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -325,7 +325,7 @@ static int tgv_decode_frame(AVCodecContext *avctx, | |||
| } | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| return buf_size; | |||
| @@ -97,7 +97,7 @@ static void tqi_calculate_qtable(MpegEncContext *s, int quant) | |||
| } | |||
| static int tqi_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -138,7 +138,7 @@ static int tqi_decode_frame(AVCodecContext *avctx, | |||
| tqi_idct_put(t, t->block); | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = t->frame; | |||
| return buf_size; | |||
| } | |||
| @@ -197,7 +197,7 @@ static const uint16_t mask_matrix[] = {0x1, 0x2, 0x10, 0x20, | |||
| 0x400, 0x800, 0x4000, 0x8000}; | |||
| static int escape124_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -232,7 +232,7 @@ static int escape124_decode_frame(AVCodecContext *avctx, | |||
| if (!(frame_flags & 0x114) || !(frame_flags & 0x7800000)) { | |||
| av_log(NULL, AV_LOG_DEBUG, "Skipping frame\n"); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| return frame_size; | |||
| @@ -358,7 +358,7 @@ static int escape124_decode_frame(AVCodecContext *avctx, | |||
| avctx->release_buffer(avctx, &s->frame); | |||
| *(AVFrame*)data = s->frame = new_frame; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| return frame_size; | |||
| } | |||
| @@ -794,7 +794,7 @@ static av_cold int ffv1_decode_init(AVCodecContext *avctx) | |||
| } | |||
| static int ffv1_decode_frame(AVCodecContext *avctx, void *data, | |||
| int *data_size, AVPacket *avpkt) | |||
| int *got_frame, AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| @@ -901,7 +901,7 @@ static int ffv1_decode_frame(AVCodecContext *avctx, void *data, | |||
| f->picture_number++; | |||
| *picture = *p; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| FFSWAP(AVFrame, f->picture, f->last_picture); | |||
| @@ -234,7 +234,7 @@ static int calc_deflate_block_size(int tmpblock_size) | |||
| } | |||
| static int flashsv_decode_frame(AVCodecContext *avctx, void *data, | |||
| int *data_size, AVPacket *avpkt) | |||
| int *got_frame, AVPacket *avpkt) | |||
| { | |||
| int buf_size = avpkt->size; | |||
| FlashSVContext *s = avctx->priv_data; | |||
| @@ -441,7 +441,7 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data, | |||
| memcpy(s->keyframe, s->frame.data[0], s->frame.linesize[0] * avctx->height); | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| if ((get_bits_count(&gb) / 8) != buf_size) | |||
| @@ -129,7 +129,7 @@ static av_cold int flic_decode_init(AVCodecContext *avctx) | |||
| } | |||
| static int flic_decode_frame_8BPP(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| const uint8_t *buf, int buf_size) | |||
| { | |||
| FlicDecodeContext *s = avctx->priv_data; | |||
| @@ -419,14 +419,14 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx, | |||
| s->new_palette = 0; | |||
| } | |||
| *data_size=sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| return buf_size; | |||
| } | |||
| static int flic_decode_frame_15_16BPP(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| const uint8_t *buf, int buf_size) | |||
| { | |||
| /* Note, the only difference between the 15Bpp and 16Bpp */ | |||
| @@ -681,14 +681,14 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx, | |||
| "and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2)); | |||
| *data_size=sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| return buf_size; | |||
| } | |||
| static int flic_decode_frame_24BPP(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| const uint8_t *buf, int buf_size) | |||
| { | |||
| av_log(avctx, AV_LOG_ERROR, "24Bpp FLC Unsupported due to lack of test files.\n"); | |||
| @@ -696,22 +696,22 @@ static int flic_decode_frame_24BPP(AVCodecContext *avctx, | |||
| } | |||
| static int flic_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| if (avctx->pix_fmt == AV_PIX_FMT_PAL8) { | |||
| return flic_decode_frame_8BPP(avctx, data, data_size, | |||
| return flic_decode_frame_8BPP(avctx, data, got_frame, | |||
| buf, buf_size); | |||
| } | |||
| else if ((avctx->pix_fmt == AV_PIX_FMT_RGB555) || | |||
| (avctx->pix_fmt == AV_PIX_FMT_RGB565)) { | |||
| return flic_decode_frame_15_16BPP(avctx, data, data_size, | |||
| return flic_decode_frame_15_16BPP(avctx, data, got_frame, | |||
| buf, buf_size); | |||
| } | |||
| else if (avctx->pix_fmt == AV_PIX_FMT_BGR24) { | |||
| return flic_decode_frame_24BPP(avctx, data, data_size, | |||
| return flic_decode_frame_24BPP(avctx, data, got_frame, | |||
| buf, buf_size); | |||
| } | |||
| @@ -124,7 +124,7 @@ static int fraps2_decode_plane(FrapsContext *s, uint8_t *dst, int stride, int w, | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -343,7 +343,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
| } | |||
| *frame = *f; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| return buf_size; | |||
| } | |||
| @@ -39,7 +39,7 @@ static av_cold int decode_init(AVCodecContext *avctx) | |||
| return 0; | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| int field, ret; | |||
| @@ -97,7 +97,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| buf += field_size - min_field_size; | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = *pic; | |||
| return avpkt->size; | |||
| @@ -282,7 +282,8 @@ static av_cold int gif_decode_init(AVCodecContext *avctx) | |||
| return 0; | |||
| } | |||
| static int gif_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) | |||
| static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| @@ -312,7 +313,7 @@ static int gif_decode_frame(AVCodecContext *avctx, void *data, int *data_size, A | |||
| return ret; | |||
| *picture = s->picture; | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| return s->bytestream - buf; | |||
| } | |||
| @@ -543,7 +543,7 @@ static int get_consumed_bytes(MpegEncContext *s, int buf_size){ | |||
| } | |||
| static int h261_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -628,7 +628,7 @@ assert(s->current_picture.f.pict_type == s->pict_type); | |||
| *pict = s->current_picture_ptr->f; | |||
| ff_print_debug_info(s, pict); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| return get_consumed_bytes(s, buf_size); | |||
| } | |||
| @@ -69,7 +69,7 @@ int ff_h263_decode_motion(MpegEncContext * s, int pred, int f_code); | |||
| av_const int ff_h263_aspect_to_info(AVRational aspect); | |||
| int ff_h263_decode_init(AVCodecContext *avctx); | |||
| int ff_h263_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt); | |||
| int ff_h263_decode_end(AVCodecContext *avctx); | |||
| void ff_h263_encode_mb(MpegEncContext *s, | |||
| @@ -340,7 +340,7 @@ static int decode_slice(MpegEncContext *s){ | |||
| } | |||
| int ff_h263_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -362,7 +362,7 @@ uint64_t time= rdtsc(); | |||
| *pict = s->next_picture_ptr->f; | |||
| s->next_picture_ptr= NULL; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| } | |||
| return 0; | |||
| @@ -729,7 +729,7 @@ intrax8_decoded: | |||
| } | |||
| if(s->last_picture_ptr || s->low_delay){ | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| ff_print_debug_info(s, pict); | |||
| } | |||
| @@ -3988,7 +3988,7 @@ static int get_consumed_bytes(MpegEncContext *s, int pos, int buf_size) | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, void *data, | |||
| int *data_size, AVPacket *avpkt) | |||
| int *got_frame, AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| @@ -4025,7 +4025,7 @@ out: | |||
| h->delayed_pic[i] = h->delayed_pic[i + 1]; | |||
| if (out) { | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *pict = out->f; | |||
| } | |||
| @@ -4057,14 +4057,14 @@ out: | |||
| if (!h->next_output_pic) { | |||
| /* Wait for second field. */ | |||
| *data_size = 0; | |||
| *got_frame = 0; | |||
| } else { | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *pict = h->next_output_pic->f; | |||
| } | |||
| } | |||
| assert(pict->data[0] || !*data_size); | |||
| assert(pict->data[0] || !*got_frame); | |||
| ff_print_debug_info(s, pict); | |||
| return get_consumed_bytes(s, buf_index, buf_size); | |||
| @@ -928,7 +928,7 @@ static void draw_slice(HYuvContext *s, int y) | |||
| s->last_slice_end = y + h; | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -1181,7 +1181,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| emms_c(); | |||
| *picture = *p; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size; | |||
| } | |||
| @@ -210,7 +210,7 @@ static void idcin_decode_vlcs(IdcinContext *s) | |||
| } | |||
| static int idcin_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -238,7 +238,7 @@ static int idcin_decode_frame(AVCodecContext *avctx, | |||
| /* make the palette available on the way out */ | |||
| memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| /* report that the buffer was completely consumed */ | |||
| @@ -247,7 +247,7 @@ static int decode_byterun(uint8_t *dst, int dst_size, | |||
| } | |||
| static int decode_frame_ilbm(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| IffContext *s = avctx->priv_data; | |||
| @@ -298,13 +298,13 @@ static int decode_frame_ilbm(AVCodecContext *avctx, | |||
| } | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| return buf_size; | |||
| } | |||
| static int decode_frame_byterun1(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| IffContext *s = avctx->priv_data; | |||
| @@ -354,7 +354,7 @@ static int decode_frame_byterun1(AVCodecContext *avctx, | |||
| } | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| return buf_size; | |||
| } | |||
| @@ -138,7 +138,7 @@ static int ir2_decode_plane_inter(Ir2Context *ctx, int width, int height, uint8_ | |||
| } | |||
| static int ir2_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -194,7 +194,7 @@ static int ir2_decode_frame(AVCodecContext *avctx, | |||
| } | |||
| *picture = s->picture; | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| return buf_size; | |||
| } | |||
| @@ -1025,7 +1025,7 @@ static av_cold int decode_init(AVCodecContext *avctx) | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| Indeo3DecodeContext *ctx = avctx->priv_data; | |||
| @@ -1040,7 +1040,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| /* skip sync(null) frames */ | |||
| if (res) { | |||
| // we have processed 16 bytes but no data was decoded | |||
| *data_size = 0; | |||
| *got_frame = 0; | |||
| return buf_size; | |||
| } | |||
| @@ -1086,7 +1086,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| ctx->frame.data[2], ctx->frame.linesize[2], | |||
| (avctx->height + 3) >> 2); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = ctx->frame; | |||
| return buf_size; | |||
| @@ -953,7 +953,7 @@ static av_cold int ipvideo_decode_init(AVCodecContext *avctx) | |||
| } | |||
| static int ipvideo_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -988,7 +988,7 @@ static int ipvideo_decode_frame(AVCodecContext *avctx, | |||
| ipvideo_decode_opcodes(s); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->current_frame; | |||
| /* shuffle frames */ | |||
| @@ -743,7 +743,7 @@ static int decode_band(IVI45DecContext *ctx, | |||
| return result; | |||
| } | |||
| int ff_ivi_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| int ff_ivi_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| IVI45DecContext *ctx = avctx->priv_data; | |||
| @@ -820,7 +820,7 @@ int ff_ivi_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| ff_ivi_output_plane(&ctx->planes[2], ctx->frame.data[1], ctx->frame.linesize[1]); | |||
| ff_ivi_output_plane(&ctx->planes[1], ctx->frame.data[2], ctx->frame.linesize[2]); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = ctx->frame; | |||
| return buf_size; | |||
| @@ -386,7 +386,7 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile); | |||
| */ | |||
| void ff_ivi_output_plane(IVIPlaneDesc *plane, uint8_t *dst, int dst_pitch); | |||
| int ff_ivi_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| int ff_ivi_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt); | |||
| av_cold int ff_ivi_decode_close(AVCodecContext *avctx); | |||
| @@ -129,7 +129,7 @@ static inline void decode8x8(GetBitContext *gb, uint8_t *dst, int linesize, DSPC | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| JvContext *s = avctx->priv_data; | |||
| @@ -185,7 +185,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
| s->palette_has_changed = 0; | |||
| memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| } | |||
| @@ -43,7 +43,8 @@ static void decode_flush(AVCodecContext *avctx) | |||
| avctx->release_buffer(avctx, &c->prev); | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| const uint8_t *buf_end = buf + avpkt->size; | |||
| @@ -155,7 +156,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac | |||
| if (outcnt - maxcnt) | |||
| av_log(avctx, AV_LOG_DEBUG, "frame finished with %d diff\n", outcnt - maxcnt); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = c->cur; | |||
| if (c->prev.data[0]) | |||
| @@ -243,7 +243,8 @@ static int kmvc_decode_inter_8x8(KmvcContext * ctx, int w, int h) | |||
| return 0; | |||
| } | |||
| static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPacket *avpkt) | |||
| static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| KmvcContext *const ctx = avctx->priv_data; | |||
| uint8_t *out, *src; | |||
| @@ -344,7 +345,7 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa | |||
| ctx->prev = ctx->frm1; | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame *) data = ctx->pic; | |||
| /* always report that the buffer was completely consumed */ | |||
| @@ -497,7 +497,7 @@ static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst, | |||
| * @return number of consumed bytes on success or negative if decode fails | |||
| */ | |||
| static int lag_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, AVPacket *avpkt) | |||
| void *data, int *got_frame, AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| @@ -664,7 +664,7 @@ static int lag_decode_frame(AVCodecContext *avctx, | |||
| } | |||
| *picture = *p; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| return buf_size; | |||
| } | |||
| @@ -158,7 +158,7 @@ static int zlib_decomp(AVCodecContext *avctx, const uint8_t *src, int src_len, i | |||
| * Decode a frame | |||
| * | |||
| */ | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| @@ -461,7 +461,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac | |||
| return -1; | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = c->pic; | |||
| /* always report that the buffer was completely consumed */ | |||
| @@ -253,7 +253,7 @@ static av_cold int libopenjpeg_decode_init_thread_copy(AVCodecContext *avctx) | |||
| } | |||
| static int libopenjpeg_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| uint8_t *buf = avpkt->data; | |||
| @@ -269,7 +269,7 @@ static int libopenjpeg_decode_frame(AVCodecContext *avctx, | |||
| int ispacked = 0; | |||
| int i; | |||
| *data_size = 0; | |||
| *got_frame = 0; | |||
| // Check if input is a raw jpeg2k codestream or in jp2 wrapping | |||
| if ((AV_RB32(buf) == 12) && | |||
| @@ -412,7 +412,7 @@ static int libopenjpeg_decode_frame(AVCodecContext *avctx, | |||
| } | |||
| *output = ctx->image; | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| ret = buf_size; | |||
| done: | |||
| @@ -207,7 +207,7 @@ static void libschroedinger_handle_first_access_unit(AVCodecContext *avccontext) | |||
| } | |||
| static int libschroedinger_decode_frame(AVCodecContext *avccontext, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -225,7 +225,7 @@ static int libschroedinger_decode_frame(AVCodecContext *avccontext, | |||
| SchroParseUnitContext parse_ctx; | |||
| LibSchroFrameContext *framewithpts = NULL; | |||
| *data_size = 0; | |||
| *got_frame = 0; | |||
| parse_context_init(&parse_ctx, buf, buf_size); | |||
| if (!buf_size) { | |||
| @@ -341,14 +341,14 @@ static int libschroedinger_decode_frame(AVCodecContext *avccontext, | |||
| p_schro_params->dec_frame.linesize[2] = framewithpts->frame->components[2].stride; | |||
| *(AVFrame*)data = p_schro_params->dec_frame; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| /* Now free the frame resources. */ | |||
| libschroedinger_decode_frame_free(framewithpts->frame); | |||
| av_free(framewithpts); | |||
| } else { | |||
| data = NULL; | |||
| *data_size = 0; | |||
| *got_frame = 0; | |||
| } | |||
| return buf_size; | |||
| } | |||
| @@ -59,7 +59,7 @@ static av_cold int vp8_init(AVCodecContext *avctx) | |||
| } | |||
| static int vp8_decode(AVCodecContext *avctx, | |||
| void *data, int *data_size, AVPacket *avpkt) | |||
| void *data, int *got_frame, AVPacket *avpkt) | |||
| { | |||
| VP8Context *ctx = avctx->priv_data; | |||
| AVFrame *picture = data; | |||
| @@ -100,7 +100,7 @@ static int vp8_decode(AVCodecContext *avctx, | |||
| picture->linesize[1] = img->stride[1]; | |||
| picture->linesize[2] = img->stride[2]; | |||
| picture->linesize[3] = 0; | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| } | |||
| return avpkt->size; | |||
| } | |||
| @@ -158,7 +158,7 @@ static int loco_decode_plane(LOCOContext *l, uint8_t *data, int width, int heigh | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -223,7 +223,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
| break; | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = l->pic; | |||
| return buf_size; | |||
| @@ -153,7 +153,7 @@ static inline void idct_put(MDECContext *a, int mb_x, int mb_y){ | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -206,7 +206,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
| memset(p->qscale_table, a->qscale, a->mb_width); | |||
| *picture = a->picture; | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| return (get_bits_count(&a->gb)+31)/32*4; | |||
| } | |||
| @@ -301,7 +301,7 @@ static void prepare_avpic(MimicContext *ctx, AVPicture *dst, AVPicture *src) | |||
| } | |||
| static int mimic_decode_frame(AVCodecContext *avctx, void *data, | |||
| int *data_size, AVPacket *avpkt) | |||
| int *got_frame, AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| @@ -390,7 +390,7 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data, | |||
| } | |||
| *(AVFrame*)data = ctx->buf_ptrs[ctx->cur_index]; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| ctx->prev_index = ctx->next_prev_index; | |||
| ctx->cur_index = ctx->next_cur_index; | |||
| @@ -38,7 +38,7 @@ static uint32_t read_offs(AVCodecContext *avctx, GetBitContext *gb, uint32_t siz | |||
| } | |||
| static int mjpegb_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -137,7 +137,7 @@ read_header: | |||
| //XXX FIXME factorize, this looks very similar to the EOI code | |||
| *picture= *s->picture_ptr; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| if(!s->lossless){ | |||
| picture->quality= FFMAX3(s->qscale[0], s->qscale[1], s->qscale[2]); | |||
| @@ -1436,7 +1436,7 @@ int ff_mjpeg_find_marker(MJpegDecodeContext *s, | |||
| return start_code; | |||
| } | |||
| int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -1551,7 +1551,7 @@ eoi_parser: | |||
| goto not_the_end; | |||
| } | |||
| *picture = *s->picture_ptr; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| if (!s->lossless) { | |||
| picture->quality = FFMAX3(s->qscale[0], | |||
| @@ -116,7 +116,7 @@ typedef struct MJpegDecodeContext { | |||
| int ff_mjpeg_decode_init(AVCodecContext *avctx); | |||
| int ff_mjpeg_decode_end(AVCodecContext *avctx); | |||
| int ff_mjpeg_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt); | |||
| int ff_mjpeg_decode_dqt(MJpegDecodeContext *s); | |||
| int ff_mjpeg_decode_dht(MJpegDecodeContext *s); | |||
| @@ -172,7 +172,7 @@ static int mm_decode_inter(MmContext * s, int half_horiz, int half_vert) | |||
| } | |||
| static int mm_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -209,7 +209,7 @@ static int mm_decode_frame(AVCodecContext *avctx, | |||
| memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| return buf_size; | |||
| @@ -237,7 +237,7 @@ static void mp_decode_frame_helper(MotionPixelsContext *mp, GetBitContext *gb) | |||
| } | |||
| static int mp_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -295,7 +295,7 @@ static int mp_decode_frame(AVCodecContext *avctx, | |||
| ff_free_vlc(&mp->vlc); | |||
| end: | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame *)data = mp->frame; | |||
| return buf_size; | |||
| } | |||
| @@ -72,7 +72,7 @@ static av_cold int msrle_decode_init(AVCodecContext *avctx) | |||
| } | |||
| static int msrle_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -128,7 +128,7 @@ static int msrle_decode_frame(AVCodecContext *avctx, | |||
| ff_msrle_decode(avctx, (AVPicture*)&s->frame, avctx->bits_per_coded_sample, &s->gb); | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| /* report that the buffer was completely consumed */ | |||
| @@ -135,7 +135,7 @@ static int decode_pal(MSS12Context *ctx, ArithCoder *acoder) | |||
| return !!ncol; | |||
| } | |||
| static int mss1_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| static int mss1_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -180,7 +180,7 @@ static int mss1_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| memcpy(ctx->pic.data[1], c->pal, AVPALETTE_SIZE); | |||
| ctx->pic.palette_has_changed = pal_changed; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = ctx->pic; | |||
| /* always report that the buffer was completely consumed */ | |||
| @@ -461,7 +461,7 @@ typedef struct Rectangle { | |||
| #define MAX_WMV9_RECTANGLES 20 | |||
| #define ARITH2_PADDING 2 | |||
| static int mss2_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| static int mss2_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -744,7 +744,7 @@ static int mss2_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| if (buf_size) | |||
| av_log(avctx, AV_LOG_WARNING, "buffer not fully consumed\n"); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame *)data = ctx->pic; | |||
| return avpkt->size; | |||
| @@ -674,7 +674,7 @@ static av_cold void init_coders(MSS3Context *ctx) | |||
| } | |||
| } | |||
| static int mss3_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| static int mss3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -740,7 +740,7 @@ static int mss3_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| c->pic.key_frame = keyframe; | |||
| c->pic.pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; | |||
| if (!bytestream2_get_bytes_left(&gb)) { | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = c->pic; | |||
| return buf_size; | |||
| @@ -798,7 +798,7 @@ static int mss3_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| dst[2] += c->pic.linesize[2] * 8; | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = c->pic; | |||
| return buf_size; | |||
| @@ -506,7 +506,7 @@ static inline void mss4_update_dc_cache(MSS4Context *c, int mb_x) | |||
| } | |||
| } | |||
| static int mss4_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| static int mss4_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -566,7 +566,7 @@ static int mss4_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| c->pic.pict_type = (frame_type == INTRA_FRAME) ? AV_PICTURE_TYPE_I | |||
| : AV_PICTURE_TYPE_P; | |||
| if (frame_type == SKIP_FRAME) { | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = c->pic; | |||
| return buf_size; | |||
| @@ -623,7 +623,7 @@ static int mss4_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| dst[2] += c->pic.linesize[2] * 16; | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = c->pic; | |||
| return buf_size; | |||
| @@ -286,7 +286,7 @@ static void msvideo1_decode_16bit(Msvideo1Context *s) | |||
| } | |||
| static int msvideo1_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -317,7 +317,7 @@ static int msvideo1_decode_frame(AVCodecContext *avctx, | |||
| else | |||
| msvideo1_decode_16bit(s); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| /* report that the buffer was completely consumed */ | |||
| @@ -156,7 +156,7 @@ static int mxpeg_check_dimensions(MXpegDecodeContext *s, MJpegDecodeContext *jpg | |||
| } | |||
| static int mxpeg_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -293,7 +293,7 @@ static int mxpeg_decode_frame(AVCodecContext *avctx, | |||
| the_end: | |||
| if (jpg->got_picture) { | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *picture = *jpg->picture_ptr; | |||
| s->picture_index ^= 1; | |||
| jpg->picture_ptr = &s->picture[s->picture_index]; | |||
| @@ -302,7 +302,7 @@ the_end: | |||
| if (!s->got_mxm_bitmask) | |||
| s->has_complete_frame = 1; | |||
| else | |||
| *data_size = 0; | |||
| *got_frame = 0; | |||
| } | |||
| } | |||
| @@ -138,7 +138,7 @@ static int codec_reinit(AVCodecContext *avctx, int width, int height, | |||
| return 1; | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -262,7 +262,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| } | |||
| *picture = c->pic; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| return orig_size; | |||
| } | |||
| @@ -77,7 +77,7 @@ static void pcx_palette(const uint8_t **src, uint32_t *dst, unsigned int pallen) | |||
| memset(dst, 0, (256 - pallen) * sizeof(*dst)); | |||
| } | |||
| static int pcx_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| static int pcx_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| @@ -231,7 +231,7 @@ static int pcx_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| } | |||
| *picture = s->picture; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| ret = buf - bufstart; | |||
| end: | |||
| @@ -98,7 +98,7 @@ static const uint8_t cga_mode45_index[6][4] = { | |||
| }; | |||
| static int decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| PicContext *s = avctx->priv_data; | |||
| @@ -238,7 +238,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
| return avpkt->size; | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| return avpkt->size; | |||
| } | |||
| @@ -383,7 +383,7 @@ static int png_decode_idat(PNGDecContext *s, int length) | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -611,7 +611,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
| } | |||
| *picture= *s->current_picture; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| ret = bytestream2_tell(&s->gb); | |||
| the_end: | |||
| @@ -27,7 +27,7 @@ | |||
| static int pnm_decode_frame(AVCodecContext *avctx, void *data, | |||
| int *data_size, AVPacket *avpkt) | |||
| int *got_frame, AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| @@ -182,7 +182,7 @@ static int pnm_decode_frame(AVCodecContext *avctx, void *data, | |||
| break; | |||
| } | |||
| *picture = s->picture; | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| return s->bytestream - s->bytestream_start; | |||
| } | |||
| @@ -599,7 +599,7 @@ static int decode_picture(ProresContext *ctx, int pic_num, | |||
| #define MOVE_DATA_PTR(nbytes) buf += (nbytes); buf_size -= (nbytes) | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| ProresContext *ctx = avctx->priv_data; | |||
| @@ -641,7 +641,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| MOVE_DATA_PTR(pic_data_size); | |||
| } | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| *(AVFrame*) data = *avctx->coded_frame; | |||
| return avpkt->size; | |||
| @@ -38,7 +38,7 @@ static av_cold int ptx_init(AVCodecContext *avctx) { | |||
| return 0; | |||
| } | |||
| static int ptx_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| static int ptx_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) { | |||
| const uint8_t *buf = avpkt->data; | |||
| const uint8_t *buf_end = avpkt->data + avpkt->size; | |||
| @@ -99,7 +99,7 @@ static int ptx_decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| } | |||
| *picture = s->picture; | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| if (y < h) { | |||
| av_log(avctx, AV_LOG_WARNING, "incomplete packet\n"); | |||
| @@ -35,7 +35,7 @@ typedef struct QdrawContext{ | |||
| } QdrawContext; | |||
| static int decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -140,7 +140,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
| outdata += a->pic.linesize[0]; | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = a->pic; | |||
| return buf_size; | |||
| @@ -240,7 +240,7 @@ static void qpeg_decode_inter(QpegContext *qctx, uint8_t *dst, | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| uint8_t ctable[128]; | |||
| @@ -280,7 +280,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
| } | |||
| memcpy(a->pic.data[1], a->pal, AVPALETTE_SIZE); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = a->pic; | |||
| return avpkt->size; | |||
| @@ -382,7 +382,7 @@ static av_cold int qtrle_decode_init(AVCodecContext *avctx) | |||
| } | |||
| static int qtrle_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| QtrleContext *s = avctx->priv_data; | |||
| @@ -478,7 +478,7 @@ static int qtrle_decode_frame(AVCodecContext *avctx, | |||
| } | |||
| done: | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| /* always report that the buffer was completely consumed */ | |||
| @@ -35,7 +35,7 @@ static av_cold int decode_init(AVCodecContext *avctx) | |||
| return 0; | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| int h, w; | |||
| @@ -82,7 +82,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, | |||
| dst_line += pic->linesize[0]; | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = *avctx->coded_frame; | |||
| return avpkt->size; | |||
| @@ -113,7 +113,7 @@ static void flip(AVCodecContext *avctx, AVPicture * picture){ | |||
| } | |||
| static int raw_decode(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -196,7 +196,7 @@ static int raw_decode(AVCodecContext *avctx, | |||
| } | |||
| } | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| return buf_size; | |||
| } | |||
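On the calling side the mechanics stay the same and only the meaning changes: the third argument is now a 0/1 flag rather than a byte count. A minimal consumer sketch, assuming the avcodec_decode_video2() API that these decoders sit behind:

```c
#include <libavcodec/avcodec.h>

/* Caller-side sketch: test got_frame for zero/non-zero only, never its value. */
static int decode_one(AVCodecContext *avctx, AVFrame *frame, AVPacket *pkt)
{
    int got_frame = 0;
    int ret = avcodec_decode_video2(avctx, frame, &got_frame, pkt);

    if (ret < 0)
        return ret;        /* decode error */
    if (got_frame) {
        /* frame is valid here: display it, filter it, ... */
    }
    /* got_frame == 0 means the decoder buffered the packet (e.g. reordering delay) */
    return ret;            /* bytes of pkt that were consumed */
}
```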
| @@ -172,7 +172,7 @@ static av_cold int rl2_decode_init(AVCodecContext *avctx) | |||
| static int rl2_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -195,7 +195,7 @@ static int rl2_decode_frame(AVCodecContext *avctx, | |||
| /** make the palette available on the way out */ | |||
| memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| /** report that the buffer was completely consumed */ | |||
| @@ -169,7 +169,7 @@ static av_cold int roq_decode_init(AVCodecContext *avctx) | |||
| } | |||
| static int roq_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -190,7 +190,7 @@ static int roq_decode_frame(AVCodecContext *avctx, | |||
| bytestream2_init(&s->gb, buf, buf_size); | |||
| roqvideo_decode_frame(s); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = *s->current_frame; | |||
| /* shuffle frames */ | |||
| @@ -244,7 +244,7 @@ static av_cold int rpza_decode_init(AVCodecContext *avctx) | |||
| } | |||
| static int rpza_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -263,7 +263,7 @@ static int rpza_decode_frame(AVCodecContext *avctx, | |||
| rpza_decode_stream(s); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| /* always report that the buffer was completely consumed */ | |||
| @@ -638,7 +638,7 @@ static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n) | |||
| } | |||
| static int rv10_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -703,7 +703,7 @@ static int rv10_decode_frame(AVCodecContext *avctx, | |||
| } | |||
| if(s->last_picture_ptr || s->low_delay){ | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| ff_print_debug_info(s, pict); | |||
| } | |||
| s->current_picture_ptr= NULL; //so we can detect if frame_end wasnt called (find some nicer solution...) | |||
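As the rv10 hunk above shows, the flag is only raised when a displayable picture exists (`last_picture_ptr || low_delay`); otherwise the call returns normally with got_frame left at 0. A hedged sketch of that conditional shape, with hypothetical names:

```c
#include <libavcodec/avcodec.h>

/* Hypothetical context; only the control flow mirrors the rv10 hunk above. */
typedef struct DelayContext {
    AVFrame display_frame;
    int     have_picture;   /* stands in for last_picture_ptr || low_delay */
} DelayContext;

static int delayed_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame, AVPacket *avpkt)
{
    DelayContext *s = avctx->priv_data;

    /* ... decode; a reordering-delayed stream may only update references ... */

    if (s->have_picture) {
        *(AVFrame*)data = s->display_frame;
        *got_frame = 1;
    }
    /* no else branch: a zero flag simply tells the caller there is no output frame */

    return avpkt->size;
}
```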
| @@ -133,7 +133,7 @@ typedef struct RV34DecContext{ | |||
| */ | |||
| int ff_rv34_get_start_offset(GetBitContext *gb, int blocks); | |||
| int ff_rv34_decode_init(AVCodecContext *avctx); | |||
| int ff_rv34_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt); | |||
| int ff_rv34_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt); | |||
| int ff_rv34_decode_end(AVCodecContext *avctx); | |||
| int ff_rv34_decode_init_thread_copy(AVCodecContext *avctx); | |||
| int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src); | |||
| @@ -151,7 +151,7 @@ static int read_uncompressed_sgi(unsigned char* out_buf, uint8_t* out_end, | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| SgiState *s = avctx->priv_data; | |||
| @@ -233,7 +233,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
| if (ret == 0) { | |||
| *picture = s->picture; | |||
| *data_size = sizeof(AVPicture); | |||
| *got_frame = 1; | |||
| return avpkt->size; | |||
| } else { | |||
| return ret; | |||
| @@ -349,7 +349,8 @@ static av_always_inline int smk_get_code(GetBitContext *gb, int *recode, int *la | |||
| return v; | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| SmackVContext * const smk = avctx->priv_data; | |||
| uint8_t *out; | |||
| @@ -496,7 +497,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac | |||
| } | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = smk->pic; | |||
| /* always report that the buffer was completely consumed */ | |||
| @@ -422,7 +422,7 @@ static av_cold int smc_decode_init(AVCodecContext *avctx) | |||
| } | |||
| static int smc_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -447,7 +447,7 @@ static int smc_decode_frame(AVCodecContext *avctx, | |||
| smc_decode_stream(s); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| *(AVFrame*)data = s->frame; | |||
| /* always report that the buffer was completely consumed */ | |||
| @@ -382,7 +382,9 @@ static int decode_blocks(SnowContext *s){ | |||
| return 0; | |||
| } | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){ | |||
| static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| SnowContext *s = avctx->priv_data; | |||
| @@ -542,7 +544,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac | |||
| else | |||
| *picture= s->mconly_picture; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| bytes_read= c->bytestream - c->bytestream_start; | |||
| if(bytes_read ==0) av_log(s->avctx, AV_LOG_ERROR, "error at end of frame\n"); //FIXME | |||
| @@ -31,7 +31,7 @@ | |||
| static int sp5x_decode_frame(AVCodecContext *avctx, | |||
| void *data, int *data_size, | |||
| void *data, int *got_frame, | |||
| AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| @@ -86,7 +86,7 @@ static int sp5x_decode_frame(AVCodecContext *avctx, | |||
| av_init_packet(&avpkt_recoded); | |||
| avpkt_recoded.data = recoded; | |||
| avpkt_recoded.size = j; | |||
| i = ff_mjpeg_decode_frame(avctx, data, data_size, &avpkt_recoded); | |||
| i = ff_mjpeg_decode_frame(avctx, data, got_frame, &avpkt_recoded); | |||
| av_free(recoded); | |||
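sp5x is the one indirect case in this section: it rebuilds the bitstream into a temporary packet and hands `data`/`got_frame` straight to ff_mjpeg_decode_frame, so the inner decoder decides whether a frame came out. A minimal forwarding sketch under that assumption (both function names below are hypothetical stand-ins):

```c
#include <libavcodec/avcodec.h>

/* Trivial stand-in for the delegated decoder (ff_mjpeg_decode_frame in sp5x). */
static int inner_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame, AVPacket *avpkt)
{
    (void)avctx; (void)data;
    *got_frame = avpkt->size > 0;   /* pretend any non-empty packet yields a frame */
    return avpkt->size;
}

static int wrapper_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame, AVPacket *avpkt)
{
    AVPacket recoded;

    av_init_packet(&recoded);
    recoded.data = avpkt->data;   /* sp5x actually re-codes the payload here */
    recoded.size = avpkt->size;

    /* data and got_frame pass through untouched: whatever the inner decoder
     * reports is what the wrapper's caller sees */
    return inner_decode_frame(avctx, data, got_frame, &recoded);
}
```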
| @@ -40,7 +40,8 @@ static av_cold int sunrast_init(AVCodecContext *avctx) { | |||
| } | |||
| static int sunrast_decode_frame(AVCodecContext *avctx, void *data, | |||
| int *data_size, AVPacket *avpkt) { | |||
| int *got_frame, AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| const uint8_t *buf_end = avpkt->data + avpkt->size; | |||
| SUNRASTContext * const s = avctx->priv_data; | |||
| @@ -181,7 +182,7 @@ static int sunrast_decode_frame(AVCodecContext *avctx, void *data, | |||
| } | |||
| *picture = s->picture; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| return buf - bufstart; | |||
| } | |||
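Note that the rename leaves the return value alone: pcx and sunrast return `buf - bufstart`, pnm returns the bytestream advance, and most of the others return `avpkt->size`. Consumed bytes and the got_frame flag are now two fully independent outputs, roughly:

```c
#include <libavcodec/avcodec.h>

/* Hypothetical decoder: the return value reports consumed input (or a
 * negative AVERROR), independently of whether *got_frame was raised. */
static int bytes_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf      = avpkt->data;
    const uint8_t *bufstart = buf;

    if (avpkt->size < 4)
        return AVERROR_INVALIDDATA;   /* error: no frame, nothing consumed */

    /* ... parse, advancing buf; on success export the picture through data ... */
    buf += avpkt->size;               /* this sketch consumes everything */

    *got_frame = 1;
    return buf - bufstart;            /* as in pcx, pnm and sunrast above */
}
```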
| @@ -587,7 +587,7 @@ static int svq1_decode_frame_header(GetBitContext *bitbuf, MpegEncContext *s) | |||
| } | |||
| static int svq1_decode_frame(AVCodecContext *avctx, void *data, | |||
| int *data_size, AVPacket *avpkt) | |||
| int *got_frame, AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| int buf_size = avpkt->size; | |||
| @@ -708,7 +708,7 @@ static int svq1_decode_frame(AVCodecContext *avctx, void *data, | |||
| ff_MPV_frame_end(s); | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| result = buf_size; | |||
| err: | |||
| @@ -1003,7 +1003,7 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx) | |||
| } | |||
| static int svq3_decode_frame(AVCodecContext *avctx, void *data, | |||
| int *data_size, AVPacket *avpkt) | |||
| int *got_frame, AVPacket *avpkt) | |||
| { | |||
| const uint8_t *buf = avpkt->data; | |||
| SVQ3Context *svq3 = avctx->priv_data; | |||
| @@ -1017,7 +1017,7 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data, | |||
| if (s->next_picture_ptr && !s->low_delay) { | |||
| *(AVFrame *) data = s->next_picture.f; | |||
| s->next_picture_ptr = NULL; | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| } | |||
| return 0; | |||
| } | |||
| @@ -1139,7 +1139,7 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data, | |||
| /* Do not output the last pic after seeking. */ | |||
| if (s->last_picture_ptr || s->low_delay) | |||
| *data_size = sizeof(AVFrame); | |||
| *got_frame = 1; | |||
| return buf_size; | |||
| } | |||
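The first svq3 hunk above raises the flag for a buffered `next_picture` even though no new packet data was decoded, which is how delayed output reaches the caller when the decoder is flushed. Assuming avcodec_decode_video2() and a decoder advertising CODEC_CAP_DELAY, the caller-side drain loop looks roughly like this:

```c
#include <libavcodec/avcodec.h>

/* Drain sketch: after the last real packet, feed empty packets until the
 * decoder stops raising got_frame, to collect reordering-delayed frames. */
static void drain_decoder(AVCodecContext *avctx, AVFrame *frame)
{
    AVPacket flush_pkt;
    int got_frame;

    av_init_packet(&flush_pkt);
    flush_pkt.data = NULL;
    flush_pkt.size = 0;

    do {
        got_frame = 0;
        if (avcodec_decode_video2(avctx, frame, &got_frame, &flush_pkt) < 0)
            break;
        if (got_frame) {
            /* the last delayed picture(s) arrive here */
        }
    } while (got_frame);
}
```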