* commit 'cb7b47a61dba0b9329ecede5dd3211dc0662dc05':
  g726: decode directly to the user-provided AVFrame
  g723.1: decode directly to the user-provided AVFrame
  g722: decode directly to the user-provided AVFrame
  flac: decode directly to the user-provided AVFrame
  cinaudio: decode directly to the user-provided AVFrame

Conflicts:
	libavcodec/g723_1.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
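For context, a rough caller-side sketch of what "decode directly to the user-provided AVFrame" means with the lavc API of this era (avcodec_decode_audio4, avcodec_alloc_frame/avcodec_free_frame). Error handling is trimmed and the helper name decode_packet is invented for illustration:

    #include <libavcodec/avcodec.h>

    static int decode_packet(AVCodecContext *avctx, AVPacket *pkt)
    {
        /* The caller owns the AVFrame; after this series the decoders fill it
         * via ff_get_buffer() instead of copying from an internal context frame. */
        AVFrame *frame = avcodec_alloc_frame();   /* av_frame_alloc() in later lavc */
        int got_frame  = 0, ret;

        if (!frame)
            return AVERROR(ENOMEM);

        ret = avcodec_decode_audio4(avctx, frame, &got_frame, pkt);
        if (ret >= 0 && got_frame) {
            /* frame->nb_samples and frame->data[] now describe the decoded audio */
        }

        avcodec_free_frame(&frame);               /* av_frame_free() in later lavc */
        return ret;
    }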
@@ -46,7 +46,6 @@ typedef struct CinVideoContext {
 } CinVideoContext;
 
 typedef struct CinAudioContext {
-    AVFrame frame;
     int initial_decode_frame;
     int delta;
 } CinAudioContext;
@@ -349,15 +348,13 @@ static av_cold int cinaudio_decode_init(AVCodecContext *avctx)
     avctx->channels = 1;
     avctx->channel_layout = AV_CH_LAYOUT_MONO;
 
-    avcodec_get_frame_defaults(&cin->frame);
-    avctx->coded_frame = &cin->frame;
-
     return 0;
 }
 
 static int cinaudio_decode_frame(AVCodecContext *avctx, void *data,
                                  int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     CinAudioContext *cin = avctx->priv_data;
     const uint8_t *buf_end = buf + avpkt->size;
@@ -365,12 +362,12 @@ static int cinaudio_decode_frame(AVCodecContext *avctx, void *data,
     int delta, ret;
 
     /* get output buffer */
-    cin->frame.nb_samples = avpkt->size - cin->initial_decode_frame;
-    if ((ret = ff_get_buffer(avctx, &cin->frame)) < 0) {
+    frame->nb_samples = avpkt->size - cin->initial_decode_frame;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    samples = (int16_t *)cin->frame.data[0];
+    samples = (int16_t *)frame->data[0];
 
     delta = cin->delta;
     if (cin->initial_decode_frame) {
@@ -386,8 +383,7 @@ static int cinaudio_decode_frame(AVCodecContext *avctx, void *data,
     }
     cin->delta = delta;
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = cin->frame;
+    *got_frame_ptr = 1;
 
     return avpkt->size;
 }
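All five decoders end up with the same shape after these hunks. A condensed, hypothetical decoder (names invented, sample count codec-specific) showing the common pattern:

    /* In-tree sketch; assumes libavcodec's internal headers, roughly:
     *   #include "avcodec.h"
     *   #include "internal.h"   // ff_get_buffer()
     * The output frame arrives through the void *data argument, so the private
     * context no longer carries an AVFrame and no final copy into data is needed. */
    static int foo_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
    {
        AVFrame *frame = data;                       /* user-provided frame */
        int ret;

        frame->nb_samples = avpkt->size;             /* codec-specific count */
        if ((ret = ff_get_buffer(avctx, frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }

        /* ... decode avpkt->data into frame->data[0] ... */

        *got_frame_ptr = 1;                          /* no *(AVFrame *)data copy */
        return avpkt->size;
    }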
@@ -49,7 +49,6 @@ typedef struct FLACContext {
     FLACSTREAMINFO
 
     AVCodecContext *avctx;   ///< parent AVCodecContext
-    AVFrame frame;
     GetBitContext gb;        ///< GetBitContext initialized to start at the current frame
 
     int blocksize;           ///< number of samples in the current frame
@@ -113,9 +112,6 @@ static av_cold int flac_decode_init(AVCodecContext *avctx)
     ff_flacdsp_init(&s->dsp, avctx->sample_fmt, s->bps);
     s->got_streaminfo = 1;
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
 
@@ -491,6 +487,7 @@ static int decode_frame(FLACContext *s)
 static int flac_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame     = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     FLACContext *s = avctx->priv_data;
@@ -539,13 +536,13 @@ static int flac_decode_frame(AVCodecContext *avctx, void *data,
     bytes_read = (get_bits_count(&s->gb)+7)/8;
 
     /* get output buffer */
-    s->frame.nb_samples = s->blocksize;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = s->blocksize;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
 
-    s->dsp.decorrelate[s->ch_mode](s->frame.data, s->decoded, s->channels,
+    s->dsp.decorrelate[s->ch_mode](frame->data, s->decoded, s->channels,
                                    s->blocksize, s->sample_shift);
 
     if (bytes_read > buf_size) {
@@ -557,8 +554,7 @@ static int flac_decode_frame(AVCodecContext *avctx, void *data,
                                buf_size - bytes_read, buf_size);
     }
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = s->frame;
+    *got_frame_ptr = 1;
 
     return bytes_read;
 }
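The decorrelate call above is what actually writes into the user frame: it undoes FLAC's inter-channel decorrelation while packing the int32 residual planes into the output sample format. A simplified, hypothetical version for left/side stereo with 16-bit planar output (the real implementations live in flacdsp and also cover mid/side, right/side and other sample formats):

    #include <stdint.h>

    /* Channel 0 holds the left signal, channel 1 the "side" (left - right),
     * so the right channel is reconstructed as left - side.  sample_shift is
     * the left shift used to scale the decoded samples to the output width. */
    static void decorrelate_left_side_16p(uint8_t **out, int32_t **in,
                                          int len, int shift)
    {
        int16_t *left  = (int16_t *)out[0];
        int16_t *right = (int16_t *)out[1];

        for (int i = 0; i < len; i++) {
            int32_t l = in[0][i];
            int32_t s = in[1][i];
            left[i]  = l       << shift;
            right[i] = (l - s) << shift;
        }
    }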
@@ -32,7 +32,6 @@
 
 typedef struct G722Context {
     const AVClass *class;
-    AVFrame frame;
     int bits_per_codeword;
     int16_t prev_samples[PREV_SAMPLES_BUF_SIZE]; ///< memory of past decoded samples
     int prev_samples_pos; ///< the number of values in prev_samples
@@ -67,9 +67,6 @@ static av_cold int g722_decode_init(AVCodecContext * avctx)
     c->band[1].scale_factor = 2;
     c->prev_samples_pos = 22;
 
-    avcodec_get_frame_defaults(&c->frame);
-    avctx->coded_frame = &c->frame;
-
     return 0;
 }
 
@@ -88,6 +85,7 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
 {
     G722Context *c = avctx->priv_data;
+    AVFrame *frame = data;
     int16_t *out_buf;
     int j, ret;
     const int skip = 8 - c->bits_per_codeword;
@@ -95,12 +93,12 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
     GetBitContext gb;
 
     /* get output buffer */
-    c->frame.nb_samples = avpkt->size * 2;
-    if ((ret = ff_get_buffer(avctx, &c->frame)) < 0) {
+    frame->nb_samples = avpkt->size * 2;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    out_buf = (int16_t *)c->frame.data[0];
+    out_buf = (int16_t *)frame->data[0];
 
     init_get_bits(&gb, avpkt->data, avpkt->size * 8);
 
@@ -135,8 +133,7 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
         }
     }
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = c->frame;
+    *got_frame_ptr = 1;
 
    return avpkt->size;
 }
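The avpkt->size * 2 above follows from the G.722 bitstream layout: each input byte carries one codeword, and each codeword is synthesised into two output samples by the QMF. A hypothetical helper pair illustrating that mapping (names invented; the real decoder reads the fields with a GetBitContext, and with bits_per_codeword < 8 the unused low bits of each byte are skipped):

    #include <stdint.h>

    /* One codeword per byte, two PCM samples per codeword. */
    static int g722_samples_per_packet(int packet_size)
    {
        return packet_size * 2;
    }

    /* Extract the codewords; the low (8 - bits_per_codeword) bits are padding. */
    static void g722_unpack_codewords(const uint8_t *buf, int size,
                                      int bits_per_codeword, uint8_t *codewords)
    {
        const int skip = 8 - bits_per_codeword;
        for (int i = 0; i < size; i++)
            codewords[i] = buf[i] >> skip;
    }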
@@ -42,7 +42,6 @@
 
 typedef struct g723_1_context {
     AVClass *class;
-    AVFrame frame;
 
     G723_1_Subframe subframe[4];
     enum FrameType cur_frame_type;
@@ -93,9 +92,6 @@ static av_cold int g723_1_decode_init(AVCodecContext *avctx)
     avctx->channels = 1;
     p->pf_gain = 1 << 12;
 
-    avcodec_get_frame_defaults(&p->frame);
-    avctx->coded_frame = &p->frame;
-
     memcpy(p->prev_lsp, dc_lsp, LPC_ORDER * sizeof(*p->prev_lsp));
     memcpy(p->sid_lsp, dc_lsp, LPC_ORDER * sizeof(*p->sid_lsp));
 
@@ -1158,6 +1154,7 @@ static int g723_1_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
 {
     G723_1_Context *p = avctx->priv_data;
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     int dec_mode = buf[0] & 3;
@@ -1187,13 +1184,13 @@ static int g723_1_decode_frame(AVCodecContext *avctx, void *data,
         p->cur_frame_type = UNTRANSMITTED_FRAME;
     }
 
-    p->frame.nb_samples = FRAME_LEN;
-    if ((ret = ff_get_buffer(avctx, &p->frame)) < 0) {
+    frame->nb_samples = FRAME_LEN;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
 
-    out = (int16_t *)p->frame.data[0];
+    out = (int16_t *)frame->data[0];
 
     if (p->cur_frame_type == ACTIVE_FRAME) {
         if (!bad_frame)
@@ -1264,7 +1261,7 @@ static int g723_1_decode_frame(AVCodecContext *avctx, void *data,
                    (FRAME_LEN + PITCH_MAX) * sizeof(*p->excitation));
             memset(p->prev_excitation, 0,
                    PITCH_MAX * sizeof(*p->excitation));
-            memset(p->frame.data[0], 0,
+            memset(frame->data[0], 0,
                    (FRAME_LEN + LPC_ORDER) * sizeof(int16_t));
         } else {
             int16_t *buf = p->audio + LPC_ORDER;
@@ -1313,8 +1310,7 @@ static int g723_1_decode_frame(AVCodecContext *avctx, void *data,
         out[i] = av_clip_int16(p->audio[LPC_ORDER + i] << 1);
     }
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = p->frame;
+    *got_frame_ptr = 1;
 
     return frame_size[dec_mode];
 }
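The return value frame_size[dec_mode] reflects how G.723.1 packets are framed: the two low bits of the first byte select the rate mode, the packet size is fixed per mode, and every packet decodes to FRAME_LEN samples regardless of mode. A hedged sketch of that relationship (constants as I recall them from ITU-T G.723.1; the real table is frame_size[] in g723_1.c):

    #include <stdint.h>

    #define FRAME_LEN 240   /* 30 ms of output at 8 kHz, whatever the rate mode */

    /* Packet size in bytes for each rate mode selected by buf[0] & 3:
     * 6.3 kbit/s, 5.3 kbit/s, SID (comfort noise), untransmitted. */
    static int g723_1_packet_size(const uint8_t *buf)
    {
        static const int frame_size[4] = { 24, 20, 4, 1 };
        return frame_size[buf[0] & 3];
    }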
@@ -77,7 +77,6 @@ typedef struct G726Tables {
 
 typedef struct G726Context {
     AVClass *class;
-    AVFrame frame;
     G726Tables tbls; /**< static tables needed for computation */
 
     Float11 sr[2]; /**< prev. reconstructed samples */
@@ -432,15 +431,13 @@ static av_cold int g726_decode_init(AVCodecContext *avctx)
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
-    avcodec_get_frame_defaults(&c->frame);
-    avctx->coded_frame = &c->frame;
-
     return 0;
 }
 
 static int g726_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     G726Context *c = avctx->priv_data;
@@ -451,12 +448,12 @@ static int g726_decode_frame(AVCodecContext *avctx, void *data,
     out_samples = buf_size * 8 / c->code_size;
 
     /* get output buffer */
-    c->frame.nb_samples = out_samples;
-    if ((ret = ff_get_buffer(avctx, &c->frame)) < 0) {
+    frame->nb_samples = out_samples;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    samples = (int16_t *)c->frame.data[0];
+    samples = (int16_t *)frame->data[0];
 
     init_get_bits(&gb, buf, buf_size * 8);
 
@@ -466,8 +463,7 @@ static int g726_decode_frame(AVCodecContext *avctx, void *data,
     if (get_bits_left(&gb) > 0)
         av_log(avctx, AV_LOG_ERROR, "Frame invalidly split, missing parser?\n");
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = c->frame;
+    *got_frame_ptr = 1;
 
     return buf_size;
 }
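The out_samples = buf_size * 8 / c->code_size line is the usual ADPCM sizing: code_size is the number of bits per G.726 code word (2 to 5 bits for the 16/24/32/40 kbit/s modes), one output sample per code word. A trivial sketch with an invented helper name:

    /* Samples per packet for G.726: code_size bits per ADPCM code word. */
    static int g726_samples_per_packet(int buf_size, int code_size)
    {
        return buf_size * 8 / code_size;
    }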