* commit 'a8ea936a0a00570f61a16a588821b52f6a3115c2':
  libilbc: decode directly to the user-provided AVFrame
  dpcm: decode directly to the user-provided AVFrame
  imc/iac: decode directly to the user-provided AVFrame
  gsm: decode directly to the user-provided AVFrame

Conflicts:
	libavcodec/dpcm.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
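All four patches follow the same shape: the decoder context loses its embedded AVFrame, decode_frame() treats its `data` argument as the caller's AVFrame, sets nb_samples, calls ff_get_buffer() on that frame, and writes samples straight into it. A minimal sketch of the resulting decoder shape, assuming the two-argument ff_get_buffer() of this era; the FooContext name and samples_per_packet field are illustrative, not taken from any of the four files:

#include "avcodec.h"
#include "internal.h"   /* ff_get_buffer() */

typedef struct FooContext {
    int samples_per_packet;            /* hypothetical per-codec field */
} FooContext;

static int foo_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame_ptr, AVPacket *avpkt)
{
    FooContext *s  = avctx->priv_data; /* no embedded AVFrame any more */
    AVFrame *frame = data;             /* the caller's output frame */
    int ret;

    frame->nb_samples = s->samples_per_packet;
    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    /* ... decode avpkt->data into (int16_t *)frame->data[0] ... */

    *got_frame_ptr = 1;   /* the caller's frame now holds valid samples */
    return avpkt->size;   /* bytes of input consumed */
}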
@@ -44,7 +44,6 @@
 #include "mathops.h"
 
 typedef struct DPCMContext {
-    AVFrame frame;
     int16_t roq_square_array[256];
     int sample[2];                  ///< previous sample (for SOL_DPCM)
     const int8_t *sol_table;        ///< delta table for SOL_DPCM
@@ -163,9 +162,6 @@ static av_cold int dpcm_decode_init(AVCodecContext *avctx)
     else
         avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
@@ -175,6 +171,7 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
 {
     int buf_size = avpkt->size;
     DPCMContext *s = avctx->priv_data;
+    AVFrame *frame = data;
     int out = 0, ret;
     int predictor[2];
     int ch = 0;
@@ -213,12 +210,12 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    s->frame.nb_samples = (out + avctx->channels - 1) / avctx->channels;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = (out + avctx->channels - 1) / avctx->channels;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    output_samples = (int16_t *)s->frame.data[0];
+    output_samples = (int16_t *)frame->data[0];
     samples_end = output_samples + out;
 
     switch(avctx->codec->id) {
@@ -298,7 +295,7 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
     }
     case AV_CODEC_ID_SOL_DPCM:
         if (avctx->codec_tag != 3) {
-            uint8_t *output_samples_u8 = s->frame.data[0],
+            uint8_t *output_samples_u8 = frame->data[0],
                     *samples_end_u8 = output_samples_u8 + out;
             while (output_samples_u8 < samples_end_u8) {
                 int n = bytestream2_get_byteu(&gb);
@@ -325,8 +322,7 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
         break;
     }
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = s->frame;
+    *got_frame_ptr = 1;
 
     return avpkt->size;
 }
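A note on the dpcm hunks above: `out` counts the total number of interleaved samples across all channels, so the assignment to frame->nb_samples is a ceiling division that yields a per-channel count. A worked instance with made-up numbers:

#include <stdio.h>

int main(void)
{
    int out        = 735;   /* total decoded samples across all channels (made-up figure) */
    int channels   = 2;
    int nb_samples = (out + channels - 1) / channels;   /* ceiling division */

    printf("%d\n", nb_samples);   /* prints 368, i.e. ceil(735 / 2) */
    return 0;
}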
@@ -34,8 +34,6 @@
 static av_cold int gsm_init(AVCodecContext *avctx)
 {
-    GSMContext *s = avctx->priv_data;
-
     avctx->channels = 1;
     avctx->channel_layout = AV_CH_LAYOUT_MONO;
     if (!avctx->sample_rate)
@@ -52,16 +50,13 @@ static av_cold int gsm_init(AVCodecContext *avctx)
         avctx->block_align = GSM_MS_BLOCK_SIZE;
     }
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
 
 static int gsm_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
 {
     GSMContext *s = avctx->priv_data;
+    AVFrame *frame = data;
     int res;
     GetBitContext gb;
     const uint8_t *buf = avpkt->data;
@@ -74,12 +69,12 @@ static int gsm_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    s->frame.nb_samples = avctx->frame_size;
-    if ((res = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = avctx->frame_size;
+    if ((res = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return res;
     }
-    samples = (int16_t *)s->frame.data[0];
+    samples = (int16_t *)frame->data[0];
 
     switch (avctx->codec_id) {
     case AV_CODEC_ID_GSM:
@@ -96,8 +91,7 @@ static int gsm_decode_frame(AVCodecContext *avctx, void *data,
             return res;
     }
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = s->frame;
+    *got_frame_ptr = 1;
 
     return avctx->block_align;
 }
@@ -26,7 +26,6 @@
 #include "avcodec.h"
 
 typedef struct GSMContext {
-    AVFrame frame;
     // Contains first 120 elements from the previous frame
     // (used by long_term_synth according to the "lag"),
     // then in the following 160 elements the current
@@ -81,8 +81,6 @@ typedef struct IMCChannel {
 } IMCChannel;
 
 typedef struct {
-    AVFrame frame;
-
     IMCChannel chctx[2];
 
     /** MDCT tables */
@@ -253,9 +251,6 @@ static av_cold int imc_decode_init(AVCodecContext *avctx)
     avctx->channel_layout = avctx->channels == 1 ? AV_CH_LAYOUT_MONO
                                                  : AV_CH_LAYOUT_STEREO;
 
-    avcodec_get_frame_defaults(&q->frame);
-    avctx->coded_frame = &q->frame;
-
     return 0;
 }
@@ -937,6 +932,7 @@ static int imc_decode_block(AVCodecContext *avctx, IMCContext *q, int ch)
 static int imc_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     int ret, i;
@@ -951,14 +947,14 @@ static int imc_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    q->frame.nb_samples = COEFFS;
-    if ((ret = ff_get_buffer(avctx, &q->frame)) < 0) {
+    frame->nb_samples = COEFFS;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
    }
 
     for (i = 0; i < avctx->channels; i++) {
-        q->out_samples = (float *)q->frame.extended_data[i];
+        q->out_samples = (float *)frame->extended_data[i];
 
         q->dsp.bswap16_buf(buf16, (const uint16_t*)buf, IMC_BLOCK_SIZE / 2);
@@ -971,12 +967,11 @@ static int imc_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     if (avctx->channels == 2) {
-        q->fdsp.butterflies_float((float *)q->frame.extended_data[0],
-                                  (float *)q->frame.extended_data[1], COEFFS);
+        q->fdsp.butterflies_float((float *)frame->extended_data[0],
+                                  (float *)frame->extended_data[1], COEFFS);
     }
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = q->frame;
+    *got_frame_ptr = 1;
 
     return IMC_BLOCK_SIZE * avctx->channels;
 }
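Since IMC/IAC outputs planar floats (AV_SAMPLE_FMT_FLTP), each channel lands in its own frame->extended_data[] plane, which is why the stereo butterfly above operates on planes 0 and 1. An illustrative caller-side helper, not part of the patch, that scans one decoded plane:

#include <math.h>
#include "libavcodec/avcodec.h"

/* Illustrative only: peak absolute sample of one channel plane of a frame
 * decoded as AV_SAMPLE_FMT_FLTP. */
static float plane_peak(const AVFrame *frame, int ch)
{
    const float *s = (const float *)frame->extended_data[ch];
    float peak = 0.0f;
    int i;

    for (i = 0; i < frame->nb_samples; i++)
        peak = fmaxf(peak, fabsf(s[i]));
    return peak;
}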
@@ -41,7 +41,6 @@ static int get_mode(AVCodecContext *avctx)
 
 typedef struct ILBCDecContext {
     const AVClass *class;
-    AVFrame frame;
     iLBC_Dec_Inst_t decoder;
     int enhance;
 } ILBCDecContext;
@@ -69,8 +68,6 @@ static av_cold int ilbc_decode_init(AVCodecContext *avctx)
     }
 
     WebRtcIlbcfix_InitDecode(&s->decoder, mode, s->enhance);
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
 
     avctx->channels = 1;
     avctx->channel_layout = AV_CH_LAYOUT_MONO;
@@ -86,6 +83,7 @@ static int ilbc_decode_frame(AVCodecContext *avctx, void *data,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     ILBCDecContext *s = avctx->priv_data;
+    AVFrame *frame = data;
     int ret;
 
     if (s->decoder.no_of_bytes > buf_size) {
@@ -94,17 +92,16 @@ static int ilbc_decode_frame(AVCodecContext *avctx, void *data,
         return AVERROR_INVALIDDATA;
     }
 
-    s->frame.nb_samples = s->decoder.blockl;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = s->decoder.blockl;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
 
-    WebRtcIlbcfix_DecodeImpl((WebRtc_Word16*) s->frame.data[0],
+    WebRtcIlbcfix_DecodeImpl((WebRtc_Word16*) frame->data[0],
                              (const WebRtc_UWord16*) buf, &s->decoder, 1);
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = s->frame;
+    *got_frame_ptr = 1;
 
     return s->decoder.no_of_bytes;
 }
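On the public API side, the "user-provided AVFrame" is the frame the caller hands to avcodec_decode_audio4(), the decode entry point of this era: after these patches the decoders fill it directly instead of copying from a context-owned frame. A minimal caller sketch, assuming avctx is an opened audio decoder and pkt a packet obtained from a demuxer:

#include "libavcodec/avcodec.h"

static int decode_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    AVFrame *frame = avcodec_alloc_frame();   /* caller-owned frame struct */
    int got_frame  = 0;
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);

    ret = avcodec_decode_audio4(avctx, frame, &got_frame, pkt);
    if (ret >= 0 && got_frame) {
        /* the decoder wrote its samples straight into frame->data[] /
         * frame->extended_data[]; consume frame->nb_samples of them here */
    }

    avcodec_free_frame(&frame);
    return ret;
}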