* commit '1b9b6d6e5ea556b6d307f9d473f54f6406fdc3c8':
  qcelp: decode directly to the user-provided AVFrame
  pcm-bluray: decode directly to the user-provided AVFrame
  nellymoser: decode directly to the user-provided AVFrame
  mpc7/8: decode directly to the user-provided AVFrame
  mpegaudio: decode directly to the user-provided AVFrame
  mlp/truehd: decode directly to the user-provided AVFrame

Conflicts:
	libavcodec/mpc7.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
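Each commit in this series applies the same transformation: the AVFrame embedded in the decoder's private context goes away (along with the avcodec_get_frame_defaults()/avctx->coded_frame bookkeeping in init), and the decode callback instead casts its void *data argument to AVFrame * and decodes straight into the caller's frame, skipping the final struct copy. A minimal sketch of the before/after shape follows; it uses small stub types and a get_buffer_stub() in place of the real libavcodec structs and ff_get_buffer(), so it is an illustration of the pattern, not the actual FFmpeg code.

```c
/* Sketch only: stub types stand in for AVFrame, AVCodecContext, AVPacket
 * and ff_get_buffer() from libavcodec. */
#include <stdint.h>
#include <stdlib.h>

typedef struct AVFrame        { int nb_samples; uint8_t *data[8]; } AVFrame;
typedef struct AVCodecContext { void *priv_data; } AVCodecContext;
typedef struct AVPacket       { uint8_t *data; int size; } AVPacket;

/* Stand-in for ff_get_buffer(): just allocates data[0] for 16-bit mono. */
static int get_buffer_stub(AVCodecContext *avctx, AVFrame *frame)
{
    (void)avctx;
    frame->data[0] = malloc(frame->nb_samples * sizeof(int16_t));
    return frame->data[0] ? 0 : -1;
}

/* Old shape: decoder-owned AVFrame, copied out to the caller by value. */
typedef struct OldContext { AVFrame frame; } OldContext;

static int old_decode(AVCodecContext *avctx, void *data,
                      int *got_frame_ptr, AVPacket *avpkt)
{
    OldContext *s = avctx->priv_data;
    int ret;

    s->frame.nb_samples = 160;              /* example count only */
    if ((ret = get_buffer_stub(avctx, &s->frame)) < 0)
        return ret;
    /* ... decode into s->frame.data[0] ... */
    *got_frame_ptr   = 1;
    *(AVFrame *)data = s->frame;            /* extra struct copy */
    return avpkt->size;
}

/* New shape: decode directly into the frame the caller passed in. */
static int new_decode(AVCodecContext *avctx, void *data,
                      int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame = data;
    int ret;

    frame->nb_samples = 160;                /* example count only */
    if ((ret = get_buffer_stub(avctx, frame)) < 0)
        return ret;
    /* ... decode into frame->data[0] ... */
    *got_frame_ptr = 1;
    return avpkt->size;
}

int main(void)
{
    AVCodecContext avctx = { 0 };
    AVFrame out = { 0 };
    AVPacket pkt = { 0 };
    int got = 0;
    int ret = new_decode(&avctx, &out, &got, &pkt);
    free(out.data[0]);
    return (ret >= 0 && got) ? 0 : 1;
}
```

The per-codec hunks below are mechanical applications of this: references to m->frame, c->frame, s->frame and q->avframe become references to the local frame, the init-time frame setup disappears, and so does one full AVFrame copy per decoded packet.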
@@ -118,7 +118,6 @@ typedef struct SubStream {
 typedef struct MLPDecodeContext {
     AVCodecContext *avctx;
-    AVFrame frame;
     /// Current access unit being read has a major sync.
     int is_major_sync_unit;
@@ -271,9 +270,6 @@ static av_cold int mlp_decode_init(AVCodecContext *avctx)
         m->substream[substr].lossless_check_data = 0xffffffff;
     ff_mlpdsp_init(&m->dsp);
-    avcodec_get_frame_defaults(&m->frame);
-    avctx->coded_frame = &m->frame;
     return 0;
 }
@@ -1005,7 +1001,7 @@ static void rematrix_channels(MLPDecodeContext *m, unsigned int substr)
 /** Write the audio data into the output buffer. */
 static int output_data(MLPDecodeContext *m, unsigned int substr,
-                       void *data, int *got_frame_ptr)
+                       AVFrame *frame, int *got_frame_ptr)
 {
     AVCodecContext *avctx = m->avctx;
     SubStream *s = &m->substream[substr];
@@ -1021,13 +1017,13 @@ static int output_data(MLPDecodeContext *m, unsigned int substr,
     }
     /* get output buffer */
-    m->frame.nb_samples = s->blockpos;
-    if ((ret = ff_get_buffer(avctx, &m->frame)) < 0) {
+    frame->nb_samples = s->blockpos;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    data_32 = (int32_t *)m->frame.data[0];
-    data_16 = (int16_t *)m->frame.data[0];
+    data_32 = (int32_t *)frame->data[0];
+    data_16 = (int16_t *)frame->data[0];
     for (i = 0; i < s->blockpos; i++) {
         for (out_ch = 0; out_ch <= s->max_matrix_channel; out_ch++) {
@@ -1040,8 +1036,7 @@ static int output_data(MLPDecodeContext *m, unsigned int substr,
         }
     }
-    *got_frame_ptr = 1;
-    *(AVFrame *)data = m->frame;
+    *got_frame_ptr = 1;
     return 0;
 }
@@ -50,7 +50,6 @@ typedef struct Band {
 }Band;
 typedef struct MPCContext {
-    AVFrame frame;
     DSPContext dsp;
     MPADSPContext mpadsp;
     GetBitContext gb;
@@ -95,9 +95,6 @@ static av_cold int mpc7_decode_init(AVCodecContext * avctx)
     avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
     avctx->channel_layout = AV_CH_LAYOUT_STEREO;
-    avcodec_get_frame_defaults(&c->frame);
-    avctx->coded_frame = &c->frame;
     if(vlc_initialized) return 0;
     av_log(avctx, AV_LOG_DEBUG, "Initing VLC\n");
     scfi_vlc.table = scfi_table;
@@ -197,6 +194,7 @@ static int get_scale_idx(GetBitContext *gb, int ref)
 static int mpc7_decode_frame(AVCodecContext * avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size;
     MPCContext *c = avctx->priv_data;
@@ -226,8 +224,8 @@ static int mpc7_decode_frame(AVCodecContext * avctx, void *data,
     buf_size -= 4;
     /* get output buffer */
-    c->frame.nb_samples = MPC_FRAME_SIZE;
-    if ((ret = ff_get_buffer(avctx, &c->frame)) < 0) {
+    frame->nb_samples = MPC_FRAME_SIZE;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -295,9 +293,9 @@ static int mpc7_decode_frame(AVCodecContext * avctx, void *data,
         for(ch = 0; ch < 2; ch++)
             idx_to_quant(c, &gb, bands[i].res[ch], c->Q[ch] + off);
-    ff_mpc_dequantize_and_synth(c, mb, (int16_t **)c->frame.extended_data, 2);
+    ff_mpc_dequantize_and_synth(c, mb, (int16_t **)frame->extended_data, 2);
     if(last_frame)
-        c->frame.nb_samples = c->lastframelen;
+        frame->nb_samples = c->lastframelen;
     bits_used = get_bits_count(&gb);
     bits_avail = buf_size * 8;
@@ -311,8 +309,7 @@ static int mpc7_decode_frame(AVCodecContext * avctx, void *data,
         return avpkt->size;
     }
-    *got_frame_ptr = 1;
-    *(AVFrame *)data = c->frame;
+    *got_frame_ptr = 1;
     return avpkt->size;
 }
@@ -144,9 +144,6 @@ static av_cold int mpc8_decode_init(AVCodecContext * avctx)
     avctx->channel_layout = (channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
     avctx->channels = channels;
-    avcodec_get_frame_defaults(&c->frame);
-    avctx->coded_frame = &c->frame;
     if(vlc_initialized) return 0;
     av_log(avctx, AV_LOG_DEBUG, "Initing VLC\n");
@@ -244,6 +241,7 @@ static av_cold int mpc8_decode_init(AVCodecContext * avctx)
 static int mpc8_decode_frame(AVCodecContext * avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     MPCContext *c = avctx->priv_data;
@@ -255,8 +253,8 @@ static int mpc8_decode_frame(AVCodecContext * avctx, void *data,
     int last[2];
     /* get output buffer */
-    c->frame.nb_samples = MPC_FRAME_SIZE;
-    if ((res = ff_get_buffer(avctx, &c->frame)) < 0) {
+    frame->nb_samples = MPC_FRAME_SIZE;
+    if ((res = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return res;
     }
@@ -415,7 +413,7 @@ static int mpc8_decode_frame(AVCodecContext * avctx, void *data,
     }
     ff_mpc_dequantize_and_synth(c, maxband - 1,
-                                (int16_t **)c->frame.extended_data,
+                                (int16_t **)frame->extended_data,
                                 avctx->channels);
     c->cur_frame++;
@@ -426,8 +424,7 @@ static int mpc8_decode_frame(AVCodecContext * avctx, void *data,
     if(c->cur_frame >= c->frames)
         c->cur_frame = 0;
-    *got_frame_ptr = 1;
-    *(AVFrame *)data = c->frame;
+    *got_frame_ptr = 1;
     return c->cur_frame ? c->last_bits_used >> 3 : buf_size;
 }
@@ -86,7 +86,7 @@ typedef struct MPADecodeContext {
     AVCodecContext* avctx;
     MPADSPContext mpadsp;
     AVFloatDSPContext fdsp;
-    AVFrame frame;
+    AVFrame *frame;
 } MPADecodeContext;
 #if CONFIG_FLOAT
@@ -455,9 +455,6 @@ static av_cold int decode_init(AVCodecContext * avctx)
     if (avctx->codec_id == AV_CODEC_ID_MP3ADU)
         s->adu_mode = 1;
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
     return 0;
 }
@@ -1630,12 +1627,13 @@ static int mp_decode_frame(MPADecodeContext *s, OUT_INT **samples,
     /* get output buffer */
     if (!samples) {
-        s->frame.nb_samples = s->avctx->frame_size;
-        if ((ret = ff_get_buffer(s->avctx, &s->frame)) < 0) {
+        av_assert0(s->frame != NULL);
+        s->frame->nb_samples = s->avctx->frame_size;
+        if ((ret = ff_get_buffer(s->avctx, s->frame)) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }
-        samples = (OUT_INT **)s->frame.extended_data;
+        samples = (OUT_INT **)s->frame->extended_data;
     }
     /* apply the synthesis filter */
@@ -1707,11 +1705,13 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr,
         buf_size= s->frame_size;
     }
+    s->frame = data;
     ret = mp_decode_frame(s, NULL, buf, buf_size);
     if (ret >= 0) {
-        *got_frame_ptr = 1;
-        *(AVFrame *)data = s->frame;
-        avctx->sample_rate = s->sample_rate;
+        s->frame->nb_samples = avctx->frame_size;
+        *got_frame_ptr = 1;
+        avctx->sample_rate = s->sample_rate;
         //FIXME maybe move the other codec info stuff from above here too
     } else {
         av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n");
@@ -1779,14 +1779,15 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data,
     s->frame_size = len;
+    s->frame = data;
     ret = mp_decode_frame(s, NULL, buf, buf_size);
     if (ret < 0) {
         av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n");
         return ret;
     }
-    *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
+    *got_frame_ptr = 1;
     return buf_size;
 }
@@ -1798,7 +1799,6 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data,
 * Context for MP3On4 decoder
 */
 typedef struct MP3On4DecodeContext {
-    AVFrame *frame;
     int frames; ///< number of mp3 frames per block (number of mp3 decoder instances)
     int syncword; ///< syncword patch
     const uint8_t *coff; ///< channel offsets in output buffer
@@ -1887,7 +1887,6 @@ static int decode_init_mp3on4(AVCodecContext * avctx)
     // Put decoder context in place to make init_decode() happy
     avctx->priv_data = s->mp3decctx[0];
     decode_init(avctx);
-    s->frame = avctx->coded_frame;
     // Restore mp3on4 context pointer
     avctx->priv_data = s;
     s->mp3decctx[0]->adu_mode = 1; // Set adu mode
@@ -1924,6 +1923,7 @@ static void flush_mp3on4(AVCodecContext *avctx)
 static int decode_frame_mp3on4(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     MP3On4DecodeContext *s = avctx->priv_data;
@@ -1935,12 +1935,12 @@ static int decode_frame_mp3on4(AVCodecContext *avctx, void *data,
     int fr, ch, ret;
     /* get output buffer */
-    s->frame->nb_samples = MPA_FRAME_SIZE;
-    if ((ret = ff_get_buffer(avctx, s->frame)) < 0) {
+    frame->nb_samples = MPA_FRAME_SIZE;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    out_samples = (OUT_INT **)s->frame->extended_data;
+    out_samples = (OUT_INT **)frame->extended_data;
     // Discard too short frames
     if (buf_size < HEADER_SIZE)
@@ -1990,9 +1990,8 @@ static int decode_frame_mp3on4(AVCodecContext *avctx, void *data,
     /* update codec info */
     avctx->sample_rate = s->mp3decctx[0]->sample_rate;
-    s->frame->nb_samples = out_size / (avctx->channels * sizeof(OUT_INT));
-    *got_frame_ptr = 1;
-    *(AVFrame *)data = *s->frame;
+    frame->nb_samples = out_size / (avctx->channels * sizeof(OUT_INT));
+    *got_frame_ptr = 1;
     return buf_size;
 }
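The mpegaudio change above is the one place in this series that keeps a frame field in the context: mp_decode_frame() is shared by the plain, ADU and MP3on4 entry points, so the field simply becomes a pointer that each callback aims at the caller's frame before calling in, while the MP3on4 callback drops its own context pointer and works on the local frame = data. A hedged sketch of that indirection follows, reusing the stub types and get_buffer_stub() from the sketch near the top; MPACtxSketch, mp_decode_frame_sketch, mpa_decode_frame_sketch and the 1152-sample frame size are simplified stand-ins, not the real FFmpeg symbols.

```c
/* Sketch of the shared-worker arrangement: the worker reaches the output
 * frame through the context, so the entry point points s->frame at the
 * caller-provided frame first. */
typedef struct MPACtxSketch { AVFrame *frame; } MPACtxSketch;

static int mp_decode_frame_sketch(MPACtxSketch *s, AVCodecContext *avctx)
{
    int ret;
    s->frame->nb_samples = 1152;          /* assumed frame size for illustration */
    if ((ret = get_buffer_stub(avctx, s->frame)) < 0)
        return ret;
    /* ... synthesis writes into s->frame->data ... */
    return 0;
}

static int mpa_decode_frame_sketch(AVCodecContext *avctx, void *data,
                                   int *got_frame_ptr, AVPacket *avpkt)
{
    MPACtxSketch *s = avctx->priv_data;
    int ret;

    s->frame = data;                      /* aim the shared worker at the user's frame */
    if ((ret = mp_decode_frame_sketch(s, avctx)) < 0)
        return ret;
    *got_frame_ptr = 1;
    return avpkt->size;
}
```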
@@ -49,7 +49,6 @@
 typedef struct NellyMoserDecodeContext {
     AVCodecContext* avctx;
-    AVFrame frame;
     AVLFG random_state;
     GetBitContext gb;
     float scale_bias;
@@ -136,15 +135,13 @@ static av_cold int decode_init(AVCodecContext * avctx) {
     avctx->channels = 1;
     avctx->channel_layout = AV_CH_LAYOUT_MONO;
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
     return 0;
 }
 static int decode_tag(AVCodecContext *avctx, void *data,
                       int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     const uint8_t *side=av_packet_get_side_data(avpkt, 'F', NULL);
     int buf_size = avpkt->size;
@@ -174,12 +171,12 @@ static int decode_tag(AVCodecContext *avctx, void *data,
         avctx->sample_rate= 11025*(blocks/2);
     /* get output buffer */
-    s->frame.nb_samples = NELLY_SAMPLES * blocks;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = NELLY_SAMPLES * blocks;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    samples_flt = (float *)s->frame.data[0];
+    samples_flt = (float *)frame->data[0];
     for (i=0 ; i<blocks ; i++) {
         nelly_decode_block(s, buf, samples_flt);
@@ -187,8 +184,7 @@ static int decode_tag(AVCodecContext *avctx, void *data,
         buf += NELLY_BLOCK_LEN;
     }
-    *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
+    *got_frame_ptr = 1;
     return buf_size;
 }
@@ -122,26 +122,12 @@ static int pcm_bluray_parse_header(AVCodecContext *avctx,
     return 0;
 }
-typedef struct PCMBRDecode {
-    AVFrame frame;
-} PCMBRDecode;
-static av_cold int pcm_bluray_decode_init(AVCodecContext * avctx)
-{
-    PCMBRDecode *s = avctx->priv_data;
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-    return 0;
-}
 static int pcm_bluray_decode_frame(AVCodecContext *avctx, void *data,
                                    int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *src = avpkt->data;
     int buf_size = avpkt->size;
-    PCMBRDecode *s = avctx->priv_data;
     GetByteContext gb;
     int num_source_channels, channel, retval;
     int sample_size, samples;
@@ -166,13 +152,13 @@ static int pcm_bluray_decode_frame(AVCodecContext *avctx, void *data,
     samples = buf_size / sample_size;
     /* get output buffer */
-    s->frame.nb_samples = samples;
-    if ((retval = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = samples;
+    if ((retval = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return retval;
     }
-    dst16 = (int16_t *)s->frame.data[0];
-    dst32 = (int32_t *)s->frame.data[0];
+    dst16 = (int16_t *)frame->data[0];
+    dst32 = (int32_t *)frame->data[0];
     if (samples) {
         switch (avctx->channel_layout) {
@@ -306,8 +292,7 @@ static int pcm_bluray_decode_frame(AVCodecContext *avctx, void *data,
         }
     }
-    *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
+    *got_frame_ptr = 1;
     retval = bytestream2_tell(&gb);
     if (avctx->debug & FF_DEBUG_BITSTREAM)
@@ -320,8 +305,6 @@ AVCodec ff_pcm_bluray_decoder = {
     .name = "pcm_bluray",
     .type = AVMEDIA_TYPE_AUDIO,
     .id = AV_CODEC_ID_PCM_BLURAY,
-    .priv_data_size = sizeof(PCMBRDecode),
-    .init = pcm_bluray_decode_init,
     .decode = pcm_bluray_decode_frame,
     .capabilities = CODEC_CAP_DR1,
     .sample_fmts = (const enum AVSampleFormat[]){
@@ -53,7 +53,6 @@ typedef enum {
 } qcelp_packet_rate;
 typedef struct {
-    AVFrame avframe;
     GetBitContext gb;
     qcelp_packet_rate bitrate;
     QCELPFrame frame; /**< unpacked data frame */
@@ -97,9 +96,6 @@ static av_cold int qcelp_decode_init(AVCodecContext *avctx)
     for (i = 0; i < 10; i++)
         q->prev_lspf[i] = (i + 1) / 11.;
-    avcodec_get_frame_defaults(&q->avframe);
-    avctx->coded_frame = &q->avframe;
     return 0;
 }
@@ -690,6 +686,7 @@ static int qcelp_decode_frame(AVCodecContext *avctx, void *data,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     QCELPContext *q = avctx->priv_data;
+    AVFrame *frame = data;
     float *outbuffer;
     int i, ret;
     float quantized_lspf[10], lpc[10];
@@ -697,12 +694,12 @@ static int qcelp_decode_frame(AVCodecContext *avctx, void *data,
     float *formant_mem;
     /* get output buffer */
-    q->avframe.nb_samples = 160;
-    if ((ret = ff_get_buffer(avctx, &q->avframe)) < 0) {
+    frame->nb_samples = 160;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    outbuffer = (float *)q->avframe.data[0];
+    outbuffer = (float *)frame->data[0];
     if ((q->bitrate = determine_bitrate(avctx, buf_size, &buf)) == I_F_Q) {
         warn_insufficient_frame_quality(avctx, "bitrate cannot be determined.");
@@ -785,8 +782,7 @@ erasure:
     memcpy(q->prev_lspf, quantized_lspf, sizeof(q->prev_lspf));
     q->prev_bitrate = q->bitrate;
-    *got_frame_ptr = 1;
-    *(AVFrame *)data = q->avframe;
+    *got_frame_ptr = 1;
     return buf_size;
 }