* commit '5d5c248c3df30fa91a8dde639618c985b9a11c53':
  s302m: decode directly to the user-provided AVFrame
  ra288: decode directly to the user-provided AVFrame
  ra144: decode directly to the user-provided AVFrame
  ralf: decode directly to the user-provided AVFrame
  qdm2: decode directly to the user-provided AVFrame

Merged-by: Michael Niedermayer <michaelni@gmx.at>
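All five changes follow the same pattern: the AVFrame embedded in each decoder's private context goes away, and the decode callback instead fills the AVFrame the caller passes in through the data pointer, allocating it with ff_get_buffer(). Below is a minimal sketch of the resulting callback shape, assuming a signed 16-bit decoder; the names prefixed with sketch_/SKETCH_ and the silence-filling body are invented for illustration and do not appear in any file touched by this merge.

    /* Hypothetical decoder callback; sketch_/SKETCH_ names are invented for
     * illustration and are not part of the merged files. */
    #include <stdint.h>
    #include <string.h>

    #include "avcodec.h"
    #include "internal.h"

    #define SKETCH_SAMPLES_PER_PACKET 1024    /* made-up fixed frame size */

    static int sketch_decode_frame(AVCodecContext *avctx, void *data,
                                   int *got_frame_ptr, AVPacket *avpkt)
    {
        AVFrame *frame = data;        /* the caller-provided output frame */
        int16_t *samples;
        int ret;

        /* Size the frame and let the generic code allocate its buffers;
         * previously this filled an AVFrame stored in the codec's priv_data. */
        frame->nb_samples = SKETCH_SAMPLES_PER_PACKET;
        if ((ret = ff_get_buffer(avctx, frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }

        /* Decode straight into the caller's buffer; writing silence stands in
         * for real bitstream decoding here (assumes AV_SAMPLE_FMT_S16). */
        samples = (int16_t *)frame->data[0];
        memset(samples, 0, frame->nb_samples * avctx->channels * sizeof(*samples));

        *got_frame_ptr = 1;   /* no trailing "*(AVFrame *)data = s->frame;" copy */
        return avpkt->size;
    }

Because the samples land directly in the caller's frame, the trailing "*(AVFrame *)data = ...->frame;" copy and the avcodec_get_frame_defaults()/coded_frame bookkeeping in each decode_init() become unnecessary, which is what the removed lines in the hunks below reflect.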
@@ -130,8 +130,6 @@ typedef struct {
  * QDM2 decoder context
  */
 typedef struct {
-    AVFrame frame;
-
     /// Parameters from codec header, do not change during playback
     int nb_channels;            ///< number of channels
     int channels;               ///< number of channels
@@ -1879,9 +1877,6 @@ static av_cold int qdm2_decode_init(AVCodecContext *avctx)
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
@@ -1962,6 +1957,7 @@ static int qdm2_decode (QDM2Context *q, const uint8_t *in, int16_t *out)
 static int qdm2_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     QDM2Context *s = avctx->priv_data;
@@ -1974,12 +1970,12 @@ static int qdm2_decode_frame(AVCodecContext *avctx, void *data,
         return -1;
 
     /* get output buffer */
-    s->frame.nb_samples = 16 * s->frame_size;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = 16 * s->frame_size;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    out = (int16_t *)s->frame.data[0];
+    out = (int16_t *)frame->data[0];
 
     for (i = 0; i < 16; i++) {
         if (qdm2_decode(s, buf, out) < 0)
@@ -1987,8 +1983,7 @@ static int qdm2_decode_frame(AVCodecContext *avctx, void *data,
         out += s->channels * s->frame_size;
     }
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = s->frame;
+    *got_frame_ptr = 1;
 
     return s->checksum_size;
 }
@@ -35,7 +35,6 @@
 typedef struct RA144Context {
     AVCodecContext *avctx;
-    AVFrame frame;
 
     LPCContext lpc_ctx;
     AudioFrameQueue afq;
     int last_frame;
@@ -43,9 +43,6 @@ static av_cold int ra144_decode_init(AVCodecContext * avctx)
     avctx->channel_layout = AV_CH_LAYOUT_MONO;
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
-    avcodec_get_frame_defaults(&ractx->frame);
-    avctx->coded_frame = &ractx->frame;
-
     return 0;
 }
@@ -65,6 +62,7 @@ static void do_output_subblock(RA144Context *ractx, const uint16_t *lpc_coefs,
 static int ra144_decode_frame(AVCodecContext * avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     static const uint8_t sizes[LPC_ORDER] = {6, 5, 5, 4, 4, 3, 3, 3, 3, 2};
@@ -80,12 +78,12 @@ static int ra144_decode_frame(AVCodecContext * avctx, void *data,
     GetBitContext gb;
 
     /* get output buffer */
-    ractx->frame.nb_samples = NBLOCKS * BLOCKSIZE;
-    if ((ret = ff_get_buffer(avctx, &ractx->frame)) < 0) {
+    frame->nb_samples = NBLOCKS * BLOCKSIZE;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    samples = (int16_t *)ractx->frame.data[0];
+    samples = (int16_t *)frame->data[0];
 
     if(buf_size < FRAMESIZE) {
         av_log(avctx, AV_LOG_ERROR,
@@ -124,8 +122,7 @@ static int ra144_decode_frame(AVCodecContext * avctx, void *data,
     FFSWAP(unsigned int *, ractx->lpc_coef[0], ractx->lpc_coef[1]);
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = ractx->frame;
+    *got_frame_ptr = 1;
 
     return FRAMESIZE;
 }
@@ -38,7 +38,6 @@
 #define RA288_BLOCKS_PER_FRAME 32
 
 typedef struct {
-    AVFrame frame;
     AVFloatDSPContext fdsp;
     DECLARE_ALIGNED(32, float, sp_lpc)[FFALIGN(36, 16)];   ///< LPC coefficients for speech data (spec: A)
     DECLARE_ALIGNED(32, float, gain_lpc)[FFALIGN(10, 16)]; ///< LPC coefficients for gain (spec: GB)
@@ -75,9 +74,6 @@ static av_cold int ra288_decode_init(AVCodecContext *avctx)
     avpriv_float_dsp_init(&ractx->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
 
-    avcodec_get_frame_defaults(&ractx->frame);
-    avctx->coded_frame = &ractx->frame;
-
     return 0;
 }
@@ -185,6 +181,7 @@ static void backward_filter(RA288Context *ractx,
 static int ra288_decode_frame(AVCodecContext * avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     float *out;
@@ -200,12 +197,12 @@ static int ra288_decode_frame(AVCodecContext * avctx, void *data,
     }
 
     /* get output buffer */
-    ractx->frame.nb_samples = RA288_BLOCK_SIZE * RA288_BLOCKS_PER_FRAME;
-    if ((ret = ff_get_buffer(avctx, &ractx->frame)) < 0) {
+    frame->nb_samples = RA288_BLOCK_SIZE * RA288_BLOCKS_PER_FRAME;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    out = (float *)ractx->frame.data[0];
+    out = (float *)frame->data[0];
 
     init_get_bits(&gb, buf, avctx->block_align * 8);
@@ -227,8 +224,7 @@ static int ra288_decode_frame(AVCodecContext * avctx, void *data,
         }
     }
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = ractx->frame;
+    *got_frame_ptr = 1;
 
     return avctx->block_align;
 }
@@ -49,8 +49,6 @@ typedef struct VLCSet {
 #define RALF_MAX_PKT_SIZE 8192
 
 typedef struct RALFContext {
-    AVFrame frame;
-
     int version;
     int max_frame_size;
     VLCSet sets[3];
@@ -154,9 +152,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
     avctx->channel_layout = (avctx->channels == 2) ? AV_CH_LAYOUT_STEREO
                                                    : AV_CH_LAYOUT_MONO;
 
-    avcodec_get_frame_defaults(&ctx->frame);
-    avctx->coded_frame = &ctx->frame;
-
     ctx->max_frame_size = AV_RB32(avctx->extradata + 16);
     if (ctx->max_frame_size > (1 << 20) || !ctx->max_frame_size) {
         av_log(avctx, AV_LOG_ERROR, "invalid frame size %d\n",
@@ -426,6 +421,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
                         AVPacket *avpkt)
 {
     RALFContext *ctx = avctx->priv_data;
+    AVFrame *frame = data;
     int16_t *samples0;
     int16_t *samples1;
     int ret;
@@ -463,13 +459,13 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
         src_size = avpkt->size;
     }
 
-    ctx->frame.nb_samples = ctx->max_frame_size;
-    if ((ret = ff_get_buffer(avctx, &ctx->frame)) < 0) {
+    frame->nb_samples = ctx->max_frame_size;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "Me fail get_buffer()? That's unpossible!\n");
         return ret;
     }
-    samples0 = (int16_t *)ctx->frame.data[0];
-    samples1 = (int16_t *)ctx->frame.data[1];
+    samples0 = (int16_t *)frame->data[0];
+    samples1 = (int16_t *)frame->data[1];
 
     if (src_size < 5) {
         av_log(avctx, AV_LOG_ERROR, "too short packets are too short!\n");
@@ -511,9 +507,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
         bytes_left -= ctx->block_size[i];
     }
 
-    ctx->frame.nb_samples = ctx->sample_offset;
-    *got_frame_ptr  = ctx->sample_offset > 0;
-    *(AVFrame*)data = ctx->frame;
+    frame->nb_samples = ctx->sample_offset;
+    *got_frame_ptr    = ctx->sample_offset > 0;
 
     return avpkt->size;
 }
@@ -28,10 +28,6 @@
 #define AES3_HEADER_LEN 4
 
-typedef struct S302MDecodeContext {
-    AVFrame frame;
-} S302MDecodeContext;
-
 static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
                                     int buf_size)
 {
@@ -95,7 +91,7 @@ static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
 static int s302m_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
 {
-    S302MDecodeContext *s = avctx->priv_data;
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     int block_size, ret;
@@ -109,16 +105,16 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
     /* get output buffer */
     block_size = (avctx->bits_per_coded_sample + 4) / 4;
-    s->frame.nb_samples = 2 * (buf_size / block_size) / avctx->channels;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = 2 * (buf_size / block_size) / avctx->channels;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
 
-    buf_size = (s->frame.nb_samples * avctx->channels / 2) * block_size;
+    buf_size = (frame->nb_samples * avctx->channels / 2) * block_size;
 
     if (avctx->bits_per_coded_sample == 24) {
-        uint32_t *o = (uint32_t *)s->frame.data[0];
+        uint32_t *o = (uint32_t *)frame->data[0];
         for (; buf_size > 6; buf_size -= 7) {
             *o++ = (ff_reverse[buf[2]] << 24) |
                    (ff_reverse[buf[1]] << 16) |
@@ -130,7 +126,7 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
             buf += 7;
         }
     } else if (avctx->bits_per_coded_sample == 20) {
-        uint32_t *o = (uint32_t *)s->frame.data[0];
+        uint32_t *o = (uint32_t *)frame->data[0];
         for (; buf_size > 5; buf_size -= 6) {
             *o++ = (ff_reverse[buf[2] & 0xf0] << 28) |
                    (ff_reverse[buf[1]] << 20) |
@@ -141,7 +137,7 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
             buf += 6;
         }
     } else {
-        uint16_t *o = (uint16_t *)s->frame.data[0];
+        uint16_t *o = (uint16_t *)frame->data[0];
         for (; buf_size > 4; buf_size -= 5) {
             *o++ = (ff_reverse[buf[1]] << 8) |
                     ff_reverse[buf[0]];
@@ -152,29 +148,15 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
         }
     }
 
-    *got_frame_ptr   = 1;
-    *(AVFrame *)data = s->frame;
+    *got_frame_ptr = 1;
 
     return avpkt->size;
 }
 
-static int s302m_decode_init(AVCodecContext *avctx)
-{
-    S302MDecodeContext *s = avctx->priv_data;
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
-    return 0;
-}
-
 AVCodec ff_s302m_decoder = {
     .name           = "s302m",
     .type           = AVMEDIA_TYPE_AUDIO,
     .id             = AV_CODEC_ID_S302M,
-    .priv_data_size = sizeof(S302MDecodeContext),
-    .init           = s302m_decode_init,
     .decode         = s302m_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("SMPTE 302M"),