* qatar/master:
  adpcm: split ADPCM encoders and decoders into separate files.
  doc/avconv: fix typo.
  rv34: check that subsequent slices have the same type as first one.
  smacker demuxer: handle possible av_realloc() failure.
  lavfi: add split filter from soc.
  lavfi: add showinfo filter
  libxavs: add private options corresponding to deprecated global options

Conflicts:
    Changelog
    libavcodec/adpcm.c
    libavfilter/avfilter.h
    libavfilter/vf_showinfo.c
    libavfilter/vf_split.c
    libavformat/smacker.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
@@ -30,7 +30,7 @@ As a general rule, options are applied to the next specified
 file. Therefore, order is important, and you can have the same
 option on the command line multiple times. Each occurrence is
 then applied to the next input or output file.
-Exceptions from this rule are the global options (e.g. vebosity level),
+Exceptions from this rule are the global options (e.g. verbosity level),
 which should be specified first.
 @itemize
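To illustrate the rule this documentation hunk describes (the file names and bitrate values below are hypothetical, chosen only for illustration): a global option such as the log level is given first, while each per-file option applies to the next input or output file on the command line.

    avconv -loglevel warning -i input.avi -b 64k first.avi -b 128k second.avi

Here -loglevel warning is global, -b 64k applies only to first.avi, and -b 128k applies only to second.avi.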
@@ -499,10 +499,10 @@ OBJS-$(CONFIG_PCM_U32LE_ENCODER) += pcm.o
 OBJS-$(CONFIG_PCM_ZORK_DECODER) += pcm.o
 OBJS-$(CONFIG_PCM_ZORK_ENCODER) += pcm.o
-OBJS-$(CONFIG_ADPCM_4XM_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_4XM_DECODER) += adpcm.o adpcm_data.o
 OBJS-$(CONFIG_ADPCM_ADX_DECODER) += adxdec.o
 OBJS-$(CONFIG_ADPCM_ADX_ENCODER) += adxenc.o
-OBJS-$(CONFIG_ADPCM_CT_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_CT_DECODER) += adpcm.o adpcm_data.o
 OBJS-$(CONFIG_ADPCM_EA_DECODER) += adpcm.o
 OBJS-$(CONFIG_ADPCM_EA_MAXIS_XA_DECODER) += adpcm.o
 OBJS-$(CONFIG_ADPCM_EA_R1_DECODER) += adpcm.o
@@ -513,29 +513,29 @@ OBJS-$(CONFIG_ADPCM_G722_DECODER) += g722.o
 OBJS-$(CONFIG_ADPCM_G722_ENCODER) += g722.o
 OBJS-$(CONFIG_ADPCM_G726_DECODER) += g726.o
 OBJS-$(CONFIG_ADPCM_G726_ENCODER) += g726.o
-OBJS-$(CONFIG_ADPCM_IMA_AMV_DECODER) += adpcm.o
-OBJS-$(CONFIG_ADPCM_IMA_DK3_DECODER) += adpcm.o
-OBJS-$(CONFIG_ADPCM_IMA_DK4_DECODER) += adpcm.o
-OBJS-$(CONFIG_ADPCM_IMA_EA_EACS_DECODER) += adpcm.o
-OBJS-$(CONFIG_ADPCM_IMA_EA_SEAD_DECODER) += adpcm.o
-OBJS-$(CONFIG_ADPCM_IMA_ISS_DECODER) += adpcm.o
-OBJS-$(CONFIG_ADPCM_IMA_QT_DECODER) += adpcm.o
-OBJS-$(CONFIG_ADPCM_IMA_QT_ENCODER) += adpcm.o
-OBJS-$(CONFIG_ADPCM_IMA_SMJPEG_DECODER) += adpcm.o
-OBJS-$(CONFIG_ADPCM_IMA_WAV_DECODER) += adpcm.o
-OBJS-$(CONFIG_ADPCM_IMA_WAV_ENCODER) += adpcm.o
-OBJS-$(CONFIG_ADPCM_IMA_WS_DECODER) += adpcm.o
-OBJS-$(CONFIG_ADPCM_MS_DECODER) += adpcm.o
-OBJS-$(CONFIG_ADPCM_MS_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_AMV_DECODER) += adpcm.o adpcm_data.o
+OBJS-$(CONFIG_ADPCM_IMA_DK3_DECODER) += adpcm.o adpcm_data.o
+OBJS-$(CONFIG_ADPCM_IMA_DK4_DECODER) += adpcm.o adpcm_data.o
+OBJS-$(CONFIG_ADPCM_IMA_EA_EACS_DECODER) += adpcm.o adpcm_data.o
+OBJS-$(CONFIG_ADPCM_IMA_EA_SEAD_DECODER) += adpcm.o adpcm_data.o
+OBJS-$(CONFIG_ADPCM_IMA_ISS_DECODER) += adpcm.o adpcm_data.o
+OBJS-$(CONFIG_ADPCM_IMA_QT_DECODER) += adpcm.o adpcm_data.o
+OBJS-$(CONFIG_ADPCM_IMA_QT_ENCODER) += adpcmenc.o adpcm_data.o
+OBJS-$(CONFIG_ADPCM_IMA_SMJPEG_DECODER) += adpcm.o adpcm_data.o
+OBJS-$(CONFIG_ADPCM_IMA_WAV_DECODER) += adpcm.o adpcm_data.o
+OBJS-$(CONFIG_ADPCM_IMA_WAV_ENCODER) += adpcmenc.o adpcm_data.o
+OBJS-$(CONFIG_ADPCM_IMA_WS_DECODER) += adpcm.o adpcm_data.o
+OBJS-$(CONFIG_ADPCM_MS_DECODER) += adpcm.o adpcm_data.o
+OBJS-$(CONFIG_ADPCM_MS_ENCODER) += adpcmenc.o adpcm_data.o
 OBJS-$(CONFIG_ADPCM_SBPRO_2_DECODER) += adpcm.o
 OBJS-$(CONFIG_ADPCM_SBPRO_3_DECODER) += adpcm.o
 OBJS-$(CONFIG_ADPCM_SBPRO_4_DECODER) += adpcm.o
-OBJS-$(CONFIG_ADPCM_SWF_DECODER) += adpcm.o
-OBJS-$(CONFIG_ADPCM_SWF_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_SWF_DECODER) += adpcm.o adpcm_data.o
+OBJS-$(CONFIG_ADPCM_SWF_ENCODER) += adpcmenc.o adpcm_data.o
 OBJS-$(CONFIG_ADPCM_THP_DECODER) += adpcm.o
 OBJS-$(CONFIG_ADPCM_XA_DECODER) += adpcm.o
-OBJS-$(CONFIG_ADPCM_YAMAHA_DECODER) += adpcm.o
-OBJS-$(CONFIG_ADPCM_YAMAHA_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_YAMAHA_DECODER) += adpcm.o adpcm_data.o
+OBJS-$(CONFIG_ADPCM_YAMAHA_ENCODER) += adpcmenc.o adpcm_data.o
 # libavformat dependencies
 OBJS-$(CONFIG_ADTS_MUXER) += mpeg4audio.o
@@ -1,5 +1,4 @@
 /*
- * ADPCM codecs
  * Copyright (c) 2001-2003 The ffmpeg Project
  *
  * This file is part of FFmpeg.
@@ -22,10 +21,12 @@
 #include "get_bits.h"
 #include "put_bits.h"
 #include "bytestream.h"
+#include "adpcm.h"
+#include "adpcm_data.h"
 /**
  * @file
- * ADPCM codecs.
+ * ADPCM decoders
  * First version by Francois Revol (revol@free.fr)
  * Fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
  * by Mike Melanson (melanson@pcisys.net)
@@ -54,48 +55,6 @@
  * readstr http://www.geocities.co.jp/Playtown/2004/
  */
-#define BLKSIZE 1024
-/* step_table[] and index_table[] are from the ADPCM reference source */
-/* This is the index table: */
-static const int index_table[16] = {
-    -1, -1, -1, -1, 2, 4, 6, 8,
-    -1, -1, -1, -1, 2, 4, 6, 8,
-};
-/**
- * This is the step table. Note that many programs use slight deviations from
- * this table, but such deviations are negligible:
- */
-static const int step_table[89] = {
-    7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
-    19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
-    50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
-    130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
-    337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
-    876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
-    2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
-    5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
-    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
-};
-/* These are for MS-ADPCM */
-/* AdaptationTable[], AdaptCoeff1[], and AdaptCoeff2[] are from libsndfile */
-static const int AdaptationTable[] = {
-    230, 230, 230, 230, 307, 409, 512, 614,
-    768, 614, 512, 409, 307, 230, 230, 230
-};
-/** Divided by 4 to fit in 8-bit integers */
-static const uint8_t AdaptCoeff1[] = {
-    64, 128, 0, 48, 60, 115, 98
-};
-/** Divided by 4 to fit in 8-bit integers */
-static const int8_t AdaptCoeff2[] = {
-    0, -64, 0, 16, 0, -52, -58
-};
 /* These are for CD-ROM XA ADPCM */
 static const int xa_adpcm_table[5][2] = {
     { 0, 0 },
@@ -118,668 +77,15 @@ static const int swf_index_tables[4][16] = {
 /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
 };
-static const int yamaha_indexscale[] = {
-    230, 230, 230, 230, 307, 409, 512, 614,
-    230, 230, 230, 230, 307, 409, 512, 614
-};
-static const int yamaha_difflookup[] = {
-    1, 3, 5, 7, 9, 11, 13, 15,
-    -1, -3, -5, -7, -9, -11, -13, -15
-};
 /* end of tables */
-typedef struct ADPCMChannelStatus {
-    int predictor;
-    short int step_index;
-    int step;
-    /* for encoding */
-    int prev_sample;
-    /* MS version */
-    short sample1;
-    short sample2;
-    int coeff1;
-    int coeff2;
-    int idelta;
-} ADPCMChannelStatus;
-typedef struct TrellisPath {
-    int nibble;
-    int prev;
-} TrellisPath;
-typedef struct TrellisNode {
-    uint32_t ssd;
-    int path;
-    int sample1;
-    int sample2;
-    int step;
-} TrellisNode;
-typedef struct ADPCMContext {
+typedef struct ADPCMDecodeContext {
     ADPCMChannelStatus status[6];
-    TrellisPath *paths;
-    TrellisNode *node_buf;
-    TrellisNode **nodep_buf;
-    uint8_t *trellis_hash;
-} ADPCMContext;
-#define FREEZE_INTERVAL 128
-/* XXX: implement encoding */
-#if CONFIG_ENCODERS
-static av_cold int adpcm_encode_init(AVCodecContext *avctx)
-{
-    ADPCMContext *s = avctx->priv_data;
-    uint8_t *extradata;
-    int i;
-    if (avctx->channels > 2)
-        return -1; /* only stereo or mono =) */
-    if(avctx->trellis && (unsigned)avctx->trellis > 16U){
-        av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
-        return -1;
-    }
-    if (avctx->trellis) {
-        int frontier = 1 << avctx->trellis;
-        int max_paths = frontier * FREEZE_INTERVAL;
-        FF_ALLOC_OR_GOTO(avctx, s->paths, max_paths * sizeof(*s->paths), error);
-        FF_ALLOC_OR_GOTO(avctx, s->node_buf, 2 * frontier * sizeof(*s->node_buf), error);
-        FF_ALLOC_OR_GOTO(avctx, s->nodep_buf, 2 * frontier * sizeof(*s->nodep_buf), error);
-        FF_ALLOC_OR_GOTO(avctx, s->trellis_hash, 65536 * sizeof(*s->trellis_hash), error);
-    }
-    switch(avctx->codec->id) {
-    case CODEC_ID_ADPCM_IMA_WAV:
-        avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1; /* each 16 bits sample gives one nibble */
-        /* and we have 4 bytes per channel overhead */
-        avctx->block_align = BLKSIZE;
-        avctx->bits_per_coded_sample = 4;
-        /* seems frame_size isn't taken into account... have to buffer the samples :-( */
-        break;
-    case CODEC_ID_ADPCM_IMA_QT:
-        avctx->frame_size = 64;
-        avctx->block_align = 34 * avctx->channels;
-        break;
-    case CODEC_ID_ADPCM_MS:
-        avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2; /* each 16 bits sample gives one nibble */
-        /* and we have 7 bytes per channel overhead */
-        avctx->block_align = BLKSIZE;
-        avctx->bits_per_coded_sample = 4;
-        avctx->extradata_size = 32;
-        extradata = avctx->extradata = av_malloc(avctx->extradata_size);
-        if (!extradata)
-            return AVERROR(ENOMEM);
-        bytestream_put_le16(&extradata, avctx->frame_size);
-        bytestream_put_le16(&extradata, 7); /* wNumCoef */
-        for (i = 0; i < 7; i++) {
-            bytestream_put_le16(&extradata, AdaptCoeff1[i] * 4);
-            bytestream_put_le16(&extradata, AdaptCoeff2[i] * 4);
-        }
-        break;
-    case CODEC_ID_ADPCM_YAMAHA:
-        avctx->frame_size = BLKSIZE * avctx->channels;
-        avctx->block_align = BLKSIZE;
-        break;
-    case CODEC_ID_ADPCM_SWF:
-        if (avctx->sample_rate != 11025 &&
-            avctx->sample_rate != 22050 &&
-            avctx->sample_rate != 44100) {
-            av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, 22050 or 44100\n");
-            goto error;
-        }
-        avctx->frame_size = 512 * (avctx->sample_rate / 11025);
-        break;
-    default:
-        goto error;
-    }
-    avctx->coded_frame= avcodec_alloc_frame();
-    avctx->coded_frame->key_frame= 1;
-    return 0;
-error:
-    av_freep(&s->paths);
-    av_freep(&s->node_buf);
-    av_freep(&s->nodep_buf);
-    av_freep(&s->trellis_hash);
-    return -1;
-}
-static av_cold int adpcm_encode_close(AVCodecContext *avctx)
-{
-    ADPCMContext *s = avctx->priv_data;
-    av_freep(&avctx->coded_frame);
-    av_freep(&s->paths);
-    av_freep(&s->node_buf);
-    av_freep(&s->nodep_buf);
-    av_freep(&s->trellis_hash);
-    return 0;
-}
-static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, short sample)
-{
-    int delta = sample - c->prev_sample;
-    int nibble = FFMIN(7, abs(delta)*4/step_table[c->step_index]) + (delta<0)*8;
-    c->prev_sample += ((step_table[c->step_index] * yamaha_difflookup[nibble]) / 8);
-    c->prev_sample = av_clip_int16(c->prev_sample);
-    c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88);
-    return nibble;
-}
-static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c, short sample)
-{
-    int delta = sample - c->prev_sample;
-    int diff, step = step_table[c->step_index];
-    int nibble = 8*(delta < 0);
-    delta= abs(delta);
-    diff = delta + (step >> 3);
-    if (delta >= step) {
-        nibble |= 4;
-        delta -= step;
-    }
-    step >>= 1;
-    if (delta >= step) {
-        nibble |= 2;
-        delta -= step;
-    }
-    step >>= 1;
-    if (delta >= step) {
-        nibble |= 1;
-        delta -= step;
-    }
-    diff -= delta;
-    if (nibble & 8)
-        c->prev_sample -= diff;
-    else
-        c->prev_sample += diff;
-    c->prev_sample = av_clip_int16(c->prev_sample);
-    c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88);
-    return nibble;
-}
-static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample)
-{
-    int predictor, nibble, bias;
-    predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
-    nibble= sample - predictor;
-    if(nibble>=0) bias= c->idelta/2;
-    else bias=-c->idelta/2;
-    nibble= (nibble + bias) / c->idelta;
-    nibble= av_clip(nibble, -8, 7)&0x0F;
-    predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
-    c->sample2 = c->sample1;
-    c->sample1 = av_clip_int16(predictor);
-    c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
-    if (c->idelta < 16) c->idelta = 16;
-    return nibble;
-}
-static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample)
-{
-    int nibble, delta;
-    if(!c->step) {
-        c->predictor = 0;
-        c->step = 127;
-    }
-    delta = sample - c->predictor;
-    nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8;
-    c->predictor += ((c->step * yamaha_difflookup[nibble]) / 8);
-    c->predictor = av_clip_int16(c->predictor);
-    c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
-    c->step = av_clip(c->step, 127, 24567);
-    return nibble;
-}
-static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
-                                   uint8_t *dst, ADPCMChannelStatus *c, int n)
-{
-    //FIXME 6% faster if frontier is a compile-time constant
-    ADPCMContext *s = avctx->priv_data;
-    const int frontier = 1 << avctx->trellis;
-    const int stride = avctx->channels;
-    const int version = avctx->codec->id;
-    TrellisPath *paths = s->paths, *p;
-    TrellisNode *node_buf = s->node_buf;
-    TrellisNode **nodep_buf = s->nodep_buf;
-    TrellisNode **nodes = nodep_buf; // nodes[] is always sorted by .ssd
-    TrellisNode **nodes_next = nodep_buf + frontier;
-    int pathn = 0, froze = -1, i, j, k, generation = 0;
-    uint8_t *hash = s->trellis_hash;
-    memset(hash, 0xff, 65536 * sizeof(*hash));
-    memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
-    nodes[0] = node_buf + frontier;
-    nodes[0]->ssd = 0;
-    nodes[0]->path = 0;
-    nodes[0]->step = c->step_index;
-    nodes[0]->sample1 = c->sample1;
-    nodes[0]->sample2 = c->sample2;
-    if((version == CODEC_ID_ADPCM_IMA_WAV) || (version == CODEC_ID_ADPCM_IMA_QT) || (version == CODEC_ID_ADPCM_SWF))
-        nodes[0]->sample1 = c->prev_sample;
-    if(version == CODEC_ID_ADPCM_MS)
-        nodes[0]->step = c->idelta;
-    if(version == CODEC_ID_ADPCM_YAMAHA) {
-        if(c->step == 0) {
-            nodes[0]->step = 127;
-            nodes[0]->sample1 = 0;
-        } else {
-            nodes[0]->step = c->step;
-            nodes[0]->sample1 = c->predictor;
-        }
-    }
-    for(i=0; i<n; i++) {
-        TrellisNode *t = node_buf + frontier*(i&1);
-        TrellisNode **u;
-        int sample = samples[i*stride];
-        int heap_pos = 0;
-        memset(nodes_next, 0, frontier*sizeof(TrellisNode*));
-        for(j=0; j<frontier && nodes[j]; j++) {
-            // higher j have higher ssd already, so they're likely to yield a suboptimal next sample too
-            const int range = (j < frontier/2) ? 1 : 0;
-            const int step = nodes[j]->step;
-            int nidx;
-            if(version == CODEC_ID_ADPCM_MS) {
-                const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 64;
-                const int div = (sample - predictor) / step;
-                const int nmin = av_clip(div-range, -8, 6);
-                const int nmax = av_clip(div+range, -7, 7);
-                for(nidx=nmin; nidx<=nmax; nidx++) {
-                    const int nibble = nidx & 0xf;
-                    int dec_sample = predictor + nidx * step;
-#define STORE_NODE(NAME, STEP_INDEX)\
-                    int d;\
-                    uint32_t ssd;\
-                    int pos;\
-                    TrellisNode *u;\
-                    uint8_t *h;\
-                    dec_sample = av_clip_int16(dec_sample);\
-                    d = sample - dec_sample;\
-                    ssd = nodes[j]->ssd + d*d;\
-                    /* Check for wraparound, skip such samples completely. \
-                     * Note, changing ssd to a 64 bit variable would be \
-                     * simpler, avoiding this check, but it's slower on \
-                     * x86 32 bit at the moment. */\
-                    if (ssd < nodes[j]->ssd)\
-                        goto next_##NAME;\
-                    /* Collapse any two states with the same previous sample value. \
-                     * One could also distinguish states by step and by 2nd to last
-                     * sample, but the effects of that are negligible.
-                     * Since nodes in the previous generation are iterated
-                     * through a heap, they're roughly ordered from better to
-                     * worse, but not strictly ordered. Therefore, an earlier
-                     * node with the same sample value is better in most cases
-                     * (and thus the current is skipped), but not strictly
-                     * in all cases. Only skipping samples where ssd >=
-                     * ssd of the earlier node with the same sample gives
-                     * slightly worse quality, though, for some reason. */ \
-                    h = &hash[(uint16_t) dec_sample];\
-                    if (*h == generation)\
-                        goto next_##NAME;\
-                    if (heap_pos < frontier) {\
-                        pos = heap_pos++;\
-                    } else {\
-                        /* Try to replace one of the leaf nodes with the new \
-                         * one, but try a different slot each time. */\
-                        pos = (frontier >> 1) + (heap_pos & ((frontier >> 1) - 1));\
-                        if (ssd > nodes_next[pos]->ssd)\
-                            goto next_##NAME;\
-                        heap_pos++;\
-                    }\
-                    *h = generation;\
-                    u = nodes_next[pos];\
-                    if(!u) {\
-                        assert(pathn < FREEZE_INTERVAL<<avctx->trellis);\
-                        u = t++;\
-                        nodes_next[pos] = u;\
-                        u->path = pathn++;\
-                    }\
-                    u->ssd = ssd;\
-                    u->step = STEP_INDEX;\
-                    u->sample2 = nodes[j]->sample1;\
-                    u->sample1 = dec_sample;\
-                    paths[u->path].nibble = nibble;\
-                    paths[u->path].prev = nodes[j]->path;\
-                    /* Sift the newly inserted node up in the heap to \
-                     * restore the heap property. */\
-                    while (pos > 0) {\
-                        int parent = (pos - 1) >> 1;\
-                        if (nodes_next[parent]->ssd <= ssd)\
-                            break;\
-                        FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
-                        pos = parent;\
-                    }\
-                    next_##NAME:;
-                    STORE_NODE(ms, FFMAX(16, (AdaptationTable[nibble] * step) >> 8));
-                }
-            } else if((version == CODEC_ID_ADPCM_IMA_WAV)|| (version == CODEC_ID_ADPCM_IMA_QT)|| (version == CODEC_ID_ADPCM_SWF)) {
-#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
-                const int predictor = nodes[j]->sample1;\
-                const int div = (sample - predictor) * 4 / STEP_TABLE;\
-                int nmin = av_clip(div-range, -7, 6);\
-                int nmax = av_clip(div+range, -6, 7);\
-                if(nmin<=0) nmin--; /* distinguish -0 from +0 */\
-                if(nmax<0) nmax--;\
-                for(nidx=nmin; nidx<=nmax; nidx++) {\
-                    const int nibble = nidx<0 ? 7-nidx : nidx;\
-                    int dec_sample = predictor + (STEP_TABLE * yamaha_difflookup[nibble]) / 8;\
-                    STORE_NODE(NAME, STEP_INDEX);\
-                }
-                LOOP_NODES(ima, step_table[step], av_clip(step + index_table[nibble], 0, 88));
-            } else { //CODEC_ID_ADPCM_YAMAHA
-                LOOP_NODES(yamaha, step, av_clip((step * yamaha_indexscale[nibble]) >> 8, 127, 24567));
-#undef LOOP_NODES
-#undef STORE_NODE
-            }
-        }
-        u = nodes;
-        nodes = nodes_next;
-        nodes_next = u;
-        generation++;
-        if (generation == 255) {
-            memset(hash, 0xff, 65536 * sizeof(*hash));
-            generation = 0;
-        }
-        // prevent overflow
-        if(nodes[0]->ssd > (1<<28)) {
-            for(j=1; j<frontier && nodes[j]; j++)
-                nodes[j]->ssd -= nodes[0]->ssd;
-            nodes[0]->ssd = 0;
-        }
-        // merge old paths to save memory
-        if(i == froze + FREEZE_INTERVAL) {
-            p = &paths[nodes[0]->path];
-            for(k=i; k>froze; k--) {
-                dst[k] = p->nibble;
-                p = &paths[p->prev];
-            }
-            froze = i;
-            pathn = 0;
-            // other nodes might use paths that don't coincide with the frozen one.
-            // checking which nodes do so is too slow, so just kill them all.
-            // this also slightly improves quality, but I don't know why.
-            memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*));
-        }
-    }
-    p = &paths[nodes[0]->path];
-    for(i=n-1; i>froze; i--) {
-        dst[i] = p->nibble;
-        p = &paths[p->prev];
-    }
-    c->predictor = nodes[0]->sample1;
-    c->sample1 = nodes[0]->sample1;
-    c->sample2 = nodes[0]->sample2;
-    c->step_index = nodes[0]->step;
-    c->step = nodes[0]->step;
-    c->idelta = nodes[0]->step;
-}
-static int adpcm_encode_frame(AVCodecContext *avctx,
-                              unsigned char *frame, int buf_size, void *data)
-{
-    int n, i, st;
-    short *samples;
-    unsigned char *dst;
-    ADPCMContext *c = avctx->priv_data;
-    uint8_t *buf;
-    dst = frame;
-    samples = (short *)data;
-    st= avctx->channels == 2;
-    /* n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */
-    switch(avctx->codec->id) {
-    case CODEC_ID_ADPCM_IMA_WAV:
-        n = avctx->frame_size / 8;
-        c->status[0].prev_sample = (signed short)samples[0]; /* XXX */
-        /* c->status[0].step_index = 0; *//* XXX: not sure how to init the state machine */
-        bytestream_put_le16(&dst, c->status[0].prev_sample);
-        *dst++ = (unsigned char)c->status[0].step_index;
-        *dst++ = 0; /* unknown */
-        samples++;
-        if (avctx->channels == 2) {
-            c->status[1].prev_sample = (signed short)samples[0];
-            /* c->status[1].step_index = 0; */
-            bytestream_put_le16(&dst, c->status[1].prev_sample);
-            *dst++ = (unsigned char)c->status[1].step_index;
-            *dst++ = 0;
-            samples++;
-        }
-        /* stereo: 4 bytes (8 samples) for left, 4 bytes for right, 4 bytes left, ... */
-        if(avctx->trellis > 0) {
-            FF_ALLOC_OR_GOTO(avctx, buf, 2*n*8, error);
-            adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n*8);
-            if(avctx->channels == 2)
-                adpcm_compress_trellis(avctx, samples+1, buf + n*8, &c->status[1], n*8);
-            for(i=0; i<n; i++) {
-                *dst++ = buf[8*i+0] | (buf[8*i+1] << 4);
-                *dst++ = buf[8*i+2] | (buf[8*i+3] << 4);
-                *dst++ = buf[8*i+4] | (buf[8*i+5] << 4);
-                *dst++ = buf[8*i+6] | (buf[8*i+7] << 4);
-                if (avctx->channels == 2) {
-                    uint8_t *buf1 = buf + n*8;
-                    *dst++ = buf1[8*i+0] | (buf1[8*i+1] << 4);
-                    *dst++ = buf1[8*i+2] | (buf1[8*i+3] << 4);
-                    *dst++ = buf1[8*i+4] | (buf1[8*i+5] << 4);
-                    *dst++ = buf1[8*i+6] | (buf1[8*i+7] << 4);
-                }
-            }
-            av_free(buf);
-        } else
-        for (; n>0; n--) {
-            *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]);
-            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4;
-            dst++;
-            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]);
-            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4;
-            dst++;
-            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]);
-            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4;
-            dst++;
-            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]);
-            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4;
-            dst++;
-            /* right channel */
-            if (avctx->channels == 2) {
-                *dst = adpcm_ima_compress_sample(&c->status[1], samples[1]);
-                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4;
-                dst++;
-                *dst = adpcm_ima_compress_sample(&c->status[1], samples[5]);
-                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4;
-                dst++;
-                *dst = adpcm_ima_compress_sample(&c->status[1], samples[9]);
-                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4;
-                dst++;
-                *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]);
-                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4;
-                dst++;
-            }
-            samples += 8 * avctx->channels;
-        }
-        break;
-    case CODEC_ID_ADPCM_IMA_QT:
-    {
-        int ch, i;
-        PutBitContext pb;
-        init_put_bits(&pb, dst, buf_size*8);
-        for(ch=0; ch<avctx->channels; ch++){
-            put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7);
-            put_bits(&pb, 7, c->status[ch].step_index);
-            if(avctx->trellis > 0) {
-                uint8_t buf[64];
-                adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64);
-                for(i=0; i<64; i++)
-                    put_bits(&pb, 4, buf[i^1]);
-            } else {
-                for (i=0; i<64; i+=2){
-                    int t1, t2;
-                    t1 = adpcm_ima_qt_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]);
-                    t2 = adpcm_ima_qt_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]);
-                    put_bits(&pb, 4, t2);
-                    put_bits(&pb, 4, t1);
-                }
-            }
-        }
-        flush_put_bits(&pb);
-        dst += put_bits_count(&pb)>>3;
-        break;
-    }
-    case CODEC_ID_ADPCM_SWF:
-    {
-        int i;
-        PutBitContext pb;
-        init_put_bits(&pb, dst, buf_size*8);
-        n = avctx->frame_size-1;
-        //Store AdpcmCodeSize
-        put_bits(&pb, 2, 2); //Set 4bits flash adpcm format
-        //Init the encoder state
-        for(i=0; i<avctx->channels; i++){
-            c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63); // clip step so it fits 6 bits
-            put_sbits(&pb, 16, samples[i]);
-            put_bits(&pb, 6, c->status[i].step_index);
-            c->status[i].prev_sample = (signed short)samples[i];
-        }
-        if(avctx->trellis > 0) {
-            FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error);
-            adpcm_compress_trellis(avctx, samples+2, buf, &c->status[0], n);
-            if (avctx->channels == 2)
-                adpcm_compress_trellis(avctx, samples+3, buf+n, &c->status[1], n);
-            for(i=0; i<n; i++) {
-                put_bits(&pb, 4, buf[i]);
-                if (avctx->channels == 2)
-                    put_bits(&pb, 4, buf[n+i]);
-            }
-            av_free(buf);
-        } else {
-            for (i=1; i<avctx->frame_size; i++) {
-                put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels*i]));
-                if (avctx->channels == 2)
-                    put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2*i+1]));
-            }
-        }
-        flush_put_bits(&pb);
-        dst += put_bits_count(&pb)>>3;
-        break;
-    }
-    case CODEC_ID_ADPCM_MS:
-        for(i=0; i<avctx->channels; i++){
-            int predictor=0;
-            *dst++ = predictor;
-            c->status[i].coeff1 = AdaptCoeff1[predictor];
-            c->status[i].coeff2 = AdaptCoeff2[predictor];
-        }
-        for(i=0; i<avctx->channels; i++){
-            if (c->status[i].idelta < 16)
-                c->status[i].idelta = 16;
-            bytestream_put_le16(&dst, c->status[i].idelta);
-        }
-        for(i=0; i<avctx->channels; i++){
-            c->status[i].sample2= *samples++;
-        }
-        for(i=0; i<avctx->channels; i++){
-            c->status[i].sample1= *samples++;
-            bytestream_put_le16(&dst, c->status[i].sample1);
-        }
-        for(i=0; i<avctx->channels; i++)
-            bytestream_put_le16(&dst, c->status[i].sample2);
-        if(avctx->trellis > 0) {
-            int n = avctx->block_align - 7*avctx->channels;
-            FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error);
-            if(avctx->channels == 1) {
-                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
-                for(i=0; i<n; i+=2)
-                    *dst++ = (buf[i] << 4) | buf[i+1];
-            } else {
-                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
-                adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n);
-                for(i=0; i<n; i++)
-                    *dst++ = (buf[i] << 4) | buf[n+i];
-            }
-            av_free(buf);
-        } else
-        for(i=7*avctx->channels; i<avctx->block_align; i++) {
-            int nibble;
-            nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4;
-            nibble|= adpcm_ms_compress_sample(&c->status[st], *samples++);
-            *dst++ = nibble;
-        }
-        break;
-    case CODEC_ID_ADPCM_YAMAHA:
-        n = avctx->frame_size / 2;
-        if(avctx->trellis > 0) {
-            FF_ALLOC_OR_GOTO(avctx, buf, 2*n*2, error);
-            n *= 2;
-            if(avctx->channels == 1) {
-                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
-                for(i=0; i<n; i+=2)
-                    *dst++ = buf[i] | (buf[i+1] << 4);
-            } else {
-                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
-                adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n);
-                for(i=0; i<n; i++)
-                    *dst++ = buf[i] | (buf[n+i] << 4);
-            }
-            av_free(buf);
-        } else
-            for (n *= avctx->channels; n>0; n--) {
-                int nibble;
-                nibble = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
-                nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
-                *dst++ = nibble;
-            }
-        break;
-    default:
-    error:
-        return -1;
-    }
-    return dst - frame;
-}
-#endif //CONFIG_ENCODERS
+} ADPCMDecodeContext;
 static av_cold int adpcm_decode_init(AVCodecContext * avctx)
 {
-    ADPCMContext *c = avctx->priv_data;
+    ADPCMDecodeContext *c = avctx->priv_data;
     unsigned int max_channels = 2;
     switch(avctx->codec->id) {
@@ -823,8 +129,8 @@ static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble,
     int predictor;
     int sign, delta, diff, step;
-    step = step_table[c->step_index];
-    step_index = c->step_index + index_table[(unsigned)nibble];
+    step = ff_adpcm_step_table[c->step_index];
+    step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
     if (step_index < 0) step_index = 0;
     else if (step_index > 88) step_index = 88;
@@ -850,8 +156,8 @@ static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble,
     int predictor;
     int diff, step;
-    step = step_table[c->step_index];
-    step_index = c->step_index + index_table[nibble];
+    step = ff_adpcm_step_table[c->step_index];
+    step_index = c->step_index + ff_adpcm_index_table[nibble];
     step_index = av_clip(step_index, 0, 88);
     diff = step >> 3;
@@ -879,7 +185,7 @@ static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble)
     c->sample2 = c->sample1;
     c->sample1 = av_clip_int16(predictor);
-    c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
+    c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
     if (c->idelta < 16) c->idelta = 16;
     return c->sample1;
@@ -900,7 +206,7 @@ static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
     c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
     c->predictor = av_clip_int16(c->predictor);
     /* calculate new step and clamp it to range 511..32767 */
-    new_step = (AdaptationTable[nibble & 7] * c->step) >> 8;
+    new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
     c->step = av_clip(new_step, 511, 32767);
     return (short)c->predictor;
@@ -933,9 +239,9 @@ static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned c
         c->step = 127;
     }
-    c->predictor += (c->step * yamaha_difflookup[nibble]) / 8;
+    c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
     c->predictor = av_clip_int16(c->predictor);
-    c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
+    c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
     c->step = av_clip(c->step, 127, 24567);
     return c->predictor;
 }
@@ -1027,7 +333,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
-    ADPCMContext *c = avctx->priv_data;
+    ADPCMDecodeContext *c = avctx->priv_data;
     ADPCMChannelStatus *cs;
     int n, m, channel, i;
     int block_predictor[2];
@@ -1183,10 +489,10 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
         if (st){
             c->status[1].idelta = (int16_t)bytestream_get_le16(&src);
         }
-        c->status[0].coeff1 = AdaptCoeff1[block_predictor[0]];
-        c->status[0].coeff2 = AdaptCoeff2[block_predictor[0]];
-        c->status[1].coeff1 = AdaptCoeff1[block_predictor[1]];
-        c->status[1].coeff2 = AdaptCoeff2[block_predictor[1]];
+        c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor[0]];
+        c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor[0]];
+        c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor[1]];
+        c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor[1]];
         c->status[0].sample1 = bytestream_get_le16(&src);
         if (st) c->status[1].sample1 = bytestream_get_le16(&src);
@@ -1655,7 +961,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
         for (i = 0; i < avctx->channels; i++) {
             // similar to IMA adpcm
             int delta = get_bits(&gb, nb_bits);
-            int step = step_table[c->status[i].step_index];
+            int step = ff_adpcm_step_table[c->status[i].step_index];
             long vpdiff = 0; // vpdiff = (delta+0.5)*step/4
             int k = k0;
@@ -1774,44 +1080,18 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
 }
-#if CONFIG_ENCODERS
-#define ADPCM_ENCODER(id,name,long_name_) \
-AVCodec ff_ ## name ## _encoder = { \
-    #name, \
-    AVMEDIA_TYPE_AUDIO, \
-    id, \
-    sizeof(ADPCMContext), \
-    adpcm_encode_init, \
-    adpcm_encode_frame, \
-    adpcm_encode_close, \
-    NULL, \
-    .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, \
-    .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
-}
-#else
-#define ADPCM_ENCODER(id,name,long_name_)
-#endif
-#if CONFIG_DECODERS
 #define ADPCM_DECODER(id,name,long_name_) \
 AVCodec ff_ ## name ## _decoder = { \
     #name, \
     AVMEDIA_TYPE_AUDIO, \
     id, \
-    sizeof(ADPCMContext), \
+    sizeof(ADPCMDecodeContext), \
     adpcm_decode_init, \
     NULL, \
     NULL, \
     adpcm_decode_frame, \
     .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
 }
-#else
-#define ADPCM_DECODER(id,name,long_name_)
-#endif
-#define ADPCM_CODEC(id,name,long_name_) \
-    ADPCM_ENCODER(id,name,long_name_); ADPCM_DECODER(id,name,long_name_)
 /* Note: Do not forget to add new entries to the Makefile as well. */
 ADPCM_DECODER(CODEC_ID_ADPCM_4XM, adpcm_4xm, "ADPCM 4X Movie");
@@ -1828,15 +1108,15 @@ ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_SEAD, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_ISS, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
-ADPCM_CODEC  (CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime");
+ADPCM_DECODER(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime");
 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
-ADPCM_CODEC  (CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
+ADPCM_DECODER(CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws, "ADPCM IMA Westwood");
-ADPCM_CODEC  (CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft");
+ADPCM_DECODER(CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft");
 ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_2, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
 ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_3, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
 ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_4, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
-ADPCM_CODEC  (CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash");
+ADPCM_DECODER(CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash");
 ADPCM_DECODER(CODEC_ID_ADPCM_THP, adpcm_thp, "ADPCM Nintendo Gamecube THP");
 ADPCM_DECODER(CODEC_ID_ADPCM_XA, adpcm_xa, "ADPCM CDROM XA");
-ADPCM_CODEC  (CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");
+ADPCM_DECODER(CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2001-2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+/**
+ * @file
+ * ADPCM encoder/decoder common header.
+ */
+#ifndef AVCODEC_ADPCM_H
+#define AVCODEC_ADPCM_H
+#define BLKSIZE 1024
+typedef struct ADPCMChannelStatus {
+    int predictor;
+    short int step_index;
+    int step;
+    /* for encoding */
+    int prev_sample;
+    /* MS version */
+    short sample1;
+    short sample2;
+    int coeff1;
+    int coeff2;
+    int idelta;
+} ADPCMChannelStatus;
+#endif /* AVCODEC_ADPCM_H */
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2001-2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+/**
+ * @file
+ * ADPCM tables
+ */
+#include <stdint.h>
+/* ff_adpcm_step_table[] and ff_adpcm_index_table[] are from the ADPCM
+   reference source */
+/* This is the index table: */
+const int8_t ff_adpcm_index_table[16] = {
+    -1, -1, -1, -1, 2, 4, 6, 8,
+    -1, -1, -1, -1, 2, 4, 6, 8,
+};
+/**
+ * This is the step table. Note that many programs use slight deviations from
+ * this table, but such deviations are negligible:
+ */
+const int16_t ff_adpcm_step_table[89] = {
+    7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
+    19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
+    50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
+    130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
+    337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
+    876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
+    2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
+    5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
+    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
+};
+/* These are for MS-ADPCM */
+/* ff_adpcm_AdaptationTable[], ff_adpcm_AdaptCoeff1[], and
+   ff_adpcm_AdaptCoeff2[] are from libsndfile */
+const int16_t ff_adpcm_AdaptationTable[] = {
+    230, 230, 230, 230, 307, 409, 512, 614,
+    768, 614, 512, 409, 307, 230, 230, 230
+};
+/** Divided by 4 to fit in 8-bit integers */
+const uint8_t ff_adpcm_AdaptCoeff1[] = {
+    64, 128, 0, 48, 60, 115, 98
+};
+/** Divided by 4 to fit in 8-bit integers */
+const int8_t ff_adpcm_AdaptCoeff2[] = {
+    0, -64, 0, 16, 0, -52, -58
+};
+const int16_t ff_adpcm_yamaha_indexscale[] = {
+    230, 230, 230, 230, 307, 409, 512, 614,
+    230, 230, 230, 230, 307, 409, 512, 614
+};
+const int8_t ff_adpcm_yamaha_difflookup[] = {
+    1, 3, 5, 7, 9, 11, 13, 15,
+    -1, -3, -5, -7, -9, -11, -13, -15
+};
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2001-2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+/**
+ * @file
+ * ADPCM tables
+ */
+#ifndef AVCODEC_ADPCM_DATA_H
+#define AVCODEC_ADPCM_DATA_H
+extern const int8_t ff_adpcm_index_table[16];
+extern const int16_t ff_adpcm_step_table[89];
+extern const int16_t ff_adpcm_AdaptationTable[];
+extern const uint8_t ff_adpcm_AdaptCoeff1[];
+extern const int8_t ff_adpcm_AdaptCoeff2[];
+extern const int16_t ff_adpcm_yamaha_indexscale[];
+extern const int8_t ff_adpcm_yamaha_difflookup[];
+#endif /* AVCODEC_ADPCM_DATA_H */
| @@ -0,0 +1,691 @@ | |||||
| /* | |||||
| * Copyright (c) 2001-2003 The ffmpeg Project | |||||
| * | |||||
| * This file is part of FFmpeg. | |||||
| * | |||||
| * FFmpeg is free software; you can redistribute it and/or | |||||
| * modify it under the terms of the GNU Lesser General Public | |||||
| * License as published by the Free Software Foundation; either | |||||
| * version 2.1 of the License, or (at your option) any later version. | |||||
| * | |||||
| * FFmpeg is distributed in the hope that it will be useful, | |||||
| * but WITHOUT ANY WARRANTY; without even the implied warranty of | |||||
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |||||
| * Lesser General Public License for more details. | |||||
| * | |||||
| * You should have received a copy of the GNU Lesser General Public | |||||
| * License along with FFmpeg; if not, write to the Free Software | |||||
| * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |||||
| */ | |||||
| #include "avcodec.h" | |||||
| #include "get_bits.h" | |||||
| #include "put_bits.h" | |||||
| #include "bytestream.h" | |||||
| #include "adpcm.h" | |||||
| #include "adpcm_data.h" | |||||
| /** | |||||
| * @file | |||||
| * ADPCM encoders | |||||
| * First version by Francois Revol (revol@free.fr) | |||||
| * Fringe ADPCM codecs (e.g., DK3, DK4, Westwood) | |||||
| * by Mike Melanson (melanson@pcisys.net) | |||||
| * | |||||
| * Reference documents: | |||||
| * http://www.pcisys.net/~melanson/codecs/simpleaudio.html | |||||
| * http://www.geocities.com/SiliconValley/8682/aud3.txt | |||||
| * http://openquicktime.sourceforge.net/plugins.htm | |||||
| * XAnim sources (xa_codec.c) http://www.rasnaimaging.com/people/lapus/download.html | |||||
| * http://www.cs.ucla.edu/~leec/mediabench/applications.html | |||||
| * SoX source code http://home.sprynet.com/~cbagwell/sox.html | |||||
| */ | |||||
| typedef struct TrellisPath { | |||||
| int nibble; | |||||
| int prev; | |||||
| } TrellisPath; | |||||
| typedef struct TrellisNode { | |||||
| uint32_t ssd; | |||||
| int path; | |||||
| int sample1; | |||||
| int sample2; | |||||
| int step; | |||||
| } TrellisNode; | |||||
| typedef struct ADPCMEncodeContext { | |||||
| ADPCMChannelStatus status[6]; | |||||
| TrellisPath *paths; | |||||
| TrellisNode *node_buf; | |||||
| TrellisNode **nodep_buf; | |||||
| uint8_t *trellis_hash; | |||||
| } ADPCMEncodeContext; | |||||
| #define FREEZE_INTERVAL 128 | |||||
| static av_cold int adpcm_encode_init(AVCodecContext *avctx) | |||||
| { | |||||
| ADPCMEncodeContext *s = avctx->priv_data; | |||||
| uint8_t *extradata; | |||||
| int i; | |||||
| if (avctx->channels > 2) | |||||
| return -1; /* only stereo or mono =) */ | |||||
| if(avctx->trellis && (unsigned)avctx->trellis > 16U){ | |||||
| av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n"); | |||||
| return -1; | |||||
| } | |||||
| if (avctx->trellis) { | |||||
| int frontier = 1 << avctx->trellis; | |||||
| int max_paths = frontier * FREEZE_INTERVAL; | |||||
| FF_ALLOC_OR_GOTO(avctx, s->paths, max_paths * sizeof(*s->paths), error); | |||||
| FF_ALLOC_OR_GOTO(avctx, s->node_buf, 2 * frontier * sizeof(*s->node_buf), error); | |||||
| FF_ALLOC_OR_GOTO(avctx, s->nodep_buf, 2 * frontier * sizeof(*s->nodep_buf), error); | |||||
| FF_ALLOC_OR_GOTO(avctx, s->trellis_hash, 65536 * sizeof(*s->trellis_hash), error); | |||||
| } | |||||
| switch(avctx->codec->id) { | |||||
| case CODEC_ID_ADPCM_IMA_WAV: | |||||
| avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1; /* each 16 bits sample gives one nibble */ | |||||
| /* and we have 4 bytes per channel overhead */ | |||||
| avctx->block_align = BLKSIZE; | |||||
| avctx->bits_per_coded_sample = 4; | |||||
| /* seems frame_size isn't taken into account... have to buffer the samples :-( */ | |||||
| break; | |||||
| case CODEC_ID_ADPCM_IMA_QT: | |||||
| avctx->frame_size = 64; | |||||
| avctx->block_align = 34 * avctx->channels; | |||||
| break; | |||||
| case CODEC_ID_ADPCM_MS: | |||||
| avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2; /* each 16 bits sample gives one nibble */ | |||||
| /* and we have 7 bytes per channel overhead */ | |||||
| avctx->block_align = BLKSIZE; | |||||
| avctx->bits_per_coded_sample = 4; | |||||
| avctx->extradata_size = 32; | |||||
| extradata = avctx->extradata = av_malloc(avctx->extradata_size); | |||||
| if (!extradata) | |||||
| return AVERROR(ENOMEM); | |||||
| bytestream_put_le16(&extradata, avctx->frame_size); | |||||
| bytestream_put_le16(&extradata, 7); /* wNumCoef */ | |||||
| for (i = 0; i < 7; i++) { | |||||
| bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff1[i] * 4); | |||||
| bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff2[i] * 4); | |||||
| } | |||||
| break; | |||||
| case CODEC_ID_ADPCM_YAMAHA: | |||||
| avctx->frame_size = BLKSIZE * avctx->channels; | |||||
| avctx->block_align = BLKSIZE; | |||||
| break; | |||||
| case CODEC_ID_ADPCM_SWF: | |||||
| if (avctx->sample_rate != 11025 && | |||||
| avctx->sample_rate != 22050 && | |||||
| avctx->sample_rate != 44100) { | |||||
| av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, 22050 or 44100\n"); | |||||
| goto error; | |||||
| } | |||||
| avctx->frame_size = 512 * (avctx->sample_rate / 11025); | |||||
| break; | |||||
| default: | |||||
| goto error; | |||||
| } | |||||
| avctx->coded_frame= avcodec_alloc_frame(); | |||||
| avctx->coded_frame->key_frame= 1; | |||||
| return 0; | |||||
| error: | |||||
| av_freep(&s->paths); | |||||
| av_freep(&s->node_buf); | |||||
| av_freep(&s->nodep_buf); | |||||
| av_freep(&s->trellis_hash); | |||||
| return -1; | |||||
| } | |||||
| static av_cold int adpcm_encode_close(AVCodecContext *avctx) | |||||
| { | |||||
| ADPCMEncodeContext *s = avctx->priv_data; | |||||
| av_freep(&avctx->coded_frame); | |||||
| av_freep(&s->paths); | |||||
| av_freep(&s->node_buf); | |||||
| av_freep(&s->nodep_buf); | |||||
| av_freep(&s->trellis_hash); | |||||
| return 0; | |||||
| } | |||||
| static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, short sample) | |||||
| { | |||||
| int delta = sample - c->prev_sample; | |||||
| int nibble = FFMIN(7, abs(delta)*4/ff_adpcm_step_table[c->step_index]) + (delta<0)*8; | |||||
| c->prev_sample += ((ff_adpcm_step_table[c->step_index] * ff_adpcm_yamaha_difflookup[nibble]) / 8); | |||||
| c->prev_sample = av_clip_int16(c->prev_sample); | |||||
| c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88); | |||||
| return nibble; | |||||
| } | |||||
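The nibble produced above packs a sign flag in bit 3 and a 3-bit magnitude, and prev_sample is advanced with the same reconstruction a decoder will perform, so encoder and decoder state stay in sync. A minimal standalone sketch of that reconstruction (illustration only, not part of the codec; it uses the classic shift form, which matches the (2 * magnitude + 1) * step / 8 table form used above up to rounding):

    /* Reconstruct the signed delta a decoder derives from one IMA nibble,
     * given the current step size. */
    static int ima_nibble_to_delta(int nibble, int step)
    {
        int diff = step >> 3;               /* rounding term, always added */
        if (nibble & 4) diff += step;       /* magnitude bit 2             */
        if (nibble & 2) diff += step >> 1;  /* magnitude bit 1             */
        if (nibble & 1) diff += step >> 2;  /* magnitude bit 0             */
        return (nibble & 8) ? -diff : diff; /* bit 3 is the sign           */
    }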
| static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c, short sample) | |||||
| { | |||||
| int delta = sample - c->prev_sample; | |||||
| int diff, step = ff_adpcm_step_table[c->step_index]; | |||||
| int nibble = 8*(delta < 0); | |||||
| delta= abs(delta); | |||||
| diff = delta + (step >> 3); | |||||
| if (delta >= step) { | |||||
| nibble |= 4; | |||||
| delta -= step; | |||||
| } | |||||
| step >>= 1; | |||||
| if (delta >= step) { | |||||
| nibble |= 2; | |||||
| delta -= step; | |||||
| } | |||||
| step >>= 1; | |||||
| if (delta >= step) { | |||||
| nibble |= 1; | |||||
| delta -= step; | |||||
| } | |||||
| diff -= delta; | |||||
| if (nibble & 8) | |||||
| c->prev_sample -= diff; | |||||
| else | |||||
| c->prev_sample += diff; | |||||
| c->prev_sample = av_clip_int16(c->prev_sample); | |||||
| c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88); | |||||
| return nibble; | |||||
| } | |||||
| static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample) | |||||
| { | |||||
| int predictor, nibble, bias; | |||||
| predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64; | |||||
| nibble= sample - predictor; | |||||
| if(nibble>=0) bias= c->idelta/2; | |||||
| else bias=-c->idelta/2; | |||||
| nibble= (nibble + bias) / c->idelta; | |||||
| nibble= av_clip(nibble, -8, 7)&0x0F; | |||||
| predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta; | |||||
| c->sample2 = c->sample1; | |||||
| c->sample1 = av_clip_int16(predictor); | |||||
| c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8; | |||||
| if (c->idelta < 16) c->idelta = 16; | |||||
| return nibble; | |||||
| } | |||||
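MS ADPCM predicts each sample from the two previous decoded samples using one of seven fixed coefficient pairs (the same pairs that adpcm_encode_init() writes, scaled by 4, into the extradata), quantizes the residual by the adaptive step idelta and rescales idelta through ff_adpcm_AdaptationTable. A small self-contained illustration of one such step with made-up state, using coefficient set 0 (coeff1 = 64, coeff2 = 0, so the predictor is simply the previous sample):

    #include <stdio.h>

    int main(void)
    {
        int sample1 = 1000, sample2 = 900, idelta = 100;
        int sample  = 1100;                                 /* next input sample */
        int predictor = (sample1 * 64 + sample2 * 0) / 64;  /* = 1000            */
        int nibble    = (sample - predictor + idelta / 2) / idelta;  /* = 1      */
        int decoded   = predictor + nibble * idelta;                 /* = 1100   */
        idelta = (230 * idelta) >> 8;       /* ff_adpcm_AdaptationTable[1] = 230 */
        printf("nibble=%d decoded=%d new idelta=%d\n", nibble, decoded, idelta);
        return 0;
    }

The real routine additionally clips the nibble to [-8, 7] and keeps idelta at a minimum of 16, limits the toy numbers above never reach.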
| static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample) | |||||
| { | |||||
| int nibble, delta; | |||||
| if(!c->step) { | |||||
| c->predictor = 0; | |||||
| c->step = 127; | |||||
| } | |||||
| delta = sample - c->predictor; | |||||
| nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8; | |||||
| c->predictor += ((c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8); | |||||
| c->predictor = av_clip_int16(c->predictor); | |||||
| c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8; | |||||
| c->step = av_clip(c->step, 127, 24567); | |||||
| return nibble; | |||||
| } | |||||
| static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples, | |||||
| uint8_t *dst, ADPCMChannelStatus *c, int n) | |||||
| { | |||||
| //FIXME 6% faster if frontier is a compile-time constant | |||||
| ADPCMEncodeContext *s = avctx->priv_data; | |||||
| const int frontier = 1 << avctx->trellis; | |||||
| const int stride = avctx->channels; | |||||
| const int version = avctx->codec->id; | |||||
| TrellisPath *paths = s->paths, *p; | |||||
| TrellisNode *node_buf = s->node_buf; | |||||
| TrellisNode **nodep_buf = s->nodep_buf; | |||||
| TrellisNode **nodes = nodep_buf; // nodes[] is always sorted by .ssd | |||||
| TrellisNode **nodes_next = nodep_buf + frontier; | |||||
| int pathn = 0, froze = -1, i, j, k, generation = 0; | |||||
| uint8_t *hash = s->trellis_hash; | |||||
| memset(hash, 0xff, 65536 * sizeof(*hash)); | |||||
| memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf)); | |||||
| nodes[0] = node_buf + frontier; | |||||
| nodes[0]->ssd = 0; | |||||
| nodes[0]->path = 0; | |||||
| nodes[0]->step = c->step_index; | |||||
| nodes[0]->sample1 = c->sample1; | |||||
| nodes[0]->sample2 = c->sample2; | |||||
| if((version == CODEC_ID_ADPCM_IMA_WAV) || (version == CODEC_ID_ADPCM_IMA_QT) || (version == CODEC_ID_ADPCM_SWF)) | |||||
| nodes[0]->sample1 = c->prev_sample; | |||||
| if(version == CODEC_ID_ADPCM_MS) | |||||
| nodes[0]->step = c->idelta; | |||||
| if(version == CODEC_ID_ADPCM_YAMAHA) { | |||||
| if(c->step == 0) { | |||||
| nodes[0]->step = 127; | |||||
| nodes[0]->sample1 = 0; | |||||
| } else { | |||||
| nodes[0]->step = c->step; | |||||
| nodes[0]->sample1 = c->predictor; | |||||
| } | |||||
| } | |||||
| for(i=0; i<n; i++) { | |||||
| TrellisNode *t = node_buf + frontier*(i&1); | |||||
| TrellisNode **u; | |||||
| int sample = samples[i*stride]; | |||||
| int heap_pos = 0; | |||||
| memset(nodes_next, 0, frontier*sizeof(TrellisNode*)); | |||||
| for(j=0; j<frontier && nodes[j]; j++) { | |||||
| // nodes with higher j already have higher ssd, so they are likely to yield a suboptimal next sample too | |||||
| const int range = (j < frontier/2) ? 1 : 0; | |||||
| const int step = nodes[j]->step; | |||||
| int nidx; | |||||
| if(version == CODEC_ID_ADPCM_MS) { | |||||
| const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 64; | |||||
| const int div = (sample - predictor) / step; | |||||
| const int nmin = av_clip(div-range, -8, 6); | |||||
| const int nmax = av_clip(div+range, -7, 7); | |||||
| for(nidx=nmin; nidx<=nmax; nidx++) { | |||||
| const int nibble = nidx & 0xf; | |||||
| int dec_sample = predictor + nidx * step; | |||||
| #define STORE_NODE(NAME, STEP_INDEX)\ | |||||
| int d;\ | |||||
| uint32_t ssd;\ | |||||
| int pos;\ | |||||
| TrellisNode *u;\ | |||||
| uint8_t *h;\ | |||||
| dec_sample = av_clip_int16(dec_sample);\ | |||||
| d = sample - dec_sample;\ | |||||
| ssd = nodes[j]->ssd + d*d;\ | |||||
| /* Check for wraparound, skip such samples completely. \ | |||||
| * Note, changing ssd to a 64 bit variable would be \ | |||||
| * simpler, avoiding this check, but it's slower on \ | |||||
| * x86 32 bit at the moment. */\ | |||||
| if (ssd < nodes[j]->ssd)\ | |||||
| goto next_##NAME;\ | |||||
| /* Collapse any two states with the same previous sample value. \ | |||||
| * One could also distinguish states by step and by 2nd to last | |||||
| * sample, but the effects of that are negligible. | |||||
| * Since nodes in the previous generation are iterated | |||||
| * through a heap, they're roughly ordered from better to | |||||
| * worse, but not strictly ordered. Therefore, an earlier | |||||
| * node with the same sample value is better in most cases | |||||
| * (and thus the current is skipped), but not strictly | |||||
| * in all cases. Only skipping samples where ssd >= | |||||
| * ssd of the earlier node with the same sample gives | |||||
| * slightly worse quality, though, for some reason. */ \ | |||||
| h = &hash[(uint16_t) dec_sample];\ | |||||
| if (*h == generation)\ | |||||
| goto next_##NAME;\ | |||||
| if (heap_pos < frontier) {\ | |||||
| pos = heap_pos++;\ | |||||
| } else {\ | |||||
| /* Try to replace one of the leaf nodes with the new \ | |||||
| * one, but try a different slot each time. */\ | |||||
| pos = (frontier >> 1) + (heap_pos & ((frontier >> 1) - 1));\ | |||||
| if (ssd > nodes_next[pos]->ssd)\ | |||||
| goto next_##NAME;\ | |||||
| heap_pos++;\ | |||||
| }\ | |||||
| *h = generation;\ | |||||
| u = nodes_next[pos];\ | |||||
| if(!u) {\ | |||||
| assert(pathn < FREEZE_INTERVAL<<avctx->trellis);\ | |||||
| u = t++;\ | |||||
| nodes_next[pos] = u;\ | |||||
| u->path = pathn++;\ | |||||
| }\ | |||||
| u->ssd = ssd;\ | |||||
| u->step = STEP_INDEX;\ | |||||
| u->sample2 = nodes[j]->sample1;\ | |||||
| u->sample1 = dec_sample;\ | |||||
| paths[u->path].nibble = nibble;\ | |||||
| paths[u->path].prev = nodes[j]->path;\ | |||||
| /* Sift the newly inserted node up in the heap to \ | |||||
| * restore the heap property. */\ | |||||
| while (pos > 0) {\ | |||||
| int parent = (pos - 1) >> 1;\ | |||||
| if (nodes_next[parent]->ssd <= ssd)\ | |||||
| break;\ | |||||
| FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\ | |||||
| pos = parent;\ | |||||
| }\ | |||||
| next_##NAME:; | |||||
| STORE_NODE(ms, FFMAX(16, (ff_adpcm_AdaptationTable[nibble] * step) >> 8)); | |||||
| } | |||||
| } else if((version == CODEC_ID_ADPCM_IMA_WAV)|| (version == CODEC_ID_ADPCM_IMA_QT)|| (version == CODEC_ID_ADPCM_SWF)) { | |||||
| #define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\ | |||||
| const int predictor = nodes[j]->sample1;\ | |||||
| const int div = (sample - predictor) * 4 / STEP_TABLE;\ | |||||
| int nmin = av_clip(div-range, -7, 6);\ | |||||
| int nmax = av_clip(div+range, -6, 7);\ | |||||
| if(nmin<=0) nmin--; /* distinguish -0 from +0 */\ | |||||
| if(nmax<0) nmax--;\ | |||||
| for(nidx=nmin; nidx<=nmax; nidx++) {\ | |||||
| const int nibble = nidx<0 ? 7-nidx : nidx;\ | |||||
| int dec_sample = predictor + (STEP_TABLE * ff_adpcm_yamaha_difflookup[nibble]) / 8;\ | |||||
| STORE_NODE(NAME, STEP_INDEX);\ | |||||
| } | |||||
| LOOP_NODES(ima, ff_adpcm_step_table[step], av_clip(step + ff_adpcm_index_table[nibble], 0, 88)); | |||||
| } else { //CODEC_ID_ADPCM_YAMAHA | |||||
| LOOP_NODES(yamaha, step, av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8, 127, 24567)); | |||||
| #undef LOOP_NODES | |||||
| #undef STORE_NODE | |||||
| } | |||||
| } | |||||
| u = nodes; | |||||
| nodes = nodes_next; | |||||
| nodes_next = u; | |||||
| generation++; | |||||
| if (generation == 255) { | |||||
| memset(hash, 0xff, 65536 * sizeof(*hash)); | |||||
| generation = 0; | |||||
| } | |||||
| // prevent overflow | |||||
| if(nodes[0]->ssd > (1<<28)) { | |||||
| for(j=1; j<frontier && nodes[j]; j++) | |||||
| nodes[j]->ssd -= nodes[0]->ssd; | |||||
| nodes[0]->ssd = 0; | |||||
| } | |||||
| // merge old paths to save memory | |||||
| if(i == froze + FREEZE_INTERVAL) { | |||||
| p = &paths[nodes[0]->path]; | |||||
| for(k=i; k>froze; k--) { | |||||
| dst[k] = p->nibble; | |||||
| p = &paths[p->prev]; | |||||
| } | |||||
| froze = i; | |||||
| pathn = 0; | |||||
| // other nodes might use paths that don't coincide with the frozen one. | |||||
| // checking which nodes do so is too slow, so just kill them all. | |||||
| // this also slightly improves quality, but I don't know why. | |||||
| memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*)); | |||||
| } | |||||
| } | |||||
| p = &paths[nodes[0]->path]; | |||||
| for(i=n-1; i>froze; i--) { | |||||
| dst[i] = p->nibble; | |||||
| p = &paths[p->prev]; | |||||
| } | |||||
| c->predictor = nodes[0]->sample1; | |||||
| c->sample1 = nodes[0]->sample1; | |||||
| c->sample2 = nodes[0]->sample2; | |||||
| c->step_index = nodes[0]->step; | |||||
| c->step = nodes[0]->step; | |||||
| c->idelta = nodes[0]->step; | |||||
| } | |||||
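adpcm_compress_trellis() is a bounded Viterbi-style search: for every input sample it keeps the frontier (= 1 << trellis) decoder states with the lowest accumulated squared error, expands each of them with a few candidate nibbles around the ideal quantizer output, and every FREEZE_INTERVAL samples commits the nibbles of the current best path so the path store can be reused. The bookkeeping that makes the final walk-back work is just a (nibble, prev) pair per emitted candidate; a tiny self-contained illustration of that part with hypothetical values (not the FFmpeg structures):

    #include <stdio.h>

    typedef struct { int nibble, prev; } Path;

    int main(void)
    {
        /* a hand-built chain: node 4 was reached via 3, 3 via 1, 1 via 0 */
        Path paths[] = { {3, -1}, {5, 0}, {9, 0}, {2, 1}, {7, 3} };
        int best = 4, out[8], n = 0;

        for (int p = best; p >= 0; p = paths[p].prev)  /* walk the prev links   */
            out[n++] = paths[p].nibble;
        while (n--)                                    /* emit in forward order */
            printf("%d ", out[n]);
        printf("\n");                                  /* prints: 3 5 2 7       */
        return 0;
    }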
| static int adpcm_encode_frame(AVCodecContext *avctx, | |||||
| unsigned char *frame, int buf_size, void *data) | |||||
| { | |||||
| int n, i, st; | |||||
| short *samples; | |||||
| unsigned char *dst; | |||||
| ADPCMEncodeContext *c = avctx->priv_data; | |||||
| uint8_t *buf; | |||||
| dst = frame; | |||||
| samples = (short *)data; | |||||
| st= avctx->channels == 2; | |||||
| /* n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */ | |||||
| switch(avctx->codec->id) { | |||||
| case CODEC_ID_ADPCM_IMA_WAV: | |||||
| n = avctx->frame_size / 8; | |||||
| c->status[0].prev_sample = (signed short)samples[0]; /* XXX */ | |||||
| /* c->status[0].step_index = 0; *//* XXX: not sure how to init the state machine */ | |||||
| bytestream_put_le16(&dst, c->status[0].prev_sample); | |||||
| *dst++ = (unsigned char)c->status[0].step_index; | |||||
| *dst++ = 0; /* unknown */ | |||||
| samples++; | |||||
| if (avctx->channels == 2) { | |||||
| c->status[1].prev_sample = (signed short)samples[0]; | |||||
| /* c->status[1].step_index = 0; */ | |||||
| bytestream_put_le16(&dst, c->status[1].prev_sample); | |||||
| *dst++ = (unsigned char)c->status[1].step_index; | |||||
| *dst++ = 0; | |||||
| samples++; | |||||
| } | |||||
| /* stereo: 4 bytes (8 samples) for left, 4 bytes for right, 4 bytes left, ... */ | |||||
| if(avctx->trellis > 0) { | |||||
| FF_ALLOC_OR_GOTO(avctx, buf, 2*n*8, error); | |||||
| adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n*8); | |||||
| if(avctx->channels == 2) | |||||
| adpcm_compress_trellis(avctx, samples+1, buf + n*8, &c->status[1], n*8); | |||||
| for(i=0; i<n; i++) { | |||||
| *dst++ = buf[8*i+0] | (buf[8*i+1] << 4); | |||||
| *dst++ = buf[8*i+2] | (buf[8*i+3] << 4); | |||||
| *dst++ = buf[8*i+4] | (buf[8*i+5] << 4); | |||||
| *dst++ = buf[8*i+6] | (buf[8*i+7] << 4); | |||||
| if (avctx->channels == 2) { | |||||
| uint8_t *buf1 = buf + n*8; | |||||
| *dst++ = buf1[8*i+0] | (buf1[8*i+1] << 4); | |||||
| *dst++ = buf1[8*i+2] | (buf1[8*i+3] << 4); | |||||
| *dst++ = buf1[8*i+4] | (buf1[8*i+5] << 4); | |||||
| *dst++ = buf1[8*i+6] | (buf1[8*i+7] << 4); | |||||
| } | |||||
| } | |||||
| av_free(buf); | |||||
| } else | |||||
| for (; n>0; n--) { | |||||
| *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]); | |||||
| *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4; | |||||
| dst++; | |||||
| *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]); | |||||
| *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4; | |||||
| dst++; | |||||
| *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]); | |||||
| *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4; | |||||
| dst++; | |||||
| *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]); | |||||
| *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4; | |||||
| dst++; | |||||
| /* right channel */ | |||||
| if (avctx->channels == 2) { | |||||
| *dst = adpcm_ima_compress_sample(&c->status[1], samples[1]); | |||||
| *dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4; | |||||
| dst++; | |||||
| *dst = adpcm_ima_compress_sample(&c->status[1], samples[5]); | |||||
| *dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4; | |||||
| dst++; | |||||
| *dst = adpcm_ima_compress_sample(&c->status[1], samples[9]); | |||||
| *dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4; | |||||
| dst++; | |||||
| *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]); | |||||
| *dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4; | |||||
| dst++; | |||||
| } | |||||
| samples += 8 * avctx->channels; | |||||
| } | |||||
| break; | |||||
| case CODEC_ID_ADPCM_IMA_QT: | |||||
| { | |||||
| int ch, i; | |||||
| PutBitContext pb; | |||||
| init_put_bits(&pb, dst, buf_size*8); | |||||
| for(ch=0; ch<avctx->channels; ch++){ | |||||
| put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7); | |||||
| put_bits(&pb, 7, c->status[ch].step_index); | |||||
| if(avctx->trellis > 0) { | |||||
| uint8_t buf[64]; | |||||
| adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64); | |||||
| for(i=0; i<64; i++) | |||||
| put_bits(&pb, 4, buf[i^1]); | |||||
| } else { | |||||
| for (i=0; i<64; i+=2){ | |||||
| int t1, t2; | |||||
| t1 = adpcm_ima_qt_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]); | |||||
| t2 = adpcm_ima_qt_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]); | |||||
| put_bits(&pb, 4, t2); | |||||
| put_bits(&pb, 4, t1); | |||||
| } | |||||
| } | |||||
| } | |||||
| flush_put_bits(&pb); | |||||
| dst += put_bits_count(&pb)>>3; | |||||
| break; | |||||
| } | |||||
| case CODEC_ID_ADPCM_SWF: | |||||
| { | |||||
| int i; | |||||
| PutBitContext pb; | |||||
| init_put_bits(&pb, dst, buf_size*8); | |||||
| n = avctx->frame_size-1; | |||||
| // store the AdpcmCodeSize | |||||
| put_bits(&pb, 2, 2); // set the 4-bit Flash ADPCM format | |||||
| // init the encoder state | |||||
| for(i=0; i<avctx->channels; i++){ | |||||
| c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63); // clip step so it fits 6 bits | |||||
| put_sbits(&pb, 16, samples[i]); | |||||
| put_bits(&pb, 6, c->status[i].step_index); | |||||
| c->status[i].prev_sample = (signed short)samples[i]; | |||||
| } | |||||
| if(avctx->trellis > 0) { | |||||
| FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error); | |||||
| adpcm_compress_trellis(avctx, samples+2, buf, &c->status[0], n); | |||||
| if (avctx->channels == 2) | |||||
| adpcm_compress_trellis(avctx, samples+3, buf+n, &c->status[1], n); | |||||
| for(i=0; i<n; i++) { | |||||
| put_bits(&pb, 4, buf[i]); | |||||
| if (avctx->channels == 2) | |||||
| put_bits(&pb, 4, buf[n+i]); | |||||
| } | |||||
| av_free(buf); | |||||
| } else { | |||||
| for (i=1; i<avctx->frame_size; i++) { | |||||
| put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels*i])); | |||||
| if (avctx->channels == 2) | |||||
| put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2*i+1])); | |||||
| } | |||||
| } | |||||
| flush_put_bits(&pb); | |||||
| dst += put_bits_count(&pb)>>3; | |||||
| break; | |||||
| } | |||||
| case CODEC_ID_ADPCM_MS: | |||||
| for(i=0; i<avctx->channels; i++){ | |||||
| int predictor=0; | |||||
| *dst++ = predictor; | |||||
| c->status[i].coeff1 = ff_adpcm_AdaptCoeff1[predictor]; | |||||
| c->status[i].coeff2 = ff_adpcm_AdaptCoeff2[predictor]; | |||||
| } | |||||
| for(i=0; i<avctx->channels; i++){ | |||||
| if (c->status[i].idelta < 16) | |||||
| c->status[i].idelta = 16; | |||||
| bytestream_put_le16(&dst, c->status[i].idelta); | |||||
| } | |||||
| for(i=0; i<avctx->channels; i++){ | |||||
| c->status[i].sample2= *samples++; | |||||
| } | |||||
| for(i=0; i<avctx->channels; i++){ | |||||
| c->status[i].sample1= *samples++; | |||||
| bytestream_put_le16(&dst, c->status[i].sample1); | |||||
| } | |||||
| for(i=0; i<avctx->channels; i++) | |||||
| bytestream_put_le16(&dst, c->status[i].sample2); | |||||
| if(avctx->trellis > 0) { | |||||
| int n = avctx->block_align - 7*avctx->channels; | |||||
| FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error); | |||||
| if(avctx->channels == 1) { | |||||
| adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n); | |||||
| for(i=0; i<n; i+=2) | |||||
| *dst++ = (buf[i] << 4) | buf[i+1]; | |||||
| } else { | |||||
| adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n); | |||||
| adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n); | |||||
| for(i=0; i<n; i++) | |||||
| *dst++ = (buf[i] << 4) | buf[n+i]; | |||||
| } | |||||
| av_free(buf); | |||||
| } else | |||||
| for(i=7*avctx->channels; i<avctx->block_align; i++) { | |||||
| int nibble; | |||||
| nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4; | |||||
| nibble|= adpcm_ms_compress_sample(&c->status[st], *samples++); | |||||
| *dst++ = nibble; | |||||
| } | |||||
| break; | |||||
| case CODEC_ID_ADPCM_YAMAHA: | |||||
| n = avctx->frame_size / 2; | |||||
| if(avctx->trellis > 0) { | |||||
| FF_ALLOC_OR_GOTO(avctx, buf, 2*n*2, error); | |||||
| n *= 2; | |||||
| if(avctx->channels == 1) { | |||||
| adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n); | |||||
| for(i=0; i<n; i+=2) | |||||
| *dst++ = buf[i] | (buf[i+1] << 4); | |||||
| } else { | |||||
| adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n); | |||||
| adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n); | |||||
| for(i=0; i<n; i++) | |||||
| *dst++ = buf[i] | (buf[n+i] << 4); | |||||
| } | |||||
| av_free(buf); | |||||
| } else | |||||
| for (n *= avctx->channels; n>0; n--) { | |||||
| int nibble; | |||||
| nibble = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++); | |||||
| nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4; | |||||
| *dst++ = nibble; | |||||
| } | |||||
| break; | |||||
| default: | |||||
| error: | |||||
| return -1; | |||||
| } | |||||
| return dst - frame; | |||||
| } | |||||
| #define ADPCM_ENCODER(id,name,long_name_) \ | |||||
| AVCodec ff_ ## name ## _encoder = { \ | |||||
| #name, \ | |||||
| AVMEDIA_TYPE_AUDIO, \ | |||||
| id, \ | |||||
| sizeof(ADPCMEncodeContext), \ | |||||
| adpcm_encode_init, \ | |||||
| adpcm_encode_frame, \ | |||||
| adpcm_encode_close, \ | |||||
| NULL, \ | |||||
| .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, \ | |||||
| .long_name = NULL_IF_CONFIG_SMALL(long_name_), \ | |||||
| } | |||||
| ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime"); | |||||
| ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV"); | |||||
| ADPCM_ENCODER(CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft"); | |||||
| ADPCM_ENCODER(CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash"); | |||||
| ADPCM_ENCODER(CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha"); | |||||
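For reference, a hedged sketch of driving one of these encoders through the libavcodec API of this era; the avcodec_alloc_context3()/avcodec_open2() calls and the since-deprecated avcodec_encode_audio() are assumptions about the contemporary public API, so adjust to your lavc version, and avcodec_register_all() is assumed to have been called once at startup:

    #include <libavcodec/avcodec.h>

    /* Encode exactly one block of interleaved S16 input; `samples` must hold
     * ctx->frame_size * ctx->channels values once the codec is open. */
    static int encode_one_block(const short *samples, uint8_t *out, int out_size)
    {
        AVCodec *codec = avcodec_find_encoder(CODEC_ID_ADPCM_IMA_WAV);
        AVCodecContext *ctx;
        int n;

        if (!codec)
            return -1;
        ctx = avcodec_alloc_context3(codec);
        ctx->sample_rate = 44100;
        ctx->channels    = 2;
        ctx->sample_fmt  = AV_SAMPLE_FMT_S16;
        if (avcodec_open2(ctx, codec, NULL) < 0)
            return -1;
        n = avcodec_encode_audio(ctx, out, out_size, samples); /* bytes written */
        avcodec_close(ctx);
        av_free(ctx);
        return n;
    }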
| @@ -24,8 +24,11 @@ | |||||
| #include <string.h> | #include <string.h> | ||||
| #include <math.h> | #include <math.h> | ||||
| #include <stdint.h> | #include <stdint.h> | ||||
| #include <float.h> | |||||
| #include <xavs.h> | #include <xavs.h> | ||||
| #include "avcodec.h" | #include "avcodec.h" | ||||
| #include "internal.h" | |||||
| #include "libavutil/opt.h" | |||||
| #define END_OF_STREAM 0x001 | #define END_OF_STREAM 0x001 | ||||
| @@ -41,6 +44,15 @@ typedef struct XavsContext { | |||||
| int sei_size; | int sei_size; | ||||
| AVFrame out_pic; | AVFrame out_pic; | ||||
| int end_of_stream; | int end_of_stream; | ||||
| float crf; | |||||
| int cqp; | |||||
| int b_bias; | |||||
| float cplxblur; | |||||
| int direct_pred; | |||||
| int aud; | |||||
| int fast_pskip; | |||||
| int mbtree; | |||||
| int mixed_refs; | |||||
| } XavsContext; | } XavsContext; | ||||
| static void XAVS_log(void *p, int level, const char *fmt, va_list args) | static void XAVS_log(void *p, int level, const char *fmt, va_list args) | ||||
| @@ -181,13 +193,17 @@ static av_cold int XAVS_init(AVCodecContext *avctx) | |||||
| x4->params.pf_log = XAVS_log; | x4->params.pf_log = XAVS_log; | ||||
| x4->params.p_log_private = avctx; | x4->params.p_log_private = avctx; | ||||
| x4->params.i_keyint_max = avctx->gop_size; | x4->params.i_keyint_max = avctx->gop_size; | ||||
| x4->params.rc.i_bitrate = avctx->bit_rate / 1000; | |||||
| if (avctx->bit_rate) { | |||||
| x4->params.rc.i_bitrate = avctx->bit_rate / 1000; | |||||
| x4->params.rc.i_rc_method = XAVS_RC_ABR; | |||||
| } | |||||
| x4->params.rc.i_vbv_buffer_size = avctx->rc_buffer_size / 1000; | x4->params.rc.i_vbv_buffer_size = avctx->rc_buffer_size / 1000; | ||||
| x4->params.rc.i_vbv_max_bitrate = avctx->rc_max_rate / 1000; | x4->params.rc.i_vbv_max_bitrate = avctx->rc_max_rate / 1000; | ||||
| x4->params.rc.b_stat_write = avctx->flags & CODEC_FLAG_PASS1; | x4->params.rc.b_stat_write = avctx->flags & CODEC_FLAG_PASS1; | ||||
| if (avctx->flags & CODEC_FLAG_PASS2) { | if (avctx->flags & CODEC_FLAG_PASS2) { | ||||
| x4->params.rc.b_stat_read = 1; | x4->params.rc.b_stat_read = 1; | ||||
| } else { | } else { | ||||
| #if FF_API_X264_GLOBAL_OPTS | |||||
| if (avctx->crf) { | if (avctx->crf) { | ||||
| x4->params.rc.i_rc_method = XAVS_RC_CRF; | x4->params.rc.i_rc_method = XAVS_RC_CRF; | ||||
| x4->params.rc.f_rf_constant = avctx->crf; | x4->params.rc.f_rf_constant = avctx->crf; | ||||
| @@ -195,19 +211,63 @@ static av_cold int XAVS_init(AVCodecContext *avctx) | |||||
| x4->params.rc.i_rc_method = XAVS_RC_CQP; | x4->params.rc.i_rc_method = XAVS_RC_CQP; | ||||
| x4->params.rc.i_qp_constant = avctx->cqp; | x4->params.rc.i_qp_constant = avctx->cqp; | ||||
| } | } | ||||
| #endif | |||||
| if (x4->crf >= 0) { | |||||
| x4->params.rc.i_rc_method = XAVS_RC_CRF; | |||||
| x4->params.rc.f_rf_constant = x4->crf; | |||||
| } else if (x4->cqp >= 0) { | |||||
| x4->params.rc.i_rc_method = XAVS_RC_CQP; | |||||
| x4->params.rc.i_qp_constant = x4->cqp; | |||||
| } | |||||
| } | } | ||||
| /* If neither CRF nor CQP mode is selected we have to enable rate control (ABR). */ | |||||
| /* We do it this way because we cannot check whether the bitrate has been set. */ | |||||
| if (!(avctx->crf || (avctx->cqp > -1))) | |||||
| x4->params.rc.i_rc_method = XAVS_RC_ABR; | |||||
| #if FF_API_X264_GLOBAL_OPTS | |||||
| if (avctx->bframebias) | |||||
| x4->params.i_bframe_bias = avctx->bframebias; | |||||
| if (avctx->deblockalpha) | |||||
| x4->params.i_deblocking_filter_alphac0 = avctx->deblockalpha; | |||||
| if (avctx->deblockbeta) | |||||
| x4->params.i_deblocking_filter_beta = avctx->deblockbeta; | |||||
| if (avctx->complexityblur >= 0) | |||||
| x4->params.rc.f_complexity_blur = avctx->complexityblur; | |||||
| if (avctx->directpred >= 0) | |||||
| x4->params.analyse.i_direct_mv_pred = avctx->directpred; | |||||
| if (avctx->partitions) { | |||||
| if (avctx->partitions & XAVS_PART_I8X8) | |||||
| x4->params.analyse.inter |= XAVS_ANALYSE_I8x8; | |||||
| if (avctx->partitions & XAVS_PART_P8X8) | |||||
| x4->params.analyse.inter |= XAVS_ANALYSE_PSUB16x16; | |||||
| if (avctx->partitions & XAVS_PART_B8X8) | |||||
| x4->params.analyse.inter |= XAVS_ANALYSE_BSUB16x16; | |||||
| } | |||||
| x4->params.rc.b_mb_tree = !!(avctx->flags2 & CODEC_FLAG2_MBTREE); | |||||
| x4->params.b_aud = avctx->flags2 & CODEC_FLAG2_AUD; | |||||
| x4->params.analyse.b_mixed_references = avctx->flags2 & CODEC_FLAG2_MIXED_REFS; | |||||
| x4->params.analyse.b_fast_pskip = avctx->flags2 & CODEC_FLAG2_FASTPSKIP; | |||||
| x4->params.analyse.b_weighted_bipred = avctx->flags2 & CODEC_FLAG2_WPRED; | |||||
| #endif | |||||
| if (x4->aud >= 0) | |||||
| x4->params.b_aud = x4->aud; | |||||
| if (x4->mbtree >= 0) | |||||
| x4->params.rc.b_mb_tree = x4->mbtree; | |||||
| if (x4->direct_pred >= 0) | |||||
| x4->params.analyse.i_direct_mv_pred = x4->direct_pred; | |||||
| if (x4->fast_pskip >= 0) | |||||
| x4->params.analyse.b_fast_pskip = x4->fast_pskip; | |||||
| if (x4->mixed_refs >= 0) | |||||
| x4->params.analyse.b_mixed_references = x4->mixed_refs; | |||||
| if (x4->b_bias != INT_MIN) | |||||
| x4->params.i_bframe_bias = x4->b_bias; | |||||
| if (x4->cplxblur >= 0) | |||||
| x4->params.rc.f_complexity_blur = x4->cplxblur; | |||||
| x4->params.i_bframe = avctx->max_b_frames; | x4->params.i_bframe = avctx->max_b_frames; | ||||
| /* cabac is not included in AVS JiZhun Profile */ | /* cabac is not included in AVS JiZhun Profile */ | ||||
| x4->params.b_cabac = 0; | x4->params.b_cabac = 0; | ||||
| x4->params.i_bframe_adaptive = avctx->b_frame_strategy; | x4->params.i_bframe_adaptive = avctx->b_frame_strategy; | ||||
| x4->params.i_bframe_bias = avctx->bframebias; | |||||
| avctx->has_b_frames = !!avctx->max_b_frames; | avctx->has_b_frames = !!avctx->max_b_frames; | ||||
| @@ -220,8 +280,6 @@ static av_cold int XAVS_init(AVCodecContext *avctx) | |||||
| x4->params.i_scenecut_threshold = avctx->scenechange_threshold; | x4->params.i_scenecut_threshold = avctx->scenechange_threshold; | ||||
| // x4->params.b_deblocking_filter = avctx->flags & CODEC_FLAG_LOOP_FILTER; | // x4->params.b_deblocking_filter = avctx->flags & CODEC_FLAG_LOOP_FILTER; | ||||
| x4->params.i_deblocking_filter_alphac0 = avctx->deblockalpha; | |||||
| x4->params.i_deblocking_filter_beta = avctx->deblockbeta; | |||||
| x4->params.rc.i_qp_min = avctx->qmin; | x4->params.rc.i_qp_min = avctx->qmin; | ||||
| x4->params.rc.i_qp_max = avctx->qmax; | x4->params.rc.i_qp_max = avctx->qmax; | ||||
| @@ -229,7 +287,6 @@ static av_cold int XAVS_init(AVCodecContext *avctx) | |||||
| x4->params.rc.f_qcompress = avctx->qcompress; /* 0.0 => cbr, 1.0 => constant qp */ | x4->params.rc.f_qcompress = avctx->qcompress; /* 0.0 => cbr, 1.0 => constant qp */ | ||||
| x4->params.rc.f_qblur = avctx->qblur; /* temporally blur quants */ | x4->params.rc.f_qblur = avctx->qblur; /* temporally blur quants */ | ||||
| x4->params.rc.f_complexity_blur = avctx->complexityblur; | |||||
| x4->params.i_frame_reference = avctx->refs; | x4->params.i_frame_reference = avctx->refs; | ||||
| @@ -241,20 +298,6 @@ static av_cold int XAVS_init(AVCodecContext *avctx) | |||||
| x4->params.i_fps_num = avctx->time_base.den; | x4->params.i_fps_num = avctx->time_base.den; | ||||
| x4->params.i_fps_den = avctx->time_base.num; | x4->params.i_fps_den = avctx->time_base.num; | ||||
| x4->params.analyse.inter = XAVS_ANALYSE_I8x8 |XAVS_ANALYSE_PSUB16x16| XAVS_ANALYSE_BSUB16x16; | x4->params.analyse.inter = XAVS_ANALYSE_I8x8 |XAVS_ANALYSE_PSUB16x16| XAVS_ANALYSE_BSUB16x16; | ||||
| if (avctx->partitions) { | |||||
| if (avctx->partitions & XAVS_PART_I8X8) | |||||
| x4->params.analyse.inter |= XAVS_ANALYSE_I8x8; | |||||
| if (avctx->partitions & XAVS_PART_P8X8) | |||||
| x4->params.analyse.inter |= XAVS_ANALYSE_PSUB16x16; | |||||
| if (avctx->partitions & XAVS_PART_B8X8) | |||||
| x4->params.analyse.inter |= XAVS_ANALYSE_BSUB16x16; | |||||
| } | |||||
| x4->params.analyse.i_direct_mv_pred = avctx->directpred; | |||||
| x4->params.analyse.b_weighted_bipred = avctx->flags2 & CODEC_FLAG2_WPRED; | |||||
| switch (avctx->me_method) { | switch (avctx->me_method) { | ||||
| case ME_EPZS: | case ME_EPZS: | ||||
| @@ -279,11 +322,9 @@ static av_cold int XAVS_init(AVCodecContext *avctx) | |||||
| x4->params.analyse.i_me_range = avctx->me_range; | x4->params.analyse.i_me_range = avctx->me_range; | ||||
| x4->params.analyse.i_subpel_refine = avctx->me_subpel_quality; | x4->params.analyse.i_subpel_refine = avctx->me_subpel_quality; | ||||
| x4->params.analyse.b_mixed_references = avctx->flags2 & CODEC_FLAG2_MIXED_REFS; | |||||
| x4->params.analyse.b_chroma_me = avctx->me_cmp & FF_CMP_CHROMA; | x4->params.analyse.b_chroma_me = avctx->me_cmp & FF_CMP_CHROMA; | ||||
| /* AVS P2 only enables 8x8 transform */ | /* AVS P2 only enables 8x8 transform */ | ||||
| x4->params.analyse.b_transform_8x8 = 1; //avctx->flags2 & CODEC_FLAG2_8X8DCT; | x4->params.analyse.b_transform_8x8 = 1; //avctx->flags2 & CODEC_FLAG2_8X8DCT; | ||||
| x4->params.analyse.b_fast_pskip = avctx->flags2 & CODEC_FLAG2_FASTPSKIP; | |||||
| x4->params.analyse.i_trellis = avctx->trellis; | x4->params.analyse.i_trellis = avctx->trellis; | ||||
| x4->params.analyse.i_noise_reduction = avctx->noise_reduction; | x4->params.analyse.i_noise_reduction = avctx->noise_reduction; | ||||
| @@ -303,14 +344,12 @@ static av_cold int XAVS_init(AVCodecContext *avctx) | |||||
| /* TAG:do we have MB tree RC method */ | /* TAG:do we have MB tree RC method */ | ||||
| /* what is the RC method we are now using? Default NO */ | /* what is the RC method we are now using? Default NO */ | ||||
| x4->params.rc.b_mb_tree = !!(avctx->flags2 & CODEC_FLAG2_MBTREE); | |||||
| x4->params.rc.f_ip_factor = 1 / fabs(avctx->i_quant_factor); | x4->params.rc.f_ip_factor = 1 / fabs(avctx->i_quant_factor); | ||||
| x4->params.rc.f_pb_factor = avctx->b_quant_factor; | x4->params.rc.f_pb_factor = avctx->b_quant_factor; | ||||
| x4->params.analyse.i_chroma_qp_offset = avctx->chromaoffset; | x4->params.analyse.i_chroma_qp_offset = avctx->chromaoffset; | ||||
| x4->params.analyse.b_psnr = avctx->flags & CODEC_FLAG_PSNR; | x4->params.analyse.b_psnr = avctx->flags & CODEC_FLAG_PSNR; | ||||
| x4->params.i_log_level = XAVS_LOG_DEBUG; | x4->params.i_log_level = XAVS_LOG_DEBUG; | ||||
| x4->params.b_aud = avctx->flags2 & CODEC_FLAG2_AUD; | |||||
| x4->params.i_threads = avctx->thread_count; | x4->params.i_threads = avctx->thread_count; | ||||
| x4->params.b_interlaced = avctx->flags & CODEC_FLAG_INTERLACED_DCT; | x4->params.b_interlaced = avctx->flags & CODEC_FLAG_INTERLACED_DCT; | ||||
| @@ -336,6 +375,37 @@ static av_cold int XAVS_init(AVCodecContext *avctx) | |||||
| return 0; | return 0; | ||||
| } | } | ||||
| #define OFFSET(x) offsetof(XavsContext, x) | |||||
| #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM | |||||
| static const AVOption options[] = { | |||||
| { "crf", "Select the quality for constant quality mode", OFFSET(crf), FF_OPT_TYPE_FLOAT, {-1 }, -1, FLT_MAX, VE }, | |||||
| { "qp", "Constant quantization parameter rate control method",OFFSET(cqp), FF_OPT_TYPE_INT, {-1 }, -1, INT_MAX, VE }, | |||||
| { "b-bias", "Influences how often B-frames are used", OFFSET(b_bias), FF_OPT_TYPE_INT, {INT_MIN}, INT_MIN, INT_MAX, VE }, | |||||
| { "cplxblur", "Reduce fluctuations in QP (before curve compression)", OFFSET(cplxblur), FF_OPT_TYPE_FLOAT, {-1 }, -1, FLT_MAX, VE}, | |||||
| { "direct-pred", "Direct MV prediction mode", OFFSET(direct_pred), FF_OPT_TYPE_INT, {-1 }, -1, INT_MAX, VE, "direct-pred" }, | |||||
| { "none", NULL, 0, FF_OPT_TYPE_CONST, { XAVS_DIRECT_PRED_NONE }, 0, 0, VE, "direct-pred" }, | |||||
| { "spatial", NULL, 0, FF_OPT_TYPE_CONST, { XAVS_DIRECT_PRED_SPATIAL }, 0, 0, VE, "direct-pred" }, | |||||
| { "temporal", NULL, 0, FF_OPT_TYPE_CONST, { XAVS_DIRECT_PRED_TEMPORAL }, 0, 0, VE, "direct-pred" }, | |||||
| { "auto", NULL, 0, FF_OPT_TYPE_CONST, { XAVS_DIRECT_PRED_AUTO }, 0, 0, VE, "direct-pred" }, | |||||
| { "aud", "Use access unit delimiters.", OFFSET(aud), FF_OPT_TYPE_INT, {-1 }, -1, 1, VE}, | |||||
| { "mbtree", "Use macroblock tree rate control.", OFFSET(mbtree), FF_OPT_TYPE_INT, {-1 }, -1, 1, VE}, | |||||
| { "mixed-refs", "One reference per partition, as opposed to one reference per macroblock", OFFSET(mixed_refs), FF_OPT_TYPE_INT, {-1}, -1, 1, VE }, | |||||
| { "fast-pskip", NULL, OFFSET(fast_pskip), FF_OPT_TYPE_INT, {-1 }, -1, 1, VE}, | |||||
| { NULL }, | |||||
| }; | |||||
| static const AVClass class = { | |||||
| .class_name = "libxavs", | |||||
| .item_name = av_default_item_name, | |||||
| .option = options, | |||||
| .version = LIBAVUTIL_VERSION_INT, | |||||
| }; | |||||
| static const AVCodecDefault xavs_defaults[] = { | |||||
| { "b", "0" }, | |||||
| { NULL }, | |||||
| }; | |||||
| AVCodec ff_libxavs_encoder = { | AVCodec ff_libxavs_encoder = { | ||||
| .name = "libxavs", | .name = "libxavs", | ||||
| .type = AVMEDIA_TYPE_VIDEO, | .type = AVMEDIA_TYPE_VIDEO, | ||||
| @@ -347,5 +417,7 @@ AVCodec ff_libxavs_encoder = { | |||||
| .capabilities = CODEC_CAP_DELAY, | .capabilities = CODEC_CAP_DELAY, | ||||
| .pix_fmts = (const enum PixelFormat[]) { PIX_FMT_YUV420P, PIX_FMT_NONE }, | .pix_fmts = (const enum PixelFormat[]) { PIX_FMT_YUV420P, PIX_FMT_NONE }, | ||||
| .long_name = NULL_IF_CONFIG_SMALL("libxavs - the Chinese Audio Video Standard Encoder"), | .long_name = NULL_IF_CONFIG_SMALL("libxavs - the Chinese Audio Video Standard Encoder"), | ||||
| .priv_class = &class, | |||||
| .defaults = xavs_defaults, | |||||
| }; | }; | ||||
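Because the new fields hang off priv_data through the AVClass above, callers can set them with the AVOptions API instead of the deprecated global AVCodecContext fields. A hedged sketch, assuming the usual allocation sequence of this era and omitting most error checking:

    #include <libavcodec/avcodec.h>
    #include <libavutil/opt.h>

    static AVCodecContext *open_xavs(void)
    {
        AVCodec        *codec = avcodec_find_encoder_by_name("libxavs");
        AVCodecContext *avctx = codec ? avcodec_alloc_context3(codec) : NULL;

        if (!avctx)
            return NULL;
        /* avcodec_alloc_context3() has already applied the xavs_defaults above */
        av_opt_set(avctx->priv_data, "crf",         "28",      0);
        av_opt_set(avctx->priv_data, "direct-pred", "spatial", 0);
        av_opt_set(avctx->priv_data, "mbtree",      "1",       0);
        /* ... set width, height and time_base, then avcodec_open2(avctx, codec, NULL) */
        return avctx;
    }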
| @@ -1336,6 +1336,13 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int | |||||
| } | } | ||||
| } | } | ||||
| s->mb_x = s->mb_y = 0; | s->mb_x = s->mb_y = 0; | ||||
| } else { | |||||
| int slice_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I; | |||||
| if (slice_type != s->pict_type) { | |||||
| av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n"); | |||||
| return AVERROR_INVALIDDATA; | |||||
| } | |||||
| } | } | ||||
| r->si.end = end; | r->si.end = end; | ||||
| @@ -19,7 +19,7 @@ | |||||
| /** | /** | ||||
| * @file | * @file | ||||
| * filter fow showing textual video frame information | |||||
| * filter for showing textual video frame information | |||||
| */ | */ | ||||
| #include "libavutil/adler32.h" | #include "libavutil/adler32.h" | ||||
| @@ -286,15 +286,16 @@ static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt) | |||||
| for(i = 0; i < 7; i++) { | for(i = 0; i < 7; i++) { | ||||
| if(flags & 1) { | if(flags & 1) { | ||||
| int size; | int size; | ||||
| uint8_t *tmpbuf; | |||||
| size = avio_rl32(s->pb) - 4; | size = avio_rl32(s->pb) - 4; | ||||
| frame_size -= size; | frame_size -= size; | ||||
| frame_size -= 4; | frame_size -= 4; | ||||
| smk->curstream++; | smk->curstream++; | ||||
| smk->bufs[smk->curstream] = av_realloc(smk->bufs[smk->curstream], size); | |||||
| if (!smk->bufs[smk->curstream]) { | |||||
| smk->buf_sizes[smk->curstream] = 0; | |||||
| tmpbuf = av_realloc(smk->bufs[smk->curstream], size); | |||||
| if (!tmpbuf) | |||||
| return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
| } | |||||
| smk->bufs[smk->curstream] = tmpbuf; | |||||
| smk->buf_sizes[smk->curstream] = size; | smk->buf_sizes[smk->curstream] = size; | ||||
| ret = avio_read(s->pb, smk->bufs[smk->curstream], size); | ret = avio_read(s->pb, smk->bufs[smk->curstream], size); | ||||
| if(ret != size) | if(ret != size) | ||||
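The smacker change above is the standard safe-realloc idiom: never assign the result of av_realloc() directly over the only pointer to the buffer, or a failed reallocation leaks the original block and leaves the recorded size stale. The same pattern in isolation, as a generic sketch rather than demuxer code:

    #include <stdint.h>
    #include <libavutil/error.h>
    #include <libavutil/mem.h>

    /* Grow *bufp to new_size without losing the original buffer on failure. */
    static int grow_buffer(uint8_t **bufp, int *sizep, int new_size)
    {
        uint8_t *tmp = av_realloc(*bufp, new_size);
        if (!tmp)
            return AVERROR(ENOMEM); /* *bufp is still valid and still owned */
        *bufp  = tmp;
        *sizep = new_size;
        return 0;
    }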