/*
 * ADPCM codecs
 * Copyright (c) 2001-2003 The ffmpeg Project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "avcodec.h"
#include "get_bits.h"
#include "put_bits.h"
#include "bytestream.h"

/**
 * @file
 * ADPCM codecs.
 * First version by Francois Revol (revol@free.fr)
 * Fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
 * by Mike Melanson (melanson@pcisys.net)
 * CD-ROM XA ADPCM codec by BERO
 * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
 * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
 * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
 * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
 * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
 * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
 * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
 *
 * Features and limitations:
 *
 * Reference documents:
 * http://www.pcisys.net/~melanson/codecs/simpleaudio.html
 * http://www.geocities.com/SiliconValley/8682/aud3.txt
 * http://openquicktime.sourceforge.net/plugins.htm
 * XAnim sources (xa_codec.c) http://www.rasnaimaging.com/people/lapus/download.html
 * http://www.cs.ucla.edu/~leec/mediabench/applications.html
 * SoX source code http://home.sprynet.com/~cbagwell/sox.html
 *
 * CD-ROM XA:
 * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html
 * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html
 * readstr http://www.geocities.co.jp/Playtown/2004/
 */
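
/* Output block size in bytes; the block-based encoders below
 * (IMA WAV, MS, Yamaha) set avctx->block_align to this value. */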
#define BLKSIZE 1024

/* step_table[] and index_table[] are from the ADPCM reference source */
/* This is the index table: */
static const int index_table[16] = {
    -1, -1, -1, -1, 2, 4, 6, 8,
    -1, -1, -1, -1, 2, 4, 6, 8,
};

/**
 * This is the step table. Note that many programs use slight deviations from
 * this table, but such deviations are negligible:
 */
static const int step_table[89] = {
    7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
    19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
    50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
    130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
    337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
    876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
    2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
    5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};

/* These are for MS-ADPCM */
/* AdaptationTable[], AdaptCoeff1[], and AdaptCoeff2[] are from libsndfile */
static const int AdaptationTable[] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    768, 614, 512, 409, 307, 230, 230, 230
};

/** Divided by 4 to fit in 8-bit integers */
static const uint8_t AdaptCoeff1[] = {
    64, 128, 0, 48, 60, 115, 98
};

/** Divided by 4 to fit in 8-bit integers */
static const int8_t AdaptCoeff2[] = {
    0, -64, 0, 16, 0, -52, -58
};

/* These are for CD-ROM XA ADPCM */
static const int xa_adpcm_table[5][2] = {
    { 0, 0 },
    { 60, 0 },
    { 115, -52 },
    { 98, -55 },
    { 122, -60 }
};

static const int ea_adpcm_table[] = {
    0, 240, 460, 392, 0, 0, -208, -220, 0, 1,
    3, 4, 7, 8, 10, 11, 0, -1, -3, -4
};

// padded to zero where table size is less than 16
static const int swf_index_tables[4][16] = {
    /*2*/ { -1, 2 },
    /*3*/ { -1, -1, 2, 4 },
    /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
    /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
};

static const int yamaha_indexscale[] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    230, 230, 230, 230, 307, 409, 512, 614
};

static const int yamaha_difflookup[] = {
    1, 3, 5, 7, 9, 11, 13, 15,
    -1, -3, -5, -7, -9, -11, -13, -15
};

/* end of tables */

typedef struct ADPCMChannelStatus {
    int predictor;
    short int step_index;
    int step;
    /* for encoding */
    int prev_sample;
    /* MS version */
    short sample1;
    short sample2;
    int coeff1;
    int coeff2;
    int idelta;
} ADPCMChannelStatus;
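
/* Bookkeeping for the trellis-based encoder (adpcm_compress_trellis() below):
 * each TrellisNode holds the decoder state reached so far plus the accumulated
 * squared error (ssd); each TrellisPath entry records the nibble emitted at
 * one step and a link to the previous step. */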
typedef struct TrellisPath {
    int nibble;
    int prev;
} TrellisPath;

typedef struct TrellisNode {
    uint32_t ssd;
    int path;
    int sample1;
    int sample2;
    int step;
} TrellisNode;

typedef struct ADPCMContext {
    ADPCMChannelStatus status[6];
    TrellisPath *paths;
    TrellisNode *node_buf;
    TrellisNode **nodep_buf;
    uint8_t *trellis_hash;
} ADPCMContext;

#define FREEZE_INTERVAL 128

/* XXX: implement encoding */

#if CONFIG_ENCODERS
static av_cold int adpcm_encode_init(AVCodecContext *avctx)
{
    ADPCMContext *s = avctx->priv_data;
    uint8_t *extradata;
    int i;
    if (avctx->channels > 2)
        return -1; /* only stereo or mono =) */

    if(avctx->trellis && (unsigned)avctx->trellis > 16U){
        av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
        return -1;
    }

    if (avctx->trellis) {
        int frontier = 1 << avctx->trellis;
        int max_paths = frontier * FREEZE_INTERVAL;
        FF_ALLOC_OR_GOTO(avctx, s->paths, max_paths * sizeof(*s->paths), error);
        FF_ALLOC_OR_GOTO(avctx, s->node_buf, 2 * frontier * sizeof(*s->node_buf), error);
        FF_ALLOC_OR_GOTO(avctx, s->nodep_buf, 2 * frontier * sizeof(*s->nodep_buf), error);
        FF_ALLOC_OR_GOTO(avctx, s->trellis_hash, 65536 * sizeof(*s->trellis_hash), error);
    }

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_WAV:
        avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1; /* each 16-bit sample gives one nibble */
                                                                                             /* and we have 4 bytes per channel overhead */
        avctx->block_align = BLKSIZE;
        /* seems frame_size isn't taken into account... have to buffer the samples :-( */
        break;
    case CODEC_ID_ADPCM_IMA_QT:
        avctx->frame_size = 64;
        avctx->block_align = 34 * avctx->channels;
        break;
    case CODEC_ID_ADPCM_MS:
        avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2; /* each 16-bit sample gives one nibble */
                                                                                       /* and we have 7 bytes per channel overhead */
        avctx->block_align = BLKSIZE;
        avctx->extradata_size = 32;
        extradata = avctx->extradata = av_malloc(avctx->extradata_size);
        if (!extradata)
            return AVERROR(ENOMEM);
        bytestream_put_le16(&extradata, avctx->frame_size);
        bytestream_put_le16(&extradata, 7); /* wNumCoef */
        for (i = 0; i < 7; i++) {
            bytestream_put_le16(&extradata, AdaptCoeff1[i] * 4);
            bytestream_put_le16(&extradata, AdaptCoeff2[i] * 4);
        }
        break;
    case CODEC_ID_ADPCM_YAMAHA:
        avctx->frame_size = BLKSIZE * avctx->channels;
        avctx->block_align = BLKSIZE;
        break;
    case CODEC_ID_ADPCM_SWF:
        if (avctx->sample_rate != 11025 &&
            avctx->sample_rate != 22050 &&
            avctx->sample_rate != 44100) {
            av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, 22050 or 44100\n");
            goto error;
        }
        avctx->frame_size = 512 * (avctx->sample_rate / 11025);
        break;
    default:
        goto error;
    }

    avctx->coded_frame= avcodec_alloc_frame();
    avctx->coded_frame->key_frame= 1;

    return 0;
error:
    av_freep(&s->paths);
    av_freep(&s->node_buf);
    av_freep(&s->nodep_buf);
    av_freep(&s->trellis_hash);
    return -1;
}

static av_cold int adpcm_encode_close(AVCodecContext *avctx)
{
    ADPCMContext *s = avctx->priv_data;
    av_freep(&avctx->coded_frame);
    av_freep(&s->paths);
    av_freep(&s->node_buf);
    av_freep(&s->nodep_buf);
    av_freep(&s->trellis_hash);

    return 0;
}
static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, short sample)
{
    int delta = sample - c->prev_sample;
    int nibble = FFMIN(7, abs(delta)*4/step_table[c->step_index]) + (delta<0)*8;
    c->prev_sample += ((step_table[c->step_index] * yamaha_difflookup[nibble]) / 8);
    c->prev_sample = av_clip_int16(c->prev_sample);
    c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88);
    return nibble;
}

static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample)
{
    int predictor, nibble, bias;

    predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;

    nibble= sample - predictor;
    if(nibble>=0) bias= c->idelta/2;
    else bias=-c->idelta/2;

    nibble= (nibble + bias) / c->idelta;
    nibble= av_clip(nibble, -8, 7)&0x0F;

    predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);

    c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
    if (c->idelta < 16) c->idelta = 16;

    return nibble;
}

static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample)
{
    int nibble, delta;

    if(!c->step) {
        c->predictor = 0;
        c->step = 127;
    }

    delta = sample - c->predictor;

    nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8;

    c->predictor += ((c->step * yamaha_difflookup[nibble]) / 8);
    c->predictor = av_clip_int16(c->predictor);
    c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
    c->step = av_clip(c->step, 127, 24567);

    return nibble;
}
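
/* Trellis quantizer: instead of choosing each nibble greedily, keep up to
 * (1 << avctx->trellis) candidate decoder states per input sample, expand a
 * few nibbles around the greedy choice for each of them, and retain the
 * survivors with the lowest accumulated squared error (ssd), ordered in a
 * heap. Every FREEZE_INTERVAL samples the nibbles of the best surviving path
 * are written to dst and the other paths are discarded, which bounds the
 * amount of path memory needed. */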
static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
                                   uint8_t *dst, ADPCMChannelStatus *c, int n)
{
    //FIXME 6% faster if frontier is a compile-time constant
    ADPCMContext *s = avctx->priv_data;
    const int frontier = 1 << avctx->trellis;
    const int stride = avctx->channels;
    const int version = avctx->codec->id;
    TrellisPath *paths = s->paths, *p;
    TrellisNode *node_buf = s->node_buf;
    TrellisNode **nodep_buf = s->nodep_buf;
    TrellisNode **nodes = nodep_buf; // nodes[] is always sorted by .ssd
    TrellisNode **nodes_next = nodep_buf + frontier;
    int pathn = 0, froze = -1, i, j, k, generation = 0;
    uint8_t *hash = s->trellis_hash;
    memset(hash, 0xff, 65536 * sizeof(*hash));

    memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
    nodes[0] = node_buf + frontier;
    nodes[0]->ssd = 0;
    nodes[0]->path = 0;
    nodes[0]->step = c->step_index;
    nodes[0]->sample1 = c->sample1;
    nodes[0]->sample2 = c->sample2;
    if((version == CODEC_ID_ADPCM_IMA_WAV) || (version == CODEC_ID_ADPCM_IMA_QT) || (version == CODEC_ID_ADPCM_SWF))
        nodes[0]->sample1 = c->prev_sample;
    if(version == CODEC_ID_ADPCM_MS)
        nodes[0]->step = c->idelta;
    if(version == CODEC_ID_ADPCM_YAMAHA) {
        if(c->step == 0) {
            nodes[0]->step = 127;
            nodes[0]->sample1 = 0;
        } else {
            nodes[0]->step = c->step;
            nodes[0]->sample1 = c->predictor;
        }
    }

    for(i=0; i<n; i++) {
        TrellisNode *t = node_buf + frontier*(i&1);
        TrellisNode **u;
        int sample = samples[i*stride];
        int heap_pos = 0;
        memset(nodes_next, 0, frontier*sizeof(TrellisNode*));
        for(j=0; j<frontier && nodes[j]; j++) {
            // nodes with higher j already have higher ssd, so they're likely to yield a suboptimal next sample too
            const int range = (j < frontier/2) ? 1 : 0;
            const int step = nodes[j]->step;
            int nidx;
            if(version == CODEC_ID_ADPCM_MS) {
                const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 64;
                const int div = (sample - predictor) / step;
                const int nmin = av_clip(div-range, -8, 6);
                const int nmax = av_clip(div+range, -7, 7);
                for(nidx=nmin; nidx<=nmax; nidx++) {
                    const int nibble = nidx & 0xf;
                    int dec_sample = predictor + nidx * step;
#define STORE_NODE(NAME, STEP_INDEX)\
                    int d;\
                    uint32_t ssd;\
                    int pos;\
                    TrellisNode *u;\
                    uint8_t *h;\
                    dec_sample = av_clip_int16(dec_sample);\
                    d = sample - dec_sample;\
                    ssd = nodes[j]->ssd + d*d;\
                    /* Collapse any two states with the same previous sample value. \
                     * One could also distinguish states by step and by 2nd to last
                     * sample, but the effects of that are negligible.
                     * Since nodes in the previous generation are iterated
                     * through a heap, they're roughly ordered from better to
                     * worse, but not strictly ordered. Therefore, an earlier
                     * node with the same sample value is better in most cases
                     * (and thus the current is skipped), but not strictly
                     * in all cases. Only skipping samples where ssd >=
                     * ssd of the earlier node with the same sample gives
                     * slightly worse quality, though, for some reason. */ \
                    h = &hash[(uint16_t) dec_sample];\
                    if (*h == generation)\
                        goto next_##NAME;\
                    if (heap_pos < frontier) {\
                        pos = heap_pos++;\
                    } else {\
                        /* Try to replace one of the leaf nodes with the new \
                         * one, but try a different slot each time. */\
                        pos = (frontier >> 1) + (heap_pos & ((frontier >> 1) - 1));\
                        if (ssd > nodes_next[pos]->ssd)\
                            goto next_##NAME;\
                        heap_pos++;\
                    }\
                    *h = generation;\
                    u = nodes_next[pos];\
                    if(!u) {\
                        assert(pathn < FREEZE_INTERVAL<<avctx->trellis);\
                        u = t++;\
                        nodes_next[pos] = u;\
                        u->path = pathn++;\
                    }\
                    u->ssd = ssd;\
                    u->step = STEP_INDEX;\
                    u->sample2 = nodes[j]->sample1;\
                    u->sample1 = dec_sample;\
                    paths[u->path].nibble = nibble;\
                    paths[u->path].prev = nodes[j]->path;\
                    /* Sift the newly inserted node down in the heap to \
                     * restore the heap property. */\
                    while (pos > 0) {\
                        int parent = (pos - 1) >> 1;\
                        if (nodes_next[parent]->ssd <= ssd)\
                            break;\
                        FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
                        pos = parent;\
                    }\
                    next_##NAME:;
                    STORE_NODE(ms, FFMAX(16, (AdaptationTable[nibble] * step) >> 8));
                }
            } else if((version == CODEC_ID_ADPCM_IMA_WAV)|| (version == CODEC_ID_ADPCM_IMA_QT)|| (version == CODEC_ID_ADPCM_SWF)) {
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
                const int predictor = nodes[j]->sample1;\
                const int div = (sample - predictor) * 4 / STEP_TABLE;\
                int nmin = av_clip(div-range, -7, 6);\
                int nmax = av_clip(div+range, -6, 7);\
                if(nmin<=0) nmin--; /* distinguish -0 from +0 */\
                if(nmax<0) nmax--;\
                for(nidx=nmin; nidx<=nmax; nidx++) {\
                    const int nibble = nidx<0 ? 7-nidx : nidx;\
                    int dec_sample = predictor + (STEP_TABLE * yamaha_difflookup[nibble]) / 8;\
                    STORE_NODE(NAME, STEP_INDEX);\
                }
                LOOP_NODES(ima, step_table[step], av_clip(step + index_table[nibble], 0, 88));
            } else { //CODEC_ID_ADPCM_YAMAHA
                LOOP_NODES(yamaha, step, av_clip((step * yamaha_indexscale[nibble]) >> 8, 127, 24567));
#undef LOOP_NODES
#undef STORE_NODE
            }
        }

        u = nodes;
        nodes = nodes_next;
        nodes_next = u;

        generation++;
        if (generation == 255) {
            memset(hash, 0xff, 65536 * sizeof(*hash));
            generation = 0;
        }

        // prevent overflow
        if(nodes[0]->ssd > (1<<28)) {
            for(j=1; j<frontier && nodes[j]; j++)
                nodes[j]->ssd -= nodes[0]->ssd;
            nodes[0]->ssd = 0;
        }

        // merge old paths to save memory
        if(i == froze + FREEZE_INTERVAL) {
            p = &paths[nodes[0]->path];
            for(k=i; k>froze; k--) {
                dst[k] = p->nibble;
                p = &paths[p->prev];
            }
            froze = i;
            pathn = 0;
            // other nodes might use paths that don't coincide with the frozen one.
            // checking which nodes do so is too slow, so just kill them all.
            // this also slightly improves quality, but I don't know why.
            memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*));
        }
    }

    p = &paths[nodes[0]->path];
    for(i=n-1; i>froze; i--) {
        dst[i] = p->nibble;
        p = &paths[p->prev];
    }

    c->predictor = nodes[0]->sample1;
    c->sample1 = nodes[0]->sample1;
    c->sample2 = nodes[0]->sample2;
    c->step_index = nodes[0]->step;
    c->step = nodes[0]->step;
    c->idelta = nodes[0]->step;
}
static int adpcm_encode_frame(AVCodecContext *avctx,
                              unsigned char *frame, int buf_size, void *data)
{
    int n, i, st;
    short *samples;
    unsigned char *dst;
    ADPCMContext *c = avctx->priv_data;
    uint8_t *buf;

    dst = frame;
    samples = (short *)data;
    st= avctx->channels == 2;
    /* n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_WAV:
        n = avctx->frame_size / 8;
        c->status[0].prev_sample = (signed short)samples[0]; /* XXX */
        /* c->status[0].step_index = 0; *//* XXX: not sure how to init the state machine */
        bytestream_put_le16(&dst, c->status[0].prev_sample);
        *dst++ = (unsigned char)c->status[0].step_index;
        *dst++ = 0; /* unknown */
        samples++;
        if (avctx->channels == 2) {
            c->status[1].prev_sample = (signed short)samples[0];
            /* c->status[1].step_index = 0; */
            bytestream_put_le16(&dst, c->status[1].prev_sample);
            *dst++ = (unsigned char)c->status[1].step_index;
            *dst++ = 0;
            samples++;
        }

        /* stereo: 4 bytes (8 samples) for left, 4 bytes for right, 4 bytes left, ... */
        if(avctx->trellis > 0) {
            FF_ALLOC_OR_GOTO(avctx, buf, 2*n*8, error);
            adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n*8);
            if(avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples+1, buf + n*8, &c->status[1], n*8);
            for(i=0; i<n; i++) {
                *dst++ = buf[8*i+0] | (buf[8*i+1] << 4);
                *dst++ = buf[8*i+2] | (buf[8*i+3] << 4);
                *dst++ = buf[8*i+4] | (buf[8*i+5] << 4);
                *dst++ = buf[8*i+6] | (buf[8*i+7] << 4);
                if (avctx->channels == 2) {
                    uint8_t *buf1 = buf + n*8;
                    *dst++ = buf1[8*i+0] | (buf1[8*i+1] << 4);
                    *dst++ = buf1[8*i+2] | (buf1[8*i+3] << 4);
                    *dst++ = buf1[8*i+4] | (buf1[8*i+5] << 4);
                    *dst++ = buf1[8*i+6] | (buf1[8*i+7] << 4);
                }
            }
            av_free(buf);
        } else
        for (; n>0; n--) {
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4;
            dst++;
            /* right channel */
            if (avctx->channels == 2) {
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[1]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[5]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[9]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4;
                dst++;
            }
            samples += 8 * avctx->channels;
        }
        break;
    case CODEC_ID_ADPCM_IMA_QT:
    {
        int ch, i;
        PutBitContext pb;
        init_put_bits(&pb, dst, buf_size*8);

        for(ch=0; ch<avctx->channels; ch++){
            put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7);
            put_bits(&pb, 7, c->status[ch].step_index);
            if(avctx->trellis > 0) {
                uint8_t buf[64];
                adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64);
                for(i=0; i<64; i++)
                    put_bits(&pb, 4, buf[i^1]);
                c->status[ch].prev_sample = c->status[ch].predictor & ~0x7F;
            } else {
                for (i=0; i<64; i+=2){
                    int t1, t2;
                    t1 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]);
                    t2 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]);
                    put_bits(&pb, 4, t2);
                    put_bits(&pb, 4, t1);
                }
                c->status[ch].prev_sample &= ~0x7F;
            }
        }

        flush_put_bits(&pb);
        dst += put_bits_count(&pb)>>3;
        break;
    }
    case CODEC_ID_ADPCM_SWF:
    {
        int i;
        PutBitContext pb;
        init_put_bits(&pb, dst, buf_size*8);

        n = avctx->frame_size-1;

        //Store AdpcmCodeSize
        put_bits(&pb, 2, 2); //Set 4bits flash adpcm format

        //Init the encoder state
        for(i=0; i<avctx->channels; i++){
            c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63); // clip step so it fits 6 bits
            put_sbits(&pb, 16, samples[i]);
            put_bits(&pb, 6, c->status[i].step_index);
            c->status[i].prev_sample = (signed short)samples[i];
        }

        if(avctx->trellis > 0) {
            FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error);
            adpcm_compress_trellis(avctx, samples+2, buf, &c->status[0], n);
            if (avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples+3, buf+n, &c->status[1], n);
            for(i=0; i<n; i++) {
                put_bits(&pb, 4, buf[i]);
                if (avctx->channels == 2)
                    put_bits(&pb, 4, buf[n+i]);
            }
            av_free(buf);
        } else {
            for (i=1; i<avctx->frame_size; i++) {
                put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels*i]));
                if (avctx->channels == 2)
                    put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2*i+1]));
            }
        }
        flush_put_bits(&pb);
        dst += put_bits_count(&pb)>>3;
        break;
    }
    case CODEC_ID_ADPCM_MS:
        for(i=0; i<avctx->channels; i++){
            int predictor=0;
            *dst++ = predictor;
            c->status[i].coeff1 = AdaptCoeff1[predictor];
            c->status[i].coeff2 = AdaptCoeff2[predictor];
        }
        for(i=0; i<avctx->channels; i++){
            if (c->status[i].idelta < 16)
                c->status[i].idelta = 16;
            bytestream_put_le16(&dst, c->status[i].idelta);
        }
        for(i=0; i<avctx->channels; i++){
            c->status[i].sample2= *samples++;
        }
        for(i=0; i<avctx->channels; i++){
            c->status[i].sample1= *samples++;
            bytestream_put_le16(&dst, c->status[i].sample1);
        }
        for(i=0; i<avctx->channels; i++)
            bytestream_put_le16(&dst, c->status[i].sample2);

        if(avctx->trellis > 0) {
            int n = avctx->block_align - 7*avctx->channels;
            FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error);
            if(avctx->channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
                for(i=0; i<n; i+=2)
                    *dst++ = (buf[i] << 4) | buf[i+1];
            } else {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
                adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n);
                for(i=0; i<n; i++)
                    *dst++ = (buf[i] << 4) | buf[n+i];
            }
            av_free(buf);
        } else
        for(i=7*avctx->channels; i<avctx->block_align; i++) {
            int nibble;
            nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4;
            nibble|= adpcm_ms_compress_sample(&c->status[st], *samples++);
            *dst++ = nibble;
        }
        break;
    case CODEC_ID_ADPCM_YAMAHA:
        n = avctx->frame_size / 2;
        if(avctx->trellis > 0) {
            FF_ALLOC_OR_GOTO(avctx, buf, 2*n*2, error);
            n *= 2;
            if(avctx->channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
                for(i=0; i<n; i+=2)
                    *dst++ = buf[i] | (buf[i+1] << 4);
            } else {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
                adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n);
                for(i=0; i<n; i++)
                    *dst++ = buf[i] | (buf[n+i] << 4);
            }
            av_free(buf);
        } else
            for (n *= avctx->channels; n>0; n--) {
                int nibble;
                nibble = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
                nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
                *dst++ = nibble;
            }
        break;
    default:
    error:
        return -1;
    }
    return dst - frame;
}
#endif //CONFIG_ENCODERS
static av_cold int adpcm_decode_init(AVCodecContext * avctx)
{
    ADPCMContext *c = avctx->priv_data;
    unsigned int max_channels = 2;

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_EA_R1:
    case CODEC_ID_ADPCM_EA_R2:
    case CODEC_ID_ADPCM_EA_R3:
        max_channels = 6;
        break;
    }
    if(avctx->channels > max_channels){
        return -1;
    }

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_CT:
        c->status[0].step = c->status[1].step = 511;
        break;
    case CODEC_ID_ADPCM_IMA_WAV:
        if (avctx->bits_per_coded_sample != 4) {
            av_log(avctx, AV_LOG_ERROR, "Only 4-bit ADPCM IMA WAV files are supported\n");
            return -1;
        }
        break;
    case CODEC_ID_ADPCM_IMA_WS:
        if (avctx->extradata && avctx->extradata_size == 2 * 4) {
            c->status[0].predictor = AV_RL32(avctx->extradata);
            c->status[1].predictor = AV_RL32(avctx->extradata + 4);
        }
        break;
    default:
        break;
    }
    avctx->sample_fmt = AV_SAMPLE_FMT_S16;
    return 0;
}

static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int shift)
{
    int step_index;
    int predictor;
    int sign, delta, diff, step;

    step = step_table[c->step_index];
    step_index = c->step_index + index_table[(unsigned)nibble];
    if (step_index < 0) step_index = 0;
    else if (step_index > 88) step_index = 88;

    sign = nibble & 8;
    delta = nibble & 7;
    /* perform direct multiplication instead of series of jumps proposed by
     * the reference ADPCM implementation since modern CPUs can do the mults
     * quickly enough */
    diff = ((2 * delta + 1) * step) >> shift;
    predictor = c->predictor;
    if (sign) predictor -= diff;
    else predictor += diff;

    c->predictor = av_clip_int16(predictor);
    c->step_index = step_index;

    return (short)c->predictor;
}
static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble)
{
    int predictor;

    predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
    predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);
    c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
    if (c->idelta < 16) c->idelta = 16;

    return c->sample1;
}

static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
{
    int sign, delta, diff;
    int new_step;

    sign = nibble & 8;
    delta = nibble & 7;
    /* perform direct multiplication instead of series of jumps proposed by
     * the reference ADPCM implementation since modern CPUs can do the mults
     * quickly enough */
    diff = ((2 * delta + 1) * c->step) >> 3;
    /* predictor update is not so trivial: predictor is multiplied by 254/256 before updating */
    c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
    c->predictor = av_clip_int16(c->predictor);
    /* calculate new step and clamp it to range 511..32767 */
    new_step = (AdaptationTable[nibble & 7] * c->step) >> 8;
    c->step = av_clip(new_step, 511, 32767);

    return (short)c->predictor;
}

static inline short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift)
{
    int sign, delta, diff;

    sign = nibble & (1<<(size-1));
    delta = nibble & ((1<<(size-1))-1);
    diff = delta << (7 + c->step + shift);

    /* clamp result */
    c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);

    /* calculate new step */
    if (delta >= (2*size - 3) && c->step < 3)
        c->step++;
    else if (delta == 0 && c->step > 0)
        c->step--;

    return (short) c->predictor;
}

static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned char nibble)
{
    if(!c->step) {
        c->predictor = 0;
        c->step = 127;
    }

    c->predictor += (c->step * yamaha_difflookup[nibble]) / 8;
    c->predictor = av_clip_int16(c->predictor);
    c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
    c->step = av_clip(c->step, 127, 24567);
    return c->predictor;
}
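
/* Decode one 128-byte CD-ROM XA sound group: 8 interleaved 4-bit units of
 * 28 samples each, where every unit carries its own shift and predictor
 * filter (an index into xa_adpcm_table) in the 16-byte group header.
 * 'inc' is 1 for mono and 2 for interleaved stereo output. */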
static void xa_decode(short *out, const unsigned char *in,
                      ADPCMChannelStatus *left, ADPCMChannelStatus *right, int inc)
{
    int i, j;
    int shift,filter,f0,f1;
    int s_1,s_2;
    int d,s,t;

    for(i=0;i<4;i++) {
        shift = 12 - (in[4+i*2] & 15);
        filter = in[4+i*2] >> 4;
        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        s_1 = left->sample1;
        s_2 = left->sample2;

        for(j=0;j<28;j++) {
            d = in[16+i+j*4];

            t = (signed char)(d<<4)>>4;
            s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            *out = s_1;
            out += inc;
        }

        if (inc==2) { /* stereo */
            left->sample1 = s_1;
            left->sample2 = s_2;
            s_1 = right->sample1;
            s_2 = right->sample2;
            out = out + 1 - 28*2;
        }

        shift = 12 - (in[5+i*2] & 15);
        filter = in[5+i*2] >> 4;
        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        for(j=0;j<28;j++) {
            d = in[16+i+j*4];

            t = (signed char)d >> 4;
            s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            *out = s_1;
            out += inc;
        }

        if (inc==2) { /* stereo */
            right->sample1 = s_1;
            right->sample2 = s_2;
            out -= 1;
        } else {
            left->sample1 = s_1;
            left->sample2 = s_2;
        }
    }
}

/* DK3 ADPCM support macro */
#define DK3_GET_NEXT_NIBBLE() \
    if (decode_top_nibble_next) \
    { \
        nibble = last_byte >> 4; \
        decode_top_nibble_next = 0; \
    } \
    else \
    { \
        last_byte = *src++; \
        if (src >= buf + buf_size) break; \
        nibble = last_byte & 0x0F; \
        decode_top_nibble_next = 1; \
    }
static int adpcm_decode_frame(AVCodecContext *avctx,
                              void *data, int *data_size,
                              AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    ADPCMContext *c = avctx->priv_data;
    ADPCMChannelStatus *cs;
    int n, m, channel, i;
    int block_predictor[2];
    short *samples;
    short *samples_end;
    const uint8_t *src;
    int st; /* stereo */

    /* DK3 ADPCM accounting variables */
    unsigned char last_byte = 0;
    unsigned char nibble;
    int decode_top_nibble_next = 0;
    int diff_channel;

    /* EA ADPCM state variables */
    uint32_t samples_in_chunk;
    int32_t previous_left_sample, previous_right_sample;
    int32_t current_left_sample, current_right_sample;
    int32_t next_left_sample, next_right_sample;
    int32_t coeff1l, coeff2l, coeff1r, coeff2r;
    uint8_t shift_left, shift_right;
    int count1, count2;
    int coeff[2][2], shift[2];//used in EA MAXIS ADPCM

    if (!buf_size)
        return 0;

    //should protect all 4bit ADPCM variants
    //8 is needed for CODEC_ID_ADPCM_IMA_WAV with 2 channels
    //
    if(*data_size/4 < buf_size + 8)
        return -1;

    samples = data;
    samples_end= samples + *data_size/2;
    *data_size= 0;
    src = buf;

    st = avctx->channels == 2 ? 1 : 0;

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_QT:
        n = buf_size - 2*avctx->channels;
        for (channel = 0; channel < avctx->channels; channel++) {
            cs = &(c->status[channel]);
            /* (pppppp) (piiiiiii) */
            /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
            cs->predictor = (*src++) << 8;
            cs->predictor |= (*src & 0x80);
            cs->predictor &= 0xFF80;

            /* sign extension */
            if(cs->predictor & 0x8000)
                cs->predictor -= 0x10000;

            cs->predictor = av_clip_int16(cs->predictor);

            cs->step_index = (*src++) & 0x7F;

            if (cs->step_index > 88){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
                cs->step_index = 88;
            }

            cs->step = step_table[cs->step_index];

            samples = (short*)data + channel;

            for(m=32; n>0 && m>0; n--, m--) { /* in QuickTime, IMA is encoded in chunks of 34 bytes (=64 samples) */
                *samples = adpcm_ima_expand_nibble(cs, src[0] & 0x0F, 3);
                samples += avctx->channels;
                *samples = adpcm_ima_expand_nibble(cs, src[0] >> 4 , 3);
                samples += avctx->channels;
                src ++;
            }
        }
        if (st)
            samples--;
        break;
    case CODEC_ID_ADPCM_IMA_WAV:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

//      samples_per_block= (block_align-4*channels)*8 / (bits_per_sample * channels) + 1;

        for(i=0; i<avctx->channels; i++){
            cs = &(c->status[i]);
            cs->predictor = *samples++ = (int16_t)bytestream_get_le16(&src);

            cs->step_index = *src++;
            if (cs->step_index > 88){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
                cs->step_index = 88;
            }
            if (*src++) av_log(avctx, AV_LOG_ERROR, "unused byte should be null but is %d!!\n", src[-1]); /* unused */
        }

        while(src < buf + buf_size){
            for(m=0; m<4; m++){
                for(i=0; i<=st; i++)
                    *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] & 0x0F, 3);
                for(i=0; i<=st; i++)
                    *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] >> 4 , 3);
                src++;
            }
            src += 4*st;
        }
        break;
    case CODEC_ID_ADPCM_4XM:
        cs = &(c->status[0]);
        c->status[0].predictor= (int16_t)bytestream_get_le16(&src);
        if(st){
            c->status[1].predictor= (int16_t)bytestream_get_le16(&src);
        }
        c->status[0].step_index= (int16_t)bytestream_get_le16(&src);
        if(st){
            c->status[1].step_index= (int16_t)bytestream_get_le16(&src);
        }
        if (cs->step_index < 0) cs->step_index = 0;
        if (cs->step_index > 88) cs->step_index = 88;

        m= (buf_size - (src - buf))>>st;
        for(i=0; i<m; i++) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] & 0x0F, 4);
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] & 0x0F, 4);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] >> 4, 4);
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] >> 4, 4);
        }
        src += m<<st;
        break;
    case CODEC_ID_ADPCM_MS:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;
        n = buf_size - 7 * avctx->channels;
        if (n < 0)
            return -1;
        block_predictor[0] = av_clip(*src++, 0, 6);
        block_predictor[1] = 0;
        if (st)
            block_predictor[1] = av_clip(*src++, 0, 6);
        c->status[0].idelta = (int16_t)bytestream_get_le16(&src);
        if (st){
            c->status[1].idelta = (int16_t)bytestream_get_le16(&src);
        }
        c->status[0].coeff1 = AdaptCoeff1[block_predictor[0]];
        c->status[0].coeff2 = AdaptCoeff2[block_predictor[0]];
        c->status[1].coeff1 = AdaptCoeff1[block_predictor[1]];
        c->status[1].coeff2 = AdaptCoeff2[block_predictor[1]];

        c->status[0].sample1 = bytestream_get_le16(&src);
        if (st) c->status[1].sample1 = bytestream_get_le16(&src);
        c->status[0].sample2 = bytestream_get_le16(&src);
        if (st) c->status[1].sample2 = bytestream_get_le16(&src);

        *samples++ = c->status[0].sample2;
        if (st) *samples++ = c->status[1].sample2;
        *samples++ = c->status[0].sample1;
        if (st) *samples++ = c->status[1].sample1;
        for(;n>0;n--) {
            *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], src[0] >> 4 );
            *samples++ = adpcm_ms_expand_nibble(&c->status[st], src[0] & 0x0F);
            src ++;
        }
        break;
    case CODEC_ID_ADPCM_IMA_DK4:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

        c->status[0].predictor = (int16_t)bytestream_get_le16(&src);
        c->status[0].step_index = *src++;
        src++;
        *samples++ = c->status[0].predictor;
        if (st) {
            c->status[1].predictor = (int16_t)bytestream_get_le16(&src);
            c->status[1].step_index = *src++;
            src++;
            *samples++ = c->status[1].predictor;
        }
        while (src < buf + buf_size) {
            /* take care of the top nibble (always left or mono channel) */
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                src[0] >> 4, 3);
            /* take care of the bottom nibble, which is right sample for
             * stereo, or another mono sample */
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                    src[0] & 0x0F, 3);
            else
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] & 0x0F, 3);
            src++;
        }
        break;
    case CODEC_ID_ADPCM_IMA_DK3:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

        if(buf_size + 16 > (samples_end - samples)*3/8)
            return -1;

        c->status[0].predictor = (int16_t)AV_RL16(src + 10);
        c->status[1].predictor = (int16_t)AV_RL16(src + 12);
        c->status[0].step_index = src[14];
        c->status[1].step_index = src[15];
        /* sign extend the predictors */
        src += 16;
        diff_channel = c->status[1].predictor;

        /* the DK3_GET_NEXT_NIBBLE macro issues the break statement when
         * the buffer is consumed */
        while (1) {
            /* for this algorithm, c->status[0] is the sum channel and
             * c->status[1] is the diff channel */

            /* process the first predictor of the sum channel */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

            /* process the diff channel predictor */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[1], nibble, 3);

            /* process the first pair of stereo PCM samples */
            diff_channel = (diff_channel + c->status[1].predictor) / 2;
            *samples++ = c->status[0].predictor + c->status[1].predictor;
            *samples++ = c->status[0].predictor - c->status[1].predictor;

            /* process the second predictor of the sum channel */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

            /* process the second pair of stereo PCM samples */
            diff_channel = (diff_channel + c->status[1].predictor) / 2;
            *samples++ = c->status[0].predictor + c->status[1].predictor;
            *samples++ = c->status[0].predictor - c->status[1].predictor;
        }
        break;
    case CODEC_ID_ADPCM_IMA_ISS:
        c->status[0].predictor = (int16_t)AV_RL16(src + 0);
        c->status[0].step_index = src[2];
        src += 4;
        if(st) {
            c->status[1].predictor = (int16_t)AV_RL16(src + 0);
            c->status[1].step_index = src[2];
            src += 4;
        }
        while (src < buf + buf_size) {
            if (st) {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4 , 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                    src[0] & 0x0F, 3);
            } else {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] & 0x0F, 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4 , 3);
            }
            src++;
        }
        break;
    case CODEC_ID_ADPCM_IMA_WS:
        /* no per-block initialization; just start decoding the data */
        while (src < buf + buf_size) {
            if (st) {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4 , 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                    src[0] & 0x0F, 3);
            } else {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4 , 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] & 0x0F, 3);
            }
            src++;
        }
        break;
    case CODEC_ID_ADPCM_XA:
        while (buf_size >= 128) {
            xa_decode(samples, src, &c->status[0], &c->status[1],
                avctx->channels);
            src += 128;
            samples += 28 * 8;
            buf_size -= 128;
        }
        break;
    case CODEC_ID_ADPCM_IMA_EA_EACS:
        samples_in_chunk = bytestream_get_le32(&src) >> (1-st);

        if (samples_in_chunk > buf_size-4-(8<<st)) {
            src += buf_size - 4;
            break;
        }

        for (i=0; i<=st; i++)
            c->status[i].step_index = bytestream_get_le32(&src);
        for (i=0; i<=st; i++)
            c->status[i].predictor = bytestream_get_le32(&src);

        for (; samples_in_chunk; samples_in_chunk--, src++) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], *src>>4, 3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st], *src&0x0F, 3);
        }
        break;
    case CODEC_ID_ADPCM_IMA_EA_SEAD:
        for (; src < buf+buf_size; src++) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 6);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st],src[0]&0x0F, 6);
        }
        break;
    case CODEC_ID_ADPCM_EA:
        if (buf_size < 4 || AV_RL32(src) >= ((buf_size - 12) * 2)) {
            src += buf_size;
            break;
        }
        samples_in_chunk = AV_RL32(src);
        src += 4;
        current_left_sample = (int16_t)bytestream_get_le16(&src);
        previous_left_sample = (int16_t)bytestream_get_le16(&src);
        current_right_sample = (int16_t)bytestream_get_le16(&src);
        previous_right_sample = (int16_t)bytestream_get_le16(&src);

        for (count1 = 0; count1 < samples_in_chunk/28;count1++) {
            coeff1l = ea_adpcm_table[ *src >> 4 ];
            coeff2l = ea_adpcm_table[(*src >> 4 ) + 4];
            coeff1r = ea_adpcm_table[*src & 0x0F];
            coeff2r = ea_adpcm_table[(*src & 0x0F) + 4];
            src++;

            shift_left = (*src >> 4 ) + 8;
            shift_right = (*src & 0x0F) + 8;
            src++;

            for (count2 = 0; count2 < 28; count2++) {
                next_left_sample = (int32_t)((*src & 0xF0) << 24) >> shift_left;
                next_right_sample = (int32_t)((*src & 0x0F) << 28) >> shift_right;
                src++;

                next_left_sample = (next_left_sample +
                    (current_left_sample * coeff1l) +
                    (previous_left_sample * coeff2l) + 0x80) >> 8;
                next_right_sample = (next_right_sample +
                    (current_right_sample * coeff1r) +
                    (previous_right_sample * coeff2r) + 0x80) >> 8;

                previous_left_sample = current_left_sample;
                current_left_sample = av_clip_int16(next_left_sample);
                previous_right_sample = current_right_sample;
                current_right_sample = av_clip_int16(next_right_sample);
                *samples++ = (unsigned short)current_left_sample;
                *samples++ = (unsigned short)current_right_sample;
            }
        }

        if (src - buf == buf_size - 2)
            src += 2; // Skip terminating 0x0000

        break;
    case CODEC_ID_ADPCM_EA_MAXIS_XA:
        for(channel = 0; channel < avctx->channels; channel++) {
            for (i=0; i<2; i++)
                coeff[channel][i] = ea_adpcm_table[(*src >> 4) + 4*i];
            shift[channel] = (*src & 0x0F) + 8;
            src++;
        }
        for (count1 = 0; count1 < (buf_size - avctx->channels) / avctx->channels; count1++) {
            for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
                for(channel = 0; channel < avctx->channels; channel++) {
                    int32_t sample = (int32_t)(((*(src+channel) >> i) & 0x0F) << 0x1C) >> shift[channel];
                    sample = (sample +
                              c->status[channel].sample1 * coeff[channel][0] +
                              c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
                    c->status[channel].sample2 = c->status[channel].sample1;
                    c->status[channel].sample1 = av_clip_int16(sample);
                    *samples++ = c->status[channel].sample1;
                }
            }
            src+=avctx->channels;
        }
        break;
    case CODEC_ID_ADPCM_EA_R1:
    case CODEC_ID_ADPCM_EA_R2:
    case CODEC_ID_ADPCM_EA_R3: {
        /* channel numbering
           2chan: 0=fl, 1=fr
           4chan: 0=fl, 1=rl, 2=fr, 3=rr
           6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
        const int big_endian = avctx->codec->id == CODEC_ID_ADPCM_EA_R3;
        int32_t previous_sample, current_sample, next_sample;
        int32_t coeff1, coeff2;
        uint8_t shift;
        unsigned int channel;
        uint16_t *samplesC;
        const uint8_t *srcC;
        const uint8_t *src_end = buf + buf_size;

        samples_in_chunk = (big_endian ? bytestream_get_be32(&src)
                                       : bytestream_get_le32(&src)) / 28;
        if (samples_in_chunk > UINT32_MAX/(28*avctx->channels) ||
            28*samples_in_chunk*avctx->channels > samples_end-samples) {
            src += buf_size - 4;
            break;
        }

        for (channel=0; channel<avctx->channels; channel++) {
            int32_t offset = (big_endian ? bytestream_get_be32(&src)
                                         : bytestream_get_le32(&src))
                             + (avctx->channels-channel-1) * 4;

            if ((offset < 0) || (offset >= src_end - src - 4)) break;
            srcC = src + offset;
            samplesC = samples + channel;

            if (avctx->codec->id == CODEC_ID_ADPCM_EA_R1) {
                current_sample = (int16_t)bytestream_get_le16(&srcC);
                previous_sample = (int16_t)bytestream_get_le16(&srcC);
            } else {
                current_sample = c->status[channel].predictor;
                previous_sample = c->status[channel].prev_sample;
            }

            for (count1=0; count1<samples_in_chunk; count1++) {
                if (*srcC == 0xEE) { /* only seen in R2 and R3 */
                    srcC++;
                    if (srcC > src_end - 30*2) break;
                    current_sample = (int16_t)bytestream_get_be16(&srcC);
                    previous_sample = (int16_t)bytestream_get_be16(&srcC);

                    for (count2=0; count2<28; count2++) {
                        *samplesC = (int16_t)bytestream_get_be16(&srcC);
                        samplesC += avctx->channels;
                    }
                } else {
                    coeff1 = ea_adpcm_table[ *srcC>>4 ];
                    coeff2 = ea_adpcm_table[(*srcC>>4) + 4];
                    shift = (*srcC++ & 0x0F) + 8;

                    if (srcC > src_end - 14) break;
                    for (count2=0; count2<28; count2++) {
                        if (count2 & 1)
                            next_sample = (int32_t)((*srcC++ & 0x0F) << 28) >> shift;
                        else
                            next_sample = (int32_t)((*srcC & 0xF0) << 24) >> shift;

                        next_sample += (current_sample * coeff1) +
                                       (previous_sample * coeff2);
                        next_sample = av_clip_int16(next_sample >> 8);

                        previous_sample = current_sample;
                        current_sample = next_sample;
                        *samplesC = current_sample;
                        samplesC += avctx->channels;
                    }
                }
            }

            if (avctx->codec->id != CODEC_ID_ADPCM_EA_R1) {
                c->status[channel].predictor = current_sample;
                c->status[channel].prev_sample = previous_sample;
            }
        }

        src = src + buf_size - (4 + 4*avctx->channels);
        samples += 28 * samples_in_chunk * avctx->channels;
        break;
    }
    case CODEC_ID_ADPCM_EA_XAS:
        if (samples_end-samples < 32*4*avctx->channels
            || buf_size < (4+15)*4*avctx->channels) {
            src += buf_size;
            break;
        }
        for (channel=0; channel<avctx->channels; channel++) {
            int coeff[2][4], shift[4];
            short *s2, *s = &samples[channel];
            for (n=0; n<4; n++, s+=32*avctx->channels) {
                for (i=0; i<2; i++)
                    coeff[i][n] = ea_adpcm_table[(src[0]&0x0F)+4*i];
                shift[n] = (src[2]&0x0F) + 8;
                for (s2=s, i=0; i<2; i++, src+=2, s2+=avctx->channels)
                    s2[0] = (src[0]&0xF0) + (src[1]<<8);
            }

            for (m=2; m<32; m+=2) {
                s = &samples[m*avctx->channels + channel];
                for (n=0; n<4; n++, src++, s+=32*avctx->channels) {
                    for (s2=s, i=0; i<8; i+=4, s2+=avctx->channels) {
                        int level = (int32_t)((*src & (0xF0>>i)) << (24+i)) >> shift[n];
                        int pred = s2[-1*avctx->channels] * coeff[0][n]
                                 + s2[-2*avctx->channels] * coeff[1][n];
                        s2[0] = av_clip_int16((level + pred + 0x80) >> 8);
                    }
                }
            }
        }
        samples += 32*4*avctx->channels;
        break;
    case CODEC_ID_ADPCM_IMA_AMV:
    case CODEC_ID_ADPCM_IMA_SMJPEG:
        c->status[0].predictor = (int16_t)bytestream_get_le16(&src);
        c->status[0].step_index = bytestream_get_le16(&src);

        if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
            src+=4;

        while (src < buf + buf_size) {
            char hi, lo;
            lo = *src & 0x0F;
            hi = *src >> 4;

            if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
                FFSWAP(char, hi, lo);

            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                lo, 3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                hi, 3);
            src++;
        }
        break;
    case CODEC_ID_ADPCM_CT:
        while (src < buf + buf_size) {
            if (st) {
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    src[0] >> 4);
                *samples++ = adpcm_ct_expand_nibble(&c->status[1],
                    src[0] & 0x0F);
            } else {
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    src[0] >> 4);
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    src[0] & 0x0F);
            }
            src++;
        }
        break;
    case CODEC_ID_ADPCM_SBPRO_4:
    case CODEC_ID_ADPCM_SBPRO_3:
    case CODEC_ID_ADPCM_SBPRO_2:
        if (!c->status[0].step_index) {
            /* the first byte is a raw sample */
            *samples++ = 128 * (*src++ - 0x80);
            if (st)
                *samples++ = 128 * (*src++ - 0x80);
            c->status[0].step_index = 1;
        }
        if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_4) {
            while (src < buf + buf_size) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    src[0] >> 4, 4, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                    src[0] & 0x0F, 4, 0);
                src++;
            }
        } else if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_3) {
            while (src < buf + buf_size && samples + 2 < samples_end) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    src[0] >> 5 , 3, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    (src[0] >> 2) & 0x07, 3, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    src[0] & 0x03, 2, 0);
                src++;
            }
        } else {
            while (src < buf + buf_size && samples + 3 < samples_end) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    src[0] >> 6 , 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                    (src[0] >> 4) & 0x03, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    (src[0] >> 2) & 0x03, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                    src[0] & 0x03, 2, 2);
                src++;
            }
        }
        break;
    case CODEC_ID_ADPCM_SWF:
    {
        GetBitContext gb;
        const int *table;
        int k0, signmask, nb_bits, count;
        int size = buf_size*8;

        init_get_bits(&gb, buf, size);

        //read bits & initial values
        nb_bits = get_bits(&gb, 2)+2;
        //av_log(NULL,AV_LOG_INFO,"nb_bits: %d\n", nb_bits);
        table = swf_index_tables[nb_bits-2];
        k0 = 1 << (nb_bits-2);
        signmask = 1 << (nb_bits-1);

        while (get_bits_count(&gb) <= size - 22*avctx->channels) {
            for (i = 0; i < avctx->channels; i++) {
                *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
                c->status[i].step_index = get_bits(&gb, 6);
            }

            for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
                int i;

                for (i = 0; i < avctx->channels; i++) {
                    // similar to IMA adpcm
                    int delta = get_bits(&gb, nb_bits);
                    int step = step_table[c->status[i].step_index];
                    long vpdiff = 0; // vpdiff = (delta+0.5)*step/4
                    int k = k0;

                    do {
                        if (delta & k)
                            vpdiff += step;
                        step >>= 1;
                        k >>= 1;
                    } while(k);
                    vpdiff += step;

                    if (delta & signmask)
                        c->status[i].predictor -= vpdiff;
                    else
                        c->status[i].predictor += vpdiff;

                    c->status[i].step_index += table[delta & (~signmask)];

                    c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
                    c->status[i].predictor = av_clip_int16(c->status[i].predictor);

                    *samples++ = c->status[i].predictor;
                    if (samples >= samples_end) {
                        av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
                        return -1;
                    }
                }
            }
        }
        src += buf_size;
        break;
    }
    case CODEC_ID_ADPCM_YAMAHA:
        while (src < buf + buf_size) {
            if (st) {
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                    src[0] & 0x0F);
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[1],
                    src[0] >> 4 );
            } else {
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                    src[0] & 0x0F);
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                    src[0] >> 4 );
            }
            src++;
        }
        break;
    case CODEC_ID_ADPCM_THP:
    {
        int table[2][16];
        unsigned int samplecnt;
        int prev[2][2];
        int ch;

        if (buf_size < 80) {
            av_log(avctx, AV_LOG_ERROR, "frame too small\n");
            return -1;
        }

        src+=4;
        samplecnt = bytestream_get_be32(&src);

        for (i = 0; i < 32; i++)
            table[0][i] = (int16_t)bytestream_get_be16(&src);

        /* Initialize the previous sample. */
        for (i = 0; i < 4; i++)
            prev[0][i] = (int16_t)bytestream_get_be16(&src);

        if (samplecnt >= (samples_end - samples) / (st + 1)) {
            av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
            return -1;
        }

        for (ch = 0; ch <= st; ch++) {
            samples = (unsigned short *) data + ch;

            /* Read in every sample for this channel. */
            for (i = 0; i < samplecnt / 14; i++) {
                int index = (*src >> 4) & 7;
                unsigned int exp = 28 - (*src++ & 15);
                int factor1 = table[ch][index * 2];
                int factor2 = table[ch][index * 2 + 1];

                /* Decode 14 samples. */
                for (n = 0; n < 14; n++) {
                    int32_t sampledat;
                    if(n&1) sampledat= *src++ <<28;
                    else sampledat= (*src&0xF0)<<24;

                    sampledat = ((prev[ch][0]*factor1
                                + prev[ch][1]*factor2) >> 11) + (sampledat>>exp);
                    *samples = av_clip_int16(sampledat);
                    prev[ch][1] = prev[ch][0];
                    prev[ch][0] = *samples++;

                    /* In the stereo case, skip one sample; it belongs to
                       the other channel. */
                    samples += st;
                }
            }
        }

        /* In the stereo case, the loop above advances samples one step
           too far, so step back once. */
        samples -= st;
        break;
    }
    default:
        return -1;
    }
    *data_size = (uint8_t *)samples - (uint8_t *)data;
    return src - buf;
}
#if CONFIG_ENCODERS
#define ADPCM_ENCODER(id,name,long_name_) \
AVCodec name ## _encoder = { \
    #name, \
    AVMEDIA_TYPE_AUDIO, \
    id, \
    sizeof(ADPCMContext), \
    adpcm_encode_init, \
    adpcm_encode_frame, \
    adpcm_encode_close, \
    NULL, \
    .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, \
    .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
};
#else
#define ADPCM_ENCODER(id,name,long_name_)
#endif

#if CONFIG_DECODERS
#define ADPCM_DECODER(id,name,long_name_) \
AVCodec name ## _decoder = { \
    #name, \
    AVMEDIA_TYPE_AUDIO, \
    id, \
    sizeof(ADPCMContext), \
    adpcm_decode_init, \
    NULL, \
    NULL, \
    adpcm_decode_frame, \
    .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
};
#else
#define ADPCM_DECODER(id,name,long_name_)
#endif

#define ADPCM_CODEC(id,name,long_name_) \
    ADPCM_ENCODER(id,name,long_name_) ADPCM_DECODER(id,name,long_name_)

/* Note: Do not forget to add new entries to the Makefile as well. */
ADPCM_DECODER(CODEC_ID_ADPCM_4XM, adpcm_4xm, "ADPCM 4X Movie");
ADPCM_DECODER(CODEC_ID_ADPCM_CT, adpcm_ct, "ADPCM Creative Technology");
ADPCM_DECODER(CODEC_ID_ADPCM_EA, adpcm_ea, "ADPCM Electronic Arts");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_MAXIS_XA, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R1, adpcm_ea_r1, "ADPCM Electronic Arts R1");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R2, adpcm_ea_r2, "ADPCM Electronic Arts R2");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R3, adpcm_ea_r3, "ADPCM Electronic Arts R3");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_XAS, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_AMV, adpcm_ima_amv, "ADPCM IMA AMV");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_SEAD, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_ISS, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
ADPCM_CODEC  (CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
ADPCM_CODEC  (CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws, "ADPCM IMA Westwood");
ADPCM_CODEC  (CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_2, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_3, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_4, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
ADPCM_CODEC  (CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash");
ADPCM_DECODER(CODEC_ID_ADPCM_THP, adpcm_thp, "ADPCM Nintendo Gamecube THP");
ADPCM_DECODER(CODEC_ID_ADPCM_XA, adpcm_xa, "ADPCM CDROM XA");
ADPCM_CODEC  (CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");