/*
 * Copyright (c) 2001-2003 The FFmpeg project
 *
 * first version by Francois Revol (revol@free.fr)
 * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
 *   by Mike Melanson (melanson@pcisys.net)
 * CD-ROM XA ADPCM codec by BERO
 * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
 * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
 * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
 * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
 * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
 * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
 * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
 * Argonaut Games ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
 * Simon & Schuster Interactive ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
 * Ubisoft ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
 * High Voltage Software ALP decoder by Zane van Iperen (zane@zanevaniperen.com)
 * Cunning Developments decoder by Zane van Iperen (zane@zanevaniperen.com)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "get_bits.h"
#include "bytestream.h"
#include "adpcm.h"
#include "adpcm_data.h"
#include "internal.h"

/**
 * @file
 * ADPCM decoders
 * Features and limitations:
 *
 * Reference documents:
 * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
 * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
 * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
 * http://openquicktime.sourceforge.net/
 * XAnim sources (xa_codec.c) http://xanim.polter.net/
 * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
 * SoX source code http://sox.sourceforge.net/
 *
 * CD-ROM XA:
 * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
 * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
 * readstr http://www.geocities.co.jp/Playtown/2004/
 */

/* These are for CD-ROM XA ADPCM */
static const int8_t xa_adpcm_table[5][2] = {
    {   0,   0 },
    {  60,   0 },
    { 115, -52 },
    {  98, -55 },
    { 122, -60 }
};

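/* Prediction coefficients shared by the EA family of decoders below, in
 * 8-bit fixed point: for a 4-bit filter index f, coeff1 = ea_adpcm_table[f]
 * and coeff2 = ea_adpcm_table[f + 4]; predictions are computed as
 * (cur * coeff1 + prev * coeff2 + 0x80) >> 8. The first four pairs are the
 * XA filter pairs above scaled by 4. */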
static const int16_t ea_adpcm_table[] = {
    0,  240,  460,  392,
    0,    0, -208, -220,
    0,    1,    3,    4,
    7,    8,   10,   11,
    0,   -1,   -3,   -4
};

// padded to zero where table size is less than 16
static const int8_t swf_index_tables[4][16] = {
    /*2*/ { -1, 2 },
    /*3*/ { -1, -1, 2, 4 },
    /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
    /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
};

static const int8_t zork_index_table[8] = {
    -1, -1, -1, 1, 4, 7, 10, 12,
};

static const int8_t mtf_index_table[16] = {
     8,  6,  4,  2, -1, -1, -1, -1,
    -1, -1, -1, -1,  2,  4,  6,  8,
};

/* end of tables */

typedef struct ADPCMDecodeContext {
    ADPCMChannelStatus status[14];
    int vqa_version;                /**< VQA version. Used for ADPCM_IMA_WS */
    int has_status;
} ADPCMDecodeContext;

static av_cold int adpcm_decode_init(AVCodecContext * avctx)
{
    ADPCMDecodeContext *c = avctx->priv_data;
    unsigned int min_channels = 1;
    unsigned int max_channels = 2;

    switch(avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_IMA_CUNNING:
        max_channels = 1;
        break;
    case AV_CODEC_ID_ADPCM_DTK:
    case AV_CODEC_ID_ADPCM_EA:
        min_channels = 2;
        break;
    case AV_CODEC_ID_ADPCM_AFC:
    case AV_CODEC_ID_ADPCM_EA_R1:
    case AV_CODEC_ID_ADPCM_EA_R2:
    case AV_CODEC_ID_ADPCM_EA_R3:
    case AV_CODEC_ID_ADPCM_EA_XAS:
    case AV_CODEC_ID_ADPCM_MS:
        max_channels = 6;
        break;
    case AV_CODEC_ID_ADPCM_MTAF:
        min_channels = 2;
        max_channels = 8;
        if (avctx->channels & 1) {
            avpriv_request_sample(avctx, "channel count %d\n", avctx->channels);
            return AVERROR_PATCHWELCOME;
        }
        break;
    case AV_CODEC_ID_ADPCM_PSX:
        max_channels = 8;
        break;
    case AV_CODEC_ID_ADPCM_IMA_DAT4:
    case AV_CODEC_ID_ADPCM_THP:
    case AV_CODEC_ID_ADPCM_THP_LE:
        max_channels = 14;
        break;
    }
    if (avctx->channels < min_channels || avctx->channels > max_channels) {
        av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
        return AVERROR(EINVAL);
    }

    switch(avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_CT:
        c->status[0].step = c->status[1].step = 511;
        break;
    case AV_CODEC_ID_ADPCM_IMA_WAV:
        if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
            return AVERROR_INVALIDDATA;
        break;
    case AV_CODEC_ID_ADPCM_IMA_APC:
        if (avctx->extradata && avctx->extradata_size >= 8) {
            c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata    ), 18);
            c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_APM:
        if (avctx->extradata) {
            if (avctx->extradata_size >= 28) {
                c->status[0].predictor  = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);
                c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 20), 0, 88);
                c->status[1].predictor  = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
                c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 8), 0, 88);
            } else if (avctx->extradata_size >= 16) {
                c->status[0].predictor  = av_clip_intp2(AV_RL32(avctx->extradata +  0), 18);
                c->status[0].step_index = av_clip(AV_RL32(avctx->extradata +  4), 0, 88);
                c->status[1].predictor  = av_clip_intp2(AV_RL32(avctx->extradata +  8), 18);
                c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 12), 0, 88);
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_WS:
        if (avctx->extradata && avctx->extradata_size >= 2)
            c->vqa_version = AV_RL16(avctx->extradata);
        break;
    case AV_CODEC_ID_ADPCM_ARGO:
        if (avctx->bits_per_coded_sample != 4 || avctx->block_align != 17 * avctx->channels)
            return AVERROR_INVALIDDATA;
        break;
    case AV_CODEC_ID_ADPCM_ZORK:
        if (avctx->bits_per_coded_sample != 8)
            return AVERROR_INVALIDDATA;
        break;
    default:
        break;
    }

    switch (avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_AICA:
    case AV_CODEC_ID_ADPCM_IMA_DAT4:
    case AV_CODEC_ID_ADPCM_IMA_QT:
    case AV_CODEC_ID_ADPCM_IMA_WAV:
    case AV_CODEC_ID_ADPCM_4XM:
    case AV_CODEC_ID_ADPCM_XA:
    case AV_CODEC_ID_ADPCM_EA_R1:
    case AV_CODEC_ID_ADPCM_EA_R2:
    case AV_CODEC_ID_ADPCM_EA_R3:
    case AV_CODEC_ID_ADPCM_EA_XAS:
    case AV_CODEC_ID_ADPCM_THP:
    case AV_CODEC_ID_ADPCM_THP_LE:
    case AV_CODEC_ID_ADPCM_AFC:
    case AV_CODEC_ID_ADPCM_DTK:
    case AV_CODEC_ID_ADPCM_PSX:
    case AV_CODEC_ID_ADPCM_MTAF:
    case AV_CODEC_ID_ADPCM_ARGO:
    case AV_CODEC_ID_ADPCM_IMA_MOFLEX:
        avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
        break;
    case AV_CODEC_ID_ADPCM_IMA_WS:
        avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
                                                  AV_SAMPLE_FMT_S16;
        break;
    case AV_CODEC_ID_ADPCM_MS:
        avctx->sample_fmt = avctx->channels > 2 ? AV_SAMPLE_FMT_S16P :
                                                  AV_SAMPLE_FMT_S16;
        break;
    default:
        avctx->sample_fmt = AV_SAMPLE_FMT_S16;
    }
    return 0;
}

static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
{
    int delta, pred, step, add;

    pred = c->predictor;
    delta = nibble & 7;
    step = c->step;
    add = (delta * 2 + 1) * step;
    if (add < 0)
        add = add + 7;

    if ((nibble & 8) == 0)
        pred = av_clip(pred + (add >> 3), -32767, 32767);
    else
        pred = av_clip(pred - (add >> 3), -32767, 32767);

    switch (delta) {
    case 7:
        step *= 0x99;
        break;
    case 6:
        c->step = av_clip(c->step * 2, 127, 24576);
        c->predictor = pred;
        return pred;
    case 5:
        step *= 0x66;
        break;
    case 4:
        step *= 0x4d;
        break;
    default:
        step *= 0x39;
        break;
    }

    if (step < 0)
        step += 0x3f;

    c->step = step >> 6;
    c->step = av_clip(c->step, 127, 24576);
    c->predictor = pred;
    return pred;
}

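/* Standard IMA ADPCM nibble expansion: bit 3 of the nibble is the sign,
 * bits 0-2 the magnitude, and the step size is adapted through
 * ff_adpcm_index_table / ff_adpcm_step_table with step_index clamped to
 * 0..88. With the usual shift of 3, diff = ((2 * delta + 1) * step) >> 3;
 * e.g. a nibble of 5 (sign clear, delta 5) at step 16 adds
 * (11 * 16) >> 3 = 22 to the predictor. Callers pass a different shift
 * where a format scales the code differently (e.g. 4 for 4XM, 6 for
 * IMA SEAD below). */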
static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
{
    int step_index;
    int predictor;
    int sign, delta, diff, step;

    step = ff_adpcm_step_table[c->step_index];
    step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
    step_index = av_clip(step_index, 0, 88);

    sign = nibble & 8;
    delta = nibble & 7;
    /* perform direct multiplication instead of series of jumps proposed by
     * the reference ADPCM implementation since modern CPUs can do the mults
     * quickly enough */
    diff = ((2 * delta + 1) * step) >> shift;
    predictor = c->predictor;
    if (sign) predictor -= diff;
    else predictor += diff;

    c->predictor = av_clip_int16(predictor);
    c->step_index = step_index;

    return (int16_t)c->predictor;
}

static inline int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
{
    int step_index;
    int predictor;
    int sign, delta, diff, step;

    step = ff_adpcm_step_table[c->step_index];
    step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
    step_index = av_clip(step_index, 0, 88);

    sign = nibble & 8;
    delta = nibble & 7;
    diff = (delta * step) >> shift;
    predictor = c->predictor;
    if (sign) predictor -= diff;
    else predictor += diff;

    c->predictor = av_clip_int16(predictor);
    c->step_index = step_index;

    return (int16_t)c->predictor;
}

static inline int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
{
    int step_index, step, delta, predictor;

    step = ff_adpcm_step_table[c->step_index];

    delta = step * (2 * nibble - 15);
    predictor = c->predictor + delta;

    step_index = c->step_index + mtf_index_table[(unsigned)nibble];
    c->predictor = av_clip_int16(predictor >> 4);
    c->step_index = av_clip(step_index, 0, 88);

    return (int16_t)c->predictor;
}

static inline int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
{
    int step_index;
    int predictor;
    int step;

    nibble = sign_extend(nibble & 0xF, 4);

    step = ff_adpcm_ima_cunning_step_table[c->step_index];
    step_index = c->step_index + ff_adpcm_ima_cunning_index_table[abs(nibble)];
    step_index = av_clip(step_index, 0, 60);

    predictor = c->predictor + step * nibble;

    c->predictor = av_clip_int16(predictor);
    c->step_index = step_index;

    return c->predictor;
}

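/* IMA WAV variant with a configurable code width (2-5 bits per sample, as
 * validated in adpcm_decode_init()). Codes are read LSB-first with
 * get_bits_le(); the top bit of each code is the sign and the remaining
 * bits the magnitude, so the 4-bit case reduces to the standard expansion
 * above. ff_adpcm_index_tables[bps - 2] selects the matching step-index
 * adaptation table. */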
static inline int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
{
    int nibble, step_index, predictor, sign, delta, diff, step, shift;

    shift = bps - 1;
    nibble = get_bits_le(gb, bps);
    step = ff_adpcm_step_table[c->step_index];
    step_index = c->step_index + ff_adpcm_index_tables[bps - 2][nibble];
    step_index = av_clip(step_index, 0, 88);

    sign = nibble & (1 << shift);
    delta = av_mod_uintp2(nibble, shift);
    diff = ((2 * delta + 1) * step) >> shift;
    predictor = c->predictor;
    if (sign) predictor -= diff;
    else predictor += diff;

    c->predictor = av_clip_int16(predictor);
    c->step_index = step_index;

    return (int16_t)c->predictor;
}

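/* QuickTime-style IMA expansion: instead of the multiply used above, the
 * difference is built with the shift-and-add scheme of the IMA reference
 * code (step >> 3, plus step, step >> 1 and step >> 2 gated by the three
 * magnitude bits), which is equivalent up to rounding. */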
static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
{
    int step_index;
    int predictor;
    int diff, step;

    step = ff_adpcm_step_table[c->step_index];
    step_index = c->step_index + ff_adpcm_index_table[nibble];
    step_index = av_clip(step_index, 0, 88);

    diff = step >> 3;
    if (nibble & 4) diff += step;
    if (nibble & 2) diff += step >> 1;
    if (nibble & 1) diff += step >> 2;
    if (nibble & 8)
        predictor = c->predictor - diff;
    else
        predictor = c->predictor + diff;

    c->predictor = av_clip_int16(predictor);
    c->step_index = step_index;

    return c->predictor;
}

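/* MS ADPCM: the prediction is a second-order filter over the previous two
 * samples plus the signed nibble scaled by the adaptive delta. The division
 * by 64 (rather than the 256 of the MS specification) matches the
 * ff_adpcm_AdaptCoeff1/2 tables, which appear to be stored pre-divided
 * by 4. */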
static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
{
    int predictor;

    predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
    predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);
    c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
    if (c->idelta < 16) c->idelta = 16;
    if (c->idelta > INT_MAX/768) {
        av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
        c->idelta = INT_MAX/768;
    }

    return c->sample1;
}

static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
{
    int step_index, predictor, sign, delta, diff, step;

    step = ff_adpcm_oki_step_table[c->step_index];
    step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
    step_index = av_clip(step_index, 0, 48);

    sign = nibble & 8;
    delta = nibble & 7;
    diff = ((2 * delta + 1) * step) >> 3;
    predictor = c->predictor;
    if (sign) predictor -= diff;
    else predictor += diff;

    c->predictor = av_clip_intp2(predictor, 11);
    c->step_index = step_index;

    return c->predictor * 16;
}

static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
{
    int sign, delta, diff;
    int new_step;

    sign = nibble & 8;
    delta = nibble & 7;
    /* perform direct multiplication instead of series of jumps proposed by
     * the reference ADPCM implementation since modern CPUs can do the mults
     * quickly enough */
    diff = ((2 * delta + 1) * c->step) >> 3;
    /* the predictor update is not so trivial: the predictor is multiplied
     * by 254/256 before the difference is added */
    c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
    c->predictor = av_clip_int16(c->predictor);
    /* calculate the new step and clamp it to the range 511..32767 */
    new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
    c->step = av_clip(new_step, 511, 32767);

    return (int16_t)c->predictor;
}

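/* Sound Blaster Pro 2-, 2.6- and 4-bit ADPCM: 'size' is the code width, the
 * top bit of the code is the sign, and c->step is a small scale exponent
 * (0..3) that is raised on large codes and lowered on zero codes. */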
static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
{
    int sign, delta, diff;

    sign = nibble & (1<<(size-1));
    delta = nibble & ((1<<(size-1))-1);
    diff = delta << (7 + c->step + shift);

    /* clamp result */
    c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);

    /* calculate new step */
    if (delta >= (2*size - 3) && c->step < 3)
        c->step++;
    else if (delta == 0 && c->step > 0)
        c->step--;

    return (int16_t) c->predictor;
}

static inline int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
{
    if(!c->step) {
        c->predictor = 0;
        c->step = 127;
    }

    c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
    c->predictor = av_clip_int16(c->predictor);
    c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
    c->step = av_clip(c->step, 127, 24576);
    return c->predictor;
}

static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
{
    c->predictor += ff_adpcm_mtaf_stepsize[c->step][nibble];
    c->predictor = av_clip_int16(c->predictor);
    c->step += ff_adpcm_index_table[nibble];
    c->step = av_clip_uintp2(c->step, 5);
    return c->predictor;
}

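/* Zork ADPCM uses a full byte per sample: bit 7 is the sign, bits 6 down
 * to 0 add step, step >> 1, ..., step >> 6 respectively, and bits 4-6 also
 * drive the step-index adaptation via zork_index_table. */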
static inline int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
{
    int16_t index = c->step_index;
    uint32_t lookup_sample = ff_adpcm_step_table[index];
    int32_t sample = 0;

    if (nibble & 0x40)
        sample += lookup_sample;
    if (nibble & 0x20)
        sample += lookup_sample >> 1;
    if (nibble & 0x10)
        sample += lookup_sample >> 2;
    if (nibble & 0x08)
        sample += lookup_sample >> 3;
    if (nibble & 0x04)
        sample += lookup_sample >> 4;
    if (nibble & 0x02)
        sample += lookup_sample >> 5;
    if (nibble & 0x01)
        sample += lookup_sample >> 6;
    if (nibble & 0x80)
        sample = -sample;

    sample += c->predictor;
    sample = av_clip_int16(sample);

    index += zork_index_table[(nibble >> 4) & 7];
    index = av_clip(index, 0, 88);

    c->predictor = sample;
    c->step_index = index;

    return sample;
}

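/* CD-ROM XA ADPCM: each 128-byte sound group carries 8 sound units of 28
 * samples (224 samples total). The shift/filter parameters for the 8 units
 * sit in header bytes 4-11 (the remaining header bytes appear to be
 * redundant copies and are ignored here), and the data bytes starting at
 * offset 16 pack two units per byte, low nibble first. Prediction uses the
 * .6 fixed-point filter pairs from xa_adpcm_table. */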
static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
                     const uint8_t *in, ADPCMChannelStatus *left,
                     ADPCMChannelStatus *right, int channels, int sample_offset)
{
    int i, j;
    int shift,filter,f0,f1;
    int s_1,s_2;
    int d,s,t;

    out0 += sample_offset;
    if (channels == 1)
        out1 = out0 + 28;
    else
        out1 += sample_offset;

    for(i=0;i<4;i++) {
        shift  = 12 - (in[4+i*2] & 15);
        filter = in[4+i*2] >> 4;
        if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
            avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
            filter=0;
        }
        if (shift < 0) {
            avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
            shift = 0;
        }
        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        s_1 = left->sample1;
        s_2 = left->sample2;

        for(j=0;j<28;j++) {
            d = in[16+i+j*4];

            t = sign_extend(d, 4);
            s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            out0[j] = s_1;
        }

        if (channels == 2) {
            left->sample1 = s_1;
            left->sample2 = s_2;
            s_1 = right->sample1;
            s_2 = right->sample2;
        }

        shift  = 12 - (in[5+i*2] & 15);
        filter = in[5+i*2] >> 4;
        if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
            avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
            filter=0;
        }
        if (shift < 0) {
            avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
            shift = 0;
        }

        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        for(j=0;j<28;j++) {
            d = in[16+i+j*4];

            t = sign_extend(d >> 4, 4);
            s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            out1[j] = s_1;
        }

        if (channels == 2) {
            right->sample1 = s_1;
            right->sample2 = s_2;
        } else {
            left->sample1 = s_1;
            left->sample2 = s_2;
        }

        out0 += 28 * (3 - channels);
        out1 += 28 * (3 - channels);
    }

    return 0;
}

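/* SWF ADPCM: the packet starts with a 2-bit code giving the code width
 * (2-5 bits). Each block then stores, per channel, a 16-bit initial sample
 * and a 6-bit step index, followed by up to 4095 coded deltas per channel,
 * expanded with the index table matching the chosen width. */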
static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
{
    ADPCMDecodeContext *c = avctx->priv_data;
    GetBitContext gb;
    const int8_t *table;
    int k0, signmask, nb_bits, count;
    int size = buf_size*8;
    int i;

    init_get_bits(&gb, buf, size);

    //read bits & initial values
    nb_bits = get_bits(&gb, 2)+2;
    table = swf_index_tables[nb_bits-2];
    k0 = 1 << (nb_bits-2);
    signmask = 1 << (nb_bits-1);

    while (get_bits_count(&gb) <= size - 22*avctx->channels) {
        for (i = 0; i < avctx->channels; i++) {
            *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
            c->status[i].step_index = get_bits(&gb, 6);
        }

        for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
            int i;

            for (i = 0; i < avctx->channels; i++) {
                // similar to IMA adpcm
                int delta = get_bits(&gb, nb_bits);
                int step = ff_adpcm_step_table[c->status[i].step_index];
                int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
                int k = k0;

                do {
                    if (delta & k)
                        vpdiff += step;
                    step >>= 1;
                    k >>= 1;
                } while(k);
                vpdiff += step;

                if (delta & signmask)
                    c->status[i].predictor -= vpdiff;
                else
                    c->status[i].predictor += vpdiff;

                c->status[i].step_index += table[delta & (~signmask)];

                c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
                c->status[i].predictor = av_clip_int16(c->status[i].predictor);

                *samples++ = c->status[i].predictor;
            }
        }
    }
}

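/* Argonaut Games ADPCM: the 4-bit code is scaled by the block shift and
 * combined with a predictor held at 4x scale: with 'flag' set the
 * prediction is 8 * sample1 - 4 * sample2 (i.e. 2 * s1 - s2 after the
 * final >> 2), otherwise 4 * sample1 (i.e. s1). */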
int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
{
    int sample = sign_extend(nibble, 4) * (1 << shift);

    if (flag)
        sample += (8 * cs->sample1) - (4 * cs->sample2);
    else
        sample += 4 * cs->sample1;

    sample = av_clip_int16(sample >> 2);

    cs->sample2 = cs->sample1;
    cs->sample1 = sample;

    return sample;
}

/**
 * Get the number of samples (per channel) that will be decoded from the packet.
 * In one case, this is actually the maximum number of samples possible to
 * decode with the given buf_size.
 *
 * @param[out] coded_samples set to the number of samples as coded in the
 *                           packet, or 0 if the codec does not encode the
 *                           number of samples in each frame.
 * @param[out] approx_nb_samples set to non-zero if the number of samples
 *                               returned is an approximation.
 */
static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
                          int buf_size, int *coded_samples, int *approx_nb_samples)
{
    ADPCMDecodeContext *s = avctx->priv_data;
    int nb_samples = 0;
    int ch = avctx->channels;
    int has_coded_samples = 0;
    int header_size;

    *coded_samples = 0;
    *approx_nb_samples = 0;

    if(ch <= 0)
        return 0;

    switch (avctx->codec->id) {
    /* constant, only check buf_size */
    case AV_CODEC_ID_ADPCM_EA_XAS:
        if (buf_size < 76 * ch)
            return 0;
        nb_samples = 128;
        break;
    case AV_CODEC_ID_ADPCM_IMA_QT:
        if (buf_size < 34 * ch)
            return 0;
        nb_samples = 64;
        break;
    /* simple 4-bit adpcm */
    case AV_CODEC_ID_ADPCM_CT:
    case AV_CODEC_ID_ADPCM_IMA_APC:
    case AV_CODEC_ID_ADPCM_IMA_CUNNING:
    case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
    case AV_CODEC_ID_ADPCM_IMA_OKI:
    case AV_CODEC_ID_ADPCM_IMA_WS:
    case AV_CODEC_ID_ADPCM_YAMAHA:
    case AV_CODEC_ID_ADPCM_AICA:
    case AV_CODEC_ID_ADPCM_IMA_SSI:
    case AV_CODEC_ID_ADPCM_IMA_APM:
    case AV_CODEC_ID_ADPCM_IMA_ALP:
    case AV_CODEC_ID_ADPCM_IMA_MTF:
        nb_samples = buf_size * 2 / ch;
        break;
    }
    if (nb_samples)
        return nb_samples;

    /* simple 4-bit adpcm, with header */
    header_size = 0;
    switch (avctx->codec->id) {
        case AV_CODEC_ID_ADPCM_4XM:
        case AV_CODEC_ID_ADPCM_AGM:
        case AV_CODEC_ID_ADPCM_IMA_DAT4:
        case AV_CODEC_ID_ADPCM_IMA_MOFLEX:
        case AV_CODEC_ID_ADPCM_IMA_ISS:     header_size = 4 * ch;      break;
        case AV_CODEC_ID_ADPCM_IMA_AMV:     header_size = 8;           break;
        case AV_CODEC_ID_ADPCM_IMA_SMJPEG:  header_size = 4 * ch;      break;
    }
    if (header_size > 0)
        return (buf_size - header_size) * 2 / ch;

    /* more complex formats */
    switch (avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_EA:
        has_coded_samples = 1;
        *coded_samples  = bytestream2_get_le32(gb);
        *coded_samples -= *coded_samples % 28;
        nb_samples      = (buf_size - 12) / 30 * 28;
        break;
    case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
        has_coded_samples = 1;
        *coded_samples = bytestream2_get_le32(gb);
        nb_samples     = (buf_size - (4 + 8 * ch)) * 2 / ch;
        break;
    case AV_CODEC_ID_ADPCM_EA_MAXIS_XA:
        nb_samples = (buf_size - ch) / ch * 2;
        break;
    case AV_CODEC_ID_ADPCM_EA_R1:
    case AV_CODEC_ID_ADPCM_EA_R2:
    case AV_CODEC_ID_ADPCM_EA_R3:
        /* maximum number of samples */
        /* has internal offsets and a per-frame switch to signal raw 16-bit */
        has_coded_samples = 1;
        switch (avctx->codec->id) {
        case AV_CODEC_ID_ADPCM_EA_R1:
            header_size    = 4 + 9 * ch;
            *coded_samples = bytestream2_get_le32(gb);
            break;
        case AV_CODEC_ID_ADPCM_EA_R2:
            header_size    = 4 + 5 * ch;
            *coded_samples = bytestream2_get_le32(gb);
            break;
        case AV_CODEC_ID_ADPCM_EA_R3:
            header_size    = 4 + 5 * ch;
            *coded_samples = bytestream2_get_be32(gb);
            break;
        }
        *coded_samples -= *coded_samples % 28;
        nb_samples      = (buf_size - header_size) * 2 / ch;
        nb_samples     -= nb_samples % 28;
        *approx_nb_samples = 1;
        break;
    case AV_CODEC_ID_ADPCM_IMA_DK3:
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
        break;
    case AV_CODEC_ID_ADPCM_IMA_DK4:
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        if (buf_size < 4 * ch)
            return AVERROR_INVALIDDATA;
        nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
        break;
    case AV_CODEC_ID_ADPCM_IMA_RAD:
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        nb_samples = (buf_size - 4 * ch) * 2 / ch;
        break;
    case AV_CODEC_ID_ADPCM_IMA_WAV:
    {
        int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
        int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        if (buf_size < 4 * ch)
            return AVERROR_INVALIDDATA;
        nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
        break;
    }
    case AV_CODEC_ID_ADPCM_MS:
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        nb_samples = (buf_size - 6 * ch) * 2 / ch;
        break;
    case AV_CODEC_ID_ADPCM_MTAF:
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
        break;
    case AV_CODEC_ID_ADPCM_SBPRO_2:
    case AV_CODEC_ID_ADPCM_SBPRO_3:
    case AV_CODEC_ID_ADPCM_SBPRO_4:
    {
        int samples_per_byte;
        switch (avctx->codec->id) {
        case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
        case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
        case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
        }
        if (!s->status[0].step_index) {
            if (buf_size < ch)
                return AVERROR_INVALIDDATA;
            nb_samples++;
            buf_size -= ch;
        }
        nb_samples += buf_size * samples_per_byte / ch;
        break;
    }
    case AV_CODEC_ID_ADPCM_SWF:
    {
        int buf_bits = buf_size * 8 - 2;
        int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
        int block_hdr_size = 22 * ch;
        int block_size = block_hdr_size + nbits * ch * 4095;
        int nblocks = buf_bits / block_size;
        int bits_left = buf_bits - nblocks * block_size;
        nb_samples = nblocks * 4096;
        if (bits_left >= block_hdr_size)
            nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
        break;
    }
    case AV_CODEC_ID_ADPCM_THP:
    case AV_CODEC_ID_ADPCM_THP_LE:
        if (avctx->extradata) {
            nb_samples = buf_size * 14 / (8 * ch);
            break;
        }
        has_coded_samples = 1;
        bytestream2_skip(gb, 4); // channel size
        *coded_samples  = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
                          bytestream2_get_le32(gb) :
                          bytestream2_get_be32(gb);
        buf_size       -= 8 + 36 * ch;
        buf_size       /= ch;
        nb_samples      = buf_size / 8 * 14;
        if (buf_size % 8 > 1)
            nb_samples     += (buf_size % 8 - 1) * 2;
        *approx_nb_samples = 1;
        break;
    case AV_CODEC_ID_ADPCM_AFC:
        nb_samples = buf_size / (9 * ch) * 16;
        break;
    case AV_CODEC_ID_ADPCM_XA:
        nb_samples = (buf_size / 128) * 224 / ch;
        break;
    case AV_CODEC_ID_ADPCM_DTK:
    case AV_CODEC_ID_ADPCM_PSX:
        nb_samples = buf_size / (16 * ch) * 28;
        break;
    case AV_CODEC_ID_ADPCM_ARGO:
        nb_samples = buf_size / avctx->block_align * 32;
        break;
    case AV_CODEC_ID_ADPCM_ZORK:
        nb_samples = buf_size / ch;
        break;
    }

    /* validate coded sample count */
    if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
        return AVERROR_INVALIDDATA;

    return nb_samples;
}

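/* Decode one packet: get_nb_samples() above determines the per-channel
 * output size, ff_get_buffer() allocates the frame, and the per-codec
 * switch then writes either interleaved output ('samples') or planar
 * per-channel output ('samples_p'), matching the sample format chosen in
 * adpcm_decode_init(). */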
static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame = data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    ADPCMDecodeContext *c = avctx->priv_data;
    ADPCMChannelStatus *cs;
    int n, m, channel, i;
    int16_t *samples;
    int16_t **samples_p;
    int st; /* stereo */
    int count1, count2;
    int nb_samples, coded_samples, approx_nb_samples, ret;
    GetByteContext gb;

    bytestream2_init(&gb, buf, buf_size);
    nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
    if (nb_samples <= 0) {
        av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
        return AVERROR_INVALIDDATA;
    }

    /* get output buffer */
    frame->nb_samples = nb_samples;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;
    samples = (int16_t *)frame->data[0];
    samples_p = (int16_t **)frame->extended_data;

    /* use coded_samples when applicable */
    /* it is always <= nb_samples, so the output buffer will be large enough */
    if (coded_samples) {
        if (!approx_nb_samples && coded_samples != nb_samples)
            av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
        frame->nb_samples = nb_samples = coded_samples;
    }

    st = avctx->channels == 2 ? 1 : 0;

    switch(avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_IMA_QT:
        /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
           Channel data is interleaved per-chunk. */
        for (channel = 0; channel < avctx->channels; channel++) {
            int predictor;
            int step_index;
            cs = &(c->status[channel]);
            /* (pppppp) (piiiiiii) */

            /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
            predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
            step_index = predictor & 0x7F;
            predictor &= ~0x7F;

            if (cs->step_index == step_index) {
                int diff = predictor - cs->predictor;
                if (diff < 0)
                    diff = - diff;
                if (diff > 0x7f)
                    goto update;
            } else {
            update:
                cs->step_index = step_index;
                cs->predictor = predictor;
            }

            if (cs->step_index > 88u){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
                       channel, cs->step_index);
                return AVERROR_INVALIDDATA;
            }

            samples = samples_p[channel];

            for (m = 0; m < 64; m += 2) {
                int byte = bytestream2_get_byteu(&gb);
                samples[m    ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
                samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4  );
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_WAV:
        for(i=0; i<avctx->channels; i++){
            cs = &(c->status[i]);
            cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);

            cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
            if (cs->step_index > 88u){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
                       i, cs->step_index);
                return AVERROR_INVALIDDATA;
            }
        }

        if (avctx->bits_per_coded_sample != 4) {
            int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
            int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
            uint8_t temp[20 + AV_INPUT_BUFFER_PADDING_SIZE] = { 0 };
            GetBitContext g;

            for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
                for (i = 0; i < avctx->channels; i++) {
                    int j;

                    cs = &c->status[i];
                    samples = &samples_p[i][1 + n * samples_per_block];
                    for (j = 0; j < block_size; j++) {
                        temp[j] = buf[4 * avctx->channels + block_size * n * avctx->channels +
                                      (j % 4) + (j / 4) * (avctx->channels * 4) + i * 4];
                    }
                    ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
                    if (ret < 0)
                        return ret;
                    for (m = 0; m < samples_per_block; m++) {
                        samples[m] = adpcm_ima_wav_expand_nibble(cs, &g,
                                          avctx->bits_per_coded_sample);
                    }
                }
            }
            bytestream2_skip(&gb, avctx->block_align - avctx->channels * 4);
        } else {
            for (n = 0; n < (nb_samples - 1) / 8; n++) {
                for (i = 0; i < avctx->channels; i++) {
                    cs = &c->status[i];
                    samples = &samples_p[i][1 + n * 8];
                    for (m = 0; m < 8; m += 2) {
                        int v = bytestream2_get_byteu(&gb);
                        samples[m    ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
                        samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4  , 3);
                    }
                }
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_4XM:
        for (i = 0; i < avctx->channels; i++)
            c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);

        for (i = 0; i < avctx->channels; i++) {
            c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
            if (c->status[i].step_index > 88u) {
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
                       i, c->status[i].step_index);
                return AVERROR_INVALIDDATA;
            }
        }

        for (i = 0; i < avctx->channels; i++) {
            samples = (int16_t *)frame->data[i];
            cs = &c->status[i];
            for (n = nb_samples >> 1; n > 0; n--) {
                int v = bytestream2_get_byteu(&gb);
                *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
                *samples++ = adpcm_ima_expand_nibble(cs, v >> 4  , 4);
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_AGM:
        for (i = 0; i < avctx->channels; i++)
            c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
        for (i = 0; i < avctx->channels; i++)
            c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);

        for (n = 0; n < nb_samples >> (1 - st); n++) {
            int v = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
            *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
        }
        break;
    case AV_CODEC_ID_ADPCM_MS:
    {
        int block_predictor;

        if (avctx->channels > 2) {
            for (channel = 0; channel < avctx->channels; channel++) {
                samples = samples_p[channel];
                block_predictor = bytestream2_get_byteu(&gb);
                if (block_predictor > 6) {
                    av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
                           channel, block_predictor);
                    return AVERROR_INVALIDDATA;
                }
                c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
                c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
                c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
                c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
                c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
                *samples++ = c->status[channel].sample2;
                *samples++ = c->status[channel].sample1;
                for(n = (nb_samples - 2) >> 1; n > 0; n--) {
                    int byte = bytestream2_get_byteu(&gb);
                    *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4  );
                    *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
                }
            }
        } else {
            block_predictor = bytestream2_get_byteu(&gb);
            if (block_predictor > 6) {
                av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
                       block_predictor);
                return AVERROR_INVALIDDATA;
            }
            c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
            c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
            if (st) {
                block_predictor = bytestream2_get_byteu(&gb);
                if (block_predictor > 6) {
                    av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
                           block_predictor);
                    return AVERROR_INVALIDDATA;
                }
                c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
                c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
            }
            c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
            if (st){
                c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
            }

            c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
            if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
            c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
            if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);

            *samples++ = c->status[0].sample2;
            if (st) *samples++ = c->status[1].sample2;
            *samples++ = c->status[0].sample1;
            if (st) *samples++ = c->status[1].sample1;
            for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
                int byte = bytestream2_get_byteu(&gb);
                *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4  );
                *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
            }
        }
        break;
    }
    case AV_CODEC_ID_ADPCM_MTAF:
        for (channel = 0; channel < avctx->channels; channel+=2) {
            bytestream2_skipu(&gb, 4);
            c->status[channel    ].step      = bytestream2_get_le16u(&gb) & 0x1f;
            c->status[channel + 1].step      = bytestream2_get_le16u(&gb) & 0x1f;
            c->status[channel    ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
            bytestream2_skipu(&gb, 2);
            c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
            bytestream2_skipu(&gb, 2);
            for (n = 0; n < nb_samples; n+=2) {
                int v = bytestream2_get_byteu(&gb);
                samples_p[channel][n    ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
                samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4  );
            }
            for (n = 0; n < nb_samples; n+=2) {
                int v = bytestream2_get_byteu(&gb);
                samples_p[channel + 1][n    ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
                samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4  );
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_DK4:
        for (channel = 0; channel < avctx->channels; channel++) {
            cs = &c->status[channel];
            cs->predictor  = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
            cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
            if (cs->step_index > 88u){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
                       channel, cs->step_index);
                return AVERROR_INVALIDDATA;
            }
        }
        for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
            int v = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4  , 3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_DK3:
    {
        int last_byte = 0;
        int nibble;
        int decode_top_nibble_next = 0;
        int diff_channel;
        const int16_t *samples_end = samples + avctx->channels * nb_samples;

        bytestream2_skipu(&gb, 10);
        c->status[0].predictor  = sign_extend(bytestream2_get_le16u(&gb), 16);
        c->status[1].predictor  = sign_extend(bytestream2_get_le16u(&gb), 16);
        c->status[0].step_index = bytestream2_get_byteu(&gb);
        c->status[1].step_index = bytestream2_get_byteu(&gb);
        if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
            av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
                   c->status[0].step_index, c->status[1].step_index);
            return AVERROR_INVALIDDATA;
        }
        /* sign extend the predictors */
        diff_channel = c->status[1].predictor;

        /* DK3 ADPCM support macro */
#define DK3_GET_NEXT_NIBBLE() \
    if (decode_top_nibble_next) { \
        nibble = last_byte >> 4; \
        decode_top_nibble_next = 0; \
    } else { \
        last_byte = bytestream2_get_byteu(&gb); \
        nibble = last_byte & 0x0F; \
        decode_top_nibble_next = 1; \
    }

        while (samples < samples_end) {

            /* for this algorithm, c->status[0] is the sum channel and
             * c->status[1] is the diff channel */

            /* process the first predictor of the sum channel */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

            /* process the diff channel predictor */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[1], nibble, 3);

            /* process the first pair of stereo PCM samples */
            diff_channel = (diff_channel + c->status[1].predictor) / 2;
            *samples++ = c->status[0].predictor + c->status[1].predictor;
            *samples++ = c->status[0].predictor - c->status[1].predictor;

            /* process the second predictor of the sum channel */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

            /* process the second pair of stereo PCM samples */
            diff_channel = (diff_channel + c->status[1].predictor) / 2;
            *samples++ = c->status[0].predictor + c->status[1].predictor;
            *samples++ = c->status[0].predictor - c->status[1].predictor;
        }

        if ((bytestream2_tell(&gb) & 1))
            bytestream2_skip(&gb, 1);
        break;
    }
    case AV_CODEC_ID_ADPCM_IMA_ISS:
        for (channel = 0; channel < avctx->channels; channel++) {
            cs = &c->status[channel];
            cs->predictor  = sign_extend(bytestream2_get_le16u(&gb), 16);
            cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
            if (cs->step_index > 88u){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
                       channel, cs->step_index);
                return AVERROR_INVALIDDATA;
            }
        }

        for (n = nb_samples >> (1 - st); n > 0; n--) {
            int v1, v2;
            int v = bytestream2_get_byteu(&gb);
            /* nibbles are swapped for mono */
            if (st) {
                v1 = v >> 4;
                v2 = v & 0x0F;
            } else {
                v2 = v >> 4;
                v1 = v & 0x0F;
            }
            *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_MOFLEX:
        for (channel = 0; channel < avctx->channels; channel++) {
            cs = &c->status[channel];
            cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
            cs->predictor  = sign_extend(bytestream2_get_le16u(&gb), 16);
            if (cs->step_index > 88u){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
                       channel, cs->step_index);
                return AVERROR_INVALIDDATA;
            }
        }

        for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
            for (channel = 0; channel < avctx->channels; channel++) {
                samples = samples_p[channel] + 256 * subframe;
                for (n = 0; n < 256; n += 2) {
                    int v = bytestream2_get_byteu(&gb);
                    *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
                    *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4  , 3);
                }
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_DAT4:
        for (channel = 0; channel < avctx->channels; channel++) {
            cs = &c->status[channel];
            samples = samples_p[channel];
            bytestream2_skip(&gb, 4);
            for (n = 0; n < nb_samples; n += 2) {
                int v = bytestream2_get_byteu(&gb);
                *samples++ = adpcm_ima_expand_nibble(cs, v >> 4  , 3);
                *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_APC:
        for (n = nb_samples >> (1 - st); n > 0; n--) {
            int v = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],  v >> 4  , 3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_SSI:
        for (n = nb_samples >> (1 - st); n > 0; n--) {
            int v = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0],  v >> 4  );
            *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_APM:
        for (n = nb_samples / 2; n > 0; n--) {
            for (channel = 0; channel < avctx->channels; channel++) {
                int v = bytestream2_get_byteu(&gb);
                *samples++  = adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4  );
                samples[st] = adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
            }
            samples += avctx->channels;
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_ALP:
        for (n = nb_samples / 2; n > 0; n--) {
            for (channel = 0; channel < avctx->channels; channel++) {
                int v = bytestream2_get_byteu(&gb);
                *samples++  = adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4  , 2);
                samples[st] = adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
            }
            samples += avctx->channels;
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_CUNNING:
        for (n = 0; n < nb_samples / 2; n++) {
            int v = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_ima_cunning_expand_nibble(&c->status[0], v & 0x0F);
            *samples++ = adpcm_ima_cunning_expand_nibble(&c->status[0], v >> 4);
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_OKI:
        for (n = nb_samples >> (1 - st); n > 0; n--) {
            int v = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0],  v >> 4  );
            *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_RAD:
        for (channel = 0; channel < avctx->channels; channel++) {
            cs = &c->status[channel];
            cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
            cs->predictor  = sign_extend(bytestream2_get_le16u(&gb), 16);
            if (cs->step_index > 88u){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
                       channel, cs->step_index);
                return AVERROR_INVALIDDATA;
            }
        }
        for (n = 0; n < nb_samples / 2; n++) {
            int byte[2];

            byte[0] = bytestream2_get_byteu(&gb);
            if (st)
                byte[1] = bytestream2_get_byteu(&gb);
            for(channel = 0; channel < avctx->channels; channel++) {
                *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
            }
            for(channel = 0; channel < avctx->channels; channel++) {
                *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4  , 3);
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_WS:
        if (c->vqa_version == 3) {
            for (channel = 0; channel < avctx->channels; channel++) {
                int16_t *smp = samples_p[channel];

                for (n = nb_samples / 2; n > 0; n--) {
                    int v = bytestream2_get_byteu(&gb);
                    *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4  , 3);
                    *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
                }
            }
        } else {
            for (n = nb_samples / 2; n > 0; n--) {
                for (channel = 0; channel < avctx->channels; channel++) {
                    int v = bytestream2_get_byteu(&gb);
                    *samples++  = adpcm_ima_expand_nibble(&c->status[channel], v >> 4  , 3);
                    samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
                }
                samples += avctx->channels;
            }
        }
        bytestream2_seek(&gb, 0, SEEK_END);
        break;
    case AV_CODEC_ID_ADPCM_XA:
    {
        int16_t *out0 = samples_p[0];
        int16_t *out1 = samples_p[1];
        int samples_per_block = 28 * (3 - avctx->channels) * 4;
        int sample_offset = 0;
        int bytes_remaining;

        while (bytestream2_get_bytes_left(&gb) >= 128) {
            if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
                                 &c->status[0], &c->status[1],
                                 avctx->channels, sample_offset)) < 0)
                return ret;
            bytestream2_skipu(&gb, 128);
            sample_offset += samples_per_block;
        }
        /* Less than a full block of data left, e.g. when reading from
         * 2324 byte per sector XA; the remainder is padding */
        bytes_remaining = bytestream2_get_bytes_left(&gb);
        if (bytes_remaining > 0) {
            bytestream2_skip(&gb, bytes_remaining);
        }
        break;
    }
    case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
        for (i=0; i<=st; i++) {
            c->status[i].step_index = bytestream2_get_le32u(&gb);
            if (c->status[i].step_index > 88u) {
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
                       i, c->status[i].step_index);
                return AVERROR_INVALIDDATA;
            }
        }
        for (i=0; i<=st; i++) {
            c->status[i].predictor = bytestream2_get_le32u(&gb);
            if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
                return AVERROR_INVALIDDATA;
        }

        for (n = nb_samples >> (1 - st); n > 0; n--) {
            int byte   = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],  byte >> 4,   3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
        for (n = nb_samples >> (1 - st); n > 0; n--) {
            int byte = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],  byte >> 4,   6);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
        }
        break;
    case AV_CODEC_ID_ADPCM_EA:
    {
        int previous_left_sample, previous_right_sample;
        int current_left_sample, current_right_sample;
        int next_left_sample, next_right_sample;
        int coeff1l, coeff2l, coeff1r, coeff2r;
        int shift_left, shift_right;

        /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
           each coding 28 stereo samples. */

        if(avctx->channels != 2)
            return AVERROR_INVALIDDATA;

        current_left_sample   = sign_extend(bytestream2_get_le16u(&gb), 16);
        previous_left_sample  = sign_extend(bytestream2_get_le16u(&gb), 16);
        current_right_sample  = sign_extend(bytestream2_get_le16u(&gb), 16);
        previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);

        for (count1 = 0; count1 < nb_samples / 28; count1++) {
            int byte = bytestream2_get_byteu(&gb);
            coeff1l = ea_adpcm_table[ byte >> 4       ];
            coeff2l = ea_adpcm_table[(byte >> 4  ) + 4];
            coeff1r = ea_adpcm_table[ byte & 0x0F];
            coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];

            byte = bytestream2_get_byteu(&gb);
            shift_left  = 20 - (byte >> 4);
            shift_right = 20 - (byte & 0x0F);

            for (count2 = 0; count2 < 28; count2++) {
                byte = bytestream2_get_byteu(&gb);
                next_left_sample  = sign_extend(byte >> 4, 4) * (1 << shift_left);
                next_right_sample = sign_extend(byte, 4) * (1 << shift_right);

                next_left_sample = (next_left_sample +
                    (current_left_sample * coeff1l) +
                    (previous_left_sample * coeff2l) + 0x80) >> 8;
                next_right_sample = (next_right_sample +
                    (current_right_sample * coeff1r) +
                    (previous_right_sample * coeff2r) + 0x80) >> 8;

                previous_left_sample = current_left_sample;
                current_left_sample = av_clip_int16(next_left_sample);
                previous_right_sample = current_right_sample;
                current_right_sample = av_clip_int16(next_right_sample);
                *samples++ = current_left_sample;
                *samples++ = current_right_sample;
            }
        }

        bytestream2_skip(&gb, 2); // Skip terminating 0x0000

        break;
    }
    case AV_CODEC_ID_ADPCM_EA_MAXIS_XA:
    {
        int coeff[2][2], shift[2];

        for(channel = 0; channel < avctx->channels; channel++) {
            int byte = bytestream2_get_byteu(&gb);
            for (i=0; i<2; i++)
                coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
            shift[channel] = 20 - (byte & 0x0F);
        }
        for (count1 = 0; count1 < nb_samples / 2; count1++) {
            int byte[2];

            byte[0] = bytestream2_get_byteu(&gb);
            if (st) byte[1] = bytestream2_get_byteu(&gb);
            for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
                for(channel = 0; channel < avctx->channels; channel++) {
                    int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
                    sample = (sample +
                              c->status[channel].sample1 * coeff[channel][0] +
                              c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
                    c->status[channel].sample2 = c->status[channel].sample1;
                    c->status[channel].sample1 = av_clip_int16(sample);
                    *samples++ = c->status[channel].sample1;
                }
            }
        }
        bytestream2_seek(&gb, 0, SEEK_END);
        break;
    }
  1405. case AV_CODEC_ID_ADPCM_EA_R1:
  1406. case AV_CODEC_ID_ADPCM_EA_R2:
  1407. case AV_CODEC_ID_ADPCM_EA_R3: {
  1408. /* channel numbering
  1409. 2chan: 0=fl, 1=fr
  1410. 4chan: 0=fl, 1=rl, 2=fr, 3=rr
  1411. 6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
  1412. const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
  1413. int previous_sample, current_sample, next_sample;
  1414. int coeff1, coeff2;
  1415. int shift;
  1416. unsigned int channel;
  1417. uint16_t *samplesC;
        int count = 0;
        int offsets[6];

        for (channel = 0; channel < avctx->channels; channel++)
            offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
                                             bytestream2_get_le32(&gb)) +
                               (avctx->channels + 1) * 4;

        for (channel = 0; channel < avctx->channels; channel++) {
            bytestream2_seek(&gb, offsets[channel], SEEK_SET);
            samplesC = samples_p[channel];

            if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
                current_sample  = sign_extend(bytestream2_get_le16(&gb), 16);
                previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
            } else {
                current_sample  = c->status[channel].predictor;
                previous_sample = c->status[channel].prev_sample;
            }

            for (count1 = 0; count1 < nb_samples / 28; count1++) {
                int byte = bytestream2_get_byte(&gb);
                if (byte == 0xEE) { /* only seen in R2 and R3 */
                    current_sample  = sign_extend(bytestream2_get_be16(&gb), 16);
                    previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);

                    for (count2 = 0; count2 < 28; count2++)
                        *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
                } else {
                    coeff1 = ea_adpcm_table[ byte >> 4     ];
                    coeff2 = ea_adpcm_table[(byte >> 4) + 4];
                    shift  = 20 - (byte & 0x0F);

                    for (count2 = 0; count2 < 28; count2++) {
                        if (count2 & 1)
                            next_sample = (unsigned)sign_extend(byte, 4) << shift;
                        else {
                            byte = bytestream2_get_byte(&gb);
                            next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
                        }

                        next_sample += (current_sample  * coeff1) +
                                       (previous_sample * coeff2);
                        next_sample = av_clip_int16(next_sample >> 8);

                        previous_sample = current_sample;
                        current_sample  = next_sample;
                        *samplesC++ = current_sample;
                    }
                }
            }
            if (!count) {
                count = count1;
            } else if (count != count1) {
                av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
                count = FFMAX(count, count1);
            }

            if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
                c->status[channel].predictor   = current_sample;
                c->status[channel].prev_sample = previous_sample;
            }
        }

        frame->nb_samples = count * 28;
        bytestream2_seek(&gb, 0, SEEK_END);
        break;
    }
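    /* EA XAS stores each channel as four interleaved 32-sample vectors.
     * The two header words of each vector hold the coefficient index and
     * shift in their low nibbles and, with that nibble masked off, double
     * as the first two output samples. */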
    case AV_CODEC_ID_ADPCM_EA_XAS:
        for (channel = 0; channel < avctx->channels; channel++) {
            int coeff[2][4], shift[4];
            int16_t *s = samples_p[channel];
            for (n = 0; n < 4; n++, s += 32) {
                int val = sign_extend(bytestream2_get_le16u(&gb), 16);
                for (i = 0; i < 2; i++)
                    coeff[i][n] = ea_adpcm_table[(val & 0x0F) + 4 * i];
                s[0] = val & ~0x0F;

                val = sign_extend(bytestream2_get_le16u(&gb), 16);
                shift[n] = 20 - (val & 0x0F);
                s[1] = val & ~0x0F;
            }

            for (m = 2; m < 32; m += 2) {
                s = &samples_p[channel][m];
                for (n = 0; n < 4; n++, s += 32) {
                    int level, pred;
                    int byte = bytestream2_get_byteu(&gb);

                    level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
                    pred  = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
                    s[0]  = av_clip_int16((level + pred + 0x80) >> 8);

                    level = sign_extend(byte, 4) * (1 << shift[n]);
                    pred  = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
                    s[1]  = av_clip_int16((level + pred + 0x80) >> 8);
                }
            }
        }
        break;
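    /* AMV audio begins with a little-endian initial predictor, a step index
     * and five bytes that are skipped, followed by IMA nibbles (high nibble
     * first) decoded with a single shared channel state. */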
    case AV_CODEC_ID_ADPCM_IMA_AMV:
        c->status[0].predictor  = sign_extend(bytestream2_get_le16u(&gb), 16);
        c->status[0].step_index = bytestream2_get_byteu(&gb);
        bytestream2_skipu(&gb, 5);
        if (c->status[0].step_index > 88u) {
            av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
                   c->status[0].step_index);
            return AVERROR_INVALIDDATA;
        }

        for (n = nb_samples >> (1 - st); n > 0; n--) {
            int v = bytestream2_get_byteu(&gb);

            *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
        }
        break;
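    /* SMJPEG prefixes each channel with a big-endian predictor, a step index
     * and one padding byte, then decodes QuickTime-style IMA nibbles,
     * high nibble first. */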
    case AV_CODEC_ID_ADPCM_IMA_SMJPEG:
        for (i = 0; i < avctx->channels; i++) {
            c->status[i].predictor  = sign_extend(bytestream2_get_be16u(&gb), 16);
            c->status[i].step_index = bytestream2_get_byteu(&gb);
            bytestream2_skipu(&gb, 1);
            if (c->status[i].step_index > 88u) {
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
                       c->status[i].step_index);
                return AVERROR_INVALIDDATA;
            }
        }

        for (n = nb_samples >> (1 - st); n > 0; n--) {
            int v = bytestream2_get_byteu(&gb);

            *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
            *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
        }
        break;
    case AV_CODEC_ID_ADPCM_CT:
        for (n = nb_samples >> (1 - st); n > 0; n--) {
            int v = bytestream2_get_byteu(&gb);

            *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4  );
            *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
        }
        break;
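    /* Sound Blaster Pro streams start with one raw unsigned 8-bit sample per
     * channel, scaled to 16 bits; after that each byte carries two 4-bit,
     * three 3+3+2-bit or four 2-bit codes depending on the variant. */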
    case AV_CODEC_ID_ADPCM_SBPRO_4:
    case AV_CODEC_ID_ADPCM_SBPRO_3:
    case AV_CODEC_ID_ADPCM_SBPRO_2:
        if (!c->status[0].step_index) {
            /* the first byte is a raw sample */
            *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
            if (st)
                *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
            c->status[0].step_index = 1;
            nb_samples--;
        }
        if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
            for (n = nb_samples >> (1 - st); n > 0; n--) {
                int byte = bytestream2_get_byteu(&gb);

                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                                                       byte >> 4,   4, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                                                       byte & 0x0F, 4, 0);
            }
        } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
            for (n = (nb_samples << st) / 3; n > 0; n--) {
                int byte = bytestream2_get_byteu(&gb);

                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                                                        byte >> 5,         3, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                                                       (byte >> 2) & 0x07, 3, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                                                        byte & 0x03,       2, 0);
            }
        } else {
            for (n = nb_samples >> (2 - st); n > 0; n--) {
                int byte = bytestream2_get_byteu(&gb);

                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                                                        byte >> 6,         2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                                                       (byte >> 4) & 0x03, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                                                       (byte >> 2) & 0x03, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                                                        byte & 0x03,       2, 2);
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_SWF:
        adpcm_swf_decode(avctx, buf, buf_size, samples);
        bytestream2_seek(&gb, 0, SEEK_END);
        break;
    case AV_CODEC_ID_ADPCM_YAMAHA:
        for (n = nb_samples >> (1 - st); n > 0; n--) {
            int v = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
            *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4  );
        }
        break;
    case AV_CODEC_ID_ADPCM_AICA:
        if (!c->has_status) {
            for (channel = 0; channel < avctx->channels; channel++)
                c->status[channel].step = 0;
            c->has_status = 1;
        }
        for (channel = 0; channel < avctx->channels; channel++) {
            samples = samples_p[channel];
            for (n = nb_samples >> 1; n > 0; n--) {
                int v = bytestream2_get_byteu(&gb);
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4  );
            }
        }
        break;
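    /* AFC: extradata[0], when set, gives the number of samples per block.
     * Every 16 output samples come from a 9-byte group whose header byte
     * holds the scale shift in the high nibble and an index into
     * ff_adpcm_afc_coeffs in the low nibble. */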
    case AV_CODEC_ID_ADPCM_AFC:
    {
        int samples_per_block;
        int blocks;

        if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
            samples_per_block = avctx->extradata[0] / 16;
            blocks = nb_samples / avctx->extradata[0];
        } else {
            samples_per_block = nb_samples / 16;
            blocks = 1;
        }

        for (m = 0; m < blocks; m++) {
            for (channel = 0; channel < avctx->channels; channel++) {
                int prev1 = c->status[channel].sample1;
                int prev2 = c->status[channel].sample2;

                samples = samples_p[channel] + m * 16;
                /* Read in every sample for this channel. */
                for (i = 0; i < samples_per_block; i++) {
                    int byte = bytestream2_get_byteu(&gb);
                    int scale = 1 << (byte >> 4);
                    int index = byte & 0xf;
                    int factor1 = ff_adpcm_afc_coeffs[0][index];
                    int factor2 = ff_adpcm_afc_coeffs[1][index];

                    /* Decode 16 samples. */
                    for (n = 0; n < 16; n++) {
                        int32_t sampledat;

                        if (n & 1) {
                            sampledat = sign_extend(byte, 4);
                        } else {
                            byte = bytestream2_get_byteu(&gb);
                            sampledat = sign_extend(byte >> 4, 4);
                        }

                        sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
                                    sampledat * scale;
                        *samples = av_clip_int16(sampledat);
                        prev2 = prev1;
                        prev1 = *samples++;
                    }
                }

                c->status[channel].sample1 = prev1;
                c->status[channel].sample2 = prev2;
            }
        }
        bytestream2_seek(&gb, 0, SEEK_END);
        break;
    }
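    /* THP: the 16 coefficients per channel come from extradata when present,
     * otherwise from the packet header together with the initial sample
     * history. Each frame byte selects one of eight coefficient pairs
     * (high nibble) and an exponent (low nibble) for up to 14 samples. */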
    case AV_CODEC_ID_ADPCM_THP:
    case AV_CODEC_ID_ADPCM_THP_LE:
    {
        int table[14][16];
        int ch;

#define THP_GET16(g) \
    sign_extend( \
        avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
        bytestream2_get_le16u(&(g)) : \
        bytestream2_get_be16u(&(g)), 16)

        if (avctx->extradata) {
            GetByteContext tb;
            if (avctx->extradata_size < 32 * avctx->channels) {
                av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
                return AVERROR_INVALIDDATA;
            }

            bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);

            for (i = 0; i < avctx->channels; i++)
                for (n = 0; n < 16; n++)
                    table[i][n] = THP_GET16(tb);
        } else {
            for (i = 0; i < avctx->channels; i++)
                for (n = 0; n < 16; n++)
                    table[i][n] = THP_GET16(gb);

            if (!c->has_status) {
                /* Initialize the previous sample. */
                for (i = 0; i < avctx->channels; i++) {
                    c->status[i].sample1 = THP_GET16(gb);
                    c->status[i].sample2 = THP_GET16(gb);
                }
                c->has_status = 1;
            } else {
                bytestream2_skip(&gb, avctx->channels * 4);
            }
        }

        for (ch = 0; ch < avctx->channels; ch++) {
            samples = samples_p[ch];

            /* Read in every sample for this channel. */
            for (i = 0; i < (nb_samples + 13) / 14; i++) {
                int byte = bytestream2_get_byteu(&gb);
                int index = (byte >> 4) & 7;
                unsigned int exp = byte & 0x0F;
                int64_t factor1 = table[ch][index * 2];
                int64_t factor2 = table[ch][index * 2 + 1];

                /* Decode 14 samples. */
                for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
                    int32_t sampledat;

                    if (n & 1) {
                        sampledat = sign_extend(byte, 4);
                    } else {
                        byte = bytestream2_get_byteu(&gb);
                        sampledat = sign_extend(byte >> 4, 4);
                    }

                    sampledat = ((c->status[ch].sample1 * factor1
                                  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
                    *samples = av_clip_int16(sampledat);
                    c->status[ch].sample2 = c->status[ch].sample1;
                    c->status[ch].sample1 = *samples++;
                }
            }
        }
        break;
    }
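    /* DTK interleaves the per-channel header bytes at the start of every
     * 28-sample unit; header >> 4 picks one of three fixed predictors (or
     * none) and header & 0xf the shift. Both channels share the same data
     * bytes (low/high nibbles), so the reader is rewound after the first
     * channel to reread them. */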
    case AV_CODEC_ID_ADPCM_DTK:
        for (channel = 0; channel < avctx->channels; channel++) {
            samples = samples_p[channel];

            /* Read in every sample for this channel. */
            for (i = 0; i < nb_samples / 28; i++) {
                int byte, header;
                if (channel)
                    bytestream2_skipu(&gb, 1);
                header = bytestream2_get_byteu(&gb);
                bytestream2_skipu(&gb, 3 - channel);

                /* Decode 28 samples. */
                for (n = 0; n < 28; n++) {
                    int32_t sampledat, prev;

                    switch (header >> 4) {
                    case 1:
                        prev = (c->status[channel].sample1 * 0x3c);
                        break;
                    case 2:
                        prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
                        break;
                    case 3:
                        prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
                        break;
                    default:
                        prev = 0;
                    }

                    prev = av_clip_intp2((prev + 0x20) >> 6, 21);

                    byte = bytestream2_get_byteu(&gb);
                    if (!channel)
                        sampledat = sign_extend(byte, 4);
                    else
                        sampledat = sign_extend(byte >> 4, 4);

                    sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
                    *samples++ = av_clip_int16(sampledat >> 6);
                    c->status[channel].sample2 = c->status[channel].sample1;
                    c->status[channel].sample1 = sampledat;
                }
            }
            if (!channel)
                bytestream2_seek(&gb, 0, SEEK_SET);
        }
        break;
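    /* PSX: each 16-byte chunk per channel holds a filter/shift byte, a flag
     * byte and 14 data bytes giving 28 samples; flag values of 0x07 and
     * above leave the output at silence while still advancing the
     * predictor history. */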
    case AV_CODEC_ID_ADPCM_PSX:
        for (int block = 0; block < avpkt->size / FFMAX(avctx->block_align, 16 * avctx->channels); block++) {
            int nb_samples_per_block = 28 * FFMAX(avctx->block_align, 16 * avctx->channels) / (16 * avctx->channels);
            for (channel = 0; channel < avctx->channels; channel++) {
                samples = samples_p[channel] + block * nb_samples_per_block;

                /* Read in every sample for this channel. */
                for (i = 0; i < nb_samples_per_block / 28; i++) {
                    int filter, shift, flag, byte;

                    filter = bytestream2_get_byteu(&gb);
                    shift  = filter & 0xf;
                    filter = filter >> 4;
                    if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table))
                        return AVERROR_INVALIDDATA;
                    flag   = bytestream2_get_byteu(&gb);

                    /* Decode 28 samples. */
                    for (n = 0; n < 28; n++) {
                        int sample = 0, scale;

                        if (flag < 0x07) {
                            if (n & 1) {
                                scale = sign_extend(byte >> 4, 4);
                            } else {
                                byte  = bytestream2_get_byteu(&gb);
                                scale = sign_extend(byte, 4);
                            }

                            scale  = scale * (1 << 12);
                            sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
                        }
                        *samples++ = av_clip_int16(sample);
                        c->status[channel].sample2 = c->status[channel].sample1;
                        c->status[channel].sample1 = sample;
                    }
                }
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_ARGO:
        /*
         * The format of each block:
         *   uint8_t left_control;
         *   uint4_t left_samples[nb_samples];
         *
         *   ---- and if stereo ----
         *   uint8_t right_control;
         *   uint4_t right_samples[nb_samples];
         *
         * Format of the control byte:
         * MSB [SSSSRDRR] LSB
         *   S = (Shift Amount - 2)
         *   D = Decoder flag.
         *   R = Reserved
         *
         * Each block relies on the previous two samples of each channel.
         * They should be 0 initially.
         */
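        /*
         * Worked example (illustrative values, not taken from a real stream):
         * a control byte of 0x64 gives shift = (0x64 >> 4) + 2 = 8 and sets
         * the decoder flag (0x64 & 0x04), so each 4-bit nibble below is
         * expanded as ff_adpcm_argo_expand_nibble(cs, nibble, 8, 4).
         */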
        for (int block = 0; block < avpkt->size / avctx->block_align; block++) {
            for (channel = 0; channel < avctx->channels; channel++) {
                int control, shift;

                samples = samples_p[channel] + block * 32;
                cs = c->status + channel;

                /* Get the control byte and decode the samples, 2 at a time. */
                control = bytestream2_get_byteu(&gb);
                shift = (control >> 4) + 2;

                for (n = 0; n < 16; n++) {
                    int sample = bytestream2_get_byteu(&gb);
                    *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 4, shift, control & 0x04);
                    *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 0, shift, control & 0x04);
                }
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_ZORK:
        if (!c->has_status) {
            for (channel = 0; channel < avctx->channels; channel++) {
                c->status[channel].predictor  = 0;
                c->status[channel].step_index = 0;
            }
            c->has_status = 1;
        }
        for (n = 0; n < nb_samples * avctx->channels; n++) {
            int v = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_zork_expand_nibble(&c->status[n % avctx->channels], v);
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_MTF:
        for (n = nb_samples / 2; n > 0; n--) {
            for (channel = 0; channel < avctx->channels; channel++) {
                int v = bytestream2_get_byteu(&gb);
                *samples++  = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
                samples[st] = adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
            }
            samples += avctx->channels;
        }
        break;
    default:
        av_assert0(0); // unsupported codec_id should not happen
    }

    if (avpkt->size && bytestream2_tell(&gb) == 0) {
        av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
        return AVERROR_INVALIDDATA;
    }

    *got_frame_ptr = 1;

    if (avpkt->size < bytestream2_tell(&gb)) {
        av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
        return avpkt->size;
    }

    return bytestream2_tell(&gb);
}
static void adpcm_flush(AVCodecContext *avctx)
{
    ADPCMDecodeContext *c = avctx->priv_data;
    c->has_status = 0;
}
static const enum AVSampleFormat sample_fmts_s16[]  = { AV_SAMPLE_FMT_S16,
                                                        AV_SAMPLE_FMT_NONE };
static const enum AVSampleFormat sample_fmts_s16p[] = { AV_SAMPLE_FMT_S16P,
                                                        AV_SAMPLE_FMT_NONE };
static const enum AVSampleFormat sample_fmts_both[] = { AV_SAMPLE_FMT_S16,
                                                        AV_SAMPLE_FMT_S16P,
                                                        AV_SAMPLE_FMT_NONE };
#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
AVCodec ff_ ## name_ ## _decoder = {                        \
    .name           = #name_,                               \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_),     \
    .type           = AVMEDIA_TYPE_AUDIO,                   \
    .id             = id_,                                  \
    .priv_data_size = sizeof(ADPCMDecodeContext),           \
    .init           = adpcm_decode_init,                    \
    .decode         = adpcm_decode_frame,                   \
    .flush          = adpcm_flush,                          \
    .capabilities   = AV_CODEC_CAP_DR1,                     \
    .sample_fmts    = sample_fmts_,                         \
}
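/*
 * For reference, ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, sample_fmts_both,
 * adpcm_ms, "ADPCM Microsoft") expands to an AVCodec named
 * ff_adpcm_ms_decoder with .name = "adpcm_ms", wired to
 * adpcm_decode_init/adpcm_decode_frame/adpcm_flush above.
 */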
/* Note: Do not forget to add new entries to the Makefile as well. */
ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AGM, sample_fmts_s16, adpcm_agm, "ADPCM AmuseGraphics Movie");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_ARGO, sample_fmts_s16p, adpcm_argo, "ADPCM Argonaut Games");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APM, sample_fmts_s16, adpcm_ima_apm, "ADPCM IMA Ubisoft APM");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_CUNNING, sample_fmts_s16, adpcm_ima_cunning, "ADPCM IMA Cunning Developments");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_MOFLEX, sample_fmts_s16p, adpcm_ima_moflex, "ADPCM IMA MobiClip MOFLEX");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_MTF, sample_fmts_s16, adpcm_ima_mtf, "ADPCM IMA Capcom's MT Framework");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SSI, sample_fmts_s16, adpcm_ima_ssi, "ADPCM IMA Simon & Schuster Interactive");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ALP, sample_fmts_s16, adpcm_ima_alp, "ADPCM IMA High Voltage Software ALP");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, sample_fmts_both, adpcm_ms, "ADPCM Microsoft");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MTAF, sample_fmts_s16p, adpcm_mtaf, "ADPCM MTAF");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA, sample_fmts_s16p, adpcm_xa, "ADPCM CDROM XA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_ZORK, sample_fmts_s16, adpcm_zork, "ADPCM Zork");