/*
 * Copyright (c) 2001-2003 The FFmpeg Project
 *
 * first version by Francois Revol (revol@free.fr)
 * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
 *   by Mike Melanson (melanson@pcisys.net)
 * CD-ROM XA ADPCM codec by BERO
 * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
 * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
 * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
 * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
 * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
 * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
 * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "get_bits.h"
#include "bytestream.h"
#include "adpcm.h"
#include "adpcm_data.h"
#include "internal.h"
/**
 * @file
 * ADPCM decoders
 * Features and limitations:
 *
 * Reference documents:
 * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
 * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
 * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
 * http://openquicktime.sourceforge.net/
 * XAnim sources (xa_codec.c) http://xanim.polter.net/
 * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
 * SoX source code http://sox.sourceforge.net/
 *
 * CD-ROM XA:
 * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
 * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
 * readstr http://www.geocities.co.jp/Playtown/2004/
 */
/* These are for CD-ROM XA ADPCM */
static const int xa_adpcm_table[5][2] = {
    {   0,   0 },
    {  60,   0 },
    { 115, -52 },
    {  98, -55 },
    { 122, -60 }
};
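
/* The pairs above are the XA prediction filter coefficients (f0, f1) in 6-bit
 * fixed point: xa_decode() applies them as (s_1*f0 + s_2*f1 + 32) >> 6, so the
 * effective real-valued coefficients are these values divided by 64. */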

static const int ea_adpcm_table[] = {
    0,  240,  460,  392,
    0,    0, -208, -220,
    0,    1,    3,    4,
    7,    8,   10,   11,
    0,   -1,   -3,   -4
};

// padded to zero where table size is less than 16
static const int swf_index_tables[4][16] = {
    /*2*/ { -1, 2 },
    /*3*/ { -1, -1, 2, 4 },
    /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
    /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
};
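
/* The rows above are indexed by (code size - 2): adpcm_swf_decode() reads a
 * 2-bit field from the stream selecting 2- to 5-bit codes per sample and then
 * uses swf_index_tables[nb_bits - 2] as its step-index adaptation table. */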

/* end of tables */

typedef struct ADPCMDecodeContext {
    ADPCMChannelStatus status[10];
    int vqa_version;                /**< VQA version. Used for ADPCM_IMA_WS */
    int has_status;
} ADPCMDecodeContext;

static av_cold int adpcm_decode_init(AVCodecContext * avctx)
{
    ADPCMDecodeContext *c = avctx->priv_data;
    unsigned int min_channels = 1;
    unsigned int max_channels = 2;

    switch(avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_DTK:
    case AV_CODEC_ID_ADPCM_EA:
        min_channels = 2;
        break;
    case AV_CODEC_ID_ADPCM_AFC:
    case AV_CODEC_ID_ADPCM_EA_R1:
    case AV_CODEC_ID_ADPCM_EA_R2:
    case AV_CODEC_ID_ADPCM_EA_R3:
    case AV_CODEC_ID_ADPCM_EA_XAS:
        max_channels = 6;
        break;
    case AV_CODEC_ID_ADPCM_THP:
    case AV_CODEC_ID_ADPCM_THP_LE:
        max_channels = 10;
        break;
    }

    if (avctx->channels < min_channels || avctx->channels > max_channels) {
        av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
        return AVERROR(EINVAL);
    }

    switch(avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_CT:
        c->status[0].step = c->status[1].step = 511;
        break;
    case AV_CODEC_ID_ADPCM_IMA_WAV:
        if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
            return AVERROR_INVALIDDATA;
        break;
    case AV_CODEC_ID_ADPCM_IMA_APC:
        if (avctx->extradata && avctx->extradata_size >= 8) {
            c->status[0].predictor = AV_RL32(avctx->extradata);
            c->status[1].predictor = AV_RL32(avctx->extradata + 4);
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_WS:
        if (avctx->extradata && avctx->extradata_size >= 2)
            c->vqa_version = AV_RL16(avctx->extradata);
        break;
    default:
        break;
    }

    switch(avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_IMA_QT:
    case AV_CODEC_ID_ADPCM_IMA_WAV:
    case AV_CODEC_ID_ADPCM_4XM:
    case AV_CODEC_ID_ADPCM_XA:
    case AV_CODEC_ID_ADPCM_EA_R1:
    case AV_CODEC_ID_ADPCM_EA_R2:
    case AV_CODEC_ID_ADPCM_EA_R3:
    case AV_CODEC_ID_ADPCM_EA_XAS:
    case AV_CODEC_ID_ADPCM_THP:
    case AV_CODEC_ID_ADPCM_THP_LE:
    case AV_CODEC_ID_ADPCM_AFC:
    case AV_CODEC_ID_ADPCM_DTK:
        avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
        break;
    case AV_CODEC_ID_ADPCM_IMA_WS:
        avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
                                                  AV_SAMPLE_FMT_S16;
        break;
    default:
        avctx->sample_fmt = AV_SAMPLE_FMT_S16;
    }

    return 0;
}
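
/*
 * IMA ADPCM nibble expansion using the shared step-size and index tables from
 * adpcm_data.c. As a worked example with shift == 3: for a nibble of
 * magnitude delta = 5 and step = 16, diff = ((2*5 + 1) * 16) >> 3 = 22, which
 * matches the reference value (delta + 0.5) * step / 4.
 */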
static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int shift)
{
    int step_index;
    int predictor;
    int sign, delta, diff, step;

    step = ff_adpcm_step_table[c->step_index];
    step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
    step_index = av_clip(step_index, 0, 88);

    sign = nibble & 8;
    delta = nibble & 7;
    /* perform direct multiplication instead of series of jumps proposed by
     * the reference ADPCM implementation since modern CPUs can do the mults
     * quickly enough */
    diff = ((2 * delta + 1) * step) >> shift;
    predictor = c->predictor;
    if (sign) predictor -= diff;
    else predictor += diff;

    c->predictor = av_clip_int16(predictor);
    c->step_index = step_index;

    return (short)c->predictor;
}
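
/*
 * Bitstream variant of the IMA expansion used by ADPCM_IMA_WAV: codes are
 * read LSB-first with get_bits_le() and may be 2-5 bits wide (bps), so the
 * sign bit, the magnitude mask and the index table
 * (ff_adpcm_index_tables[bps - 2]) all depend on the code width instead of
 * being fixed at 4 bits.
 */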
static inline int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
{
    int nibble, step_index, predictor, sign, delta, diff, step, shift;

    shift = bps - 1;
    nibble = get_bits_le(gb, bps);
    step = ff_adpcm_step_table[c->step_index];
    step_index = c->step_index + ff_adpcm_index_tables[bps - 2][nibble];
    step_index = av_clip(step_index, 0, 88);

    sign = nibble & (1 << shift);
    delta = av_mod_uintp2(nibble, shift);
    diff = ((2 * delta + 1) * step) >> shift;
    predictor = c->predictor;
    if (sign) predictor -= diff;
    else predictor += diff;

    c->predictor = av_clip_int16(predictor);
    c->step_index = step_index;

    return (int16_t)c->predictor;
}
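
/*
 * QuickTime IMA variant: instead of the single multiply above it builds the
 * difference with shifts and adds, diff = step/8 + (b2 ? step : 0) +
 * (b1 ? step/2 : 0) + (b0 ? step/4 : 0) for nibble bits b2..b0. This is the
 * form used by the original IMA reference code and can round slightly
 * differently from ((2*delta + 1) * step) >> 3.
 */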
static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble, int shift)
{
    int step_index;
    int predictor;
    int diff, step;

    step = ff_adpcm_step_table[c->step_index];
    step_index = c->step_index + ff_adpcm_index_table[nibble];
    step_index = av_clip(step_index, 0, 88);

    diff = step >> 3;
    if (nibble & 4) diff += step;
    if (nibble & 2) diff += step >> 1;
    if (nibble & 1) diff += step >> 2;

    if (nibble & 8)
        predictor = c->predictor - diff;
    else
        predictor = c->predictor + diff;

    c->predictor = av_clip_int16(predictor);
    c->step_index = step_index;

    return c->predictor;
}
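
/*
 * MS ADPCM expansion: the next sample is predicted from the two previous
 * samples and corrected by the signed nibble scaled by the adaptive quantizer
 * step idelta. Note that the shared ff_adpcm_AdaptCoeff1/2 tables store the
 * standard MS-ADPCM coefficients pre-divided by 4, which is why the
 * prediction below divides by 64 rather than by 256.
 */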
static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
{
    int predictor;

    predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
    predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);
    c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
    if (c->idelta < 16) c->idelta = 16;
    if (c->idelta > INT_MAX/768) {
        av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
        c->idelta = INT_MAX/768;
    }

    return c->sample1;
}
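
/*
 * Dialogic OKI variant of IMA: it uses the 49-entry OKI step table, keeps a
 * 12-bit predictor (av_clip_intp2(..., 11)) and scales the result by 16 to
 * produce 16-bit output samples.
 */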
static inline short adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
{
    int step_index, predictor, sign, delta, diff, step;

    step = ff_adpcm_oki_step_table[c->step_index];
    step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
    step_index = av_clip(step_index, 0, 48);

    sign = nibble & 8;
    delta = nibble & 7;
    diff = ((2 * delta + 1) * step) >> 3;
    predictor = c->predictor;
    if (sign) predictor -= diff;
    else predictor += diff;

    c->predictor = av_clip_intp2(predictor, 11);
    c->step_index = step_index;

    return c->predictor * 16;
}

static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
{
    int sign, delta, diff;
    int new_step;

    sign = nibble & 8;
    delta = nibble & 7;
    /* perform direct multiplication instead of series of jumps proposed by
     * the reference ADPCM implementation since modern CPUs can do the mults
     * quickly enough */
    diff = ((2 * delta + 1) * c->step) >> 3;
    /* predictor update is not so trivial: predictor is multiplied by 254/256 before updating */
    c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
    c->predictor = av_clip_int16(c->predictor);
    /* calculate new step and clamp it to range 511..32767 */
    new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
    c->step = av_clip(new_step, 511, 32767);

    return (short)c->predictor;
}

static inline short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift)
{
    int sign, delta, diff;

    sign = nibble & (1<<(size-1));
    delta = nibble & ((1<<(size-1))-1);
    diff = delta << (7 + c->step + shift);

    /* clamp result */
    c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);

    /* calculate new step */
    if (delta >= (2*size - 3) && c->step < 3)
        c->step++;
    else if (delta == 0 && c->step > 0)
        c->step--;

    return (short) c->predictor;
}

static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned char nibble)
{
    if(!c->step) {
        c->predictor = 0;
        c->step = 127;
    }

    c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
    c->predictor = av_clip_int16(c->predictor);
    c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
    c->step = av_clip(c->step, 127, 24567);

    return c->predictor;
}
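
/*
 * CD-ROM XA ADPCM. Each 128-byte sound group is a 16-byte header (bytes 4-11
 * carry the shift/filter parameter for each of the eight 28-sample sound
 * units) followed by 112 data bytes in which every byte packs one nibble for
 * each of two sound units. In stereo, the two units of a pair go to the left
 * and right channel; in mono they are consecutive 28-sample runs of the same
 * channel.
 */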
static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
                     const uint8_t *in, ADPCMChannelStatus *left,
                     ADPCMChannelStatus *right, int channels, int sample_offset)
{
    int i, j;
    int shift,filter,f0,f1;
    int s_1,s_2;
    int d,s,t;

    out0 += sample_offset;
    if (channels == 1)
        out1 = out0 + 28;
    else
        out1 += sample_offset;

    for(i=0;i<4;i++) {
        shift  = 12 - (in[4+i*2] & 15);
        filter = in[4+i*2] >> 4;
        if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
            avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
            filter=0;
        }
        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        s_1 = left->sample1;
        s_2 = left->sample2;

        for(j=0;j<28;j++) {
            d = in[16+i+j*4];

            t = sign_extend(d, 4);
            s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            out0[j] = s_1;
        }

        if (channels == 2) {
            left->sample1 = s_1;
            left->sample2 = s_2;
            s_1 = right->sample1;
            s_2 = right->sample2;
        }

        shift  = 12 - (in[5+i*2] & 15);
        filter = in[5+i*2] >> 4;
        if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
            avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
            filter=0;
        }

        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        for(j=0;j<28;j++) {
            d = in[16+i+j*4];

            t = sign_extend(d >> 4, 4);
            s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            out1[j] = s_1;
        }

        if (channels == 2) {
            right->sample1 = s_1;
            right->sample2 = s_2;
        } else {
            left->sample1 = s_1;
            left->sample2 = s_2;
        }

        out0 += 28 * (3 - channels);
        out1 += 28 * (3 - channels);
    }

    return 0;
}
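
/*
 * Shockwave Flash ADPCM. The stream starts with a 2-bit field selecting the
 * code size (2-5 bits per sample); each block then carries, per channel, a
 * 16-bit initial sample and a 6-bit step index (22 header bits) followed by
 * up to 4095 codes per channel.
 */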
static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
{
    ADPCMDecodeContext *c = avctx->priv_data;
    GetBitContext gb;
    const int *table;
    int k0, signmask, nb_bits, count;
    int size = buf_size*8;
    int i;

    init_get_bits(&gb, buf, size);

    //read bits & initial values
    nb_bits = get_bits(&gb, 2)+2;
    table = swf_index_tables[nb_bits-2];
    k0 = 1 << (nb_bits-2);
    signmask = 1 << (nb_bits-1);

    while (get_bits_count(&gb) <= size - 22*avctx->channels) {
        for (i = 0; i < avctx->channels; i++) {
            *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
            c->status[i].step_index = get_bits(&gb, 6);
        }

        for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
            int i;

            for (i = 0; i < avctx->channels; i++) {
                // similar to IMA adpcm
                int delta = get_bits(&gb, nb_bits);
                int step = ff_adpcm_step_table[c->status[i].step_index];
                long vpdiff = 0; // vpdiff = (delta+0.5)*step/4
                int k = k0;

                do {
                    if (delta & k)
                        vpdiff += step;
                    step >>= 1;
                    k >>= 1;
                } while(k);
                vpdiff += step;

                if (delta & signmask)
                    c->status[i].predictor -= vpdiff;
                else
                    c->status[i].predictor += vpdiff;

                c->status[i].step_index += table[delta & (~signmask)];

                c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
                c->status[i].predictor = av_clip_int16(c->status[i].predictor);

                *samples++ = c->status[i].predictor;
            }
        }
    }
}

/**
 * Get the number of samples that will be decoded from the packet.
 * In one case, this is actually the maximum number of samples possible to
 * decode with the given buf_size.
 *
 * @param[out] coded_samples set to the number of samples as coded in the
 *                           packet, or 0 if the codec does not encode the
 *                           number of samples in each frame.
 * @param[out] approx_nb_samples set to non-zero if the number of samples
 *                               returned is an approximation.
 */
static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
                          int buf_size, int *coded_samples, int *approx_nb_samples)
{
    ADPCMDecodeContext *s = avctx->priv_data;
    int nb_samples = 0;
    int ch = avctx->channels;
    int has_coded_samples = 0;
    int header_size;

    *coded_samples = 0;
    *approx_nb_samples = 0;

    if(ch <= 0)
        return 0;

    switch (avctx->codec->id) {
    /* constant, only check buf_size */
    case AV_CODEC_ID_ADPCM_EA_XAS:
        if (buf_size < 76 * ch)
            return 0;
        nb_samples = 128;
        break;
    case AV_CODEC_ID_ADPCM_IMA_QT:
        if (buf_size < 34 * ch)
            return 0;
        nb_samples = 64;
        break;
    /* simple 4-bit adpcm */
    case AV_CODEC_ID_ADPCM_CT:
    case AV_CODEC_ID_ADPCM_IMA_APC:
    case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
    case AV_CODEC_ID_ADPCM_IMA_OKI:
    case AV_CODEC_ID_ADPCM_IMA_WS:
    case AV_CODEC_ID_ADPCM_YAMAHA:
        nb_samples = buf_size * 2 / ch;
        break;
    }
    if (nb_samples)
        return nb_samples;

    /* simple 4-bit adpcm, with header */
    header_size = 0;
    switch (avctx->codec->id) {
        case AV_CODEC_ID_ADPCM_4XM:
        case AV_CODEC_ID_ADPCM_IMA_ISS:     header_size = 4 * ch;      break;
        case AV_CODEC_ID_ADPCM_IMA_AMV:     header_size = 8;           break;
        case AV_CODEC_ID_ADPCM_IMA_SMJPEG:  header_size = 4 * ch;      break;
    }
    if (header_size > 0)
        return (buf_size - header_size) * 2 / ch;

    /* more complex formats */
    switch (avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_EA:
        has_coded_samples = 1;
        *coded_samples  = bytestream2_get_le32(gb);
        *coded_samples -= *coded_samples % 28;
        nb_samples      = (buf_size - 12) / 30 * 28;
        break;
    case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
        has_coded_samples = 1;
        *coded_samples = bytestream2_get_le32(gb);
        nb_samples     = (buf_size - (4 + 8 * ch)) * 2 / ch;
        break;
    case AV_CODEC_ID_ADPCM_EA_MAXIS_XA:
        nb_samples = (buf_size - ch) / ch * 2;
        break;
    case AV_CODEC_ID_ADPCM_EA_R1:
    case AV_CODEC_ID_ADPCM_EA_R2:
    case AV_CODEC_ID_ADPCM_EA_R3:
        /* maximum number of samples */
        /* has internal offsets and a per-frame switch to signal raw 16-bit */
        has_coded_samples = 1;
        switch (avctx->codec->id) {
        case AV_CODEC_ID_ADPCM_EA_R1:
            header_size    = 4 + 9 * ch;
            *coded_samples = bytestream2_get_le32(gb);
            break;
        case AV_CODEC_ID_ADPCM_EA_R2:
            header_size    = 4 + 5 * ch;
            *coded_samples = bytestream2_get_le32(gb);
            break;
        case AV_CODEC_ID_ADPCM_EA_R3:
            header_size    = 4 + 5 * ch;
            *coded_samples = bytestream2_get_be32(gb);
            break;
        }
        *coded_samples -= *coded_samples % 28;
        nb_samples      = (buf_size - header_size) * 2 / ch;
        nb_samples     -= nb_samples % 28;
        *approx_nb_samples = 1;
        break;
    case AV_CODEC_ID_ADPCM_IMA_DK3:
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
        break;
    case AV_CODEC_ID_ADPCM_IMA_DK4:
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        if (buf_size < 4 * ch)
            return AVERROR_INVALIDDATA;
        nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
        break;
    case AV_CODEC_ID_ADPCM_IMA_RAD:
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        nb_samples = (buf_size - 4 * ch) * 2 / ch;
        break;
    case AV_CODEC_ID_ADPCM_IMA_WAV:
    {
        int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
        int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        if (buf_size < 4 * ch)
            return AVERROR_INVALIDDATA;
        nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
        break;
    }
    case AV_CODEC_ID_ADPCM_MS:
        if (avctx->block_align > 0)
            buf_size = FFMIN(buf_size, avctx->block_align);
        nb_samples = (buf_size - 6 * ch) * 2 / ch;
        break;
    case AV_CODEC_ID_ADPCM_SBPRO_2:
    case AV_CODEC_ID_ADPCM_SBPRO_3:
    case AV_CODEC_ID_ADPCM_SBPRO_4:
    {
        int samples_per_byte;
        switch (avctx->codec->id) {
        case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
        case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
        case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
        }
        if (!s->status[0].step_index) {
            if (buf_size < ch)
                return AVERROR_INVALIDDATA;
            nb_samples++;
            buf_size -= ch;
        }
        nb_samples += buf_size * samples_per_byte / ch;
        break;
    }
    case AV_CODEC_ID_ADPCM_SWF:
    {
        int buf_bits       = buf_size * 8 - 2;
        int nbits          = (bytestream2_get_byte(gb) >> 6) + 2;
        int block_hdr_size = 22 * ch;
        int block_size     = block_hdr_size + nbits * ch * 4095;
        int nblocks        = buf_bits / block_size;
        int bits_left      = buf_bits - nblocks * block_size;
        nb_samples         = nblocks * 4096;
        if (bits_left >= block_hdr_size)
            nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
        break;
    }
    case AV_CODEC_ID_ADPCM_THP:
    case AV_CODEC_ID_ADPCM_THP_LE:
        if (avctx->extradata) {
            nb_samples = buf_size * 14 / (8 * ch);
            break;
        }
        has_coded_samples = 1;
        bytestream2_skip(gb, 4); // channel size
        *coded_samples  = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
                          bytestream2_get_le32(gb) :
                          bytestream2_get_be32(gb);
        buf_size       -= 8 + 36 * ch;
        buf_size       /= ch;
        nb_samples      = buf_size / 8 * 14;
        if (buf_size % 8 > 1)
            nb_samples     += (buf_size % 8 - 1) * 2;
        *approx_nb_samples = 1;
        break;
    case AV_CODEC_ID_ADPCM_AFC:
        nb_samples = buf_size / (9 * ch) * 16;
        break;
    case AV_CODEC_ID_ADPCM_XA:
        nb_samples = (buf_size / 128) * 224 / ch;
        break;
    case AV_CODEC_ID_ADPCM_DTK:
        nb_samples = buf_size / (16 * ch) * 28;
        break;
    }

    /* validate coded sample count */
    if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
        return AVERROR_INVALIDDATA;

    return nb_samples;
}
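
/*
 * Main decode entry point: get_nb_samples() sizes the output frame, then the
 * per-codec switch below consumes the packet through a GetByteContext. Codecs
 * set to AV_SAMPLE_FMT_S16P in adpcm_decode_init() write through the planar
 * samples_p[] pointers; the remaining codecs write interleaved output through
 * 'samples'.
 */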
static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame     = data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    ADPCMDecodeContext *c = avctx->priv_data;
    ADPCMChannelStatus *cs;
    int n, m, channel, i;
    short *samples;
    int16_t **samples_p;
    int st; /* stereo */
    int count1, count2;
    int nb_samples, coded_samples, approx_nb_samples, ret;
    GetByteContext gb;

    bytestream2_init(&gb, buf, buf_size);
    nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
    if (nb_samples <= 0) {
        av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
        return AVERROR_INVALIDDATA;
    }

    /* get output buffer */
    frame->nb_samples = nb_samples;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;
    samples = (short *)frame->data[0];
    samples_p = (int16_t **)frame->extended_data;

    /* use coded_samples when applicable */
    /* it is always <= nb_samples, so the output buffer will be large enough */
    if (coded_samples) {
        if (!approx_nb_samples && coded_samples != nb_samples)
            av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
        frame->nb_samples = nb_samples = coded_samples;
    }

    st = avctx->channels == 2 ? 1 : 0;

    switch(avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_IMA_QT:
        /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
           Channel data is interleaved per-chunk. */
        for (channel = 0; channel < avctx->channels; channel++) {
            int predictor;
            int step_index;
            cs = &(c->status[channel]);
            /* (pppppp) (piiiiiii) */
            /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
            predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
            step_index = predictor & 0x7F;
            predictor &= ~0x7F;

            if (cs->step_index == step_index) {
                int diff = predictor - cs->predictor;
                if (diff < 0)
                    diff = - diff;
                if (diff > 0x7f)
                    goto update;
            } else {
            update:
                cs->step_index = step_index;
                cs->predictor = predictor;
            }

            if (cs->step_index > 88u){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
                       channel, cs->step_index);
                return AVERROR_INVALIDDATA;
            }

            samples = samples_p[channel];

            for (m = 0; m < 64; m += 2) {
                int byte = bytestream2_get_byteu(&gb);
                samples[m    ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F, 3);
                samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4  , 3);
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_WAV:
        for(i=0; i<avctx->channels; i++){
            cs = &(c->status[i]);
            cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);

            cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
            if (cs->step_index > 88u){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
                       i, cs->step_index);
                return AVERROR_INVALIDDATA;
            }
        }

        if (avctx->bits_per_coded_sample != 4) {
            int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
            GetBitContext g;

            ret = init_get_bits8(&g, gb.buffer, bytestream2_get_bytes_left(&gb));
            if (ret < 0)
                return ret;
            for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
                for (i = 0; i < avctx->channels; i++) {
                    cs = &c->status[i];
                    samples = &samples_p[i][1 + n * samples_per_block];
                    for (m = 0; m < samples_per_block; m++) {
                        samples[m] = adpcm_ima_wav_expand_nibble(cs, &g,
                                          avctx->bits_per_coded_sample);
                    }
                }
            }
            bytestream2_skip(&gb, avctx->block_align - avctx->channels * 4);
        } else {
            for (n = 0; n < (nb_samples - 1) / 8; n++) {
                for (i = 0; i < avctx->channels; i++) {
                    cs = &c->status[i];
                    samples = &samples_p[i][1 + n * 8];
                    for (m = 0; m < 8; m += 2) {
                        int v = bytestream2_get_byteu(&gb);
                        samples[m    ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
                        samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4  , 3);
                    }
                }
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_4XM:
        for (i = 0; i < avctx->channels; i++)
            c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);

        for (i = 0; i < avctx->channels; i++) {
            c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
            if (c->status[i].step_index > 88u) {
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
                       i, c->status[i].step_index);
                return AVERROR_INVALIDDATA;
            }
        }

        for (i = 0; i < avctx->channels; i++) {
            samples = (int16_t *)frame->data[i];
            cs = &c->status[i];
            for (n = nb_samples >> 1; n > 0; n--) {
                int v = bytestream2_get_byteu(&gb);
                *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
                *samples++ = adpcm_ima_expand_nibble(cs, v >> 4  , 4);
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_MS:
    {
        int block_predictor;

        block_predictor = bytestream2_get_byteu(&gb);
        if (block_predictor > 6) {
            av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
                   block_predictor);
            return AVERROR_INVALIDDATA;
        }
        c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
        c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
        if (st) {
            block_predictor = bytestream2_get_byteu(&gb);
            if (block_predictor > 6) {
                av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
                       block_predictor);
                return AVERROR_INVALIDDATA;
            }
            c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
            c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
        }
        c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
        if (st){
            c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
        }

        c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
        if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
        c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
        if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);

        *samples++ = c->status[0].sample2;
        if (st) *samples++ = c->status[1].sample2;
        *samples++ = c->status[0].sample1;
        if (st) *samples++ = c->status[1].sample1;
        for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
            int byte = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4  );
            *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
        }
        break;
    }
    case AV_CODEC_ID_ADPCM_IMA_DK4:
        for (channel = 0; channel < avctx->channels; channel++) {
            cs = &c->status[channel];
            cs->predictor  = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
            cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
            if (cs->step_index > 88u){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
                       channel, cs->step_index);
                return AVERROR_INVALIDDATA;
            }
        }
        for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
            int v = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4  , 3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_DK3:
    {
        int last_byte = 0;
        int nibble;
        int decode_top_nibble_next = 0;
        int diff_channel;
        const int16_t *samples_end = samples + avctx->channels * nb_samples;

        bytestream2_skipu(&gb, 10);
        c->status[0].predictor  = sign_extend(bytestream2_get_le16u(&gb), 16);
        c->status[1].predictor  = sign_extend(bytestream2_get_le16u(&gb), 16);
        c->status[0].step_index = bytestream2_get_byteu(&gb);
        c->status[1].step_index = bytestream2_get_byteu(&gb);
        if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
            av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
                   c->status[0].step_index, c->status[1].step_index);
            return AVERROR_INVALIDDATA;
        }
        /* sign extend the predictors */
        diff_channel = c->status[1].predictor;

        /* DK3 ADPCM support macro */
#define DK3_GET_NEXT_NIBBLE() \
    if (decode_top_nibble_next) { \
        nibble = last_byte >> 4; \
        decode_top_nibble_next = 0; \
    } else { \
        last_byte = bytestream2_get_byteu(&gb); \
        nibble = last_byte & 0x0F; \
        decode_top_nibble_next = 1; \
    }

        while (samples < samples_end) {

            /* for this algorithm, c->status[0] is the sum channel and
             * c->status[1] is the diff channel */

            /* process the first predictor of the sum channel */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

            /* process the diff channel predictor */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[1], nibble, 3);

            /* process the first pair of stereo PCM samples */
            diff_channel = (diff_channel + c->status[1].predictor) / 2;
            *samples++ = c->status[0].predictor + c->status[1].predictor;
            *samples++ = c->status[0].predictor - c->status[1].predictor;

            /* process the second predictor of the sum channel */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

            /* process the second pair of stereo PCM samples */
            diff_channel = (diff_channel + c->status[1].predictor) / 2;
            *samples++ = c->status[0].predictor + c->status[1].predictor;
            *samples++ = c->status[0].predictor - c->status[1].predictor;
        }

        if ((bytestream2_tell(&gb) & 1))
            bytestream2_skip(&gb, 1);
        break;
    }
    case AV_CODEC_ID_ADPCM_IMA_ISS:
        for (channel = 0; channel < avctx->channels; channel++) {
            cs = &c->status[channel];
            cs->predictor  = sign_extend(bytestream2_get_le16u(&gb), 16);
            cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
            if (cs->step_index > 88u){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
                       channel, cs->step_index);
                return AVERROR_INVALIDDATA;
            }
        }

        for (n = nb_samples >> (1 - st); n > 0; n--) {
            int v1, v2;
            int v = bytestream2_get_byteu(&gb);
            /* nibbles are swapped for mono */
            if (st) {
                v1 = v >> 4;
                v2 = v & 0x0F;
            } else {
                v2 = v >> 4;
                v1 = v & 0x0F;
            }
            *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_APC:
        while (bytestream2_get_bytes_left(&gb) > 0) {
            int v = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],  v >> 4  , 3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_OKI:
        while (bytestream2_get_bytes_left(&gb) > 0) {
            int v = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0],  v >> 4  );
            *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_RAD:
        for (channel = 0; channel < avctx->channels; channel++) {
            cs = &c->status[channel];
            cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
            cs->predictor  = sign_extend(bytestream2_get_le16u(&gb), 16);
            if (cs->step_index > 88u){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
                       channel, cs->step_index);
                return AVERROR_INVALIDDATA;
            }
        }
        for (n = 0; n < nb_samples / 2; n++) {
            int byte[2];

            byte[0] = bytestream2_get_byteu(&gb);
            if (st)
                byte[1] = bytestream2_get_byteu(&gb);
            for(channel = 0; channel < avctx->channels; channel++) {
                *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
            }
            for(channel = 0; channel < avctx->channels; channel++) {
                *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4  , 3);
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_WS:
        if (c->vqa_version == 3) {
            for (channel = 0; channel < avctx->channels; channel++) {
                int16_t *smp = samples_p[channel];

                for (n = nb_samples / 2; n > 0; n--) {
                    int v = bytestream2_get_byteu(&gb);
                    *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4  , 3);
                    *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
                }
            }
        } else {
            for (n = nb_samples / 2; n > 0; n--) {
                for (channel = 0; channel < avctx->channels; channel++) {
                    int v = bytestream2_get_byteu(&gb);
                    *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4  , 3);
                    samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
                }
                samples += avctx->channels;
            }
        }
        bytestream2_seek(&gb, 0, SEEK_END);
        break;
    case AV_CODEC_ID_ADPCM_XA:
    {
        int16_t *out0 = samples_p[0];
        int16_t *out1 = samples_p[1];
        int samples_per_block = 28 * (3 - avctx->channels) * 4;
        int sample_offset = 0;

        while (bytestream2_get_bytes_left(&gb) >= 128) {
            if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
                                 &c->status[0], &c->status[1],
                                 avctx->channels, sample_offset)) < 0)
                return ret;
            bytestream2_skipu(&gb, 128);
            sample_offset += samples_per_block;
        }
        break;
    }
    case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
        for (i=0; i<=st; i++) {
            c->status[i].step_index = bytestream2_get_le32u(&gb);
            if (c->status[i].step_index > 88u) {
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
                       i, c->status[i].step_index);
                return AVERROR_INVALIDDATA;
            }
        }
        for (i=0; i<=st; i++) {
            c->status[i].predictor  = bytestream2_get_le32u(&gb);
            if (FFABS(c->status[i].predictor) > (1<<16))
                return AVERROR_INVALIDDATA;
        }

        for (n = nb_samples >> (1 - st); n > 0; n--) {
            int byte   = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],  byte >> 4,   3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
        for (n = nb_samples >> (1 - st); n > 0; n--) {
            int byte = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],  byte >> 4,   6);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
        }
        break;
    case AV_CODEC_ID_ADPCM_EA:
    {
        int previous_left_sample, previous_right_sample;
        int current_left_sample, current_right_sample;
        int next_left_sample, next_right_sample;
        int coeff1l, coeff2l, coeff1r, coeff2r;
        int shift_left, shift_right;

        /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
           each coding 28 stereo samples. */

        if(avctx->channels != 2)
            return AVERROR_INVALIDDATA;

        current_left_sample   = sign_extend(bytestream2_get_le16u(&gb), 16);
        previous_left_sample  = sign_extend(bytestream2_get_le16u(&gb), 16);
        current_right_sample  = sign_extend(bytestream2_get_le16u(&gb), 16);
        previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);

        for (count1 = 0; count1 < nb_samples / 28; count1++) {
            int byte = bytestream2_get_byteu(&gb);
            coeff1l = ea_adpcm_table[ byte >> 4       ];
            coeff2l = ea_adpcm_table[(byte >> 4  ) + 4];
            coeff1r = ea_adpcm_table[ byte & 0x0F];
            coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];

            byte = bytestream2_get_byteu(&gb);
            shift_left  = 20 - (byte >> 4);
            shift_right = 20 - (byte & 0x0F);

            for (count2 = 0; count2 < 28; count2++) {
                byte = bytestream2_get_byteu(&gb);
                next_left_sample  = sign_extend(byte >> 4, 4) << shift_left;
                next_right_sample = sign_extend(byte,      4) << shift_right;

                next_left_sample = (next_left_sample +
                    (current_left_sample * coeff1l) +
                    (previous_left_sample * coeff2l) + 0x80) >> 8;
                next_right_sample = (next_right_sample +
                    (current_right_sample * coeff1r) +
                    (previous_right_sample * coeff2r) + 0x80) >> 8;

                previous_left_sample = current_left_sample;
                current_left_sample = av_clip_int16(next_left_sample);
                previous_right_sample = current_right_sample;
                current_right_sample = av_clip_int16(next_right_sample);
                *samples++ = current_left_sample;
                *samples++ = current_right_sample;
            }
        }

        bytestream2_skip(&gb, 2); // Skip terminating 0x0000

        break;
    }
    case AV_CODEC_ID_ADPCM_EA_MAXIS_XA:
    {
        int coeff[2][2], shift[2];

        for(channel = 0; channel < avctx->channels; channel++) {
            int byte = bytestream2_get_byteu(&gb);
            for (i=0; i<2; i++)
                coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
            shift[channel] = 20 - (byte & 0x0F);
        }
        for (count1 = 0; count1 < nb_samples / 2; count1++) {
            int byte[2];

            byte[0] = bytestream2_get_byteu(&gb);
            if (st) byte[1] = bytestream2_get_byteu(&gb);
            for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
                for(channel = 0; channel < avctx->channels; channel++) {
                    int sample = sign_extend(byte[channel] >> i, 4) << shift[channel];
                    sample = (sample +
                             c->status[channel].sample1 * coeff[channel][0] +
                             c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
                    c->status[channel].sample2 = c->status[channel].sample1;
                    c->status[channel].sample1 = av_clip_int16(sample);
                    *samples++ = c->status[channel].sample1;
                }
            }
        }
        bytestream2_seek(&gb, 0, SEEK_END);
        break;
    }
    case AV_CODEC_ID_ADPCM_EA_R1:
    case AV_CODEC_ID_ADPCM_EA_R2:
    case AV_CODEC_ID_ADPCM_EA_R3: {
        /* channel numbering
           2chan: 0=fl, 1=fr
           4chan: 0=fl, 1=rl, 2=fr, 3=rr
           6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
        const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
        int previous_sample, current_sample, next_sample;
        int coeff1, coeff2;
        int shift;
        unsigned int channel;
        uint16_t *samplesC;
        int count = 0;
        int offsets[6];

        for (channel=0; channel<avctx->channels; channel++)
            offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
                                             bytestream2_get_le32(&gb)) +
                               (avctx->channels + 1) * 4;

        for (channel=0; channel<avctx->channels; channel++) {
            bytestream2_seek(&gb, offsets[channel], SEEK_SET);
            samplesC = samples_p[channel];

            if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
                current_sample  = sign_extend(bytestream2_get_le16(&gb), 16);
                previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
            } else {
                current_sample  = c->status[channel].predictor;
                previous_sample = c->status[channel].prev_sample;
            }

            for (count1 = 0; count1 < nb_samples / 28; count1++) {
                int byte = bytestream2_get_byte(&gb);
                if (byte == 0xEE) {  /* only seen in R2 and R3 */
                    current_sample  = sign_extend(bytestream2_get_be16(&gb), 16);
                    previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);

                    for (count2=0; count2<28; count2++)
                        *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
                } else {
                    coeff1 = ea_adpcm_table[ byte >> 4     ];
                    coeff2 = ea_adpcm_table[(byte >> 4) + 4];
                    shift = 20 - (byte & 0x0F);

                    for (count2=0; count2<28; count2++) {
                        if (count2 & 1)
                            next_sample = (unsigned)sign_extend(byte,    4) << shift;
                        else {
                            byte = bytestream2_get_byte(&gb);
                            next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
                        }

                        next_sample += (current_sample  * coeff1) +
                                       (previous_sample * coeff2);
                        next_sample = av_clip_int16(next_sample >> 8);

                        previous_sample = current_sample;
                        current_sample  = next_sample;
                        *samplesC++ = current_sample;
                    }
                }
            }
            if (!count) {
                count = count1;
            } else if (count != count1) {
                av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
                count = FFMAX(count, count1);
            }

            if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
                c->status[channel].predictor   = current_sample;
                c->status[channel].prev_sample = previous_sample;
            }
        }

        frame->nb_samples = count * 28;
        bytestream2_seek(&gb, 0, SEEK_END);
        break;
    }
    case AV_CODEC_ID_ADPCM_EA_XAS:
        for (channel=0; channel<avctx->channels; channel++) {
            int coeff[2][4], shift[4];
            int16_t *s = samples_p[channel];
            for (n = 0; n < 4; n++, s += 32) {
                int val = sign_extend(bytestream2_get_le16u(&gb), 16);
                for (i=0; i<2; i++)
                    coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
                s[0] = val & ~0x0F;

                val = sign_extend(bytestream2_get_le16u(&gb), 16);
                shift[n] = 20 - (val & 0x0F);
                s[1] = val & ~0x0F;
            }

            for (m=2; m<32; m+=2) {
                s = &samples_p[channel][m];
                for (n = 0; n < 4; n++, s += 32) {
                    int level, pred;
                    int byte = bytestream2_get_byteu(&gb);

                    level = sign_extend(byte >> 4, 4) << shift[n];
                    pred  = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
                    s[0]  = av_clip_int16((level + pred + 0x80) >> 8);

                    level = sign_extend(byte, 4) << shift[n];
                    pred  = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
                    s[1]  = av_clip_int16((level + pred + 0x80) >> 8);
                }
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_AMV:
        c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
        c->status[0].step_index = bytestream2_get_le16u(&gb);
        bytestream2_skipu(&gb, 4);
        if (c->status[0].step_index > 88u) {
            av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
                   c->status[0].step_index);
            return AVERROR_INVALIDDATA;
        }

        for (n = nb_samples >> (1 - st); n > 0; n--) {
            int v = bytestream2_get_byteu(&gb);

            *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
        }
        break;
    case AV_CODEC_ID_ADPCM_IMA_SMJPEG:
        for (i = 0; i < avctx->channels; i++) {
            c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
            c->status[i].step_index = bytestream2_get_byteu(&gb);
            bytestream2_skipu(&gb, 1);
            if (c->status[i].step_index > 88u) {
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
                       c->status[i].step_index);
                return AVERROR_INVALIDDATA;
            }
        }

        for (n = nb_samples >> (1 - st); n > 0; n--) {
            int v = bytestream2_get_byteu(&gb);

            *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4, 3);
            *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf, 3);
        }
        break;
    case AV_CODEC_ID_ADPCM_CT:
        for (n = nb_samples >> (1 - st); n > 0; n--) {
            int v = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4  );
            *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
        }
        break;
    case AV_CODEC_ID_ADPCM_SBPRO_4:
    case AV_CODEC_ID_ADPCM_SBPRO_3:
    case AV_CODEC_ID_ADPCM_SBPRO_2:
        if (!c->status[0].step_index) {
            /* the first byte is a raw sample */
            *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
            if (st)
                *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
            c->status[0].step_index = 1;
            nb_samples--;
        }
        if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
            for (n = nb_samples >> (1 - st); n > 0; n--) {
                int byte = bytestream2_get_byteu(&gb);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                                                       byte >> 4,   4, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                                                       byte & 0x0F, 4, 0);
            }
        } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
            for (n = (nb_samples<<st) / 3; n > 0; n--) {
                int byte = bytestream2_get_byteu(&gb);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                                                        byte >> 5        , 3, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                                                       (byte >> 2) & 0x07, 3, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                                                        byte & 0x03,       2, 0);
            }
        } else {
            for (n = nb_samples >> (2 - st); n > 0; n--) {
                int byte = bytestream2_get_byteu(&gb);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                                                        byte >> 6        , 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                                                       (byte >> 4) & 0x03, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                                                       (byte >> 2) & 0x03, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                                                        byte & 0x03,       2, 2);
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_SWF:
        adpcm_swf_decode(avctx, buf, buf_size, samples);
        bytestream2_seek(&gb, 0, SEEK_END);
        break;
    case AV_CODEC_ID_ADPCM_YAMAHA:
        for (n = nb_samples >> (1 - st); n > 0; n--) {
            int v = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
            *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4  );
        }
        break;
    case AV_CODEC_ID_ADPCM_AFC:
    {
        int samples_per_block;
        int blocks;

        if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
            samples_per_block = avctx->extradata[0] / 16;
            blocks = nb_samples / avctx->extradata[0];
        } else {
            samples_per_block = nb_samples / 16;
            blocks = 1;
        }

        for (m = 0; m < blocks; m++) {
            for (channel = 0; channel < avctx->channels; channel++) {
                int prev1 = c->status[channel].sample1;
                int prev2 = c->status[channel].sample2;

                samples = samples_p[channel] + m * 16;
                /* Read in every sample for this channel.  */
                for (i = 0; i < samples_per_block; i++) {
                    int byte = bytestream2_get_byteu(&gb);
                    int scale = 1 << (byte >> 4);
                    int index = byte & 0xf;
                    int factor1 = ff_adpcm_afc_coeffs[0][index];
                    int factor2 = ff_adpcm_afc_coeffs[1][index];

                    /* Decode 16 samples.  */
                    for (n = 0; n < 16; n++) {
                        int32_t sampledat;

                        if (n & 1) {
                            sampledat = sign_extend(byte, 4);
                        } else {
                            byte = bytestream2_get_byteu(&gb);
                            sampledat = sign_extend(byte >> 4, 4);
                        }

                        sampledat = ((prev1 * factor1 + prev2 * factor2) +
                                     ((sampledat * scale) << 11)) >> 11;
                        *samples = av_clip_int16(sampledat);
                        prev2 = prev1;
                        prev1 = *samples++;
                    }
                }

                c->status[channel].sample1 = prev1;
                c->status[channel].sample2 = prev2;
            }
        }
        bytestream2_seek(&gb, 0, SEEK_END);
        break;
    }
    case AV_CODEC_ID_ADPCM_THP:
    case AV_CODEC_ID_ADPCM_THP_LE:
    {
        int table[10][16];
        int ch;

#define THP_GET16(g) \
    sign_extend( \
        avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
        bytestream2_get_le16u(&(g)) : \
        bytestream2_get_be16u(&(g)), 16)

        if (avctx->extradata) {
            GetByteContext tb;
            if (avctx->extradata_size < 32 * avctx->channels) {
                av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
                return AVERROR_INVALIDDATA;
            }

            bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
            for (i = 0; i < avctx->channels; i++)
                for (n = 0; n < 16; n++)
                    table[i][n] = THP_GET16(tb);
        } else {
            for (i = 0; i < avctx->channels; i++)
                for (n = 0; n < 16; n++)
                    table[i][n] = THP_GET16(gb);

            if (!c->has_status) {
                /* Initialize the previous sample.  */
                for (i = 0; i < avctx->channels; i++) {
                    c->status[i].sample1 = THP_GET16(gb);
                    c->status[i].sample2 = THP_GET16(gb);
                }
                c->has_status = 1;
            } else {
                bytestream2_skip(&gb, avctx->channels * 4);
            }
        }

        for (ch = 0; ch < avctx->channels; ch++) {
            samples = samples_p[ch];

            /* Read in every sample for this channel.  */
            for (i = 0; i < (nb_samples + 13) / 14; i++) {
                int byte = bytestream2_get_byteu(&gb);
                int index = (byte >> 4) & 7;
                unsigned int exp = byte & 0x0F;
                int factor1 = table[ch][index * 2];
                int factor2 = table[ch][index * 2 + 1];

                /* Decode 14 samples.  */
                for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
                    int32_t sampledat;

                    if (n & 1) {
                        sampledat = sign_extend(byte, 4);
                    } else {
                        byte = bytestream2_get_byteu(&gb);
                        sampledat = sign_extend(byte >> 4, 4);
                    }

                    sampledat = ((c->status[ch].sample1 * factor1
                                  + c->status[ch].sample2 * factor2) >> 11) + (sampledat << exp);
                    *samples = av_clip_int16(sampledat);
                    c->status[ch].sample2 = c->status[ch].sample1;
                    c->status[ch].sample1 = *samples++;
                }
            }
        }
        break;
    }
    case AV_CODEC_ID_ADPCM_DTK:
        for (channel = 0; channel < avctx->channels; channel++) {
            samples = samples_p[channel];

            /* Read in every sample for this channel.  */
            for (i = 0; i < nb_samples / 28; i++) {
                int byte, header;
                if (channel)
                    bytestream2_skipu(&gb, 1);
                header = bytestream2_get_byteu(&gb);
                bytestream2_skipu(&gb, 3 - channel);

                /* Decode 28 samples.  */
                for (n = 0; n < 28; n++) {
                    int32_t sampledat, prev;

                    switch (header >> 4) {
                    case 1:
                        prev = (c->status[channel].sample1 * 0x3c);
                        break;
                    case 2:
                        prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
                        break;
                    case 3:
                        prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
                        break;
                    default:
                        prev = 0;
                    }

                    prev = av_clip_intp2((prev + 0x20) >> 6, 21);

                    byte = bytestream2_get_byteu(&gb);
                    if (!channel)
                        sampledat = sign_extend(byte, 4);
                    else
                        sampledat = sign_extend(byte >> 4, 4);

                    sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
                    *samples++ = av_clip_int16(sampledat >> 6);
                    c->status[channel].sample2 = c->status[channel].sample1;
                    c->status[channel].sample1 = sampledat;
                }
            }
            if (!channel)
                bytestream2_seek(&gb, 0, SEEK_SET);
        }
        break;

    default:
        return -1;
    }

    if (avpkt->size && bytestream2_tell(&gb) == 0) {
        av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
        return AVERROR_INVALIDDATA;
    }

    *got_frame_ptr = 1;

    if (avpkt->size < bytestream2_tell(&gb)) {
        av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
        return avpkt->size;
    }

    return bytestream2_tell(&gb);
}

static void adpcm_flush(AVCodecContext *avctx)
{
    ADPCMDecodeContext *c = avctx->priv_data;
    c->has_status = 0;
}


static const enum AVSampleFormat sample_fmts_s16[]  = { AV_SAMPLE_FMT_S16,
                                                        AV_SAMPLE_FMT_NONE };
static const enum AVSampleFormat sample_fmts_s16p[] = { AV_SAMPLE_FMT_S16P,
                                                        AV_SAMPLE_FMT_NONE };
static const enum AVSampleFormat sample_fmts_both[] = { AV_SAMPLE_FMT_S16,
                                                        AV_SAMPLE_FMT_S16P,
                                                        AV_SAMPLE_FMT_NONE };

#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
AVCodec ff_ ## name_ ## _decoder = {                        \
    .name           = #name_,                               \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_),     \
    .type           = AVMEDIA_TYPE_AUDIO,                   \
    .id             = id_,                                  \
    .priv_data_size = sizeof(ADPCMDecodeContext),           \
    .init           = adpcm_decode_init,                    \
    .decode         = adpcm_decode_frame,                   \
    .flush          = adpcm_flush,                          \
    .capabilities   = AV_CODEC_CAP_DR1,                     \
    .sample_fmts    = sample_fmts_,                         \
}

/* Note: Do not forget to add new entries to the Makefile as well. */
ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM,         sample_fmts_s16p, adpcm_4xm,         "ADPCM 4X Movie");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AFC,         sample_fmts_s16p, adpcm_afc,         "ADPCM Nintendo Gamecube AFC");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT,          sample_fmts_s16,  adpcm_ct,          "ADPCM Creative Technology");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_DTK,         sample_fmts_s16p, adpcm_dtk,         "ADPCM Nintendo Gamecube DTK");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA,          sample_fmts_s16,  adpcm_ea,          "ADPCM Electronic Arts");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, sample_fmts_s16,  adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1,       sample_fmts_s16p, adpcm_ea_r1,       "ADPCM Electronic Arts R1");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2,       sample_fmts_s16p, adpcm_ea_r2,       "ADPCM Electronic Arts R2");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3,       sample_fmts_s16p, adpcm_ea_r3,       "ADPCM Electronic Arts R3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS,      sample_fmts_s16p, adpcm_ea_xas,      "ADPCM Electronic Arts XAS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV,     sample_fmts_s16,  adpcm_ima_amv,     "ADPCM IMA AMV");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC,     sample_fmts_s16,  adpcm_ima_apc,     "ADPCM IMA CRYO APC");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3,     sample_fmts_s16,  adpcm_ima_dk3,     "ADPCM IMA Duck DK3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4,     sample_fmts_s16,  adpcm_ima_dk4,     "ADPCM IMA Duck DK4");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, sample_fmts_s16,  adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, sample_fmts_s16,  adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS,     sample_fmts_s16,  adpcm_ima_iss,     "ADPCM IMA Funcom ISS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_OKI,     sample_fmts_s16,  adpcm_ima_oki,     "ADPCM IMA Dialogic OKI");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT,      sample_fmts_s16p, adpcm_ima_qt,      "ADPCM IMA QuickTime");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_RAD,     sample_fmts_s16,  adpcm_ima_rad,     "ADPCM IMA Radical");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG,  sample_fmts_s16,  adpcm_ima_smjpeg,  "ADPCM IMA Loki SDL MJPEG");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV,     sample_fmts_s16p, adpcm_ima_wav,     "ADPCM IMA WAV");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS,      sample_fmts_both, adpcm_ima_ws,      "ADPCM IMA Westwood");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS,          sample_fmts_s16,  adpcm_ms,          "ADPCM Microsoft");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2,     sample_fmts_s16,  adpcm_sbpro_2,     "ADPCM Sound Blaster Pro 2-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3,     sample_fmts_s16,  adpcm_sbpro_3,     "ADPCM Sound Blaster Pro 2.6-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4,     sample_fmts_s16,  adpcm_sbpro_4,     "ADPCM Sound Blaster Pro 4-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF,         sample_fmts_s16,  adpcm_swf,         "ADPCM Shockwave Flash");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP_LE,      sample_fmts_s16p, adpcm_thp_le,      "ADPCM Nintendo THP (little-endian)");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP,         sample_fmts_s16p, adpcm_thp,         "ADPCM Nintendo THP");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA,          sample_fmts_s16p, adpcm_xa,          "ADPCM CDROM XA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA,      sample_fmts_s16,  adpcm_yamaha,      "ADPCM Yamaha");