/*
 * Copyright (c) 2001-2003 The ffmpeg Project
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "get_bits.h"
#include "put_bits.h"
#include "bytestream.h"
#include "adpcm.h"
#include "adpcm_data.h"
#include "internal.h"

/**
 * @file
 * ADPCM encoders
 * First version by Francois Revol (revol@free.fr)
 * Fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
 * by Mike Melanson (melanson@pcisys.net)
 *
 * See ADPCM decoder reference documents for codec information.
 */
typedef struct TrellisPath {
    int nibble;
    int prev;
} TrellisPath;

typedef struct TrellisNode {
    uint32_t ssd;
    int path;
    int sample1;
    int sample2;
    int step;
} TrellisNode;

typedef struct ADPCMEncodeContext {
    ADPCMChannelStatus status[6];
    TrellisPath *paths;
    TrellisNode *node_buf;
    TrellisNode **nodep_buf;
    uint8_t *trellis_hash;
} ADPCMEncodeContext;

#define FREEZE_INTERVAL 128
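
/* With trellis quantization enabled the search keeps a frontier of
 * 2^trellis candidate decoder states per sample, and every FREEZE_INTERVAL
 * samples the best surviving path is flushed to the output.  That is what
 * the buffers above are sized for in adpcm_encode_init():
 * (1 << trellis) * FREEZE_INTERVAL path entries, two generations of
 * (1 << trellis) nodes, and a 64K hash used to drop duplicate states. */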
static av_cold int adpcm_encode_init(AVCodecContext *avctx)
{
    ADPCMEncodeContext *s = avctx->priv_data;
    uint8_t *extradata;
    int i;
    int ret = AVERROR(ENOMEM);

    if (avctx->channels > 2) {
        av_log(avctx, AV_LOG_ERROR, "only stereo or mono is supported\n");
        return AVERROR(EINVAL);
    }

    if (avctx->trellis && (unsigned)avctx->trellis > 16U) {
        av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
        return AVERROR(EINVAL);
    }

    if (avctx->trellis) {
        int frontier  = 1 << avctx->trellis;
        int max_paths =  frontier * FREEZE_INTERVAL;
        FF_ALLOC_OR_GOTO(avctx, s->paths,
                         max_paths * sizeof(*s->paths), error);
        FF_ALLOC_OR_GOTO(avctx, s->node_buf,
                         2 * frontier * sizeof(*s->node_buf), error);
        FF_ALLOC_OR_GOTO(avctx, s->nodep_buf,
                         2 * frontier * sizeof(*s->nodep_buf), error);
        FF_ALLOC_OR_GOTO(avctx, s->trellis_hash,
                         65536 * sizeof(*s->trellis_hash), error);
    }

    avctx->bits_per_coded_sample = av_get_bits_per_sample(avctx->codec->id);

    switch (avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_IMA_WAV:
        /* each 16 bits sample gives one nibble
           and we have 4 bytes per channel overhead */
        avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 /
                            (4 * avctx->channels) + 1;
        /* seems frame_size isn't taken into account...
           have to buffer the samples :-( */
        avctx->block_align = BLKSIZE;
        break;
    case AV_CODEC_ID_ADPCM_IMA_QT:
        avctx->frame_size  = 64;
        avctx->block_align = 34 * avctx->channels;
        break;
    case AV_CODEC_ID_ADPCM_MS:
        /* each 16 bits sample gives one nibble
           and we have 7 bytes per channel overhead */
        avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 /
                            avctx->channels + 2;
        avctx->block_align = BLKSIZE;
        if (!(avctx->extradata = av_malloc(32 + FF_INPUT_BUFFER_PADDING_SIZE)))
            goto error;
        avctx->extradata_size = 32;
        extradata = avctx->extradata;
        bytestream_put_le16(&extradata, avctx->frame_size);
        bytestream_put_le16(&extradata, 7); /* wNumCoef */
        for (i = 0; i < 7; i++) {
            bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff1[i] * 4);
            bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff2[i] * 4);
        }
        break;
    case AV_CODEC_ID_ADPCM_YAMAHA:
        avctx->frame_size  = BLKSIZE * 2 / avctx->channels;
        avctx->block_align = BLKSIZE;
        break;
    case AV_CODEC_ID_ADPCM_SWF:
        if (avctx->sample_rate != 11025 &&
            avctx->sample_rate != 22050 &&
            avctx->sample_rate != 44100) {
            av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, "
                   "22050 or 44100\n");
            ret = AVERROR(EINVAL);
            goto error;
        }
        avctx->frame_size = 512 * (avctx->sample_rate / 11025);
        break;
    default:
        ret = AVERROR(EINVAL);
        goto error;
    }

#if FF_API_OLD_ENCODE_AUDIO
    if (!(avctx->coded_frame = avcodec_alloc_frame()))
        goto error;
#endif

    return 0;
error:
    av_freep(&s->paths);
    av_freep(&s->node_buf);
    av_freep(&s->nodep_buf);
    av_freep(&s->trellis_hash);
    return ret;
}
static av_cold int adpcm_encode_close(AVCodecContext *avctx)
{
    ADPCMEncodeContext *s = avctx->priv_data;
#if FF_API_OLD_ENCODE_AUDIO
    av_freep(&avctx->coded_frame);
#endif
    av_freep(&s->paths);
    av_freep(&s->node_buf);
    av_freep(&s->nodep_buf);
    av_freep(&s->trellis_hash);

    return 0;
}
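
/* IMA ADPCM: each 16-bit sample is coded as a 4-bit nibble, a sign bit plus
 * a 3-bit magnitude approximating |delta| in quarters of the current step
 * size; the step index then walks ff_adpcm_step_table as directed by
 * ff_adpcm_index_table.  For example, with step = 16 and delta = 25 the
 * magnitude is 25 * 4 / 16 = 6 and the decoder reconstructs a difference of
 * step * ff_adpcm_yamaha_difflookup[6] / 8 = 26. */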
static inline uint8_t adpcm_ima_compress_sample(ADPCMChannelStatus *c,
                                                int16_t sample)
{
    int delta  = sample - c->prev_sample;
    int nibble = FFMIN(7, abs(delta) * 4 /
                       ff_adpcm_step_table[c->step_index]) + (delta < 0) * 8;
    c->prev_sample += ((ff_adpcm_step_table[c->step_index] *
                        ff_adpcm_yamaha_difflookup[nibble]) / 8);
    c->prev_sample  = av_clip_int16(c->prev_sample);
    c->step_index   = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
    return nibble;
}
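
/* The QuickTime IMA variant quantizes the delta bit by bit (successive
 * approximation against step, step/2, step/4) instead of using the
 * difflookup shortcut above, and it adds the reconstructed difference back
 * into prev_sample so the encoder's predictor tracks the decoder exactly.
 * For example, with step = 32 and delta = 45 the loop sets the magnitude
 * bits to 101b (32 and 8 fit, 16 does not), giving diff = 4 + 32 + 8 = 44. */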
static inline uint8_t adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c,
                                                   int16_t sample)
{
    int delta = sample - c->prev_sample;
    int mask, step = ff_adpcm_step_table[c->step_index];
    int diff = step >> 3;
    int nibble = 0;

    if (delta < 0) {
        nibble = 8;
        delta  = -delta;
    }

    for (mask = 4; mask;) {
        if (delta >= step) {
            nibble |= mask;
            delta  -= step;
            diff   += step;
        }
        step >>= 1;
        mask >>= 1;
    }

    if (nibble & 8)
        c->prev_sample -= diff;
    else
        c->prev_sample += diff;

    c->prev_sample = av_clip_int16(c->prev_sample);
    c->step_index  = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);

    return nibble;
}
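
/* MS ADPCM predicts each sample from the two previous decoded samples with
 * one of seven fixed coefficient pairs (scaled by 1/64 here; the same table,
 * times 4, is exported as extradata in adpcm_encode_init()), quantizes the
 * prediction error by the adaptive quantizer idelta, and rescales idelta
 * through ff_adpcm_AdaptationTable.  adpcm_encode_frame() always writes
 * predictor index 0; with the standard MS coefficient set that means
 * coeff1 = 64 (1.0 in this /64 scale) and coeff2 = 0, so the prediction
 * reduces to the previous sample. */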
static inline uint8_t adpcm_ms_compress_sample(ADPCMChannelStatus *c,
                                               int16_t sample)
{
    int predictor, nibble, bias;

    predictor = (((c->sample1) * (c->coeff1)) +
                 (( c->sample2) * (c->coeff2))) / 64;

    nibble = sample - predictor;
    if (nibble >= 0)
        bias =  c->idelta / 2;
    else
        bias = -c->idelta / 2;

    nibble = (nibble + bias) / c->idelta;
    nibble = av_clip(nibble, -8, 7) & 0x0F;

    predictor += ((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);

    c->idelta = (ff_adpcm_AdaptationTable[nibble] * c->idelta) >> 8;
    if (c->idelta < 16)
        c->idelta = 16;

    return nibble;
}
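
/* Yamaha (AICA) ADPCM uses the same sign + 3-bit-magnitude nibble as IMA,
 * but adapts the step size multiplicatively: the next step is
 * (step * ff_adpcm_yamaha_indexscale[nibble]) >> 8, clamped to
 * [127, 24567], instead of walking an index into a fixed step table. */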
static inline uint8_t adpcm_yamaha_compress_sample(ADPCMChannelStatus *c,
                                                   int16_t sample)
{
    int nibble, delta;

    if (!c->step) {
        c->predictor = 0;
        c->step      = 127;
    }

    delta = sample - c->predictor;

    nibble = FFMIN(7, abs(delta) * 4 / c->step) + (delta < 0) * 8;

    c->predictor += ((c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8);
    c->predictor  = av_clip_int16(c->predictor);
    c->step       = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
    c->step       = av_clip(c->step, 127, 24567);

    return nibble;
}
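
/* Trellis quantization: instead of greedily picking the nibble that best
 * matches the current sample, keep the "frontier" best decoder states
 * (ranked by accumulated squared error, ssd) and try a small window of
 * candidate nibbles from each of them, Viterbi style.  Chosen nibbles are
 * chained through TrellisPath and flushed to dst every FREEZE_INTERVAL
 * samples; the hash collapses states that decode to the same sample value
 * so the frontier is not wasted on duplicates. */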
static void adpcm_compress_trellis(AVCodecContext *avctx,
                                   const int16_t *samples, uint8_t *dst,
                                   ADPCMChannelStatus *c, int n)
{
    //FIXME 6% faster if frontier is a compile-time constant
    ADPCMEncodeContext *s = avctx->priv_data;
    const int frontier = 1 << avctx->trellis;
    const int stride   = avctx->channels;
    const int version  = avctx->codec->id;
    TrellisPath *paths       = s->paths, *p;
    TrellisNode *node_buf    = s->node_buf;
    TrellisNode **nodep_buf  = s->nodep_buf;
    TrellisNode **nodes      = nodep_buf; // nodes[] is always sorted by .ssd
    TrellisNode **nodes_next = nodep_buf + frontier;
    int pathn = 0, froze = -1, i, j, k, generation = 0;
    uint8_t *hash = s->trellis_hash;
    memset(hash, 0xff, 65536 * sizeof(*hash));

    memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
    nodes[0]          = node_buf + frontier;
    nodes[0]->ssd     = 0;
    nodes[0]->path    = 0;
    nodes[0]->step    = c->step_index;
    nodes[0]->sample1 = c->sample1;
    nodes[0]->sample2 = c->sample2;
    if (version == AV_CODEC_ID_ADPCM_IMA_WAV ||
        version == AV_CODEC_ID_ADPCM_IMA_QT  ||
        version == AV_CODEC_ID_ADPCM_SWF)
        nodes[0]->sample1 = c->prev_sample;
    if (version == AV_CODEC_ID_ADPCM_MS)
        nodes[0]->step = c->idelta;
    if (version == AV_CODEC_ID_ADPCM_YAMAHA) {
        if (c->step == 0) {
            nodes[0]->step    = 127;
            nodes[0]->sample1 = 0;
        } else {
            nodes[0]->step    = c->step;
            nodes[0]->sample1 = c->predictor;
        }
    }

    for (i = 0; i < n; i++) {
        TrellisNode *t = node_buf + frontier * (i & 1);
        TrellisNode **u;
        int sample   = samples[i * stride];
        int heap_pos = 0;
        memset(nodes_next, 0, frontier * sizeof(TrellisNode*));
        for (j = 0; j < frontier && nodes[j]; j++) {
            // higher j have higher ssd already, so they're likely
            // to yield a suboptimal next sample too
            const int range = (j < frontier / 2) ? 1 : 0;
            const int step  = nodes[j]->step;
            int nidx;
            if (version == AV_CODEC_ID_ADPCM_MS) {
                const int predictor = ((nodes[j]->sample1 * c->coeff1) +
                                       (nodes[j]->sample2 * c->coeff2)) / 64;
                const int div  = (sample - predictor) / step;
                const int nmin = av_clip(div - range, -8, 6);
                const int nmax = av_clip(div + range, -7, 7);
                for (nidx = nmin; nidx <= nmax; nidx++) {
                    const int nibble = nidx & 0xf;
                    int dec_sample   = predictor + nidx * step;
#define STORE_NODE(NAME, STEP_INDEX)\
                    int d;\
                    uint32_t ssd;\
                    int pos;\
                    TrellisNode *u;\
                    uint8_t *h;\
                    dec_sample = av_clip_int16(dec_sample);\
                    d = sample - dec_sample;\
                    ssd = nodes[j]->ssd + d*d;\
                    /* Check for wraparound, skip such samples completely. \
                     * Note, changing ssd to a 64 bit variable would be \
                     * simpler, avoiding this check, but it's slower on \
                     * x86 32 bit at the moment. */\
                    if (ssd < nodes[j]->ssd)\
                        goto next_##NAME;\
                    /* Collapse any two states with the same previous sample value. \
                     * One could also distinguish states by step and by 2nd to last
                     * sample, but the effects of that are negligible.
                     * Since nodes in the previous generation are iterated
                     * through a heap, they're roughly ordered from better to
                     * worse, but not strictly ordered. Therefore, an earlier
                     * node with the same sample value is better in most cases
                     * (and thus the current is skipped), but not strictly
                     * in all cases. Only skipping samples where ssd >=
                     * ssd of the earlier node with the same sample gives
                     * slightly worse quality, though, for some reason. */ \
                    h = &hash[(uint16_t) dec_sample];\
                    if (*h == generation)\
                        goto next_##NAME;\
                    if (heap_pos < frontier) {\
                        pos = heap_pos++;\
                    } else {\
                        /* Try to replace one of the leaf nodes with the new \
                         * one, but try a different slot each time. */\
                        pos = (frontier >> 1) +\
                              (heap_pos & ((frontier >> 1) - 1));\
                        if (ssd > nodes_next[pos]->ssd)\
                            goto next_##NAME;\
                        heap_pos++;\
                    }\
                    *h = generation;\
                    u  = nodes_next[pos];\
                    if (!u) {\
                        assert(pathn < FREEZE_INTERVAL << avctx->trellis);\
                        u = t++;\
                        nodes_next[pos] = u;\
                        u->path = pathn++;\
                    }\
                    u->ssd  = ssd;\
                    u->step = STEP_INDEX;\
                    u->sample2 = nodes[j]->sample1;\
                    u->sample1 = dec_sample;\
                    paths[u->path].nibble = nibble;\
                    paths[u->path].prev   = nodes[j]->path;\
                    /* Sift the newly inserted node up in the heap to \
                     * restore the heap property. */\
                    while (pos > 0) {\
                        int parent = (pos - 1) >> 1;\
                        if (nodes_next[parent]->ssd <= ssd)\
                            break;\
                        FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
                        pos = parent;\
                    }\
                    next_##NAME:;
                    STORE_NODE(ms, FFMAX(16,
                               (ff_adpcm_AdaptationTable[nibble] * step) >> 8));
                }
            } else if (version == AV_CODEC_ID_ADPCM_IMA_WAV ||
                       version == AV_CODEC_ID_ADPCM_IMA_QT  ||
                       version == AV_CODEC_ID_ADPCM_SWF) {
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
                const int predictor = nodes[j]->sample1;\
                const int div       = (sample - predictor) * 4 / STEP_TABLE;\
                int nmin = av_clip(div - range, -7, 6);\
                int nmax = av_clip(div + range, -6, 7);\
                if (nmin <= 0)\
                    nmin--; /* distinguish -0 from +0 */\
                if (nmax < 0)\
                    nmax--;\
                for (nidx = nmin; nidx <= nmax; nidx++) {\
                    const int nibble = nidx < 0 ? 7 - nidx : nidx;\
                    int dec_sample = predictor +\
                                    (STEP_TABLE *\
                                     ff_adpcm_yamaha_difflookup[nibble]) / 8;\
                    STORE_NODE(NAME, STEP_INDEX);\
                }
                LOOP_NODES(ima, ff_adpcm_step_table[step],
                           av_clip(step + ff_adpcm_index_table[nibble], 0, 88));
            } else { //AV_CODEC_ID_ADPCM_YAMAHA
                LOOP_NODES(yamaha, step,
                           av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8,
                                   127, 24567));
#undef LOOP_NODES
#undef STORE_NODE
            }
        }

        u = nodes;
        nodes = nodes_next;
        nodes_next = u;

        generation++;
        if (generation == 255) {
            memset(hash, 0xff, 65536 * sizeof(*hash));
            generation = 0;
        }

        // prevent overflow
        if (nodes[0]->ssd > (1 << 28)) {
            for (j = 1; j < frontier && nodes[j]; j++)
                nodes[j]->ssd -= nodes[0]->ssd;
            nodes[0]->ssd = 0;
        }

        // merge old paths to save memory
        if (i == froze + FREEZE_INTERVAL) {
            p = &paths[nodes[0]->path];
            for (k = i; k > froze; k--) {
                dst[k] = p->nibble;
                p = &paths[p->prev];
            }
            froze = i;
            pathn = 0;
            // other nodes might use paths that don't coincide with the frozen one.
            // checking which nodes do so is too slow, so just kill them all.
            // this also slightly improves quality, but I don't know why.
            memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*));
        }
    }

    p = &paths[nodes[0]->path];
    for (i = n - 1; i > froze; i--) {
        dst[i] = p->nibble;
        p = &paths[p->prev];
    }

    c->predictor  = nodes[0]->sample1;
    c->sample1    = nodes[0]->sample1;
    c->sample2    = nodes[0]->sample2;
    c->step_index = nodes[0]->step;
    c->step       = nodes[0]->step;
    c->idelta     = nodes[0]->step;
}
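
/* adpcm_encode_frame() below packs one block per call.  In outline: IMA WAV
 * blocks start with a 4-byte per-channel header (initial sample, step index,
 * reserved byte) followed by interleaved 4-byte groups of 8 nibbles per
 * channel; IMA QT packs a 2-byte header plus 64 nibbles per channel
 * (34 bytes per channel, matching block_align); MS blocks carry a 7-byte
 * per-channel header (predictor index, idelta, sample1, sample2); SWF writes
 * a 2-bit code size and a 16-bit sample plus 6-bit step index per channel;
 * Yamaha blocks are raw nibbles with no header. */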
static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                              const AVFrame *frame, int *got_packet_ptr)
{
    int n, i, ch, st, pkt_size, ret;
    const int16_t *samples;
    uint8_t *dst;
    ADPCMEncodeContext *c = avctx->priv_data;
    uint8_t *buf;

    samples = (const int16_t *)frame->data[0];
    st = avctx->channels == 2;

    if (avctx->codec_id == AV_CODEC_ID_ADPCM_SWF)
        pkt_size = (2 + avctx->channels * (22 + 4 * (frame->nb_samples - 1)) + 7) / 8;
    else
        pkt_size = avctx->block_align;
    if ((ret = ff_alloc_packet(avpkt, pkt_size))) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }
    dst = avpkt->data;

    switch(avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_IMA_WAV:
    {
        int blocks, j;

        blocks = (frame->nb_samples - 1) / 8;

        for (ch = 0; ch < avctx->channels; ch++) {
            ADPCMChannelStatus *status = &c->status[ch];
            status->prev_sample = samples[ch];
            /* status->step_index = 0;
               XXX: not sure how to init the state machine */
            bytestream_put_le16(&dst, status->prev_sample);
            *dst++ = status->step_index;
            *dst++ = 0; /* unknown */
        }

        /* stereo: 4 bytes (8 samples) for left, 4 bytes for right */
        if (avctx->trellis > 0) {
            FF_ALLOC_OR_GOTO(avctx, buf, avctx->channels * blocks * 8, error);
            for (ch = 0; ch < avctx->channels; ch++) {
                adpcm_compress_trellis(avctx, &samples[avctx->channels + ch],
                                       buf + ch * blocks * 8, &c->status[ch],
                                       blocks * 8);
            }
            for (i = 0; i < blocks; i++) {
                for (ch = 0; ch < avctx->channels; ch++) {
                    uint8_t *buf1 = buf + ch * blocks * 8 + i * 8;
                    for (j = 0; j < 8; j += 2)
                        *dst++ = buf1[j] | (buf1[j + 1] << 4);
                }
            }
            av_free(buf);
        } else {
            for (i = 0; i < blocks; i++) {
                for (ch = 0; ch < avctx->channels; ch++) {
                    ADPCMChannelStatus *status = &c->status[ch];
                    const int16_t *smp = &samples[avctx->channels * (1 + i * 8) + ch];
                    for (j = 0; j < 8; j += 2) {
                        *dst++ = adpcm_ima_compress_sample(status, smp[avctx->channels *  j     ]) |
                                (adpcm_ima_compress_sample(status, smp[avctx->channels * (j + 1)]) << 4);
                    }
                }
            }
        }
        break;
    }
    case AV_CODEC_ID_ADPCM_IMA_QT:
    {
        PutBitContext pb;
        init_put_bits(&pb, dst, pkt_size * 8);

        for (ch = 0; ch < avctx->channels; ch++) {
            put_bits(&pb, 9, (c->status[ch].prev_sample & 0xFFFF) >> 7);
            put_bits(&pb, 7,  c->status[ch].step_index);
            if (avctx->trellis > 0) {
                uint8_t buf[64];
                adpcm_compress_trellis(avctx, samples + ch, buf, &c->status[ch], 64);
                for (i = 0; i < 64; i++)
                    put_bits(&pb, 4, buf[i ^ 1]);
            } else {
                for (i = 0; i < 64; i += 2) {
                    int t1, t2;
                    t1 = adpcm_ima_qt_compress_sample(&c->status[ch],
                                                      samples[avctx->channels * (i + 0) + ch]);
                    t2 = adpcm_ima_qt_compress_sample(&c->status[ch],
                                                      samples[avctx->channels * (i + 1) + ch]);
                    put_bits(&pb, 4, t2);
                    put_bits(&pb, 4, t1);
                }
            }
        }

        flush_put_bits(&pb);
        break;
    }
    case AV_CODEC_ID_ADPCM_SWF:
    {
        PutBitContext pb;
        init_put_bits(&pb, dst, pkt_size * 8);

        n = frame->nb_samples - 1;

        // store AdpcmCodeSize
        put_bits(&pb, 2, 2);    // set 4-bit flash adpcm format

        // init the encoder state
        for (i = 0; i < avctx->channels; i++) {
            // clip step so it fits 6 bits
            c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63);
            put_sbits(&pb, 16, samples[i]);
            put_bits(&pb, 6, c->status[i].step_index);
            c->status[i].prev_sample = samples[i];
        }

        if (avctx->trellis > 0) {
            FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error);
            adpcm_compress_trellis(avctx, samples + avctx->channels, buf,
                                   &c->status[0], n);
            if (avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples + avctx->channels + 1,
                                       buf + n, &c->status[1], n);
            for (i = 0; i < n; i++) {
                put_bits(&pb, 4, buf[i]);
                if (avctx->channels == 2)
                    put_bits(&pb, 4, buf[n + i]);
            }
            av_free(buf);
        } else {
            for (i = 1; i < frame->nb_samples; i++) {
                put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0],
                         samples[avctx->channels * i]));
                if (avctx->channels == 2)
                    put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1],
                             samples[2 * i + 1]));
            }
        }
        flush_put_bits(&pb);
        break;
    }
    case AV_CODEC_ID_ADPCM_MS:
        for (i = 0; i < avctx->channels; i++) {
            int predictor = 0;
            *dst++ = predictor;
            c->status[i].coeff1 = ff_adpcm_AdaptCoeff1[predictor];
            c->status[i].coeff2 = ff_adpcm_AdaptCoeff2[predictor];
        }
        for (i = 0; i < avctx->channels; i++) {
            if (c->status[i].idelta < 16)
                c->status[i].idelta = 16;
            bytestream_put_le16(&dst, c->status[i].idelta);
        }
        for (i = 0; i < avctx->channels; i++)
            c->status[i].sample2 = *samples++;
        for (i = 0; i < avctx->channels; i++) {
            c->status[i].sample1 = *samples++;
            bytestream_put_le16(&dst, c->status[i].sample1);
        }
        for (i = 0; i < avctx->channels; i++)
            bytestream_put_le16(&dst, c->status[i].sample2);

        if (avctx->trellis > 0) {
            n = avctx->block_align - 7 * avctx->channels;
            FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error);
            if (avctx->channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
                for (i = 0; i < n; i += 2)
                    *dst++ = (buf[i] << 4) | buf[i + 1];
            } else {
                adpcm_compress_trellis(avctx, samples,     buf,     &c->status[0], n);
                adpcm_compress_trellis(avctx, samples + 1, buf + n, &c->status[1], n);
                for (i = 0; i < n; i++)
                    *dst++ = (buf[i] << 4) | buf[n + i];
            }
            av_free(buf);
        } else {
            for (i = 7 * avctx->channels; i < avctx->block_align; i++) {
                int nibble;
                nibble  = adpcm_ms_compress_sample(&c->status[ 0], *samples++) << 4;
                nibble |= adpcm_ms_compress_sample(&c->status[st], *samples++);
                *dst++  = nibble;
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_YAMAHA:
        n = frame->nb_samples / 2;
        if (avctx->trellis > 0) {
            FF_ALLOC_OR_GOTO(avctx, buf, 2 * n * 2, error);
            n *= 2;
            if (avctx->channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
                for (i = 0; i < n; i += 2)
                    *dst++ = buf[i] | (buf[i + 1] << 4);
            } else {
                adpcm_compress_trellis(avctx, samples,     buf,     &c->status[0], n);
                adpcm_compress_trellis(avctx, samples + 1, buf + n, &c->status[1], n);
                for (i = 0; i < n; i++)
                    *dst++ = buf[i] | (buf[n + i] << 4);
            }
            av_free(buf);
        } else
            for (n *= avctx->channels; n > 0; n--) {
                int nibble;
                nibble  = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
                nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
                *dst++  = nibble;
            }
        break;
    default:
        return AVERROR(EINVAL);
    }

    avpkt->size = pkt_size;
    *got_packet_ptr = 1;
    return 0;
error:
    return AVERROR(ENOMEM);
}
static const enum AVSampleFormat sample_fmts[] = {
    AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
};

#define ADPCM_ENCODER(id_, name_, long_name_)               \
AVCodec ff_ ## name_ ## _encoder = {                        \
    .name           = #name_,                               \
    .type           = AVMEDIA_TYPE_AUDIO,                   \
    .id             = id_,                                  \
    .priv_data_size = sizeof(ADPCMEncodeContext),           \
    .init           = adpcm_encode_init,                    \
    .encode2        = adpcm_encode_frame,                   \
    .close          = adpcm_encode_close,                   \
    .sample_fmts    = sample_fmts,                          \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_),     \
}

ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_QT,  adpcm_ima_qt,  "ADPCM IMA QuickTime");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_MS,      adpcm_ms,      "ADPCM Microsoft");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_SWF,     adpcm_swf,     "ADPCM Shockwave Flash");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_YAMAHA,  adpcm_yamaha,  "ADPCM Yamaha");
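
/* For illustration: the trellis search is exposed through the generic
 * "trellis" codec option, so an invocation along the lines of
 *     avconv -i input.wav -c:a adpcm_ima_wav -trellis 8 output.wav
 * trades encoding time for lower quantization error. */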