/*
 * TAK decoder
 * Copyright (c) 2012 Paul B Mahol
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * TAK (Tom's lossless Audio Kompressor) decoder
 * @author Paul B Mahol
 */

#include "libavutil/internal.h"
#include "libavutil/samplefmt.h"

#define BITSTREAM_READER_LE
#include "audiodsp.h"
#include "avcodec.h"
#include "bitstream.h"
#include "internal.h"
#include "unary.h"
#include "tak.h"

#define MAX_SUBFRAMES  8   // max number of subframes per channel
#define MAX_PREDICTORS 256

typedef struct MCDParam {
    int8_t present;  // decorrelation parameter availability for this channel
    int8_t index;    // index into array of decorrelation types
    int8_t chan1;
    int8_t chan2;
} MCDParam;

typedef struct TAKDecContext {
    AVCodecContext *avctx;                  // parent AVCodecContext
    AudioDSPContext adsp;
    TAKStreamInfo ti;
    BitstreamContext bc;                    // bitstream reader initialized to start at the current frame
    int uval;
    int nb_samples;                         // number of samples in the current frame
    uint8_t *decode_buffer;
    unsigned int decode_buffer_size;
    int32_t *decoded[TAK_MAX_CHANNELS];     // decoded samples for each channel
    int8_t lpc_mode[TAK_MAX_CHANNELS];
    int8_t sample_shift[TAK_MAX_CHANNELS];  // shift applied to every sample in the channel
    int subframe_scale;
    int8_t dmode;                           // channel decorrelation type in the current frame
    MCDParam mcdparams[TAK_MAX_CHANNELS];   // multichannel decorrelation parameters
    int16_t *residues;
    unsigned int residues_buf_size;
} TAKDecContext;

static const int8_t mc_dmodes[] = { 1, 3, 4, 6, };

static const uint16_t predictor_sizes[] = {
    4, 8, 12, 16, 24, 32, 48, 64, 80, 96, 128, 160, 192, 224, 256, 0,
};

static const struct CParam {
    int init;
    int escape;
    int scale;
    int aescape;
    int bias;
} xcodes[50] = {
    { 0x01, 0x0000001, 0x0000001, 0x0000003, 0x0000008 },
    { 0x02, 0x0000003, 0x0000001, 0x0000007, 0x0000006 },
    { 0x03, 0x0000005, 0x0000002, 0x000000E, 0x000000D },
    { 0x03, 0x0000003, 0x0000003, 0x000000D, 0x0000018 },
    { 0x04, 0x000000B, 0x0000004, 0x000001C, 0x0000019 },
    { 0x04, 0x0000006, 0x0000006, 0x000001A, 0x0000030 },
    { 0x05, 0x0000016, 0x0000008, 0x0000038, 0x0000032 },
    { 0x05, 0x000000C, 0x000000C, 0x0000034, 0x0000060 },
    { 0x06, 0x000002C, 0x0000010, 0x0000070, 0x0000064 },
    { 0x06, 0x0000018, 0x0000018, 0x0000068, 0x00000C0 },
    { 0x07, 0x0000058, 0x0000020, 0x00000E0, 0x00000C8 },
    { 0x07, 0x0000030, 0x0000030, 0x00000D0, 0x0000180 },
    { 0x08, 0x00000B0, 0x0000040, 0x00001C0, 0x0000190 },
    { 0x08, 0x0000060, 0x0000060, 0x00001A0, 0x0000300 },
    { 0x09, 0x0000160, 0x0000080, 0x0000380, 0x0000320 },
    { 0x09, 0x00000C0, 0x00000C0, 0x0000340, 0x0000600 },
    { 0x0A, 0x00002C0, 0x0000100, 0x0000700, 0x0000640 },
    { 0x0A, 0x0000180, 0x0000180, 0x0000680, 0x0000C00 },
    { 0x0B, 0x0000580, 0x0000200, 0x0000E00, 0x0000C80 },
    { 0x0B, 0x0000300, 0x0000300, 0x0000D00, 0x0001800 },
    { 0x0C, 0x0000B00, 0x0000400, 0x0001C00, 0x0001900 },
    { 0x0C, 0x0000600, 0x0000600, 0x0001A00, 0x0003000 },
    { 0x0D, 0x0001600, 0x0000800, 0x0003800, 0x0003200 },
    { 0x0D, 0x0000C00, 0x0000C00, 0x0003400, 0x0006000 },
    { 0x0E, 0x0002C00, 0x0001000, 0x0007000, 0x0006400 },
    { 0x0E, 0x0001800, 0x0001800, 0x0006800, 0x000C000 },
    { 0x0F, 0x0005800, 0x0002000, 0x000E000, 0x000C800 },
    { 0x0F, 0x0003000, 0x0003000, 0x000D000, 0x0018000 },
    { 0x10, 0x000B000, 0x0004000, 0x001C000, 0x0019000 },
    { 0x10, 0x0006000, 0x0006000, 0x001A000, 0x0030000 },
    { 0x11, 0x0016000, 0x0008000, 0x0038000, 0x0032000 },
    { 0x11, 0x000C000, 0x000C000, 0x0034000, 0x0060000 },
    { 0x12, 0x002C000, 0x0010000, 0x0070000, 0x0064000 },
    { 0x12, 0x0018000, 0x0018000, 0x0068000, 0x00C0000 },
    { 0x13, 0x0058000, 0x0020000, 0x00E0000, 0x00C8000 },
    { 0x13, 0x0030000, 0x0030000, 0x00D0000, 0x0180000 },
    { 0x14, 0x00B0000, 0x0040000, 0x01C0000, 0x0190000 },
    { 0x14, 0x0060000, 0x0060000, 0x01A0000, 0x0300000 },
    { 0x15, 0x0160000, 0x0080000, 0x0380000, 0x0320000 },
    { 0x15, 0x00C0000, 0x00C0000, 0x0340000, 0x0600000 },
    { 0x16, 0x02C0000, 0x0100000, 0x0700000, 0x0640000 },
    { 0x16, 0x0180000, 0x0180000, 0x0680000, 0x0C00000 },
    { 0x17, 0x0580000, 0x0200000, 0x0E00000, 0x0C80000 },
    { 0x17, 0x0300000, 0x0300000, 0x0D00000, 0x1800000 },
    { 0x18, 0x0B00000, 0x0400000, 0x1C00000, 0x1900000 },
    { 0x18, 0x0600000, 0x0600000, 0x1A00000, 0x3000000 },
    { 0x19, 0x1600000, 0x0800000, 0x3800000, 0x3200000 },
    { 0x19, 0x0C00000, 0x0C00000, 0x3400000, 0x6000000 },
    { 0x1A, 0x2C00000, 0x1000000, 0x7000000, 0x6400000 },
    { 0x1A, 0x1800000, 0x1800000, 0x6800000, 0xC000000 },
};

static av_cold void tak_init_static_data(AVCodec *codec)
{
    ff_tak_init_crc();
}

static int set_bps_params(AVCodecContext *avctx)
{
    switch (avctx->bits_per_coded_sample) {
    case 8:
        avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
        break;
    case 16:
        avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
        break;
    case 24:
        avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "unsupported bits per sample: %d\n",
               avctx->bits_per_coded_sample);
        return AVERROR_INVALIDDATA;
    }
    avctx->bits_per_raw_sample = avctx->bits_per_coded_sample;

    return 0;
}

static void set_sample_rate_params(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;
    int shift = 3 - (avctx->sample_rate / 11025);

    shift = FFMAX(0, shift);
    s->uval           = FFALIGN(avctx->sample_rate + 511 >> 9, 4) << shift;
    s->subframe_scale = FFALIGN(avctx->sample_rate + 511 >> 9, 4) << 1;
}

static av_cold int tak_decode_init(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;

    ff_audiodsp_init(&s->adsp);

    s->avctx = avctx;
    set_sample_rate_params(avctx);

    return set_bps_params(avctx);
}

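/* Undo the fixed low-order prediction that is applied on top of the
 * residual coder: each mode integrates the decoded values by cumulative
 * summation, with modes 1-3 selecting increasingly higher integration
 * orders. */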
static void decode_lpc(int32_t *coeffs, int mode, int length)
{
    int i;

    if (length < 2)
        return;

    if (mode == 1) {
        int a1 = *coeffs++;
        for (i = 0; i < length - 1 >> 1; i++) {
            *coeffs   += a1;
            coeffs[1] += *coeffs;
            a1         = coeffs[1];
            coeffs    += 2;
        }
        if (length - 1 & 1)
            *coeffs += a1;
    } else if (mode == 2) {
        int a1 = coeffs[1];
        int a2 = a1 + *coeffs;
        coeffs[1] = a2;
        if (length > 2) {
            coeffs += 2;
            for (i = 0; i < length - 2 >> 1; i++) {
                int a3 = *coeffs + a1;
                int a4 = a3 + a2;
                *coeffs   = a4;
                a1        = coeffs[1] + a3;
                a2        = a1 + a4;
                coeffs[1] = a2;
                coeffs   += 2;
            }
            if (length & 1)
                *coeffs += a1 + a2;
        }
    } else if (mode == 3) {
        int a1 = coeffs[1];
        int a2 = a1 + *coeffs;
        coeffs[1] = a2;
        if (length > 2) {
            int a3  = coeffs[2];
            int a4  = a3 + a1;
            int a5  = a4 + a2;
            coeffs += 3;
            for (i = 0; i < length - 3; i++) {
                a3     += *coeffs;
                a4     += a3;
                a5     += a4;
                *coeffs = a5;
                coeffs++;
            }
        }
    }
}

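/* Decode one segment of residues that shares a single coding parameter
 * set. Each value starts as an 'init'-bit base code, may be extended
 * through the escape/aescape thresholds and a unary-coded scale, and is
 * finally mapped from its unsigned zigzag form ((x >> 1) ^ -(x & 1))
 * back to a signed residue. */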
static int decode_segment(BitstreamContext *bc, int mode, int32_t *decoded,
                          int len)
{
    struct CParam code;
    int i;

    if (!mode) {
        memset(decoded, 0, len * sizeof(*decoded));
        return 0;
    }

    if (mode > FF_ARRAY_ELEMS(xcodes))
        return AVERROR_INVALIDDATA;
    code = xcodes[mode - 1];

    for (i = 0; i < len; i++) {
        int x = bitstream_read(bc, code.init);
        if (x >= code.escape && bitstream_read_bit(bc)) {
            x |= 1 << code.init;
            if (x >= code.aescape) {
                int scale = get_unary(bc, 1, 9);
                if (scale == 9) {
                    int scale_bits = bitstream_read(bc, 3);
                    if (scale_bits > 0) {
                        if (scale_bits == 7) {
                            scale_bits += bitstream_read(bc, 5);
                            if (scale_bits > 29)
                                return AVERROR_INVALIDDATA;
                        }
                        scale = bitstream_read(bc, scale_bits) + 1;
                        x    += code.scale * scale;
                    }
                    x += code.bias;
                } else
                    x += code.scale * scale - code.escape;
            } else
                x -= code.escape;
        }
        decoded[i] = (x >> 1) ^ -(x & 1);
    }

    return 0;
}

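/* Decode the residues of one block. The data may be split into up to
 * 128 windows of roughly s->uval samples; the coding mode of the first
 * window is stored verbatim and later modes are signalled as unary
 * deltas, so consecutive windows that share a mode are decoded as a
 * single segment. */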
static int decode_residues(TAKDecContext *s, int32_t *decoded, int length)
{
    BitstreamContext *bc = &s->bc;
    int i, mode, ret;

    if (length > s->nb_samples)
        return AVERROR_INVALIDDATA;

    if (bitstream_read_bit(bc)) {
        int wlength, rval;
        int coding_mode[128];

        wlength = length / s->uval;
        rval    = length - (wlength * s->uval);

        if (rval < s->uval / 2)
            rval += s->uval;
        else
            wlength++;

        if (wlength <= 1 || wlength > 128)
            return AVERROR_INVALIDDATA;

        coding_mode[0] =
        mode           = bitstream_read(bc, 6);

        for (i = 1; i < wlength; i++) {
            int c = get_unary(bc, 1, 6);

            switch (c) {
            case 6:
                mode = bitstream_read(bc, 6);
                break;
            case 5:
            case 4:
            case 3: {
                /* mode += sign ? (1 - c) : (c - 1) */
                int sign = bitstream_read_bit(bc);
                mode    += (-sign ^ (c - 1)) + sign;
                break;
            }
            case 2:
                mode++;
                break;
            case 1:
                mode--;
                break;
            }
            coding_mode[i] = mode;
        }

        i = 0;
        while (i < wlength) {
            int len = 0;

            mode = coding_mode[i];
            do {
                if (i >= wlength - 1)
                    len += rval;
                else
                    len += s->uval;
                i++;

                if (i == wlength)
                    break;
            } while (coding_mode[i] == mode);

            if ((ret = decode_segment(bc, mode, decoded, len)) < 0)
                return ret;
            decoded += len;
        }
    } else {
        mode = bitstream_read(bc, 6);
        if ((ret = decode_segment(bc, mode, decoded, length)) < 0)
            return ret;
    }

    return 0;
}

static int bits_esc4(BitstreamContext *bc)
{
    if (bitstream_read_bit(bc))
        return bitstream_read(bc, 4) + 1;
    else
        return 0;
}

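/* Read the quantized predictor values and expand them into the
 * direct-form int16 filter consumed by scalarproduct_int16(). The inner
 * loop appears to step up from reflection-style values to direct-form
 * coefficients; the final loop requantizes the result according to
 * filter_quant and stores it in reversed order. */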
static void decode_filter_coeffs(TAKDecContext *s, int filter_order, int size,
                                 int filter_quant, int16_t *filter)
{
    BitstreamContext *bc = &s->bc;
    int i, j, a, b;
    int filter_tmp[MAX_PREDICTORS];
    int16_t predictors[MAX_PREDICTORS];

    predictors[0] = bitstream_read_signed(bc, 10);
    predictors[1] = bitstream_read_signed(bc, 10);
    predictors[2] = bitstream_read_signed(bc, size) << (10 - size);
    predictors[3] = bitstream_read_signed(bc, size) << (10 - size);
    if (filter_order > 4) {
        int av_uninit(code_size);
        int code_size_base = size - bitstream_read_bit(bc);

        for (i = 4; i < filter_order; i++) {
            if (!(i & 3))
                code_size = code_size_base - bitstream_read(bc, 2);
            predictors[i] = bitstream_read_signed(bc, code_size) << (10 - size);
        }
    }

    filter_tmp[0] = predictors[0] << 6;
    for (i = 1; i < filter_order; i++) {
        int *p1 = &filter_tmp[0];
        int *p2 = &filter_tmp[i - 1];

        for (j = 0; j < (i + 1) / 2; j++) {
            int tmp = *p1 + (predictors[i] * *p2 + 256 >> 9);
            *p2     = *p2 + (predictors[i] * *p1 + 256 >> 9);
            *p1     = tmp;
            p1++;
            p2--;
        }

        filter_tmp[i] = predictors[i] << 6;
    }

    a = 1 << (32 - (15 - filter_quant));
    b = 1 << ((15 - filter_quant) - 1);
    for (i = 0, j = filter_order - 1; i < filter_order / 2; i++, j--) {
        filter[j] = a - ((filter_tmp[i] + b) >> (15 - filter_quant));
        filter[i] = a - ((filter_tmp[j] + b) >> (15 - filter_quant));
    }
}

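/* Decode a single subframe of one channel. A subframe is either plain
 * residues or an LPC-coded block: the warm-up samples come either from
 * the tail of the previous subframe or from their own residue block,
 * then the filter read by decode_filter_coeffs() is applied to the
 * remaining residues via scalarproduct_int16(). */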
static int decode_subframe(TAKDecContext *s, int32_t *decoded,
                           int subframe_size, int prev_subframe_size)
{
    LOCAL_ALIGNED_16(int16_t, filter, [MAX_PREDICTORS]);
    BitstreamContext *bc = &s->bc;
    int i, ret;
    int dshift, size, filter_quant, filter_order;

    memset(filter, 0, MAX_PREDICTORS * sizeof(*filter));

    if (!bitstream_read_bit(bc))
        return decode_residues(s, decoded, subframe_size);

    filter_order = predictor_sizes[bitstream_read(bc, 4)];

    if (prev_subframe_size > 0 && bitstream_read_bit(bc)) {
        if (filter_order > prev_subframe_size)
            return AVERROR_INVALIDDATA;

        decoded       -= filter_order;
        subframe_size += filter_order;

        if (filter_order > subframe_size)
            return AVERROR_INVALIDDATA;
    } else {
        int lpc_mode;

        if (filter_order > subframe_size)
            return AVERROR_INVALIDDATA;

        lpc_mode = bitstream_read(bc, 2);
        if (lpc_mode > 2)
            return AVERROR_INVALIDDATA;

        if ((ret = decode_residues(s, decoded, filter_order)) < 0)
            return ret;

        if (lpc_mode)
            decode_lpc(decoded, lpc_mode, filter_order);
    }

    dshift = bits_esc4(bc);
    size   = bitstream_read_bit(bc) + 6;

    filter_quant = 10;
    if (bitstream_read_bit(bc)) {
        filter_quant -= bitstream_read(bc, 3) + 1;
        if (filter_quant < 3)
            return AVERROR_INVALIDDATA;
    }

    decode_filter_coeffs(s, filter_order, size, filter_quant, filter);

    if ((ret = decode_residues(s, &decoded[filter_order],
                               subframe_size - filter_order)) < 0)
        return ret;

    av_fast_malloc(&s->residues, &s->residues_buf_size,
                   FFALIGN(subframe_size + 16, 16) * sizeof(*s->residues));
    if (!s->residues)
        return AVERROR(ENOMEM);
    memset(s->residues, 0, s->residues_buf_size);

    for (i = 0; i < filter_order; i++)
        s->residues[i] = *decoded++ >> dshift;

    for (i = 0; i < subframe_size - filter_order; i++) {
        int v = 1 << (filter_quant - 1);
        v += s->adsp.scalarproduct_int16(&s->residues[i], filter,
                                         FFALIGN(filter_order, 16));
        v = (av_clip_intp2(v >> filter_quant, 13) << dshift) - *decoded;
        *decoded++ = v;
        s->residues[filter_order + i] = v >> dshift;
    }

    emms_c();

    return 0;
}

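/* Decode all samples of one channel: read the per-channel sample shift
 * and LPC mode, the first sample verbatim, then split the remaining
 * samples into up to MAX_SUBFRAMES subframes whose boundaries are coded
 * as multiples of subframe_scale. */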
static int decode_channel(TAKDecContext *s, int chan)
{
    AVCodecContext *avctx = s->avctx;
    BitstreamContext *bc  = &s->bc;
    int32_t *decoded      = s->decoded[chan];
    int left              = s->nb_samples - 1;
    int i, prev, ret, nb_subframes;
    int subframe_len[MAX_SUBFRAMES];

    s->sample_shift[chan] = bits_esc4(bc);
    if (s->sample_shift[chan] >= avctx->bits_per_coded_sample)
        return AVERROR_INVALIDDATA;

    /* NOTE: TAK 2.2.0 appears to set the sample value to 0 if
     * bits_per_coded_sample - sample_shift is 1, but this produces
     * non-bit-exact output. Reading the 1 bit using bitstream_read_signed()
     * instead of skipping it produces bit-exact output. This has been
     * reported to the TAK author. */
    *decoded++ = bitstream_read_signed(bc,
                                       avctx->bits_per_coded_sample -
                                       s->sample_shift[chan]);

    s->lpc_mode[chan] = bitstream_read(bc, 2);
    nb_subframes      = bitstream_read(bc, 3) + 1;

    i = 0;
    if (nb_subframes > 1) {
        if (bitstream_bits_left(bc) < (nb_subframes - 1) * 6)
            return AVERROR_INVALIDDATA;

        prev = 0;
        for (; i < nb_subframes - 1; i++) {
            int subframe_end = bitstream_read(bc, 6) * s->subframe_scale;
            if (subframe_end <= prev)
                return AVERROR_INVALIDDATA;

            subframe_len[i] = subframe_end - prev;
            left           -= subframe_len[i];
            prev            = subframe_end;
        }

        if (left <= 0)
            return AVERROR_INVALIDDATA;
    }
    subframe_len[i] = left;

    prev = 0;
    for (i = 0; i < nb_subframes; i++) {
        if ((ret = decode_subframe(s, decoded, subframe_len[i], prev)) < 0)
            return ret;
        decoded += subframe_len[i];
        prev     = subframe_len[i];
    }

    return 0;
}

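/* Undo inter-channel decorrelation between channels c1 and c2 according
 * to s->dmode. Modes 1-3 are plain left/side, side/right and side/mid
 * combinations, modes 4 and 5 add a transmitted scale factor, and modes
 * 6 and 7 reconstruct one channel from the other with a short adaptive
 * FIR filter (cases 4 and 6 fall through after swapping the channel
 * pointers). */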
static int decorrelate(TAKDecContext *s, int c1, int c2, int length)
{
    BitstreamContext *bc = &s->bc;
    int32_t *p1 = s->decoded[c1] + 1;
    int32_t *p2 = s->decoded[c2] + 1;
    int i;
    int dshift, dfactor;

    switch (s->dmode) {
    case 1: /* left/side */
        for (i = 0; i < length; i++) {
            int32_t a = p1[i];
            int32_t b = p2[i];
            p2[i]     = a + b;
        }
        break;
    case 2: /* side/right */
        for (i = 0; i < length; i++) {
            int32_t a = p1[i];
            int32_t b = p2[i];
            p1[i]     = b - a;
        }
        break;
    case 3: /* side/mid */
        for (i = 0; i < length; i++) {
            int32_t a = p1[i];
            int32_t b = p2[i];
            a        -= b >> 1;
            p1[i]     = a;
            p2[i]     = a + b;
        }
        break;
    case 4: /* side/left with scale factor */
        FFSWAP(int32_t *, p1, p2);
    case 5: /* side/right with scale factor */
        dshift  = bits_esc4(bc);
        dfactor = bitstream_read_signed(bc, 10);

        for (i = 0; i < length; i++) {
            int32_t a = p1[i];
            int32_t b = p2[i];
            b         = dfactor * (b >> dshift) + 128 >> 8 << dshift;
            p1[i]     = b - a;
        }
        break;
    case 6:
        FFSWAP(int32_t *, p1, p2);
    case 7: {
        LOCAL_ALIGNED_16(int16_t, filter, [MAX_PREDICTORS]);
        int length2, order_half, filter_order, dval1, dval2;
        int av_uninit(code_size);

        memset(filter, 0, MAX_PREDICTORS * sizeof(*filter));

        if (length < 256)
            return AVERROR_INVALIDDATA;

        dshift       = bits_esc4(bc);
        filter_order = 8 << bitstream_read_bit(bc);
        dval1        = bitstream_read_bit(bc);
        dval2        = bitstream_read_bit(bc);

        for (i = 0; i < filter_order; i++) {
            if (!(i & 3))
                code_size = 14 - bitstream_read(bc, 3);
            filter[i] = bitstream_read_signed(bc, code_size);
        }

        order_half = filter_order / 2;
        length2    = length - (filter_order - 1);

        /* decorrelate beginning samples */
        if (dval1) {
            for (i = 0; i < order_half; i++) {
                int32_t a = p1[i];
                int32_t b = p2[i];
                p1[i]     = a + b;
            }
        }

        /* decorrelate ending samples */
        if (dval2) {
            for (i = length2 + order_half; i < length; i++) {
                int32_t a = p1[i];
                int32_t b = p2[i];
                p1[i]     = a + b;
            }
        }

        av_fast_malloc(&s->residues, &s->residues_buf_size,
                       FFALIGN(length + 16, 16) * sizeof(*s->residues));
        if (!s->residues)
            return AVERROR(ENOMEM);
        memset(s->residues, 0, s->residues_buf_size);

        for (i = 0; i < length; i++)
            s->residues[i] = p2[i] >> dshift;

        p1 += order_half;

        for (i = 0; i < length2; i++) {
            int v = 1 << 9;
            v += s->adsp.scalarproduct_int16(&s->residues[i], filter,
                                             FFALIGN(filter_order, 16));
            p1[i] = (av_clip_intp2(v >> 10, 13) << dshift) - p1[i];
        }

        emms_c();
        break;
    }
    }

    return 0;
}

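/* Decode one TAK frame: parse and optionally CRC-check the frame
 * header, decode every channel (tiny frames of fewer than 16 samples
 * are stored verbatim), undo inter-channel decorrelation and the
 * per-channel LPC and sample shift, check the frame CRC and convert the
 * 32-bit intermediate samples to the output sample format. */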
static int tak_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame_ptr, AVPacket *pkt)
{
    TAKDecContext *s     = avctx->priv_data;
    AVFrame *frame       = data;
    BitstreamContext *bc = &s->bc;
    int chan, i, ret, hsize;

    if (pkt->size < TAK_MIN_FRAME_HEADER_BYTES)
        return AVERROR_INVALIDDATA;

    bitstream_init8(bc, pkt->data, pkt->size);

    if ((ret = ff_tak_decode_frame_header(avctx, bc, &s->ti, 0)) < 0)
        return ret;

    if (s->ti.flags & TAK_FRAME_FLAG_HAS_METADATA) {
        avpriv_request_sample(avctx, "Frame metadata");
        return AVERROR_PATCHWELCOME;
    }

    hsize = bitstream_tell(bc) / 8;
    if (avctx->err_recognition & AV_EF_CRCCHECK) {
        if (ff_tak_check_crc(pkt->data, hsize)) {
            av_log(avctx, AV_LOG_ERROR, "CRC error\n");
            if (avctx->err_recognition & AV_EF_EXPLODE)
                return AVERROR_INVALIDDATA;
        }
    }

    if (s->ti.codec != TAK_CODEC_MONO_STEREO &&
        s->ti.codec != TAK_CODEC_MULTICHANNEL) {
        avpriv_report_missing_feature(avctx, "TAK codec type %d", s->ti.codec);
        return AVERROR_PATCHWELCOME;
    }

    if (s->ti.data_type) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported data type: %d\n", s->ti.data_type);
        return AVERROR_INVALIDDATA;
    }

    if (s->ti.codec == TAK_CODEC_MONO_STEREO && s->ti.channels > 2) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid number of channels: %d\n", s->ti.channels);
        return AVERROR_INVALIDDATA;
    }

    if (s->ti.channels > 6) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported number of channels: %d\n", s->ti.channels);
        return AVERROR_INVALIDDATA;
    }

    if (s->ti.frame_samples <= 0) {
        av_log(avctx, AV_LOG_ERROR, "unsupported/invalid number of samples\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->ti.bps != avctx->bits_per_coded_sample) {
        avctx->bits_per_coded_sample = s->ti.bps;
        if ((ret = set_bps_params(avctx)) < 0)
            return ret;
    }
    if (s->ti.sample_rate != avctx->sample_rate) {
        avctx->sample_rate = s->ti.sample_rate;
        set_sample_rate_params(avctx);
    }
    if (s->ti.ch_layout)
        avctx->channel_layout = s->ti.ch_layout;
    avctx->channels = s->ti.channels;

    s->nb_samples = s->ti.last_frame_samples ? s->ti.last_frame_samples
                                             : s->ti.frame_samples;

    frame->nb_samples = s->nb_samples;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;

    if (avctx->bits_per_coded_sample <= 16) {
        int buf_size = av_samples_get_buffer_size(NULL, avctx->channels,
                                                  s->nb_samples,
                                                  AV_SAMPLE_FMT_S32P, 0);
        if (buf_size < 0)
            return buf_size;
        av_fast_malloc(&s->decode_buffer, &s->decode_buffer_size, buf_size);
        if (!s->decode_buffer)
            return AVERROR(ENOMEM);
        ret = av_samples_fill_arrays((uint8_t **)s->decoded, NULL,
                                     s->decode_buffer, avctx->channels,
                                     s->nb_samples, AV_SAMPLE_FMT_S32P, 0);
        if (ret < 0)
            return ret;
    } else {
        for (chan = 0; chan < avctx->channels; chan++)
            s->decoded[chan] = (int32_t *)frame->extended_data[chan];
    }

    if (s->nb_samples < 16) {
        for (chan = 0; chan < avctx->channels; chan++) {
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                decoded[i] = bitstream_read_signed(bc,
                                                   avctx->bits_per_coded_sample);
        }
    } else {
        if (s->ti.codec == TAK_CODEC_MONO_STEREO) {
            for (chan = 0; chan < avctx->channels; chan++)
                if (ret = decode_channel(s, chan))
                    return ret;

            if (avctx->channels == 2) {
                if (bitstream_read_bit(bc)) {
                    // some kind of subframe length, but it seems to be unused
                    bitstream_skip(bc, 6);
                }
                s->dmode = bitstream_read(bc, 3);
                if (ret = decorrelate(s, 0, 1, s->nb_samples - 1))
                    return ret;
            }
        } else if (s->ti.codec == TAK_CODEC_MULTICHANNEL) {
            if (bitstream_read_bit(bc)) {
                int ch_mask = 0;

                chan = bitstream_read(bc, 4) + 1;
                if (chan > avctx->channels)
                    return AVERROR_INVALIDDATA;

                for (i = 0; i < chan; i++) {
                    int nbit = bitstream_read(bc, 4);

                    if (nbit >= avctx->channels)
                        return AVERROR_INVALIDDATA;

                    if (ch_mask & 1 << nbit)
                        return AVERROR_INVALIDDATA;

                    s->mcdparams[i].present = bitstream_read_bit(bc);
                    if (s->mcdparams[i].present) {
                        s->mcdparams[i].index = bitstream_read(bc, 2);
                        s->mcdparams[i].chan2 = bitstream_read(bc, 4);
                        if (s->mcdparams[i].chan2 >= avctx->channels) {
                            av_log(avctx, AV_LOG_ERROR,
                                   "invalid channel 2 (%d) for %d channel(s)\n",
                                   s->mcdparams[i].chan2, avctx->channels);
                            return AVERROR_INVALIDDATA;
                        }
                        if (s->mcdparams[i].index == 1) {
                            if ((nbit == s->mcdparams[i].chan2) ||
                                (ch_mask & 1 << s->mcdparams[i].chan2))
                                return AVERROR_INVALIDDATA;

                            ch_mask |= 1 << s->mcdparams[i].chan2;
                        } else if (!(ch_mask & 1 << s->mcdparams[i].chan2)) {
                            return AVERROR_INVALIDDATA;
                        }
                    }
                    s->mcdparams[i].chan1 = nbit;

                    ch_mask |= 1 << nbit;
                }
            } else {
                chan = avctx->channels;
                for (i = 0; i < chan; i++) {
                    s->mcdparams[i].present = 0;
                    s->mcdparams[i].chan1   = i;
                }
            }

            for (i = 0; i < chan; i++) {
                if (s->mcdparams[i].present && s->mcdparams[i].index == 1)
                    if (ret = decode_channel(s, s->mcdparams[i].chan2))
                        return ret;

                if (ret = decode_channel(s, s->mcdparams[i].chan1))
                    return ret;

                if (s->mcdparams[i].present) {
                    s->dmode = mc_dmodes[s->mcdparams[i].index];
                    if (ret = decorrelate(s,
                                          s->mcdparams[i].chan2,
                                          s->mcdparams[i].chan1,
                                          s->nb_samples - 1))
                        return ret;
                }
            }
        }

        for (chan = 0; chan < avctx->channels; chan++) {
            int32_t *decoded = s->decoded[chan];

            if (s->lpc_mode[chan])
                decode_lpc(decoded, s->lpc_mode[chan], s->nb_samples);

            if (s->sample_shift[chan] > 0)
                for (i = 0; i < s->nb_samples; i++)
                    decoded[i] <<= s->sample_shift[chan];
        }
    }

    bitstream_align(bc);
    bitstream_skip(bc, 24);
    if (bitstream_bits_left(bc) < 0)
        av_log(avctx, AV_LOG_DEBUG, "overread\n");
    else if (bitstream_bits_left(bc) > 0)
        av_log(avctx, AV_LOG_DEBUG, "underread\n");

    if (avctx->err_recognition & AV_EF_CRCCHECK) {
        if (ff_tak_check_crc(pkt->data + hsize,
                             bitstream_tell(bc) / 8 - hsize)) {
            av_log(avctx, AV_LOG_ERROR, "CRC error\n");
            if (avctx->err_recognition & AV_EF_EXPLODE)
                return AVERROR_INVALIDDATA;
        }
    }

    /* convert to output buffer */
    switch (avctx->sample_fmt) {
    case AV_SAMPLE_FMT_U8P:
        for (chan = 0; chan < avctx->channels; chan++) {
            uint8_t *samples = (uint8_t *)frame->extended_data[chan];
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] = decoded[i] + 0x80;
        }
        break;
    case AV_SAMPLE_FMT_S16P:
        for (chan = 0; chan < avctx->channels; chan++) {
            int16_t *samples = (int16_t *)frame->extended_data[chan];
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] = decoded[i];
        }
        break;
    case AV_SAMPLE_FMT_S32P:
        for (chan = 0; chan < avctx->channels; chan++) {
            int32_t *samples = (int32_t *)frame->extended_data[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] <<= 8;
        }
        break;
    }

    *got_frame_ptr = 1;

    return pkt->size;
}

static av_cold int tak_decode_close(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;

    av_freep(&s->decode_buffer);
    av_freep(&s->residues);

    return 0;
}

AVCodec ff_tak_decoder = {
    .name             = "tak",
    .long_name        = NULL_IF_CONFIG_SMALL("TAK (Tom's lossless Audio Kompressor)"),
    .type             = AVMEDIA_TYPE_AUDIO,
    .id               = AV_CODEC_ID_TAK,
    .priv_data_size   = sizeof(TAKDecContext),
    .init             = tak_decode_init,
    .init_static_data = tak_init_static_data,
    .close            = tak_decode_close,
    .decode           = tak_decode_frame,
    .capabilities     = AV_CODEC_CAP_DR1,
    .sample_fmts      = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
                                                        AV_SAMPLE_FMT_S16P,
                                                        AV_SAMPLE_FMT_S32P,
                                                        AV_SAMPLE_FMT_NONE },
};