/*
 * TAK decoder
 * Copyright (c) 2012 Paul B Mahol
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * TAK (Tom's lossless Audio Kompressor) decoder
 * @author Paul B Mahol
 */

#include "libavutil/samplefmt.h"
#include "tak.h"
#include "avcodec.h"
#include "dsputil.h"
#include "internal.h"
#include "unary.h"

#define MAX_SUBFRAMES  8   // max number of subframes per channel
#define MAX_PREDICTORS 256

typedef struct MCDParam {
    int8_t present; // decorrelation parameter availability for this channel
    int8_t index;   // index into array of decorrelation types
    int8_t chan1;
    int8_t chan2;
} MCDParam;

typedef struct TAKDecContext {
    AVCodecContext *avctx;  // parent AVCodecContext
    AVFrame frame;          // AVFrame for decoded output
    DSPContext dsp;
    TAKStreamInfo ti;
    GetBitContext gb;       // bitstream reader initialized to start at the current frame

    int uval;
    int nb_samples;         // number of samples in the current frame
    uint8_t *decode_buffer;
    unsigned int decode_buffer_size;
    int32_t *decoded[TAK_MAX_CHANNELS]; // decoded samples for each channel

    int8_t lpc_mode[TAK_MAX_CHANNELS];
    int8_t sample_shift[TAK_MAX_CHANNELS]; // shift applied to every sample in the channel
    int subframe_scale;

    int8_t dmode;           // channel decorrelation type in the current frame

    MCDParam mcdparams[TAK_MAX_CHANNELS]; // multichannel decorrelation parameters

    int16_t *residues;
    unsigned int residues_buf_size;
} TAKDecContext;

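/*
 * Static lookup tables used below: mc_dmodes maps the 2-bit multichannel
 * decorrelation index to a dmode value for decorrelate(), predictor_sizes
 * maps the 4-bit code read in decode_subframe() to a filter order, and
 * xcodes holds the per-mode parameters (initial bit count, escape values,
 * scale and bias) used by decode_segment() for the coded residues.
 */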
static const int8_t mc_dmodes[] = { 1, 3, 4, 6, };

static const uint16_t predictor_sizes[] = {
    4, 8, 12, 16, 24, 32, 48, 64, 80, 96, 128, 160, 192, 224, 256, 0,
};

static const struct CParam {
    int init;
    int escape;
    int scale;
    int aescape;
    int bias;
} xcodes[50] = {
    { 0x01, 0x0000001, 0x0000001, 0x0000003, 0x0000008 },
    { 0x02, 0x0000003, 0x0000001, 0x0000007, 0x0000006 },
    { 0x03, 0x0000005, 0x0000002, 0x000000E, 0x000000D },
    { 0x03, 0x0000003, 0x0000003, 0x000000D, 0x0000018 },
    { 0x04, 0x000000B, 0x0000004, 0x000001C, 0x0000019 },
    { 0x04, 0x0000006, 0x0000006, 0x000001A, 0x0000030 },
    { 0x05, 0x0000016, 0x0000008, 0x0000038, 0x0000032 },
    { 0x05, 0x000000C, 0x000000C, 0x0000034, 0x0000060 },
    { 0x06, 0x000002C, 0x0000010, 0x0000070, 0x0000064 },
    { 0x06, 0x0000018, 0x0000018, 0x0000068, 0x00000C0 },
    { 0x07, 0x0000058, 0x0000020, 0x00000E0, 0x00000C8 },
    { 0x07, 0x0000030, 0x0000030, 0x00000D0, 0x0000180 },
    { 0x08, 0x00000B0, 0x0000040, 0x00001C0, 0x0000190 },
    { 0x08, 0x0000060, 0x0000060, 0x00001A0, 0x0000300 },
    { 0x09, 0x0000160, 0x0000080, 0x0000380, 0x0000320 },
    { 0x09, 0x00000C0, 0x00000C0, 0x0000340, 0x0000600 },
    { 0x0A, 0x00002C0, 0x0000100, 0x0000700, 0x0000640 },
    { 0x0A, 0x0000180, 0x0000180, 0x0000680, 0x0000C00 },
    { 0x0B, 0x0000580, 0x0000200, 0x0000E00, 0x0000C80 },
    { 0x0B, 0x0000300, 0x0000300, 0x0000D00, 0x0001800 },
    { 0x0C, 0x0000B00, 0x0000400, 0x0001C00, 0x0001900 },
    { 0x0C, 0x0000600, 0x0000600, 0x0001A00, 0x0003000 },
    { 0x0D, 0x0001600, 0x0000800, 0x0003800, 0x0003200 },
    { 0x0D, 0x0000C00, 0x0000C00, 0x0003400, 0x0006000 },
    { 0x0E, 0x0002C00, 0x0001000, 0x0007000, 0x0006400 },
    { 0x0E, 0x0001800, 0x0001800, 0x0006800, 0x000C000 },
    { 0x0F, 0x0005800, 0x0002000, 0x000E000, 0x000C800 },
    { 0x0F, 0x0003000, 0x0003000, 0x000D000, 0x0018000 },
    { 0x10, 0x000B000, 0x0004000, 0x001C000, 0x0019000 },
    { 0x10, 0x0006000, 0x0006000, 0x001A000, 0x0030000 },
    { 0x11, 0x0016000, 0x0008000, 0x0038000, 0x0032000 },
    { 0x11, 0x000C000, 0x000C000, 0x0034000, 0x0060000 },
    { 0x12, 0x002C000, 0x0010000, 0x0070000, 0x0064000 },
    { 0x12, 0x0018000, 0x0018000, 0x0068000, 0x00C0000 },
    { 0x13, 0x0058000, 0x0020000, 0x00E0000, 0x00C8000 },
    { 0x13, 0x0030000, 0x0030000, 0x00D0000, 0x0180000 },
    { 0x14, 0x00B0000, 0x0040000, 0x01C0000, 0x0190000 },
    { 0x14, 0x0060000, 0x0060000, 0x01A0000, 0x0300000 },
    { 0x15, 0x0160000, 0x0080000, 0x0380000, 0x0320000 },
    { 0x15, 0x00C0000, 0x00C0000, 0x0340000, 0x0600000 },
    { 0x16, 0x02C0000, 0x0100000, 0x0700000, 0x0640000 },
    { 0x16, 0x0180000, 0x0180000, 0x0680000, 0x0C00000 },
    { 0x17, 0x0580000, 0x0200000, 0x0E00000, 0x0C80000 },
    { 0x17, 0x0300000, 0x0300000, 0x0D00000, 0x1800000 },
    { 0x18, 0x0B00000, 0x0400000, 0x1C00000, 0x1900000 },
    { 0x18, 0x0600000, 0x0600000, 0x1A00000, 0x3000000 },
    { 0x19, 0x1600000, 0x0800000, 0x3800000, 0x3200000 },
    { 0x19, 0x0C00000, 0x0C00000, 0x3400000, 0x6000000 },
    { 0x1A, 0x2C00000, 0x1000000, 0x7000000, 0x6400000 },
    { 0x1A, 0x1800000, 0x1800000, 0x6800000, 0xC000000 },
};

static av_cold void tak_init_static_data(AVCodec *codec)
{
    ff_tak_init_crc();
}

static int set_bps_params(AVCodecContext *avctx)
{
    switch (avctx->bits_per_coded_sample) {
    case 8:
        avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
        break;
    case 16:
        avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
        break;
    case 24:
        avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "unsupported bits per sample: %d\n",
               avctx->bits_per_coded_sample);
        return AVERROR_INVALIDDATA;
    }
    avctx->bits_per_raw_sample = avctx->bits_per_coded_sample;

    return 0;
}

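/*
 * Derive the sample-rate dependent sizes: uval is the length of a residue
 * coding window in decode_residues() and subframe_scale is the unit in
 * which subframe boundaries are coded in decode_channel(). Both appear to
 * be roughly sample_rate / 512, rounded up to a multiple of 4, with an
 * extra shift applied for low sample rates.
 */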
static void set_sample_rate_params(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;
    int shift = 3 - (avctx->sample_rate / 11025);

    shift = FFMAX(0, shift);

    s->uval           = FFALIGN(avctx->sample_rate + 511 >> 9, 4) << shift;
    s->subframe_scale = FFALIGN(avctx->sample_rate + 511 >> 9, 4) << 1;
}

static av_cold int tak_decode_init(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;

    ff_dsputil_init(&s->dsp, avctx);

    s->avctx = avctx;
    avcodec_get_frame_defaults(&s->frame);
    avctx->coded_frame = &s->frame;

    set_sample_rate_params(avctx);

    return set_bps_params(avctx);
}

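/*
 * Undo the fixed-order prediction that TAK applies on top of the coded
 * values: depending on mode this looks like one, two or three passes of
 * cumulative summation over the coefficient/sample array, done in place.
 */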
static void decode_lpc(int32_t *coeffs, int mode, int length)
{
    int i;

    if (length < 2)
        return;

    if (mode == 1) {
        int a1 = *coeffs++;
        for (i = 0; i < length - 1 >> 1; i++) {
            *coeffs   += a1;
            coeffs[1] += *coeffs;
            a1         = coeffs[1];
            coeffs    += 2;
        }
        if (length - 1 & 1)
            *coeffs += a1;
    } else if (mode == 2) {
        int a1    = coeffs[1];
        int a2    = a1 + *coeffs;
        coeffs[1] = a2;
        if (length > 2) {
            coeffs += 2;
            for (i = 0; i < length - 2 >> 1; i++) {
                int a3    = *coeffs + a1;
                int a4    = a3 + a2;
                *coeffs   = a4;
                a1        = coeffs[1] + a3;
                a2        = a1 + a4;
                coeffs[1] = a2;
                coeffs   += 2;
            }
            if (length & 1)
                *coeffs += a1 + a2;
        }
    } else if (mode == 3) {
        int a1    = coeffs[1];
        int a2    = a1 + *coeffs;
        coeffs[1] = a2;
        if (length > 2) {
            int a3  = coeffs[2];
            int a4  = a3 + a1;
            int a5  = a4 + a2;
            coeffs += 3;
            for (i = 0; i < length - 3; i++) {
                a3      += *coeffs;
                a4      += a3;
                a5      += a4;
                *coeffs  = a5;
                coeffs++;
            }
        }
    }
}

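/*
 * Decode one segment of len residues with a fixed parameter set
 * (xcodes[mode - 1]): each value starts as code.init raw bits, may be
 * extended through the escape/aescape mechanism with a unary-coded scale,
 * and is finally mapped from unsigned to signed with (x >> 1) ^ -(x & 1).
 */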
static int decode_segment(GetBitContext *gb, int mode, int32_t *decoded,
                          int len)
{
    struct CParam code;
    int i;

    if (!mode) {
        memset(decoded, 0, len * sizeof(*decoded));
        return 0;
    }

    if (mode > FF_ARRAY_ELEMS(xcodes))
        return AVERROR_INVALIDDATA;
    code = xcodes[mode - 1];

    for (i = 0; i < len; i++) {
        int x = get_bits_long(gb, code.init);
        if (x >= code.escape && get_bits1(gb)) {
            x |= 1 << code.init;
            if (x >= code.aescape) {
                int scale = get_unary(gb, 1, 9);
                if (scale == 9) {
                    int scale_bits = get_bits(gb, 3);
                    if (scale_bits > 0) {
                        if (scale_bits == 7) {
                            scale_bits += get_bits(gb, 5);
                            if (scale_bits > 29)
                                return AVERROR_INVALIDDATA;
                        }
                        scale = get_bits_long(gb, scale_bits) + 1;
                        x    += code.scale * scale;
                    }
                    x += code.bias;
                } else
                    x += code.scale * scale - code.escape;
            } else
                x -= code.escape;
        }
        decoded[i] = (x >> 1) ^ -(x & 1);
    }

    return 0;
}

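/*
 * Decode a block of residues. A flag selects either a single coding mode
 * for the whole block or a split into up to 128 windows of roughly uval
 * samples, where each window's 6-bit mode is delta-coded with a short
 * unary code relative to the previous window.
 */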
static int decode_residues(TAKDecContext *s, int32_t *decoded, int length)
{
    GetBitContext *gb = &s->gb;
    int i, mode, ret;

    if (length > s->nb_samples)
        return AVERROR_INVALIDDATA;

    if (get_bits1(gb)) {
        int wlength, rval;
        int coding_mode[128];

        wlength = length / s->uval;
        rval    = length - (wlength * s->uval);

        if (rval < s->uval / 2)
            rval += s->uval;
        else
            wlength++;

        if (wlength <= 1 || wlength > 128)
            return AVERROR_INVALIDDATA;

        coding_mode[0] = mode = get_bits(gb, 6);

        for (i = 1; i < wlength; i++) {
            int c = get_unary(gb, 1, 6);

            switch (c) {
            case 6:
                mode = get_bits(gb, 6);
                break;
            case 5:
            case 4:
            case 3: {
                /* mode += sign ? (1 - c) : (c - 1) */
                int sign = get_bits1(gb);
                mode    += (-sign ^ (c - 1)) + sign;
                break;
            }
            case 2:
                mode++;
                break;
            case 1:
                mode--;
                break;
            }
            coding_mode[i] = mode;
        }

        i = 0;
        while (i < wlength) {
            int len = 0;

            mode = coding_mode[i];
            do {
                if (i >= wlength - 1)
                    len += rval;
                else
                    len += s->uval;
                i++;

                if (i == wlength)
                    break;
            } while (coding_mode[i] == mode);

            if ((ret = decode_segment(gb, mode, decoded, len)) < 0)
                return ret;
            decoded += len;
        }
    } else {
        mode = get_bits(gb, 6);
        if ((ret = decode_segment(gb, mode, decoded, length)) < 0)
            return ret;
    }

    return 0;
}

static int get_bits_esc4(GetBitContext *gb)
{
    if (get_bits1(gb))
        return get_bits(gb, 4) + 1;
    else
        return 0;
}

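/*
 * Read the quantized predictor values for the current subframe and expand
 * them into the 16-bit filter used by decode_subframe(). The middle loop
 * appears to be a lattice-to-direct-form style recursion; the final loop
 * rescales the result by (15 - filter_quant) bits and stores the taps in
 * reversed order.
 */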
static void decode_filter_coeffs(TAKDecContext *s, int filter_order, int size,
                                 int filter_quant, int16_t *filter)
{
    GetBitContext *gb = &s->gb;
    int i, j, a, b;
    int filter_tmp[MAX_PREDICTORS];
    int16_t predictors[MAX_PREDICTORS];

    predictors[0] = get_sbits(gb, 10);
    predictors[1] = get_sbits(gb, 10);
    predictors[2] = get_sbits(gb, size) << (10 - size);
    predictors[3] = get_sbits(gb, size) << (10 - size);
    if (filter_order > 4) {
        int av_uninit(code_size);
        int code_size_base = size - get_bits1(gb);

        for (i = 4; i < filter_order; i++) {
            if (!(i & 3))
                code_size = code_size_base - get_bits(gb, 2);
            predictors[i] = get_sbits(gb, code_size) << (10 - size);
        }
    }

    filter_tmp[0] = predictors[0] << 6;
    for (i = 1; i < filter_order; i++) {
        int *p1 = &filter_tmp[0];
        int *p2 = &filter_tmp[i - 1];

        for (j = 0; j < (i + 1) / 2; j++) {
            int tmp = *p1 + (predictors[i] * *p2 + 256 >> 9);
            *p2     = *p2 + (predictors[i] * *p1 + 256 >> 9);
            *p1     = tmp;
            p1++;
            p2--;
        }

        filter_tmp[i] = predictors[i] << 6;
    }

    a = 1 << (32 - (15 - filter_quant));
    b = 1 << ((15 - filter_quant) - 1);
    for (i = 0, j = filter_order - 1; i < filter_order / 2; i++, j--) {
        filter[j] = a - ((filter_tmp[i] + b) >> (15 - filter_quant));
        filter[i] = a - ((filter_tmp[j] + b) >> (15 - filter_quant));
    }
}

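/*
 * Decode one subframe. A leading flag selects plain residues; otherwise
 * the filter order is read from predictor_sizes, the first filter_order
 * samples either reuse history from the previous subframe or are decoded
 * and optionally LPC-reconstructed, and the remaining samples are
 * predicted with scalarproduct_int16() over dshift-scaled 16-bit residues.
 */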
static int decode_subframe(TAKDecContext *s, int32_t *decoded,
                           int subframe_size, int prev_subframe_size)
{
    LOCAL_ALIGNED_16(int16_t, filter, [MAX_PREDICTORS]);
    GetBitContext *gb = &s->gb;
    int i, ret;
    int dshift, size, filter_quant, filter_order;

    memset(filter, 0, MAX_PREDICTORS * sizeof(*filter));

    if (!get_bits1(gb))
        return decode_residues(s, decoded, subframe_size);

    filter_order = predictor_sizes[get_bits(gb, 4)];

    if (prev_subframe_size > 0 && get_bits1(gb)) {
        if (filter_order > prev_subframe_size)
            return AVERROR_INVALIDDATA;

        decoded       -= filter_order;
        subframe_size += filter_order;

        if (filter_order > subframe_size)
            return AVERROR_INVALIDDATA;
    } else {
        int lpc_mode;

        if (filter_order > subframe_size)
            return AVERROR_INVALIDDATA;

        lpc_mode = get_bits(gb, 2);
        if (lpc_mode > 2)
            return AVERROR_INVALIDDATA;

        if ((ret = decode_residues(s, decoded, filter_order)) < 0)
            return ret;

        if (lpc_mode)
            decode_lpc(decoded, lpc_mode, filter_order);
    }

    dshift = get_bits_esc4(gb);
    size   = get_bits1(gb) + 6;

    filter_quant = 10;
    if (get_bits1(gb)) {
        filter_quant -= get_bits(gb, 3) + 1;
        if (filter_quant < 3)
            return AVERROR_INVALIDDATA;
    }

    decode_filter_coeffs(s, filter_order, size, filter_quant, filter);

    if ((ret = decode_residues(s, &decoded[filter_order],
                               subframe_size - filter_order)) < 0)
        return ret;

    av_fast_malloc(&s->residues, &s->residues_buf_size,
                   FFALIGN(subframe_size + 16, 16) * sizeof(*s->residues));
    if (!s->residues)
        return AVERROR(ENOMEM);
    memset(s->residues, 0, s->residues_buf_size);

    for (i = 0; i < filter_order; i++)
        s->residues[i] = *decoded++ >> dshift;

    for (i = 0; i < subframe_size - filter_order; i++) {
        int v = 1 << (filter_quant - 1);
        v += s->dsp.scalarproduct_int16(&s->residues[i], filter,
                                        FFALIGN(filter_order, 16));
        v = (av_clip(v >> filter_quant, -8192, 8191) << dshift) - *decoded;
        *decoded++ = v;
        s->residues[filter_order + i] = v >> dshift;
    }

    emms_c();

    return 0;
}

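/*
 * Decode all samples of one channel: sample shift, a verbatim first
 * sample, the per-channel LPC mode, and a layout of up to MAX_SUBFRAMES
 * subframes whose boundaries are coded as 6-bit multiples of
 * subframe_scale, each decoded by decode_subframe().
 */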
static int decode_channel(TAKDecContext *s, int chan)
{
    AVCodecContext *avctx = s->avctx;
    GetBitContext *gb     = &s->gb;
    int32_t *decoded      = s->decoded[chan];
    int left              = s->nb_samples - 1;
    int i, prev, ret, nb_subframes;
    int subframe_len[MAX_SUBFRAMES];

    s->sample_shift[chan] = get_bits_esc4(gb);
    if (s->sample_shift[chan] >= avctx->bits_per_coded_sample)
        return AVERROR_INVALIDDATA;

    /* NOTE: TAK 2.2.0 appears to set the sample value to 0 if
     *       bits_per_coded_sample - sample_shift is 1, but this produces
     *       non-bit-exact output. Reading the 1 bit using get_sbits()
     *       instead of skipping it produces bit-exact output. This has
     *       been reported to the TAK author. */
    *decoded++ = get_sbits(gb,
                           avctx->bits_per_coded_sample -
                           s->sample_shift[chan]);

    s->lpc_mode[chan] = get_bits(gb, 2);
    nb_subframes      = get_bits(gb, 3) + 1;

    i = 0;
    if (nb_subframes > 1) {
        if (get_bits_left(gb) < (nb_subframes - 1) * 6)
            return AVERROR_INVALIDDATA;

        prev = 0;
        for (; i < nb_subframes - 1; i++) {
            int subframe_end = get_bits(gb, 6) * s->subframe_scale;
            if (subframe_end <= prev)
                return AVERROR_INVALIDDATA;

            subframe_len[i] = subframe_end - prev;
            left           -= subframe_len[i];
            prev            = subframe_end;
        }

        if (left <= 0)
            return AVERROR_INVALIDDATA;
    }
    subframe_len[i] = left;

    prev = 0;
    for (i = 0; i < nb_subframes; i++) {
        if ((ret = decode_subframe(s, decoded, subframe_len[i], prev)) < 0)
            return ret;
        decoded += subframe_len[i];
        prev     = subframe_len[i];
    }

    return 0;
}

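/*
 * Undo inter-channel decorrelation for the channel pair (c1, c2) according
 * to s->dmode: modes 1-3 are simple sum/difference (mid/side style)
 * variants, modes 4-5 add a transmitted scale factor, and modes 6-7 apply
 * an adaptive FIR filter to one channel, again via scalarproduct_int16().
 */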
static int decorrelate(TAKDecContext *s, int c1, int c2, int length)
{
    GetBitContext *gb = &s->gb;
    int32_t *p1       = s->decoded[c1] + 1;
    int32_t *p2       = s->decoded[c2] + 1;
    int i;
    int dshift, dfactor;

    switch (s->dmode) {
    case 1: /* left/side */
        for (i = 0; i < length; i++) {
            int32_t a = p1[i];
            int32_t b = p2[i];
            p2[i]     = a + b;
        }
        break;
    case 2: /* side/right */
        for (i = 0; i < length; i++) {
            int32_t a = p1[i];
            int32_t b = p2[i];
            p1[i]     = b - a;
        }
        break;
    case 3: /* side/mid */
        for (i = 0; i < length; i++) {
            int32_t a = p1[i];
            int32_t b = p2[i];
            a        -= b >> 1;
            p1[i]     = a;
            p2[i]     = a + b;
        }
        break;
    case 4: /* side/left with scale factor */
        FFSWAP(int32_t*, p1, p2);
    case 5: /* side/right with scale factor */
        dshift  = get_bits_esc4(gb);
        dfactor = get_sbits(gb, 10);
        for (i = 0; i < length; i++) {
            int32_t a = p1[i];
            int32_t b = p2[i];
            b         = dfactor * (b >> dshift) + 128 >> 8 << dshift;
            p1[i]     = b - a;
        }
        break;
    case 6:
        FFSWAP(int32_t*, p1, p2);
    case 7: {
        LOCAL_ALIGNED_16(int16_t, filter, [MAX_PREDICTORS]);
        int length2, order_half, filter_order, dval1, dval2;
        int av_uninit(code_size);

        memset(filter, 0, MAX_PREDICTORS * sizeof(*filter));

        if (length < 256)
            return AVERROR_INVALIDDATA;

        dshift       = get_bits_esc4(gb);
        filter_order = 8 << get_bits1(gb);
        dval1        = get_bits1(gb);
        dval2        = get_bits1(gb);

        for (i = 0; i < filter_order; i++) {
            if (!(i & 3))
                code_size = 14 - get_bits(gb, 3);
            filter[i] = get_sbits(gb, code_size);
        }

        order_half = filter_order / 2;
        length2    = length - (filter_order - 1);

        /* decorrelate beginning samples */
        if (dval1) {
            for (i = 0; i < order_half; i++) {
                int32_t a = p1[i];
                int32_t b = p2[i];
                p1[i]     = a + b;
            }
        }

        /* decorrelate ending samples */
        if (dval2) {
            for (i = length2 + order_half; i < length; i++) {
                int32_t a = p1[i];
                int32_t b = p2[i];
                p1[i]     = a + b;
            }
        }

        av_fast_malloc(&s->residues, &s->residues_buf_size,
                       FFALIGN(length + 16, 16) * sizeof(*s->residues));
        if (!s->residues)
            return AVERROR(ENOMEM);
        memset(s->residues, 0, s->residues_buf_size);

        for (i = 0; i < length; i++)
            s->residues[i] = p2[i] >> dshift;

        p1 += order_half;

        for (i = 0; i < length2; i++) {
            int v = 1 << 9;
            v += s->dsp.scalarproduct_int16(&s->residues[i], filter,
                                            FFALIGN(filter_order, 16));
            p1[i] = (av_clip(v >> 10, -8192, 8191) << dshift) - p1[i];
        }

        emms_c();
        break;
    }
    }

    return 0;
}

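/*
 * Decode one packet: parse and (optionally) CRC-check the frame header,
 * validate the stream parameters, set up the per-channel output or the
 * intermediate 32-bit decode buffer, decode each channel, undo channel
 * decorrelation and per-channel LPC/shift, verify the frame CRC, and
 * finally convert the 32-bit samples to the output sample format.
 */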
static int tak_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame_ptr, AVPacket *pkt)
{
    TAKDecContext *s  = avctx->priv_data;
    GetBitContext *gb = &s->gb;
    int chan, i, ret, hsize;

    if (pkt->size < TAK_MIN_FRAME_HEADER_BYTES)
        return AVERROR_INVALIDDATA;

    init_get_bits(gb, pkt->data, pkt->size * 8);

    if ((ret = ff_tak_decode_frame_header(avctx, gb, &s->ti, 0)) < 0)
        return ret;

    if (s->ti.flags & TAK_FRAME_FLAG_HAS_METADATA) {
        av_log_missing_feature(avctx, "frame metadata", 1);
        return AVERROR_PATCHWELCOME;
    }

    hsize = get_bits_count(gb) / 8;
    if (avctx->err_recognition & AV_EF_CRCCHECK) {
        if (ff_tak_check_crc(pkt->data, hsize)) {
            av_log(avctx, AV_LOG_ERROR, "CRC error\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if (s->ti.codec != TAK_CODEC_MONO_STEREO &&
        s->ti.codec != TAK_CODEC_MULTICHANNEL) {
        av_log(avctx, AV_LOG_ERROR, "unsupported codec: %d\n", s->ti.codec);
        return AVERROR_PATCHWELCOME;
    }
    if (s->ti.data_type) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported data type: %d\n", s->ti.data_type);
        return AVERROR_INVALIDDATA;
    }
    if (s->ti.codec == TAK_CODEC_MONO_STEREO && s->ti.channels > 2) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid number of channels: %d\n", s->ti.channels);
        return AVERROR_INVALIDDATA;
    }
    if (s->ti.channels > 6) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported number of channels: %d\n", s->ti.channels);
        return AVERROR_INVALIDDATA;
    }

    if (s->ti.frame_samples <= 0) {
        av_log(avctx, AV_LOG_ERROR, "unsupported/invalid number of samples\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->ti.bps != avctx->bits_per_coded_sample) {
        avctx->bits_per_coded_sample = s->ti.bps;
        if ((ret = set_bps_params(avctx)) < 0)
            return ret;
    }
    if (s->ti.sample_rate != avctx->sample_rate) {
        avctx->sample_rate = s->ti.sample_rate;
        set_sample_rate_params(avctx);
    }
    if (s->ti.ch_layout)
        avctx->channel_layout = s->ti.ch_layout;
    avctx->channels = s->ti.channels;

    s->nb_samples = s->ti.last_frame_samples ? s->ti.last_frame_samples
                                             : s->ti.frame_samples;

    s->frame.nb_samples = s->nb_samples;
    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0)
        return ret;

    if (avctx->bits_per_coded_sample <= 16) {
        int buf_size = av_samples_get_buffer_size(NULL, avctx->channels,
                                                  s->nb_samples,
                                                  AV_SAMPLE_FMT_S32P, 0);
        av_fast_malloc(&s->decode_buffer, &s->decode_buffer_size, buf_size);
        if (!s->decode_buffer)
            return AVERROR(ENOMEM);
        ret = av_samples_fill_arrays((uint8_t **)s->decoded, NULL,
                                     s->decode_buffer, avctx->channels,
                                     s->nb_samples, AV_SAMPLE_FMT_S32P, 0);
        if (ret < 0)
            return ret;
    } else {
        for (chan = 0; chan < avctx->channels; chan++)
            s->decoded[chan] = (int32_t *)s->frame.extended_data[chan];
    }

    if (s->nb_samples < 16) {
        for (chan = 0; chan < avctx->channels; chan++) {
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                decoded[i] = get_sbits(gb, avctx->bits_per_coded_sample);
        }
    } else {
        if (s->ti.codec == TAK_CODEC_MONO_STEREO) {
            for (chan = 0; chan < avctx->channels; chan++)
                if (ret = decode_channel(s, chan))
                    return ret;

            if (avctx->channels == 2) {
                if (get_bits1(gb)) {
                    // some kind of subframe length, but it seems to be unused
                    skip_bits(gb, 6);
                }
                s->dmode = get_bits(gb, 3);
                if (ret = decorrelate(s, 0, 1, s->nb_samples - 1))
                    return ret;
            }
        } else if (s->ti.codec == TAK_CODEC_MULTICHANNEL) {
            if (get_bits1(gb)) {
                int ch_mask = 0;

                chan = get_bits(gb, 4) + 1;
                if (chan > avctx->channels)
                    return AVERROR_INVALIDDATA;

                for (i = 0; i < chan; i++) {
                    int nbit = get_bits(gb, 4);

                    if (nbit >= avctx->channels)
                        return AVERROR_INVALIDDATA;

                    if (ch_mask & 1 << nbit)
                        return AVERROR_INVALIDDATA;

                    s->mcdparams[i].present = get_bits1(gb);
                    if (s->mcdparams[i].present) {
                        s->mcdparams[i].index = get_bits(gb, 2);
                        s->mcdparams[i].chan2 = get_bits(gb, 4);
                        if (s->mcdparams[i].index == 1) {
                            if ((nbit == s->mcdparams[i].chan2) ||
                                (ch_mask & 1 << s->mcdparams[i].chan2))
                                return AVERROR_INVALIDDATA;

                            ch_mask |= 1 << s->mcdparams[i].chan2;
                        } else if (!(ch_mask & 1 << s->mcdparams[i].chan2)) {
                            return AVERROR_INVALIDDATA;
                        }
                    }
                    s->mcdparams[i].chan1 = nbit;

                    ch_mask |= 1 << nbit;
                }
            } else {
                chan = avctx->channels;
                for (i = 0; i < chan; i++) {
                    s->mcdparams[i].present = 0;
                    s->mcdparams[i].chan1   = i;
                }
            }

            for (i = 0; i < chan; i++) {
                if (s->mcdparams[i].present && s->mcdparams[i].index == 1)
                    if (ret = decode_channel(s, s->mcdparams[i].chan2))
                        return ret;

                if (ret = decode_channel(s, s->mcdparams[i].chan1))
                    return ret;

                if (s->mcdparams[i].present) {
                    s->dmode = mc_dmodes[s->mcdparams[i].index];
                    if (ret = decorrelate(s,
                                          s->mcdparams[i].chan2,
                                          s->mcdparams[i].chan1,
                                          s->nb_samples - 1))
                        return ret;
                }
            }
        }

        for (chan = 0; chan < avctx->channels; chan++) {
            int32_t *decoded = s->decoded[chan];

            if (s->lpc_mode[chan])
                decode_lpc(decoded, s->lpc_mode[chan], s->nb_samples);

            if (s->sample_shift[chan] > 0)
                for (i = 0; i < s->nb_samples; i++)
                    decoded[i] <<= s->sample_shift[chan];
        }
    }

    align_get_bits(gb);
    skip_bits(gb, 24);
    if (get_bits_left(gb) < 0)
        av_log(avctx, AV_LOG_DEBUG, "overread\n");
    else if (get_bits_left(gb) > 0)
        av_log(avctx, AV_LOG_DEBUG, "underread\n");

    if (avctx->err_recognition & AV_EF_CRCCHECK) {
        if (ff_tak_check_crc(pkt->data + hsize,
                             get_bits_count(gb) / 8 - hsize)) {
            av_log(avctx, AV_LOG_ERROR, "CRC error\n");
            return AVERROR_INVALIDDATA;
        }
    }

    /* convert to output buffer */
    switch (avctx->sample_fmt) {
    case AV_SAMPLE_FMT_U8P:
        for (chan = 0; chan < avctx->channels; chan++) {
            uint8_t *samples = (uint8_t *)s->frame.extended_data[chan];
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] = decoded[i] + 0x80;
        }
        break;
    case AV_SAMPLE_FMT_S16P:
        for (chan = 0; chan < avctx->channels; chan++) {
            int16_t *samples = (int16_t *)s->frame.extended_data[chan];
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] = decoded[i];
        }
        break;
    case AV_SAMPLE_FMT_S32P:
        for (chan = 0; chan < avctx->channels; chan++) {
            int32_t *samples = (int32_t *)s->frame.extended_data[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] <<= 8;
        }
        break;
    }

    *got_frame_ptr   = 1;
    *(AVFrame *)data = s->frame;

    return pkt->size;
}

static av_cold int tak_decode_close(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;

    av_freep(&s->decode_buffer);
    av_freep(&s->residues);

    return 0;
}

AVCodec ff_tak_decoder = {
    .name             = "tak",
    .type             = AVMEDIA_TYPE_AUDIO,
    .id               = AV_CODEC_ID_TAK,
    .priv_data_size   = sizeof(TAKDecContext),
    .init             = tak_decode_init,
    .init_static_data = tak_init_static_data,
    .close            = tak_decode_close,
    .decode           = tak_decode_frame,
    .capabilities     = CODEC_CAP_DR1,
    .long_name        = NULL_IF_CONFIG_SMALL("TAK (Tom's lossless Audio Kompressor)"),
    .sample_fmts      = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
                                                        AV_SAMPLE_FMT_S16P,
                                                        AV_SAMPLE_FMT_S32P,
                                                        AV_SAMPLE_FMT_NONE },
};