/*
 * TAK decoder
 * Copyright (c) 2012 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * TAK (Tom's lossless Audio Kompressor) decoder
 * @author Paul B Mahol
 */

#include "libavutil/internal.h"
#include "libavutil/samplefmt.h"

#include "tak.h"
#include "audiodsp.h"
#include "thread.h"
#include "avcodec.h"
#include "internal.h"
#include "unary.h"

#define MAX_SUBFRAMES     8                         ///< max number of subframes per channel
#define MAX_PREDICTORS  256

typedef struct MCDParam {
    int8_t present;                 ///< decorrelation parameter availability for this channel
    int8_t index;                   ///< index into array of decorrelation types
    int8_t chan1;
    int8_t chan2;
} MCDParam;

typedef struct TAKDecContext {
    AVCodecContext *avctx;                          ///< parent AVCodecContext
    AudioDSPContext adsp;
    TAKStreamInfo   ti;
    GetBitContext   gb;                             ///< bitstream reader initialized to start at the current frame

    int             uval;
    int             nb_samples;                     ///< number of samples in the current frame
    uint8_t        *decode_buffer;
    unsigned int    decode_buffer_size;
    int32_t        *decoded[TAK_MAX_CHANNELS];      ///< decoded samples for each channel

    int8_t          lpc_mode[TAK_MAX_CHANNELS];
    int8_t          sample_shift[TAK_MAX_CHANNELS]; ///< shift applied to every sample in the channel
    int16_t         predictors[MAX_PREDICTORS];
    int             nb_subframes;                   ///< number of subframes in the current frame
    int16_t         subframe_len[MAX_SUBFRAMES];    ///< subframe length in samples
    int             subframe_scale;

    int8_t          dmode;                          ///< channel decorrelation type in the current frame

    MCDParam        mcdparams[TAK_MAX_CHANNELS];    ///< multichannel decorrelation parameters
    int8_t          coding_mode[128];

    DECLARE_ALIGNED(16, int16_t, filter)[MAX_PREDICTORS];
    DECLARE_ALIGNED(16, int16_t, residues)[544];
} TAKDecContext;

static const int8_t mc_dmodes[] = { 1, 3, 4, 6, };

static const uint16_t predictor_sizes[] = {
    4, 8, 12, 16, 24, 32, 48, 64, 80, 96, 128, 160, 192, 224, 256, 0,
};

static const struct CParam {
    int init;
    int escape;
    int scale;
    int aescape;
    int bias;
} xcodes[50] = {
    { 0x01, 0x0000001, 0x0000001, 0x0000003, 0x0000008 },
    { 0x02, 0x0000003, 0x0000001, 0x0000007, 0x0000006 },
    { 0x03, 0x0000005, 0x0000002, 0x000000E, 0x000000D },
    { 0x03, 0x0000003, 0x0000003, 0x000000D, 0x0000018 },
    { 0x04, 0x000000B, 0x0000004, 0x000001C, 0x0000019 },
    { 0x04, 0x0000006, 0x0000006, 0x000001A, 0x0000030 },
    { 0x05, 0x0000016, 0x0000008, 0x0000038, 0x0000032 },
    { 0x05, 0x000000C, 0x000000C, 0x0000034, 0x0000060 },
    { 0x06, 0x000002C, 0x0000010, 0x0000070, 0x0000064 },
    { 0x06, 0x0000018, 0x0000018, 0x0000068, 0x00000C0 },
    { 0x07, 0x0000058, 0x0000020, 0x00000E0, 0x00000C8 },
    { 0x07, 0x0000030, 0x0000030, 0x00000D0, 0x0000180 },
    { 0x08, 0x00000B0, 0x0000040, 0x00001C0, 0x0000190 },
    { 0x08, 0x0000060, 0x0000060, 0x00001A0, 0x0000300 },
    { 0x09, 0x0000160, 0x0000080, 0x0000380, 0x0000320 },
    { 0x09, 0x00000C0, 0x00000C0, 0x0000340, 0x0000600 },
    { 0x0A, 0x00002C0, 0x0000100, 0x0000700, 0x0000640 },
    { 0x0A, 0x0000180, 0x0000180, 0x0000680, 0x0000C00 },
    { 0x0B, 0x0000580, 0x0000200, 0x0000E00, 0x0000C80 },
    { 0x0B, 0x0000300, 0x0000300, 0x0000D00, 0x0001800 },
    { 0x0C, 0x0000B00, 0x0000400, 0x0001C00, 0x0001900 },
    { 0x0C, 0x0000600, 0x0000600, 0x0001A00, 0x0003000 },
    { 0x0D, 0x0001600, 0x0000800, 0x0003800, 0x0003200 },
    { 0x0D, 0x0000C00, 0x0000C00, 0x0003400, 0x0006000 },
    { 0x0E, 0x0002C00, 0x0001000, 0x0007000, 0x0006400 },
    { 0x0E, 0x0001800, 0x0001800, 0x0006800, 0x000C000 },
    { 0x0F, 0x0005800, 0x0002000, 0x000E000, 0x000C800 },
    { 0x0F, 0x0003000, 0x0003000, 0x000D000, 0x0018000 },
    { 0x10, 0x000B000, 0x0004000, 0x001C000, 0x0019000 },
    { 0x10, 0x0006000, 0x0006000, 0x001A000, 0x0030000 },
    { 0x11, 0x0016000, 0x0008000, 0x0038000, 0x0032000 },
    { 0x11, 0x000C000, 0x000C000, 0x0034000, 0x0060000 },
    { 0x12, 0x002C000, 0x0010000, 0x0070000, 0x0064000 },
    { 0x12, 0x0018000, 0x0018000, 0x0068000, 0x00C0000 },
    { 0x13, 0x0058000, 0x0020000, 0x00E0000, 0x00C8000 },
    { 0x13, 0x0030000, 0x0030000, 0x00D0000, 0x0180000 },
    { 0x14, 0x00B0000, 0x0040000, 0x01C0000, 0x0190000 },
    { 0x14, 0x0060000, 0x0060000, 0x01A0000, 0x0300000 },
    { 0x15, 0x0160000, 0x0080000, 0x0380000, 0x0320000 },
    { 0x15, 0x00C0000, 0x00C0000, 0x0340000, 0x0600000 },
    { 0x16, 0x02C0000, 0x0100000, 0x0700000, 0x0640000 },
    { 0x16, 0x0180000, 0x0180000, 0x0680000, 0x0C00000 },
    { 0x17, 0x0580000, 0x0200000, 0x0E00000, 0x0C80000 },
    { 0x17, 0x0300000, 0x0300000, 0x0D00000, 0x1800000 },
    { 0x18, 0x0B00000, 0x0400000, 0x1C00000, 0x1900000 },
    { 0x18, 0x0600000, 0x0600000, 0x1A00000, 0x3000000 },
    { 0x19, 0x1600000, 0x0800000, 0x3800000, 0x3200000 },
    { 0x19, 0x0C00000, 0x0C00000, 0x3400000, 0x6000000 },
    { 0x1A, 0x2C00000, 0x1000000, 0x7000000, 0x6400000 },
    { 0x1A, 0x1800000, 0x1800000, 0x6800000, 0xC000000 },
};
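
/*
 * Note on the xcodes[] table above: each CParam entry drives one coding mode
 * in decode_segment().  'init' is the number of bits read for every residual,
 * 'escape' and 'aescape' are the thresholds that trigger the one-bit and
 * unary escape paths, and 'scale'/'bias' extend the escaped value.  The
 * 50 rows roughly double the value range every two modes, which is why
 * decode_residues() can step between neighbouring modes with small
 * unary-coded deltas.
 */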

static int set_bps_params(AVCodecContext *avctx)
{
    switch (avctx->bits_per_raw_sample) {
    case 8:
        avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
        break;
    case 16:
        avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
        break;
    case 24:
        avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "invalid/unsupported bits per sample: %d\n",
               avctx->bits_per_raw_sample);
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

static void set_sample_rate_params(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;
    int shift = 3 - (avctx->sample_rate / 11025);
    shift = FFMAX(0, shift);
    s->uval           = FFALIGN(avctx->sample_rate + 511 >> 9, 4) << shift;
    s->subframe_scale = FFALIGN(avctx->sample_rate + 511 >> 9, 4) << 1;
}
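
/*
 * Example of the derivation above (not from the bitstream spec, just the
 * arithmetic as implemented): at 44100 Hz, shift = FFMAX(0, 3 - 4) = 0 and
 * FFALIGN((44100 + 511) >> 9, 4) = FFALIGN(87, 4) = 88, so uval = 88 and
 * subframe_scale = 176.  uval is the nominal segment length used by
 * decode_residues(), while subframe_scale converts the 6-bit subframe
 * positions read in decode_channel() into sample counts.
 */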

static av_cold int tak_decode_init(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;

    ff_audiodsp_init(&s->adsp);

    s->avctx = avctx;
    avctx->bits_per_raw_sample = avctx->bits_per_coded_sample;

    set_sample_rate_params(avctx);

    return set_bps_params(avctx);
}

static void decode_lpc(int32_t *coeffs, int mode, int length)
{
    int i;

    if (length < 2)
        return;

    if (mode == 1) {
        int a1 = *coeffs++;
        for (i = 0; i < length - 1 >> 1; i++) {
            *coeffs   += a1;
            coeffs[1] += *coeffs;
            a1         = coeffs[1];
            coeffs    += 2;
        }
        if (length - 1 & 1)
            *coeffs += a1;
    } else if (mode == 2) {
        int a1    = coeffs[1];
        int a2    = a1 + *coeffs;
        coeffs[1] = a2;
        if (length > 2) {
            coeffs += 2;
            for (i = 0; i < length - 2 >> 1; i++) {
                int a3    = *coeffs + a1;
                int a4    = a3 + a2;
                *coeffs   = a4;
                a1        = coeffs[1] + a3;
                a2        = a1 + a4;
                coeffs[1] = a2;
                coeffs   += 2;
            }
            if (length & 1)
                *coeffs += a1 + a2;
        }
    } else if (mode == 3) {
        int a1    = coeffs[1];
        int a2    = a1 + *coeffs;
        coeffs[1] = a2;
        if (length > 2) {
            int a3  = coeffs[2];
            int a4  = a3 + a1;
            int a5  = a4 + a2;
            coeffs += 3;
            for (i = 0; i < length - 3; i++) {
                a3      += *coeffs;
                a4      += a3;
                a5      += a4;
                *coeffs  = a5;
                coeffs++;
            }
        }
    }
}
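
/*
 * decode_lpc() above applies a fixed-order integrator to the decoded
 * residuals in place: mode 1 is a single running sum (first-order
 * prediction), while modes 2 and 3 effectively accumulate twice and three
 * times.  Its callers either run it on the first filter_order samples of a
 * subframe or, via lpc_mode[], on a whole channel after decorrelation.
 */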

static int decode_segment(TAKDecContext *s, int8_t mode, int32_t *decoded, int len)
{
    struct CParam code;
    GetBitContext *gb = &s->gb;
    int i;

    if (!mode) {
        memset(decoded, 0, len * sizeof(*decoded));
        return 0;
    }

    if (mode > FF_ARRAY_ELEMS(xcodes))
        return AVERROR_INVALIDDATA;
    code = xcodes[mode - 1];

    for (i = 0; i < len; i++) {
        int x = get_bits_long(gb, code.init);
        if (x >= code.escape && get_bits1(gb)) {
            x |= 1 << code.init;
            if (x >= code.aescape) {
                int scale = get_unary(gb, 1, 9);
                if (scale == 9) {
                    int scale_bits = get_bits(gb, 3);
                    if (scale_bits > 0) {
                        if (scale_bits == 7) {
                            scale_bits += get_bits(gb, 5);
                            if (scale_bits > 29)
                                return AVERROR_INVALIDDATA;
                        }
                        scale = get_bits_long(gb, scale_bits) + 1;
                        x    += code.scale * scale;
                    }
                    x += code.bias;
                } else
                    x += code.scale * scale - code.escape;
            } else
                x -= code.escape;
        }
        decoded[i] = (x >> 1) ^ -(x & 1);
    }

    return 0;
}
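
/*
 * The final step of decode_segment() undoes the usual zigzag mapping of
 * signed values to unsigned codes: (x >> 1) ^ -(x & 1) turns
 * 0, 1, 2, 3, 4, 5, ... into 0, -1, 1, -2, 2, -3, ...
 */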

static int decode_residues(TAKDecContext *s, int32_t *decoded, int length)
{
    GetBitContext *gb = &s->gb;
    int i, mode, ret;

    if (length > s->nb_samples)
        return AVERROR_INVALIDDATA;

    if (get_bits1(gb)) {
        int wlength, rval;

        wlength = length / s->uval;

        rval = length - (wlength * s->uval);

        if (rval < s->uval / 2)
            rval += s->uval;
        else
            wlength++;

        if (wlength <= 1 || wlength > 128)
            return AVERROR_INVALIDDATA;

        s->coding_mode[0] = mode = get_bits(gb, 6);

        for (i = 1; i < wlength; i++) {
            int c = get_unary(gb, 1, 6);

            switch (c) {
            case 6:
                mode = get_bits(gb, 6);
                break;
            case 5:
            case 4:
            case 3: {
                /* mode += sign ? (1 - c) : (c - 1) */
                int sign = get_bits1(gb);
                mode    += (-sign ^ (c - 1)) + sign;
                break;
            }
            case 2:
                mode++;
                break;
            case 1:
                mode--;
                break;
            }
            s->coding_mode[i] = mode;
        }

        i = 0;
        while (i < wlength) {
            int len = 0;

            mode = s->coding_mode[i];
            do {
                if (i >= wlength - 1)
                    len += rval;
                else
                    len += s->uval;
                i++;

                if (i == wlength)
                    break;
            } while (s->coding_mode[i] == mode);

            if ((ret = decode_segment(s, mode, decoded, len)) < 0)
                return ret;
            decoded += len;
        }
    } else {
        mode = get_bits(gb, 6);
        if ((ret = decode_segment(s, mode, decoded, length)) < 0)
            return ret;
    }

    return 0;
}

static int get_bits_esc4(GetBitContext *gb)
{
    if (get_bits1(gb))
        return get_bits(gb, 4) + 1;
    else
        return 0;
}
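
/*
 * get_bits_esc4() reads an optional 4-bit field: a flag bit followed, when
 * set, by four bits biased by one, so the result is either 0 or a value in
 * the range 1..16.  It is used for the dshift and sample_shift fields below.
 */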

static int decode_subframe(TAKDecContext *s, int32_t *decoded,
                           int subframe_size, int prev_subframe_size)
{
    GetBitContext *gb = &s->gb;
    int x, y, i, j, ret = 0;
    int dshift, size, filter_quant, filter_order;
    int tfilter[MAX_PREDICTORS];

    if (!get_bits1(gb))
        return decode_residues(s, decoded, subframe_size);

    filter_order = predictor_sizes[get_bits(gb, 4)];

    if (prev_subframe_size > 0 && get_bits1(gb)) {
        if (filter_order > prev_subframe_size)
            return AVERROR_INVALIDDATA;

        decoded       -= filter_order;
        subframe_size += filter_order;

        if (filter_order > subframe_size)
            return AVERROR_INVALIDDATA;
    } else {
        int lpc_mode;

        if (filter_order > subframe_size)
            return AVERROR_INVALIDDATA;

        lpc_mode = get_bits(gb, 2);
        if (lpc_mode > 2)
            return AVERROR_INVALIDDATA;

        if ((ret = decode_residues(s, decoded, filter_order)) < 0)
            return ret;

        if (lpc_mode)
            decode_lpc(decoded, lpc_mode, filter_order);
    }

    dshift = get_bits_esc4(gb);
    size   = get_bits1(gb) + 6;

    filter_quant = 10;
    if (get_bits1(gb)) {
        filter_quant -= get_bits(gb, 3) + 1;
        if (filter_quant < 3)
            return AVERROR_INVALIDDATA;
    }

    s->predictors[0] = get_sbits(gb, 10);
    s->predictors[1] = get_sbits(gb, 10);
    s->predictors[2] = get_sbits(gb, size) << (10 - size);
    s->predictors[3] = get_sbits(gb, size) << (10 - size);
    if (filter_order > 4) {
        int tmp = size - get_bits1(gb);

        for (i = 4; i < filter_order; i++) {
            if (!(i & 3))
                x = tmp - get_bits(gb, 2);
            s->predictors[i] = get_sbits(gb, x) << (10 - size);
        }
    }

    tfilter[0] = s->predictors[0] << 6;
    for (i = 1; i < filter_order; i++) {
        int32_t *p1 = &tfilter[0];
        int32_t *p2 = &tfilter[i - 1];

        for (j = 0; j < (i + 1) / 2; j++) {
            x     = *p1 + (s->predictors[i] * *p2 + 256 >> 9);
            *p2  += s->predictors[i] * *p1 + 256 >> 9;
            *p1++ = x;
            p2--;
        }

        tfilter[i] = s->predictors[i] << 6;
    }

    x = 1 << (32 - (15 - filter_quant));
    y = 1 << ((15 - filter_quant) - 1);
    for (i = 0, j = filter_order - 1; i < filter_order / 2; i++, j--) {
        s->filter[j] = x - ((tfilter[i] + y) >> (15 - filter_quant));
        s->filter[i] = x - ((tfilter[j] + y) >> (15 - filter_quant));
    }

    if ((ret = decode_residues(s, &decoded[filter_order],
                               subframe_size - filter_order)) < 0)
        return ret;

    for (i = 0; i < filter_order; i++)
        s->residues[i] = *decoded++ >> dshift;

    y = FF_ARRAY_ELEMS(s->residues) - filter_order;
    x = subframe_size - filter_order;
    while (x > 0) {
        int tmp = FFMIN(y, x);

        for (i = 0; i < tmp; i++) {
            int v = 1 << (filter_quant - 1);

            if (filter_order & -16)
                v += s->adsp.scalarproduct_int16(&s->residues[i], s->filter,
                                                 filter_order & -16);
            for (j = filter_order & -16; j < filter_order; j += 4) {
                v += s->residues[i + j + 3] * s->filter[j + 3] +
                     s->residues[i + j + 2] * s->filter[j + 2] +
                     s->residues[i + j + 1] * s->filter[j + 1] +
                     s->residues[i + j    ] * s->filter[j    ];
            }
            v = (av_clip_intp2(v >> filter_quant, 13) << dshift) - *decoded;
            *decoded++ = v;
            s->residues[filter_order + i] = v >> dshift;
        }

        x -= tmp;
        if (x > 0)
            memcpy(s->residues, &s->residues[y], 2 * filter_order);
    }

    emms_c();

    return 0;
}
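
/*
 * The prediction loop at the end of decode_subframe() keeps a sliding window
 * of 16-bit residues (s->residues has 544 entries) so scalarproduct_int16()
 * can process the filter taps in multiples of 16, with any remaining taps
 * handled four at a time.  When the window is exhausted, the last
 * filter_order history samples are copied back to the start of residues[]
 * and the loop continues.
 */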

static int decode_channel(TAKDecContext *s, int chan)
{
    AVCodecContext *avctx = s->avctx;
    GetBitContext *gb     = &s->gb;
    int32_t *decoded      = s->decoded[chan];
    int left              = s->nb_samples - 1;
    int i = 0, ret, prev  = 0;

    s->sample_shift[chan] = get_bits_esc4(gb);
    if (s->sample_shift[chan] >= avctx->bits_per_raw_sample)
        return AVERROR_INVALIDDATA;

    *decoded++        = get_sbits(gb, avctx->bits_per_raw_sample - s->sample_shift[chan]);
    s->lpc_mode[chan] = get_bits(gb, 2);
    s->nb_subframes   = get_bits(gb, 3) + 1;

    if (s->nb_subframes > 1) {
        if (get_bits_left(gb) < (s->nb_subframes - 1) * 6)
            return AVERROR_INVALIDDATA;

        for (; i < s->nb_subframes - 1; i++) {
            int v = get_bits(gb, 6);

            s->subframe_len[i] = (v - prev) * s->subframe_scale;
            if (s->subframe_len[i] <= 0)
                return AVERROR_INVALIDDATA;

            left -= s->subframe_len[i];
            prev  = v;
        }

        if (left <= 0)
            return AVERROR_INVALIDDATA;
    }
    s->subframe_len[i] = left;

    prev = 0;
    for (i = 0; i < s->nb_subframes; i++) {
        if ((ret = decode_subframe(s, decoded, s->subframe_len[i], prev)) < 0)
            return ret;
        decoded += s->subframe_len[i];
        prev     = s->subframe_len[i];
    }

    return 0;
}

static int decorrelate(TAKDecContext *s, int c1, int c2, int length)
{
    GetBitContext *gb = &s->gb;
    int32_t *p1       = s->decoded[c1] + 1;
    int32_t *p2       = s->decoded[c2] + 1;
    int i;
    int dshift, dfactor;

    switch (s->dmode) {
    case 1: /* left/side */
        for (i = 0; i < length; i++) {
            int32_t a = p1[i];
            int32_t b = p2[i];
            p2[i]     = a + b;
        }
        break;
    case 2: /* side/right */
        for (i = 0; i < length; i++) {
            int32_t a = p1[i];
            int32_t b = p2[i];
            p1[i]     = b - a;
        }
        break;
    case 3: /* side/mid */
        for (i = 0; i < length; i++) {
            int32_t a = p1[i];
            int32_t b = p2[i];
            a        -= b >> 1;
            p1[i]     = a;
            p2[i]     = a + b;
        }
        break;
    case 4: /* side/left with scale factor */
        FFSWAP(int32_t*, p1, p2);
    case 5: /* side/right with scale factor */
        dshift  = get_bits_esc4(gb);
        dfactor = get_sbits(gb, 10);

        for (i = 0; i < length; i++) {
            int32_t a = p1[i];
            int32_t b = p2[i];
            b         = dfactor * (b >> dshift) + 128 >> 8 << dshift;
            p1[i]     = b - a;
        }
        break;
    case 6:
        FFSWAP(int32_t*, p1, p2);
    case 7: {
        int length2, order_half, filter_order, dval1, dval2;
        int tmp, x, code_size;

        if (length < 256)
            return AVERROR_INVALIDDATA;

        dshift       = get_bits_esc4(gb);
        filter_order = 8 << get_bits1(gb);
        dval1        = get_bits1(gb);
        dval2        = get_bits1(gb);

        for (i = 0; i < filter_order; i++) {
            if (!(i & 3))
                code_size = 14 - get_bits(gb, 3);
            s->filter[i] = get_sbits(gb, code_size);
        }

        order_half = filter_order / 2;
        length2    = length - (filter_order - 1);

        /* decorrelate beginning samples */
        if (dval1) {
            for (i = 0; i < order_half; i++) {
                int32_t a = p1[i];
                int32_t b = p2[i];
                p1[i]     = a + b;
            }
        }

        /* decorrelate ending samples */
        if (dval2) {
            for (i = length2 + order_half; i < length; i++) {
                int32_t a = p1[i];
                int32_t b = p2[i];
                p1[i]     = a + b;
            }
        }

        for (i = 0; i < filter_order; i++)
            s->residues[i] = *p2++ >> dshift;

        p1 += order_half;
        x = FF_ARRAY_ELEMS(s->residues) - filter_order;
        for (; length2 > 0; length2 -= tmp) {
            tmp = FFMIN(length2, x);

            for (i = 0; i < tmp; i++)
                s->residues[filter_order + i] = *p2++ >> dshift;

            for (i = 0; i < tmp; i++) {
                int v = 1 << 9;

                if (filter_order == 16) {
                    v += s->adsp.scalarproduct_int16(&s->residues[i], s->filter,
                                                     filter_order);
                } else {
                    v += s->residues[i + 7] * s->filter[7] +
                         s->residues[i + 6] * s->filter[6] +
                         s->residues[i + 5] * s->filter[5] +
                         s->residues[i + 4] * s->filter[4] +
                         s->residues[i + 3] * s->filter[3] +
                         s->residues[i + 2] * s->filter[2] +
                         s->residues[i + 1] * s->filter[1] +
                         s->residues[i    ] * s->filter[0];
                }

                v = (av_clip_intp2(v >> 10, 13) << dshift) - *p1;
                *p1++ = v;
            }

            memcpy(s->residues, &s->residues[tmp], 2 * filter_order);
        }

        emms_c();
        break;
    }
    }

    return 0;
}
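
/*
 * Decorrelation modes 4 and 6 fall through to modes 5 and 7 after swapping
 * the channel pointers, so one implementation serves both channel orders.
 * Modes 6/7 read an 8- or 16-tap cross-channel filter from the bitstream and
 * run it with the same sliding residue window as decode_subframe(),
 * predicting one channel from the other.
 */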

static int tak_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame_ptr, AVPacket *pkt)
{
    TAKDecContext *s   = avctx->priv_data;
    AVFrame *frame     = data;
    ThreadFrame tframe = { .f = data };
    GetBitContext *gb  = &s->gb;
    int chan, i, ret, hsize;

    if (pkt->size < TAK_MIN_FRAME_HEADER_BYTES)
        return AVERROR_INVALIDDATA;

    if ((ret = init_get_bits8(gb, pkt->data, pkt->size)) < 0)
        return ret;

    if ((ret = ff_tak_decode_frame_header(avctx, gb, &s->ti, 0)) < 0)
        return ret;

    hsize = get_bits_count(gb) / 8;

    if (avctx->err_recognition & (AV_EF_CRCCHECK|AV_EF_COMPLIANT)) {
        if (ff_tak_check_crc(pkt->data, hsize)) {
            av_log(avctx, AV_LOG_ERROR, "CRC error\n");
            if (avctx->err_recognition & AV_EF_EXPLODE)
                return AVERROR_INVALIDDATA;
        }
    }

    if (s->ti.codec != TAK_CODEC_MONO_STEREO &&
        s->ti.codec != TAK_CODEC_MULTICHANNEL) {
        av_log(avctx, AV_LOG_ERROR, "unsupported codec: %d\n", s->ti.codec);
        return AVERROR_PATCHWELCOME;
    }
    if (s->ti.data_type) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported data type: %d\n", s->ti.data_type);
        return AVERROR_INVALIDDATA;
    }
    if (s->ti.codec == TAK_CODEC_MONO_STEREO && s->ti.channels > 2) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid number of channels: %d\n", s->ti.channels);
        return AVERROR_INVALIDDATA;
    }
    if (s->ti.channels > 6) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported number of channels: %d\n", s->ti.channels);
        return AVERROR_INVALIDDATA;
    }

    if (s->ti.frame_samples <= 0) {
        av_log(avctx, AV_LOG_ERROR, "unsupported/invalid number of samples\n");
        return AVERROR_INVALIDDATA;
    }

    avctx->bits_per_raw_sample = s->ti.bps;
    if ((ret = set_bps_params(avctx)) < 0)
        return ret;
    if (s->ti.sample_rate != avctx->sample_rate) {
        avctx->sample_rate = s->ti.sample_rate;
        set_sample_rate_params(avctx);
    }
    if (s->ti.ch_layout)
        avctx->channel_layout = s->ti.ch_layout;
    avctx->channels = s->ti.channels;

    s->nb_samples = s->ti.last_frame_samples ? s->ti.last_frame_samples
                                             : s->ti.frame_samples;

    frame->nb_samples = s->nb_samples;
    if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
        return ret;
    ff_thread_finish_setup(avctx);

    if (avctx->bits_per_raw_sample <= 16) {
        int buf_size = av_samples_get_buffer_size(NULL, avctx->channels,
                                                  s->nb_samples,
                                                  AV_SAMPLE_FMT_S32P, 0);
        if (buf_size < 0)
            return buf_size;
        av_fast_malloc(&s->decode_buffer, &s->decode_buffer_size, buf_size);
        if (!s->decode_buffer)
            return AVERROR(ENOMEM);
        ret = av_samples_fill_arrays((uint8_t **)s->decoded, NULL,
                                     s->decode_buffer, avctx->channels,
                                     s->nb_samples, AV_SAMPLE_FMT_S32P, 0);
        if (ret < 0)
            return ret;
    } else {
        for (chan = 0; chan < avctx->channels; chan++)
            s->decoded[chan] = (int32_t *)frame->extended_data[chan];
    }

    if (s->nb_samples < 16) {
        for (chan = 0; chan < avctx->channels; chan++) {
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                decoded[i] = get_sbits(gb, avctx->bits_per_raw_sample);
        }
    } else {
        if (s->ti.codec == TAK_CODEC_MONO_STEREO) {
            for (chan = 0; chan < avctx->channels; chan++)
                if (ret = decode_channel(s, chan))
                    return ret;

            if (avctx->channels == 2) {
                s->nb_subframes = get_bits(gb, 1) + 1;
                if (s->nb_subframes > 1) {
                    s->subframe_len[1] = get_bits(gb, 6);
                }

                s->dmode = get_bits(gb, 3);
                if (ret = decorrelate(s, 0, 1, s->nb_samples - 1))
                    return ret;
            }
        } else if (s->ti.codec == TAK_CODEC_MULTICHANNEL) {
            if (get_bits1(gb)) {
                int ch_mask = 0;

                chan = get_bits(gb, 4) + 1;
                if (chan > avctx->channels)
                    return AVERROR_INVALIDDATA;

                for (i = 0; i < chan; i++) {
                    int nbit = get_bits(gb, 4);

                    if (nbit >= avctx->channels)
                        return AVERROR_INVALIDDATA;

                    if (ch_mask & 1 << nbit)
                        return AVERROR_INVALIDDATA;

                    s->mcdparams[i].present = get_bits1(gb);
                    if (s->mcdparams[i].present) {
                        s->mcdparams[i].index = get_bits(gb, 2);
                        s->mcdparams[i].chan2 = get_bits(gb, 4);
                        if (s->mcdparams[i].chan2 >= avctx->channels) {
                            av_log(avctx, AV_LOG_ERROR,
                                   "invalid channel 2 (%d) for %d channel(s)\n",
                                   s->mcdparams[i].chan2, avctx->channels);
                            return AVERROR_INVALIDDATA;
                        }
                        if (s->mcdparams[i].index == 1) {
                            if ((nbit == s->mcdparams[i].chan2) ||
                                (ch_mask & 1 << s->mcdparams[i].chan2))
                                return AVERROR_INVALIDDATA;

                            ch_mask |= 1 << s->mcdparams[i].chan2;
                        } else if (!(ch_mask & 1 << s->mcdparams[i].chan2)) {
                            return AVERROR_INVALIDDATA;
                        }
                    }
                    s->mcdparams[i].chan1 = nbit;

                    ch_mask |= 1 << nbit;
                }
            } else {
                chan = avctx->channels;
                for (i = 0; i < chan; i++) {
                    s->mcdparams[i].present = 0;
                    s->mcdparams[i].chan1   = i;
                }
            }

            for (i = 0; i < chan; i++) {
                if (s->mcdparams[i].present && s->mcdparams[i].index == 1)
                    if (ret = decode_channel(s, s->mcdparams[i].chan2))
                        return ret;

                if (ret = decode_channel(s, s->mcdparams[i].chan1))
                    return ret;

                if (s->mcdparams[i].present) {
                    s->dmode = mc_dmodes[s->mcdparams[i].index];
                    if (ret = decorrelate(s,
                                          s->mcdparams[i].chan2,
                                          s->mcdparams[i].chan1,
                                          s->nb_samples - 1))
                        return ret;
                }
            }
        }

        for (chan = 0; chan < avctx->channels; chan++) {
            int32_t *decoded = s->decoded[chan];

            if (s->lpc_mode[chan])
                decode_lpc(decoded, s->lpc_mode[chan], s->nb_samples);

            if (s->sample_shift[chan] > 0)
                for (i = 0; i < s->nb_samples; i++)
                    decoded[i] <<= s->sample_shift[chan];
        }
    }

    align_get_bits(gb);
    skip_bits(gb, 24);
    if (get_bits_left(gb) < 0)
        av_log(avctx, AV_LOG_DEBUG, "overread\n");
    else if (get_bits_left(gb) > 0)
        av_log(avctx, AV_LOG_DEBUG, "underread\n");

    if (avctx->err_recognition & (AV_EF_CRCCHECK | AV_EF_COMPLIANT)) {
        if (ff_tak_check_crc(pkt->data + hsize,
                             get_bits_count(gb) / 8 - hsize)) {
            av_log(avctx, AV_LOG_ERROR, "CRC error\n");
            if (avctx->err_recognition & AV_EF_EXPLODE)
                return AVERROR_INVALIDDATA;
        }
    }

    /* convert to output buffer */
    switch (avctx->sample_fmt) {
    case AV_SAMPLE_FMT_U8P:
        for (chan = 0; chan < avctx->channels; chan++) {
            uint8_t *samples = (uint8_t *)frame->extended_data[chan];
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] = decoded[i] + 0x80;
        }
        break;
    case AV_SAMPLE_FMT_S16P:
        for (chan = 0; chan < avctx->channels; chan++) {
            int16_t *samples = (int16_t *)frame->extended_data[chan];
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] = decoded[i];
        }
        break;
    case AV_SAMPLE_FMT_S32P:
        for (chan = 0; chan < avctx->channels; chan++) {
            int32_t *samples = (int32_t *)frame->extended_data[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] <<= 8;
        }
        break;
    }

    *got_frame_ptr = 1;

    return pkt->size;
}

#if HAVE_THREADS
static int init_thread_copy(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;
    s->avctx = avctx;
    return 0;
}

static int update_thread_context(AVCodecContext *dst,
                                 const AVCodecContext *src)
{
    TAKDecContext *tsrc = src->priv_data;
    TAKDecContext *tdst = dst->priv_data;
    if (dst == src)
        return 0;
    memcpy(&tdst->ti, &tsrc->ti, sizeof(TAKStreamInfo));
    return 0;
}
#endif

static av_cold int tak_decode_close(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;

    av_freep(&s->decode_buffer);

    return 0;
}

AVCodec ff_tak_decoder = {
    .name             = "tak",
    .long_name        = NULL_IF_CONFIG_SMALL("TAK (Tom's lossless Audio Kompressor)"),
    .type             = AVMEDIA_TYPE_AUDIO,
    .id               = AV_CODEC_ID_TAK,
    .priv_data_size   = sizeof(TAKDecContext),
    .init             = tak_decode_init,
    .close            = tak_decode_close,
    .decode           = tak_decode_frame,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
    .capabilities     = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
    .sample_fmts      = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
                                                        AV_SAMPLE_FMT_S16P,
                                                        AV_SAMPLE_FMT_S32P,
                                                        AV_SAMPLE_FMT_NONE },
};