You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

953 lines
31KB

  1. /*
  2. * TAK decoder
  3. * Copyright (c) 2012 Paul B Mahol
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * TAK (Tom's lossless Audio Kompressor) decoder
  24. * @author Paul B Mahol
  25. */
  26. #include "libavutil/internal.h"
  27. #include "libavutil/samplefmt.h"
  28. #include "tak.h"
  29. #include "takdsp.h"
  30. #include "audiodsp.h"
  31. #include "thread.h"
  32. #include "avcodec.h"
  33. #include "internal.h"
  34. #include "unary.h"
#define MAX_SUBFRAMES  8                ///< max number of subframes per channel
#define MAX_PREDICTORS 256

/* Multichannel decorrelation parameters for one decoded channel. */
typedef struct MCDParam {
    int8_t present;                     ///< decorrelation parameter availability for this channel
    int8_t index;                       ///< index into array of decorrelation types
    int8_t chan1;                       ///< first channel of the decorrelated pair
    int8_t chan2;                       ///< second channel of the decorrelated pair
} MCDParam;
typedef struct TAKDecContext {
    AVCodecContext *avctx;                          ///< parent AVCodecContext
    AudioDSPContext adsp;
    TAKDSPContext   tdsp;
    TAKStreamInfo   ti;                             ///< stream info parsed from the frame header
    GetBitContext   gb;                             ///< bitstream reader initialized to start at the current frame

    int             uval;                           ///< base residue-segment length, derived from the sample rate
    int             nb_samples;                     ///< number of samples in the current frame
    uint8_t        *decode_buffer;                  ///< scratch buffer for <=16-bit output (samples decoded as S32P first)
    unsigned int    decode_buffer_size;
    int32_t        *decoded[TAK_MAX_CHANNELS];      ///< decoded samples for each channel

    int8_t          lpc_mode[TAK_MAX_CHANNELS];     ///< per-channel LPC integration mode (0 = none)
    int8_t          sample_shift[TAK_MAX_CHANNELS]; ///< shift applied to every sample in the channel
    int16_t         predictors[MAX_PREDICTORS];     ///< quantized predictor values read from the bitstream
    int             nb_subframes;                   ///< number of subframes in the current frame
    int16_t         subframe_len[MAX_SUBFRAMES];    ///< subframe length in samples
    int             subframe_scale;                 ///< granularity of the coded subframe boundaries

    int8_t          dmode;                          ///< channel decorrelation type in the current frame

    MCDParam        mcdparams[TAK_MAX_CHANNELS];    ///< multichannel decorrelation parameters
    int8_t          coding_mode[128];               ///< residue coding mode for each segment

    DECLARE_ALIGNED(16, int16_t, filter)[MAX_PREDICTORS];
    DECLARE_ALIGNED(16, int16_t, residues)[544];
} TAKDecContext;
/* Decorrelation modes used in multichannel streams, indexed by MCDParam.index. */
static const int8_t mc_dmodes[] = { 1, 3, 4, 6, };

/* Filter orders selectable by the 4-bit field in a subframe header
 * (the final entry yields order 0). */
static const uint16_t predictor_sizes[] = {
    4, 8, 12, 16, 24, 32, 48, 64, 80, 96, 128, 160, 192, 224, 256, 0,
};
/* Parameters for the 50 escape-coded residue codes; a segment's coding
 * mode 1..50 selects xcodes[mode - 1] (mode 0 means an all-zero segment). */
static const struct CParam {
    int init;    ///< number of bits read initially for each code
    int escape;  ///< threshold at which an escape flag may follow
    int scale;
    int aescape;
    int bias;
} xcodes[50] = {
    { 0x01, 0x0000001, 0x0000001, 0x0000003, 0x0000008 },
    { 0x02, 0x0000003, 0x0000001, 0x0000007, 0x0000006 },
    { 0x03, 0x0000005, 0x0000002, 0x000000E, 0x000000D },
    { 0x03, 0x0000003, 0x0000003, 0x000000D, 0x0000018 },
    { 0x04, 0x000000B, 0x0000004, 0x000001C, 0x0000019 },
    { 0x04, 0x0000006, 0x0000006, 0x000001A, 0x0000030 },
    { 0x05, 0x0000016, 0x0000008, 0x0000038, 0x0000032 },
    { 0x05, 0x000000C, 0x000000C, 0x0000034, 0x0000060 },
    { 0x06, 0x000002C, 0x0000010, 0x0000070, 0x0000064 },
    { 0x06, 0x0000018, 0x0000018, 0x0000068, 0x00000C0 },
    { 0x07, 0x0000058, 0x0000020, 0x00000E0, 0x00000C8 },
    { 0x07, 0x0000030, 0x0000030, 0x00000D0, 0x0000180 },
    { 0x08, 0x00000B0, 0x0000040, 0x00001C0, 0x0000190 },
    { 0x08, 0x0000060, 0x0000060, 0x00001A0, 0x0000300 },
    { 0x09, 0x0000160, 0x0000080, 0x0000380, 0x0000320 },
    { 0x09, 0x00000C0, 0x00000C0, 0x0000340, 0x0000600 },
    { 0x0A, 0x00002C0, 0x0000100, 0x0000700, 0x0000640 },
    { 0x0A, 0x0000180, 0x0000180, 0x0000680, 0x0000C00 },
    { 0x0B, 0x0000580, 0x0000200, 0x0000E00, 0x0000C80 },
    { 0x0B, 0x0000300, 0x0000300, 0x0000D00, 0x0001800 },
    { 0x0C, 0x0000B00, 0x0000400, 0x0001C00, 0x0001900 },
    { 0x0C, 0x0000600, 0x0000600, 0x0001A00, 0x0003000 },
    { 0x0D, 0x0001600, 0x0000800, 0x0003800, 0x0003200 },
    { 0x0D, 0x0000C00, 0x0000C00, 0x0003400, 0x0006000 },
    { 0x0E, 0x0002C00, 0x0001000, 0x0007000, 0x0006400 },
    { 0x0E, 0x0001800, 0x0001800, 0x0006800, 0x000C000 },
    { 0x0F, 0x0005800, 0x0002000, 0x000E000, 0x000C800 },
    { 0x0F, 0x0003000, 0x0003000, 0x000D000, 0x0018000 },
    { 0x10, 0x000B000, 0x0004000, 0x001C000, 0x0019000 },
    { 0x10, 0x0006000, 0x0006000, 0x001A000, 0x0030000 },
    { 0x11, 0x0016000, 0x0008000, 0x0038000, 0x0032000 },
    { 0x11, 0x000C000, 0x000C000, 0x0034000, 0x0060000 },
    { 0x12, 0x002C000, 0x0010000, 0x0070000, 0x0064000 },
    { 0x12, 0x0018000, 0x0018000, 0x0068000, 0x00C0000 },
    { 0x13, 0x0058000, 0x0020000, 0x00E0000, 0x00C8000 },
    { 0x13, 0x0030000, 0x0030000, 0x00D0000, 0x0180000 },
    { 0x14, 0x00B0000, 0x0040000, 0x01C0000, 0x0190000 },
    { 0x14, 0x0060000, 0x0060000, 0x01A0000, 0x0300000 },
    { 0x15, 0x0160000, 0x0080000, 0x0380000, 0x0320000 },
    { 0x15, 0x00C0000, 0x00C0000, 0x0340000, 0x0600000 },
    { 0x16, 0x02C0000, 0x0100000, 0x0700000, 0x0640000 },
    { 0x16, 0x0180000, 0x0180000, 0x0680000, 0x0C00000 },
    { 0x17, 0x0580000, 0x0200000, 0x0E00000, 0x0C80000 },
    { 0x17, 0x0300000, 0x0300000, 0x0D00000, 0x1800000 },
    { 0x18, 0x0B00000, 0x0400000, 0x1C00000, 0x1900000 },
    { 0x18, 0x0600000, 0x0600000, 0x1A00000, 0x3000000 },
    { 0x19, 0x1600000, 0x0800000, 0x3800000, 0x3200000 },
    { 0x19, 0x0C00000, 0x0C00000, 0x3400000, 0x6000000 },
    { 0x1A, 0x2C00000, 0x1000000, 0x7000000, 0x6400000 },
    { 0x1A, 0x1800000, 0x1800000, 0x6800000, 0xC000000 },
};
  128. static int set_bps_params(AVCodecContext *avctx)
  129. {
  130. switch (avctx->bits_per_raw_sample) {
  131. case 8:
  132. avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
  133. break;
  134. case 16:
  135. avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
  136. break;
  137. case 24:
  138. avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
  139. break;
  140. default:
  141. av_log(avctx, AV_LOG_ERROR, "invalid/unsupported bits per sample: %d\n",
  142. avctx->bits_per_raw_sample);
  143. return AVERROR_INVALIDDATA;
  144. }
  145. return 0;
  146. }
  147. static void set_sample_rate_params(AVCodecContext *avctx)
  148. {
  149. TAKDecContext *s = avctx->priv_data;
  150. int shift = 3 - (avctx->sample_rate / 11025);
  151. shift = FFMAX(0, shift);
  152. s->uval = FFALIGN(avctx->sample_rate + 511 >> 9, 4) << shift;
  153. s->subframe_scale = FFALIGN(avctx->sample_rate + 511 >> 9, 4) << 1;
  154. }
/* Decoder init: set up the DSP helpers and derive the initial output
 * parameters from the codec parameters (refined per-frame later). */
static av_cold int tak_decode_init(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;

    ff_audiodsp_init(&s->adsp);
    ff_takdsp_init(&s->tdsp);

    s->avctx = avctx;
    avctx->bits_per_raw_sample = avctx->bits_per_coded_sample;

    set_sample_rate_params(avctx);

    return set_bps_params(avctx);
}
  165. static void decode_lpc(int32_t *coeffs, int mode, int length)
  166. {
  167. int i;
  168. if (length < 2)
  169. return;
  170. if (mode == 1) {
  171. int a1 = *coeffs++;
  172. for (i = 0; i < length - 1 >> 1; i++) {
  173. *coeffs += a1;
  174. coeffs[1] += *coeffs;
  175. a1 = coeffs[1];
  176. coeffs += 2;
  177. }
  178. if (length - 1 & 1)
  179. *coeffs += a1;
  180. } else if (mode == 2) {
  181. int a1 = coeffs[1];
  182. int a2 = a1 + *coeffs;
  183. coeffs[1] = a2;
  184. if (length > 2) {
  185. coeffs += 2;
  186. for (i = 0; i < length - 2 >> 1; i++) {
  187. int a3 = *coeffs + a1;
  188. int a4 = a3 + a2;
  189. *coeffs = a4;
  190. a1 = coeffs[1] + a3;
  191. a2 = a1 + a4;
  192. coeffs[1] = a2;
  193. coeffs += 2;
  194. }
  195. if (length & 1)
  196. *coeffs += a1 + a2;
  197. }
  198. } else if (mode == 3) {
  199. int a1 = coeffs[1];
  200. int a2 = a1 + *coeffs;
  201. coeffs[1] = a2;
  202. if (length > 2) {
  203. int a3 = coeffs[2];
  204. int a4 = a3 + a1;
  205. int a5 = a4 + a2;
  206. coeffs[2] = a5;
  207. coeffs += 3;
  208. for (i = 0; i < length - 3; i++) {
  209. a3 += *coeffs;
  210. a4 += a3;
  211. a5 += a4;
  212. *coeffs = a5;
  213. coeffs++;
  214. }
  215. }
  216. }
  217. }
/**
 * Decode one segment of 'len' residues coded with a single coding mode.
 *
 * @param mode    0 for an all-zero segment, otherwise 1-based index into xcodes
 * @param decoded output buffer for the signed residues
 * @return 0 on success, AVERROR_INVALIDDATA on a bad mode or scale field
 */
static int decode_segment(TAKDecContext *s, int8_t mode, int32_t *decoded, int len)
{
    struct CParam code;
    GetBitContext *gb = &s->gb;
    int i;

    if (!mode) {
        /* silent segment */
        memset(decoded, 0, len * sizeof(*decoded));
        return 0;
    }

    if (mode > FF_ARRAY_ELEMS(xcodes))
        return AVERROR_INVALIDDATA;
    code = xcodes[mode - 1];

    for (i = 0; i < len; i++) {
        /* read the base code, then optionally extend it via the escape path */
        int x = get_bits_long(gb, code.init);
        if (x >= code.escape && get_bits1(gb)) {
            x |= 1 << code.init;
            if (x >= code.aescape) {
                /* unary scale, with a further explicit-length escape at 9 */
                int scale = get_unary(gb, 1, 9);
                if (scale == 9) {
                    int scale_bits = get_bits(gb, 3);
                    if (scale_bits > 0) {
                        if (scale_bits == 7) {
                            scale_bits += get_bits(gb, 5);
                            if (scale_bits > 29)
                                return AVERROR_INVALIDDATA;
                        }
                        scale = get_bits_long(gb, scale_bits) + 1;
                        x    += code.scale * scale;
                    }
                    x += code.bias;
                } else
                    x += code.scale * scale - code.escape;
            } else
                x -= code.escape;
        }
        /* undo the zigzag mapping: even codes are >= 0, odd codes are < 0 */
        decoded[i] = (x >> 1) ^ -(x & 1);
    }

    return 0;
}
/**
 * Decode 'length' residues into 'decoded'.
 *
 * The residues are either one segment with a single coding mode, or split
 * into up to 128 windows of roughly s->uval samples, each window carrying
 * its own coding mode (delta-coded against the previous window's mode).
 */
static int decode_residues(TAKDecContext *s, int32_t *decoded, int length)
{
    GetBitContext *gb = &s->gb;
    int i, mode, ret;

    if (length > s->nb_samples)
        return AVERROR_INVALIDDATA;

    if (get_bits1(gb)) {
        int wlength, rval;

        wlength = length / s->uval;

        /* distribute the remainder: either fold it into the last window or
         * add an extra short window */
        rval = length - (wlength * s->uval);

        if (rval < s->uval / 2)
            rval += s->uval;
        else
            wlength++;

        if (wlength <= 1 || wlength > 128)
            return AVERROR_INVALIDDATA;

        s->coding_mode[0] = mode = get_bits(gb, 6);

        /* per-window coding modes, delta-coded with a unary prefix */
        for (i = 1; i < wlength; i++) {
            int c = get_unary(gb, 1, 6);

            switch (c) {
            case 6:
                /* full 6-bit mode replaces the running value */
                mode = get_bits(gb, 6);
                break;
            case 5:
            case 4:
            case 3: {
                /* mode += sign ? (1 - c) : (c - 1) */
                int sign = get_bits1(gb);
                mode    += (-sign ^ (c - 1)) + sign;
                break;
            }
            case 2:
                mode++;
                break;
            case 1:
                mode--;
                break;
            }
            s->coding_mode[i] = mode;
        }

        /* merge adjacent windows sharing a mode into one segment decode */
        i = 0;
        while (i < wlength) {
            int len = 0;

            mode = s->coding_mode[i];
            do {
                if (i >= wlength - 1)
                    len += rval;   /* last window uses the remainder length */
                else
                    len += s->uval;
                i++;

                if (i == wlength)
                    break;
            } while (s->coding_mode[i] == mode);

            if ((ret = decode_segment(s, mode, decoded, len)) < 0)
                return ret;
            decoded += len;
        }
    } else {
        /* single segment covering the whole run */
        mode = get_bits(gb, 6);
        if ((ret = decode_segment(s, mode, decoded, length)) < 0)
            return ret;
    }
    return 0;
}
  321. static int get_bits_esc4(GetBitContext *gb)
  322. {
  323. if (get_bits1(gb))
  324. return get_bits(gb, 4) + 1;
  325. else
  326. return 0;
  327. }
/**
 * Decode one subframe of a channel.
 *
 * A subframe is either plain residues, or residues passed through an
 * adaptive prediction filter whose coefficients are read from the
 * bitstream. The filter may be warmed up either with the tail of the
 * previous subframe or with explicitly coded (optionally LPC-integrated)
 * leading samples.
 *
 * @param decoded            output/working buffer for this subframe
 * @param subframe_size      number of samples in this subframe
 * @param prev_subframe_size size of the preceding subframe, 0 for the first
 */
static int decode_subframe(TAKDecContext *s, int32_t *decoded,
                           int subframe_size, int prev_subframe_size)
{
    GetBitContext *gb = &s->gb;
    int x, y, i, j, ret = 0;
    int dshift, size, filter_quant, filter_order;
    int tfilter[MAX_PREDICTORS];

    if (!get_bits1(gb))
        return decode_residues(s, decoded, subframe_size);

    filter_order = predictor_sizes[get_bits(gb, 4)];

    if (prev_subframe_size > 0 && get_bits1(gb)) {
        /* warm up the filter with the last filter_order samples of the
         * previous subframe */
        if (filter_order > prev_subframe_size)
            return AVERROR_INVALIDDATA;

        decoded       -= filter_order;
        subframe_size += filter_order;

        if (filter_order > subframe_size)
            return AVERROR_INVALIDDATA;
    } else {
        int lpc_mode;

        if (filter_order > subframe_size)
            return AVERROR_INVALIDDATA;

        lpc_mode = get_bits(gb, 2);
        if (lpc_mode > 2)
            return AVERROR_INVALIDDATA;

        /* explicitly coded warm-up samples */
        if ((ret = decode_residues(s, decoded, filter_order)) < 0)
            return ret;

        if (lpc_mode)
            decode_lpc(decoded, lpc_mode, filter_order);
    }

    dshift = get_bits_esc4(gb);
    size   = get_bits1(gb) + 6;   /* 6 or 7 bits per coded predictor */

    filter_quant = 10;
    if (get_bits1(gb)) {
        filter_quant -= get_bits(gb, 3) + 1;
        if (filter_quant < 3)
            return AVERROR_INVALIDDATA;
    }

    /* read the quantized predictors */
    s->predictors[0] = get_sbits(gb, 10);
    s->predictors[1] = get_sbits(gb, 10);
    s->predictors[2] = get_sbits(gb, size) << (10 - size);
    s->predictors[3] = get_sbits(gb, size) << (10 - size);
    if (filter_order > 4) {
        int tmp = size - get_bits1(gb);

        for (i = 4; i < filter_order; i++) {
            /* coded width can shrink every 4 predictors */
            if (!(i & 3))
                x = tmp - get_bits(gb, 2);
            s->predictors[i] = get_sbits(gb, x) << (10 - size);
        }
    }

    /* expand the predictors into the working filter with an
     * order-recursive update */
    tfilter[0] = s->predictors[0] << 6;
    for (i = 1; i < filter_order; i++) {
        int32_t *p1 = &tfilter[0];
        int32_t *p2 = &tfilter[i - 1];

        for (j = 0; j < (i + 1) / 2; j++) {
            x     = *p1 + (s->predictors[i] * *p2 + 256 >> 9);
            *p2  += s->predictors[i] * *p1 + 256 >> 9;
            *p1++ = x;
            p2--;
        }

        tfilter[i] = s->predictors[i] << 6;
    }

    /* requantize the filter to 16 bits, mirrored into s->filter */
    x = 1 << (32 - (15 - filter_quant));
    y = 1 << ((15 - filter_quant) - 1);
    for (i = 0, j = filter_order - 1; i < filter_order / 2; i++, j--) {
        s->filter[j] = x - ((tfilter[i] + y) >> (15 - filter_quant));
        s->filter[i] = x - ((tfilter[j] + y) >> (15 - filter_quant));
    }

    if ((ret = decode_residues(s, &decoded[filter_order],
                               subframe_size - filter_order)) < 0)
        return ret;

    /* seed the 16-bit residue window with the warm-up samples */
    for (i = 0; i < filter_order; i++)
        s->residues[i] = *decoded++ >> dshift;

    /* run the prediction filter over the residues, refilling the sliding
     * 16-bit window whenever it runs out */
    y = FF_ARRAY_ELEMS(s->residues) - filter_order;
    x = subframe_size - filter_order;
    while (x > 0) {
        int tmp = FFMIN(y, x);

        for (i = 0; i < tmp; i++) {
            int v = 1 << (filter_quant - 1);

            /* bulk of the dot product via the DSP routine (multiples of 16),
             * remainder unrolled 4 at a time */
            if (filter_order & -16)
                v += s->adsp.scalarproduct_int16(&s->residues[i], s->filter,
                                                 filter_order & -16);
            for (j = filter_order & -16; j < filter_order; j += 4) {
                v += s->residues[i + j + 3] * s->filter[j + 3] +
                     s->residues[i + j + 2] * s->filter[j + 2] +
                     s->residues[i + j + 1] * s->filter[j + 1] +
                     s->residues[i + j    ] * s->filter[j    ];
            }
            v = (av_clip_intp2(v >> filter_quant, 13) << dshift) - *decoded;
            *decoded++ = v;
            s->residues[filter_order + i] = v >> dshift;
        }

        x -= tmp;
        if (x > 0)
            memcpy(s->residues, &s->residues[y], 2 * filter_order);
    }

    emms_c();

    return 0;
}
/**
 * Decode all samples of one channel.
 *
 * Reads the per-channel header (sample shift, LPC mode, subframe layout),
 * the verbatim first sample, then decodes each subframe in order.
 */
static int decode_channel(TAKDecContext *s, int chan)
{
    AVCodecContext *avctx = s->avctx;
    GetBitContext *gb     = &s->gb;
    int32_t *decoded      = s->decoded[chan];
    int left              = s->nb_samples - 1;
    int i = 0, ret, prev = 0;

    s->sample_shift[chan] = get_bits_esc4(gb);
    if (s->sample_shift[chan] >= avctx->bits_per_raw_sample)
        return AVERROR_INVALIDDATA;

    /* the first sample is stored verbatim (minus the shift) */
    *decoded++ = get_sbits(gb, avctx->bits_per_raw_sample - s->sample_shift[chan]);
    s->lpc_mode[chan] = get_bits(gb, 2);
    s->nb_subframes   = get_bits(gb, 3) + 1;

    if (s->nb_subframes > 1) {
        if (get_bits_left(gb) < (s->nb_subframes - 1) * 6)
            return AVERROR_INVALIDDATA;

        /* subframe boundaries are coded as cumulative 6-bit positions,
         * scaled by subframe_scale */
        for (; i < s->nb_subframes - 1; i++) {
            int v = get_bits(gb, 6);

            s->subframe_len[i] = (v - prev) * s->subframe_scale;
            if (s->subframe_len[i] <= 0)
                return AVERROR_INVALIDDATA;

            left -= s->subframe_len[i];
            prev  = v;
        }

        if (left <= 0)
            return AVERROR_INVALIDDATA;
    }
    /* the last subframe takes whatever samples remain */
    s->subframe_len[i] = left;

    prev = 0;
    for (i = 0; i < s->nb_subframes; i++) {
        if ((ret = decode_subframe(s, decoded, s->subframe_len[i], prev)) < 0)
            return ret;
        decoded += s->subframe_len[i];
        prev     = s->subframe_len[i];
    }

    return 0;
}
/**
 * Undo the inter-channel decorrelation between channels c1 and c2,
 * according to s->dmode.
 *
 * Modes 1-3 are simple L/S, S/R and S/M recombinations; modes 4-5 add a
 * transmitted scale factor; modes 6-7 use a transmitted 8- or 16-tap
 * prediction filter across the channel pair.
 */
static int decorrelate(TAKDecContext *s, int c1, int c2, int length)
{
    GetBitContext *gb = &s->gb;
    /* filter modes (>5) skip the verbatim first sample */
    int32_t *p1       = s->decoded[c1] + (s->dmode > 5);
    int32_t *p2       = s->decoded[c2] + (s->dmode > 5);
    int32_t bp1       = p1[0];
    int32_t bp2       = p2[0];
    int i;
    int dshift, dfactor;

    length += s->dmode < 6;

    switch (s->dmode) {
    case 1: /* left/side */
        s->tdsp.decorrelate_ls(p1, p2, length);
        break;
    case 2: /* side/right */
        s->tdsp.decorrelate_sr(p1, p2, length);
        break;
    case 3: /* side/mid */
        s->tdsp.decorrelate_sm(p1, p2, length);
        break;
    case 4: /* side/left with scale factor */
        FFSWAP(int32_t*, p1, p2);
        FFSWAP(int32_t, bp1, bp2);
        /* fall through */
    case 5: /* side/right with scale factor */
        dshift  = get_bits_esc4(gb);
        dfactor = get_sbits(gb, 10);
        s->tdsp.decorrelate_sf(p1, p2, length, dshift, dfactor);
        break;
    case 6:
        FFSWAP(int32_t*, p1, p2);
        /* fall through */
    case 7: {
        int length2, order_half, filter_order, dval1, dval2;
        int tmp, x, code_size;

        if (length < 256)
            return AVERROR_INVALIDDATA;

        dshift       = get_bits_esc4(gb);
        filter_order = 8 << get_bits1(gb);   /* 8 or 16 taps */
        dval1        = get_bits1(gb);
        dval2        = get_bits1(gb);

        for (i = 0; i < filter_order; i++) {
            /* coded coefficient width can change every 4 taps */
            if (!(i & 3))
                code_size = 14 - get_bits(gb, 3);
            s->filter[i] = get_sbits(gb, code_size);
        }

        order_half = filter_order / 2;
        length2    = length - (filter_order - 1);

        /* decorrelate beginning samples */
        if (dval1) {
            for (i = 0; i < order_half; i++) {
                int32_t a = p1[i];
                int32_t b = p2[i];
                p1[i]     = a + b;
            }
        }

        /* decorrelate ending samples */
        if (dval2) {
            for (i = length2 + order_half; i < length; i++) {
                int32_t a = p1[i];
                int32_t b = p2[i];
                p1[i]     = a + b;
            }
        }

        /* seed the 16-bit residue window from channel 2 */
        for (i = 0; i < filter_order; i++)
            s->residues[i] = *p2++ >> dshift;

        p1 += order_half;
        x = FF_ARRAY_ELEMS(s->residues) - filter_order;
        /* filter the middle samples in chunks limited by the window size */
        for (; length2 > 0; length2 -= tmp) {
            tmp = FFMIN(length2, x);

            for (i = 0; i < tmp - (tmp == length2); i++)
                s->residues[filter_order + i] = *p2++ >> dshift;

            for (i = 0; i < tmp; i++) {
                int v = 1 << 9;

                if (filter_order == 16) {
                    v += s->adsp.scalarproduct_int16(&s->residues[i], s->filter,
                                                     filter_order);
                } else {
                    v += s->residues[i + 7] * s->filter[7] +
                         s->residues[i + 6] * s->filter[6] +
                         s->residues[i + 5] * s->filter[5] +
                         s->residues[i + 4] * s->filter[4] +
                         s->residues[i + 3] * s->filter[3] +
                         s->residues[i + 2] * s->filter[2] +
                         s->residues[i + 1] * s->filter[1] +
                         s->residues[i    ] * s->filter[0];
                }

                v = (av_clip_intp2(v >> 10, 13) << dshift) - *p1;
                *p1++ = v;
            }

            memmove(s->residues, &s->residues[tmp], 2 * filter_order);
        }

        emms_c();
        break;
    }
    }

    /* the simple modes operated past the first sample; restore it */
    if (s->dmode > 0 && s->dmode < 6) {
        p1[0] = bp1;
        p2[0] = bp2;
    }

    return 0;
}
/**
 * Decode one TAK frame from 'pkt' into the AVFrame pointed to by 'data'.
 *
 * Parses and (optionally) CRC-checks the frame header, applies any stream
 * parameter changes, decodes each channel, undoes multichannel
 * decorrelation and per-channel LPC/shift, then converts the 32-bit
 * working samples into the negotiated output sample format.
 *
 * @return pkt->size on success, a negative AVERROR code on failure
 */
static int tak_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame_ptr, AVPacket *pkt)
{
    TAKDecContext *s   = avctx->priv_data;
    AVFrame *frame     = data;
    ThreadFrame tframe = { .f = data };
    GetBitContext *gb  = &s->gb;
    int chan, i, ret, hsize;

    if (pkt->size < TAK_MIN_FRAME_HEADER_BYTES)
        return AVERROR_INVALIDDATA;

    if ((ret = init_get_bits8(gb, pkt->data, pkt->size)) < 0)
        return ret;

    if ((ret = ff_tak_decode_frame_header(avctx, gb, &s->ti, 0)) < 0)
        return ret;

    hsize = get_bits_count(gb) / 8;

    /* optional CRC check of the frame header */
    if (avctx->err_recognition & (AV_EF_CRCCHECK|AV_EF_COMPLIANT)) {
        if (ff_tak_check_crc(pkt->data, hsize)) {
            av_log(avctx, AV_LOG_ERROR, "CRC error\n");
            if (avctx->err_recognition & AV_EF_EXPLODE)
                return AVERROR_INVALIDDATA;
        }
    }

    /* validate the stream parameters carried by this frame */
    if (s->ti.codec != TAK_CODEC_MONO_STEREO &&
        s->ti.codec != TAK_CODEC_MULTICHANNEL) {
        av_log(avctx, AV_LOG_ERROR, "unsupported codec: %d\n", s->ti.codec);
        return AVERROR_PATCHWELCOME;
    }
    if (s->ti.data_type) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported data type: %d\n", s->ti.data_type);
        return AVERROR_INVALIDDATA;
    }
    if (s->ti.codec == TAK_CODEC_MONO_STEREO && s->ti.channels > 2) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid number of channels: %d\n", s->ti.channels);
        return AVERROR_INVALIDDATA;
    }
    if (s->ti.channels > 6) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported number of channels: %d\n", s->ti.channels);
        return AVERROR_INVALIDDATA;
    }

    if (s->ti.frame_samples <= 0) {
        av_log(avctx, AV_LOG_ERROR, "unsupported/invalid number of samples\n");
        return AVERROR_INVALIDDATA;
    }

    /* apply any parameter changes signalled in the header */
    avctx->bits_per_raw_sample = s->ti.bps;
    if ((ret = set_bps_params(avctx)) < 0)
        return ret;
    if (s->ti.sample_rate != avctx->sample_rate) {
        avctx->sample_rate = s->ti.sample_rate;
        set_sample_rate_params(avctx);
    }
    if (s->ti.ch_layout)
        avctx->channel_layout = s->ti.ch_layout;
    avctx->channels = s->ti.channels;

    s->nb_samples = s->ti.last_frame_samples ? s->ti.last_frame_samples
                                             : s->ti.frame_samples;

    frame->nb_samples = s->nb_samples;
    if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
        return ret;
    ff_thread_finish_setup(avctx);

    if (avctx->bits_per_raw_sample <= 16) {
        /* decode into a 32-bit scratch buffer, converted to the narrower
         * output format at the end */
        int buf_size = av_samples_get_buffer_size(NULL, avctx->channels,
                                                  s->nb_samples,
                                                  AV_SAMPLE_FMT_S32P, 0);
        if (buf_size < 0)
            return buf_size;
        av_fast_malloc(&s->decode_buffer, &s->decode_buffer_size, buf_size);
        if (!s->decode_buffer)
            return AVERROR(ENOMEM);
        ret = av_samples_fill_arrays((uint8_t **)s->decoded, NULL,
                                     s->decode_buffer, avctx->channels,
                                     s->nb_samples, AV_SAMPLE_FMT_S32P, 0);
        if (ret < 0)
            return ret;
    } else {
        /* 24-bit: decode directly into the output planes */
        for (chan = 0; chan < avctx->channels; chan++)
            s->decoded[chan] = (int32_t *)frame->extended_data[chan];
    }

    if (s->nb_samples < 16) {
        /* very short frames store all samples verbatim */
        for (chan = 0; chan < avctx->channels; chan++) {
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                decoded[i] = get_sbits(gb, avctx->bits_per_raw_sample);
        }
    } else {
        if (s->ti.codec == TAK_CODEC_MONO_STEREO) {
            for (chan = 0; chan < avctx->channels; chan++)
                if (ret = decode_channel(s, chan))
                    return ret;

            if (avctx->channels == 2) {
                s->nb_subframes = get_bits(gb, 1) + 1;
                if (s->nb_subframes > 1) {
                    s->subframe_len[1] = get_bits(gb, 6);
                }

                s->dmode = get_bits(gb, 3);
                if (ret = decorrelate(s, 0, 1, s->nb_samples - 1))
                    return ret;
            }
        } else if (s->ti.codec == TAK_CODEC_MULTICHANNEL) {
            if (get_bits1(gb)) {
                /* explicit per-channel decorrelation setup; ch_mask tracks
                 * channels already assigned to guard against duplicates */
                int ch_mask = 0;

                chan = get_bits(gb, 4) + 1;
                if (chan > avctx->channels)
                    return AVERROR_INVALIDDATA;

                for (i = 0; i < chan; i++) {
                    int nbit = get_bits(gb, 4);

                    if (nbit >= avctx->channels)
                        return AVERROR_INVALIDDATA;

                    if (ch_mask & 1 << nbit)
                        return AVERROR_INVALIDDATA;

                    s->mcdparams[i].present = get_bits1(gb);
                    if (s->mcdparams[i].present) {
                        s->mcdparams[i].index = get_bits(gb, 2);
                        s->mcdparams[i].chan2 = get_bits(gb, 4);
                        if (s->mcdparams[i].chan2 >= avctx->channels) {
                            av_log(avctx, AV_LOG_ERROR,
                                   "invalid channel 2 (%d) for %d channel(s)\n",
                                   s->mcdparams[i].chan2, avctx->channels);
                            return AVERROR_INVALIDDATA;
                        }
                        if (s->mcdparams[i].index == 1) {
                            if ((nbit == s->mcdparams[i].chan2) ||
                                (ch_mask & 1 << s->mcdparams[i].chan2))
                                return AVERROR_INVALIDDATA;

                            ch_mask |= 1 << s->mcdparams[i].chan2;
                        } else if (!(ch_mask & 1 << s->mcdparams[i].chan2)) {
                            return AVERROR_INVALIDDATA;
                        }
                    }
                    s->mcdparams[i].chan1 = nbit;

                    ch_mask |= 1 << nbit;
                }
            } else {
                /* default layout: every channel independent */
                chan = avctx->channels;
                for (i = 0; i < chan; i++) {
                    s->mcdparams[i].present = 0;
                    s->mcdparams[i].chan1   = i;
                }
            }

            for (i = 0; i < chan; i++) {
                /* index 1 means chan2 is coded independently and must be
                 * decoded before it can serve as the decorrelation source */
                if (s->mcdparams[i].present && s->mcdparams[i].index == 1)
                    if (ret = decode_channel(s, s->mcdparams[i].chan2))
                        return ret;

                if (ret = decode_channel(s, s->mcdparams[i].chan1))
                    return ret;

                if (s->mcdparams[i].present) {
                    s->dmode = mc_dmodes[s->mcdparams[i].index];
                    if (ret = decorrelate(s,
                                          s->mcdparams[i].chan2,
                                          s->mcdparams[i].chan1,
                                          s->nb_samples - 1))
                        return ret;
                }
            }
        }

        /* undo per-channel LPC integration and sample shift */
        for (chan = 0; chan < avctx->channels; chan++) {
            int32_t *decoded = s->decoded[chan];

            if (s->lpc_mode[chan])
                decode_lpc(decoded, s->lpc_mode[chan], s->nb_samples);

            if (s->sample_shift[chan] > 0)
                for (i = 0; i < s->nb_samples; i++)
                    decoded[i] <<= s->sample_shift[chan];
                    /* NOTE(review): left shift of a negative sample is UB in
                     * C — consider an unsigned shift here; confirm upstream */
        }
    }

    align_get_bits(gb);
    skip_bits(gb, 24);
    if (get_bits_left(gb) < 0)
        av_log(avctx, AV_LOG_DEBUG, "overread\n");
    else if (get_bits_left(gb) > 0)
        av_log(avctx, AV_LOG_DEBUG, "underread\n");

    /* optional CRC check of the frame payload */
    if (avctx->err_recognition & (AV_EF_CRCCHECK | AV_EF_COMPLIANT)) {
        if (ff_tak_check_crc(pkt->data + hsize,
                             get_bits_count(gb) / 8 - hsize)) {
            av_log(avctx, AV_LOG_ERROR, "CRC error\n");
            if (avctx->err_recognition & AV_EF_EXPLODE)
                return AVERROR_INVALIDDATA;
        }
    }

    /* convert to output buffer */
    switch (avctx->sample_fmt) {
    case AV_SAMPLE_FMT_U8P:
        for (chan = 0; chan < avctx->channels; chan++) {
            uint8_t *samples = (uint8_t *)frame->extended_data[chan];
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] = decoded[i] + 0x80;
        }
        break;
    case AV_SAMPLE_FMT_S16P:
        for (chan = 0; chan < avctx->channels; chan++) {
            int16_t *samples = (int16_t *)frame->extended_data[chan];
            int32_t *decoded = s->decoded[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] = decoded[i];
        }
        break;
    case AV_SAMPLE_FMT_S32P:
        /* 24-bit samples were decoded in place; scale up to full 32 bits */
        for (chan = 0; chan < avctx->channels; chan++) {
            int32_t *samples = (int32_t *)frame->extended_data[chan];
            for (i = 0; i < s->nb_samples; i++)
                samples[i] <<= 8;
        }
        break;
    }

    *got_frame_ptr = 1;

    return pkt->size;
}
  772. #if HAVE_THREADS
  773. static int init_thread_copy(AVCodecContext *avctx)
  774. {
  775. TAKDecContext *s = avctx->priv_data;
  776. s->avctx = avctx;
  777. return 0;
  778. }
  779. static int update_thread_context(AVCodecContext *dst,
  780. const AVCodecContext *src)
  781. {
  782. TAKDecContext *tsrc = src->priv_data;
  783. TAKDecContext *tdst = dst->priv_data;
  784. if (dst == src)
  785. return 0;
  786. memcpy(&tdst->ti, &tsrc->ti, sizeof(TAKStreamInfo));
  787. return 0;
  788. }
  789. #endif
/* Free the decoder's scratch buffer; everything else is owned by avctx. */
static av_cold int tak_decode_close(AVCodecContext *avctx)
{
    TAKDecContext *s = avctx->priv_data;

    av_freep(&s->decode_buffer);

    return 0;
}
/* Codec registration: TAK decoder with frame-threading support. */
AVCodec ff_tak_decoder = {
    .name             = "tak",
    .long_name        = NULL_IF_CONFIG_SMALL("TAK (Tom's lossless Audio Kompressor)"),
    .type             = AVMEDIA_TYPE_AUDIO,
    .id               = AV_CODEC_ID_TAK,
    .priv_data_size   = sizeof(TAKDecContext),
    .init             = tak_decode_init,
    .close            = tak_decode_close,
    .decode           = tak_decode_frame,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
    .capabilities     = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
    .sample_fmts      = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
                                                        AV_SAMPLE_FMT_S16P,
                                                        AV_SAMPLE_FMT_S32P,
                                                        AV_SAMPLE_FMT_NONE },
};
  812. };