/*
 * WavPack lossless audio decoder
 * Copyright (c) 2006,2011 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define BITSTREAM_READER_LE

#include "libavutil/channel_layout.h"

#include "avcodec.h"
#include "get_bits.h"
#include "internal.h"
#include "thread.h"
#include "unary.h"
#include "bytestream.h"
#include "wavpack.h"

/**
 * @file
 * WavPack lossless audio decoder
 */

typedef struct SavedContext {
    int offset;
    int size;
    int bits_used;
    uint32_t crc;
} SavedContext;
typedef struct WavpackFrameContext {
    AVCodecContext *avctx;
    int frame_flags;
    int stereo, stereo_in;
    int joint;
    uint32_t CRC;
    GetBitContext gb;
    int got_extra_bits;
    uint32_t crc_extra_bits;
    GetBitContext gb_extra_bits;
    int data_size; // in bits
    int samples;
    int terms;
    Decorr decorr[MAX_TERMS];
    int zero, one, zeroes;
    int extra_bits;
    int and, or, shift;
    int post_shift;
    int hybrid, hybrid_bitrate;
    int hybrid_maxclip, hybrid_minclip;
    int float_flag;
    int float_shift;
    int float_max_exp;
    WvChannel ch[2];
    int pos;
    SavedContext sc, extra_sc;
} WavpackFrameContext;

#define WV_MAX_FRAME_DECODERS 14

typedef struct WavpackContext {
    AVCodecContext *avctx;
    WavpackFrameContext *fdec[WV_MAX_FRAME_DECODERS];
    int fdec_num;
    int block;
    int samples;
    int ch_offset;
} WavpackContext;

#define LEVEL_DECAY(a) (((a) + 0x80) >> 8)
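
/*
 * Read the low-order part of a value known to lie in [0, k]: a truncated
 * binary code that uses floor(log2(k)) bits, plus one extra bit for codes
 * in the larger half of the range.
 */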
static av_always_inline int get_tail(GetBitContext *gb, int k)
{
    int p, e, res;

    if (k < 1)
        return 0;
    p   = av_log2(k);
    e   = (1 << (p + 1)) - k - 1;
    res = p ? get_bits(gb, p) : 0;
    if (res >= e)
        res = (res << 1) - e + get_bits1(gb);
    return res;
}
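
/*
 * Hybrid (lossy) mode only: advance the per-channel bitrate accumulators
 * and recompute the error limits that control how precisely wv_get_value()
 * resolves the next residual.
 */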
static void update_error_limit(WavpackFrameContext *ctx)
{
    int i, br[2], sl[2];

    for (i = 0; i <= ctx->stereo_in; i++) {
        ctx->ch[i].bitrate_acc += ctx->ch[i].bitrate_delta;
        br[i] = ctx->ch[i].bitrate_acc >> 16;
        sl[i] = LEVEL_DECAY(ctx->ch[i].slow_level);
    }
    if (ctx->stereo_in && ctx->hybrid_bitrate) {
        int balance = (sl[1] - sl[0] + br[1] + 1) >> 1;
        if (balance > br[0]) {
            br[1] = br[0] << 1;
            br[0] = 0;
        } else if (-balance > br[0]) {
            br[0] <<= 1;
            br[1]   = 0;
        } else {
            br[1] = br[0] + balance;
            br[0] = br[0] - balance;
        }
    }
    for (i = 0; i <= ctx->stereo_in; i++) {
        if (ctx->hybrid_bitrate) {
            if (sl[i] - br[i] > -0x100)
                ctx->ch[i].error_limit = wp_exp2(sl[i] - br[i] + 0x100);
            else
                ctx->ch[i].error_limit = 0;
        } else {
            ctx->ch[i].error_limit = wp_exp2(br[i]);
        }
    }
}
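
/*
 * Decode a single residual for the given channel: a unary-coded index
 * combined with three adaptive medians gives the coarse range, and a
 * truncated binary tail (or, in hybrid mode, a bisection bounded by the
 * error limit) refines it. Runs of zero samples use a dedicated escape.
 * On bitstream exhaustion *last is set and 0 is returned.
 */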
static int wv_get_value(WavpackFrameContext *ctx, GetBitContext *gb,
                        int channel, int *last)
{
    int t, t2;
    int sign, base, add, ret;
    WvChannel *c = &ctx->ch[channel];

    *last = 0;

    if ((ctx->ch[0].median[0] < 2U) && (ctx->ch[1].median[0] < 2U) &&
        !ctx->zero && !ctx->one) {
        if (ctx->zeroes) {
            ctx->zeroes--;
            if (ctx->zeroes) {
                c->slow_level -= LEVEL_DECAY(c->slow_level);
                return 0;
            }
        } else {
            t = get_unary_0_33(gb);
            if (t >= 2) {
                if (get_bits_left(gb) < t - 1)
                    goto error;
                t = get_bits_long(gb, t - 1) | (1 << (t - 1));
            } else {
                if (get_bits_left(gb) < 0)
                    goto error;
            }
            ctx->zeroes = t;
            if (ctx->zeroes) {
                memset(ctx->ch[0].median, 0, sizeof(ctx->ch[0].median));
                memset(ctx->ch[1].median, 0, sizeof(ctx->ch[1].median));
                c->slow_level -= LEVEL_DECAY(c->slow_level);
                return 0;
            }
        }
    }

    if (ctx->zero) {
        t = 0;
        ctx->zero = 0;
    } else {
        t = get_unary_0_33(gb);
        if (get_bits_left(gb) < 0)
            goto error;
        if (t == 16) {
            t2 = get_unary_0_33(gb);
            if (t2 < 2) {
                if (get_bits_left(gb) < 0)
                    goto error;
                t += t2;
            } else {
                if (get_bits_left(gb) < t2 - 1)
                    goto error;
                t += get_bits_long(gb, t2 - 1) | (1 << (t2 - 1));
            }
        }

        if (ctx->one) {
            ctx->one = t & 1;
            t = (t >> 1) + 1;
        } else {
            ctx->one = t & 1;
            t >>= 1;
        }
        ctx->zero = !ctx->one;
    }

    if (ctx->hybrid && !channel)
        update_error_limit(ctx);

    if (!t) {
        base = 0;
        add  = GET_MED(0) - 1;
        DEC_MED(0);
    } else if (t == 1) {
        base = GET_MED(0);
        add  = GET_MED(1) - 1;
        INC_MED(0);
        DEC_MED(1);
    } else if (t == 2) {
        base = GET_MED(0) + GET_MED(1);
        add  = GET_MED(2) - 1;
        INC_MED(0);
        INC_MED(1);
        DEC_MED(2);
    } else {
        base = GET_MED(0) + GET_MED(1) + GET_MED(2) * (t - 2);
        add  = GET_MED(2) - 1;
        INC_MED(0);
        INC_MED(1);
        INC_MED(2);
    }

    if (!c->error_limit) {
        if (add >= 0x2000000U) {
            av_log(ctx->avctx, AV_LOG_ERROR, "k %d is too large\n", add);
            goto error;
        }
        ret = base + get_tail(gb, add);
        if (get_bits_left(gb) <= 0)
            goto error;
    } else {
        int mid = (base * 2 + add + 1) >> 1;
        while (add > c->error_limit) {
            if (get_bits_left(gb) <= 0)
                goto error;
            if (get_bits1(gb)) {
                add -= (mid - base);
                base = mid;
            } else
                add = mid - base - 1;
            mid = (base * 2 + add + 1) >> 1;
        }
        ret = mid;
    }
    sign = get_bits1(gb);
    if (ctx->hybrid_bitrate)
        c->slow_level += wp_log2(ret) - LEVEL_DECAY(c->slow_level);
    return sign ? ~ret : ret;

error:
    ret = get_bits_left(gb);
    if (ret <= 0) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Too few bits (%d) left\n", ret);
    }
    *last = 1;
    return 0;
}
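
/*
 * Turn a decoded residual into the final integer sample: merge in stored
 * low-order extra bits, undo the and/or/shift zero-bit packing, clip in
 * hybrid mode and apply the post-decoding shift.
 */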
static inline int wv_get_value_integer(WavpackFrameContext *s, uint32_t *crc,
                                       int S)
{
    int bit;

    if (s->extra_bits) {
        S <<= s->extra_bits;

        if (s->got_extra_bits &&
            get_bits_left(&s->gb_extra_bits) >= s->extra_bits) {
            S   |= get_bits_long(&s->gb_extra_bits, s->extra_bits);
            *crc = *crc * 9 + (S & 0xffff) * 3 + ((unsigned)S >> 16);
        }
    }

    bit = (S & s->and) | s->or;
    bit = ((S + bit) << s->shift) - bit;

    if (s->hybrid)
        bit = av_clip(bit, s->hybrid_minclip, s->hybrid_maxclip);

    return bit << s->post_shift;
}
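
/*
 * Rebuild an IEEE 754 single-precision sample from the decoded integer and
 * the side information in the EXTRABITS stream (shifted-out mantissa bits,
 * exponents of out-of-range values, signs of zeros).
 */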
static float wv_get_value_float(WavpackFrameContext *s, uint32_t *crc, int S)
{
    union {
        float    f;
        uint32_t u;
    } value;

    unsigned int sign;
    int exp = s->float_max_exp;

    if (s->got_extra_bits) {
        const int max_bits  = 1 + 23 + 8 + 1;
        const int left_bits = get_bits_left(&s->gb_extra_bits);

        if (left_bits + 8 * AV_INPUT_BUFFER_PADDING_SIZE < max_bits)
            return 0.0;
    }

    if (S) {
        S  <<= s->float_shift;
        sign = S < 0;
        if (sign)
            S = -S;
        if (S >= 0x1000000) {
            if (s->got_extra_bits && get_bits1(&s->gb_extra_bits))
                S = get_bits(&s->gb_extra_bits, 23);
            else
                S = 0;
            exp = 255;
        } else if (exp) {
            int shift = 23 - av_log2(S);
            exp = s->float_max_exp;

            if (exp <= shift)
                shift = --exp;
            exp -= shift;

            if (shift) {
                S <<= shift;
                if ((s->float_flag & WV_FLT_SHIFT_ONES) ||
                    (s->got_extra_bits &&
                     (s->float_flag & WV_FLT_SHIFT_SAME) &&
                     get_bits1(&s->gb_extra_bits))) {
                    S |= (1 << shift) - 1;
                } else if (s->got_extra_bits &&
                           (s->float_flag & WV_FLT_SHIFT_SENT)) {
                    S |= get_bits(&s->gb_extra_bits, shift);
                }
            }
        } else {
            exp = s->float_max_exp;
        }
        S &= 0x7fffff;
    } else {
        sign = 0;
        exp  = 0;
        if (s->got_extra_bits && (s->float_flag & WV_FLT_ZERO_SENT)) {
            if (get_bits1(&s->gb_extra_bits)) {
                S = get_bits(&s->gb_extra_bits, 23);
                if (s->float_max_exp >= 25)
                    exp = get_bits(&s->gb_extra_bits, 8);
                sign = get_bits1(&s->gb_extra_bits);
            } else {
                if (s->float_flag & WV_FLT_ZERO_SIGN)
                    sign = get_bits1(&s->gb_extra_bits);
            }
        }
    }

    *crc = *crc * 27 + S * 9 + exp * 3 + sign;

    value.u = (sign << 31) | (exp << 23) | S;
    return value.f;
}

static void wv_reset_saved_context(WavpackFrameContext *s)
{
    s->pos    = 0;
    s->sc.crc = s->extra_sc.crc = 0xFFFFFFFF;
}

static inline int wv_check_crc(WavpackFrameContext *s, uint32_t crc,
                               uint32_t crc_extra_bits)
{
    if (crc != s->CRC) {
        av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
        return AVERROR_INVALIDDATA;
    }
    if (s->got_extra_bits && crc_extra_bits != s->crc_extra_bits) {
        av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n");
        return AVERROR_INVALIDDATA;
    }
    return 0;
}
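
/*
 * Decode one stereo block: read residual pairs, pass them through the
 * cascade of decorrelation filters, undo joint stereo, update the running
 * CRC and store the samples in the requested output format.
 */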
static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb,
                                   void *dst_l, void *dst_r, const int type)
{
    int i, j, count = 0;
    int last, t;
    int A, B, L, L2, R, R2;
    int pos = s->pos;
    uint32_t crc = s->sc.crc;
    uint32_t crc_extra_bits = s->extra_sc.crc;
    int16_t *dst16_l = dst_l;
    int16_t *dst16_r = dst_r;
    int32_t *dst32_l = dst_l;
    int32_t *dst32_r = dst_r;
    float *dstfl_l = dst_l;
    float *dstfl_r = dst_r;

    s->one = s->zero = s->zeroes = 0;
    do {
        L = wv_get_value(s, gb, 0, &last);
        if (last)
            break;
        R = wv_get_value(s, gb, 1, &last);
        if (last)
            break;
        for (i = 0; i < s->terms; i++) {
            t = s->decorr[i].value;
            if (t > 0) {
                if (t > 8) {
                    if (t & 1) {
                        A = 2 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1];
                        B = 2 * s->decorr[i].samplesB[0] - s->decorr[i].samplesB[1];
                    } else {
                        A = (3 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]) >> 1;
                        B = (3 * s->decorr[i].samplesB[0] - s->decorr[i].samplesB[1]) >> 1;
                    }
                    s->decorr[i].samplesA[1] = s->decorr[i].samplesA[0];
                    s->decorr[i].samplesB[1] = s->decorr[i].samplesB[0];
                    j = 0;
                } else {
                    A = s->decorr[i].samplesA[pos];
                    B = s->decorr[i].samplesB[pos];
                    j = (pos + t) & 7;
                }
                if (type != AV_SAMPLE_FMT_S16P) {
                    L2 = L + ((s->decorr[i].weightA * (int64_t)A + 512) >> 10);
                    R2 = R + ((s->decorr[i].weightB * (int64_t)B + 512) >> 10);
                } else {
                    L2 = L + ((s->decorr[i].weightA * A + 512) >> 10);
                    R2 = R + ((s->decorr[i].weightB * B + 512) >> 10);
                }
                if (A && L)
                    s->decorr[i].weightA -= ((((L ^ A) >> 30) & 2) - 1) * s->decorr[i].delta;
                if (B && R)
                    s->decorr[i].weightB -= ((((R ^ B) >> 30) & 2) - 1) * s->decorr[i].delta;
                s->decorr[i].samplesA[j] = L = L2;
                s->decorr[i].samplesB[j] = R = R2;
            } else if (t == -1) {
                if (type != AV_SAMPLE_FMT_S16P)
                    L2 = L + ((s->decorr[i].weightA * (int64_t)s->decorr[i].samplesA[0] + 512) >> 10);
                else
                    L2 = L + ((s->decorr[i].weightA * s->decorr[i].samplesA[0] + 512) >> 10);
                UPDATE_WEIGHT_CLIP(s->decorr[i].weightA, s->decorr[i].delta, s->decorr[i].samplesA[0], L);
                L = L2;
                if (type != AV_SAMPLE_FMT_S16P)
                    R2 = R + ((s->decorr[i].weightB * (int64_t)L2 + 512) >> 10);
                else
                    R2 = R + ((s->decorr[i].weightB * L2 + 512) >> 10);
                UPDATE_WEIGHT_CLIP(s->decorr[i].weightB, s->decorr[i].delta, L2, R);
                R = R2;
                s->decorr[i].samplesA[0] = R;
            } else {
                if (type != AV_SAMPLE_FMT_S16P)
                    R2 = R + ((s->decorr[i].weightB * (int64_t)s->decorr[i].samplesB[0] + 512) >> 10);
                else
                    R2 = R + ((s->decorr[i].weightB * s->decorr[i].samplesB[0] + 512) >> 10);
                UPDATE_WEIGHT_CLIP(s->decorr[i].weightB, s->decorr[i].delta, s->decorr[i].samplesB[0], R);
                R = R2;

                if (t == -3) {
                    R2 = s->decorr[i].samplesA[0];
                    s->decorr[i].samplesA[0] = R;
                }

                if (type != AV_SAMPLE_FMT_S16P)
                    L2 = L + ((s->decorr[i].weightA * (int64_t)R2 + 512) >> 10);
                else
                    L2 = L + ((s->decorr[i].weightA * R2 + 512) >> 10);
                UPDATE_WEIGHT_CLIP(s->decorr[i].weightA, s->decorr[i].delta, R2, L);
                L = L2;
                s->decorr[i].samplesB[0] = L;
            }
        }

        if (type == AV_SAMPLE_FMT_S16P) {
            if (FFABS(L) + FFABS(R) > (1 << 19)) {
                av_log(s->avctx, AV_LOG_ERROR, "sample %d %d too large\n", L, R);
                return AVERROR_INVALIDDATA;
            }
        }

        pos = (pos + 1) & 7;
        if (s->joint)
            L += (R -= (L >> 1));
        crc = (crc * 3 + L) * 3 + R;

        if (type == AV_SAMPLE_FMT_FLTP) {
            *dstfl_l++ = wv_get_value_float(s, &crc_extra_bits, L);
            *dstfl_r++ = wv_get_value_float(s, &crc_extra_bits, R);
        } else if (type == AV_SAMPLE_FMT_S32P) {
            *dst32_l++ = wv_get_value_integer(s, &crc_extra_bits, L);
            *dst32_r++ = wv_get_value_integer(s, &crc_extra_bits, R);
        } else {
            *dst16_l++ = wv_get_value_integer(s, &crc_extra_bits, L);
            *dst16_r++ = wv_get_value_integer(s, &crc_extra_bits, R);
        }
        count++;
    } while (!last && count < s->samples);

    wv_reset_saved_context(s);

    if (last && count < s->samples) {
        int size = av_get_bytes_per_sample(type);
        memset((uint8_t *)dst_l + count * size, 0, (s->samples - count) * size);
        memset((uint8_t *)dst_r + count * size, 0, (s->samples - count) * size);
    }

    if ((s->avctx->err_recognition & AV_EF_CRCCHECK) &&
        wv_check_crc(s, crc, crc_extra_bits))
        return AVERROR_INVALIDDATA;

    return 0;
}
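
/*
 * Mono counterpart of wv_unpack_stereo(): one residual per sample, a single
 * decorrelation history and no joint-stereo step.
 */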
static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb,
                                 void *dst, const int type)
{
    int i, j, count = 0;
    int last, t;
    int A, S, T;
    int pos = s->pos;
    uint32_t crc = s->sc.crc;
    uint32_t crc_extra_bits = s->extra_sc.crc;
    int16_t *dst16 = dst;
    int32_t *dst32 = dst;
    float *dstfl = dst;

    s->one = s->zero = s->zeroes = 0;
    do {
        T = wv_get_value(s, gb, 0, &last);
        S = 0;
        if (last)
            break;
        for (i = 0; i < s->terms; i++) {
            t = s->decorr[i].value;
            if (t > 8) {
                if (t & 1)
                    A = 2 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1];
                else
                    A = (3 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]) >> 1;
                s->decorr[i].samplesA[1] = s->decorr[i].samplesA[0];
                j = 0;
            } else {
                A = s->decorr[i].samplesA[pos];
                j = (pos + t) & 7;
            }
            if (type != AV_SAMPLE_FMT_S16P)
                S = T + ((s->decorr[i].weightA * (int64_t)A + 512) >> 10);
            else
                S = T + ((s->decorr[i].weightA * A + 512) >> 10);
            if (A && T)
                s->decorr[i].weightA -= ((((T ^ A) >> 30) & 2) - 1) * s->decorr[i].delta;
            s->decorr[i].samplesA[j] = T = S;
        }
        pos = (pos + 1) & 7;
        crc = crc * 3 + S;

        if (type == AV_SAMPLE_FMT_FLTP) {
            *dstfl++ = wv_get_value_float(s, &crc_extra_bits, S);
        } else if (type == AV_SAMPLE_FMT_S32P) {
            *dst32++ = wv_get_value_integer(s, &crc_extra_bits, S);
        } else {
            *dst16++ = wv_get_value_integer(s, &crc_extra_bits, S);
        }
        count++;
    } while (!last && count < s->samples);

    wv_reset_saved_context(s);

    if (last && count < s->samples) {
        int size = av_get_bytes_per_sample(type);
        memset((uint8_t *)dst + count * size, 0, (s->samples - count) * size);
    }

    if (s->avctx->err_recognition & AV_EF_CRCCHECK) {
        int ret = wv_check_crc(s, crc, crc_extra_bits);
        if (ret < 0 && s->avctx->err_recognition & AV_EF_EXPLODE)
            return ret;
    }

    return 0;
}

static av_cold int wv_alloc_frame_context(WavpackContext *c)
{
    if (c->fdec_num == WV_MAX_FRAME_DECODERS)
        return -1;
    c->fdec[c->fdec_num] = av_mallocz(sizeof(**c->fdec));
    if (!c->fdec[c->fdec_num])
        return -1;
    c->fdec_num++;
    c->fdec[c->fdec_num - 1]->avctx = c->avctx;
    wv_reset_saved_context(c->fdec[c->fdec_num - 1]);

    return 0;
}

static int init_thread_copy(AVCodecContext *avctx)
{
    WavpackContext *s = avctx->priv_data;
    s->avctx = avctx;
    return 0;
}

static av_cold int wavpack_decode_init(AVCodecContext *avctx)
{
    WavpackContext *s = avctx->priv_data;

    s->avctx    = avctx;
    s->fdec_num = 0;

    return 0;
}

static av_cold int wavpack_decode_end(AVCodecContext *avctx)
{
    WavpackContext *s = avctx->priv_data;
    int i;

    for (i = 0; i < s->fdec_num; i++)
        av_freep(&s->fdec[i]);
    s->fdec_num = 0;

    return 0;
}
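
/*
 * Decode a single WavPack block (one or two channels of the packet). The
 * block header is followed by tagged metadata sub-blocks (decorrelation
 * terms, weights and history samples, entropy variables, hybrid parameters,
 * the packed sample data itself), which are parsed before the samples are
 * unpacked into the output frame.
 */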
static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
                                AVFrame *frame, const uint8_t *buf, int buf_size)
{
    WavpackContext *wc = avctx->priv_data;
    ThreadFrame tframe = { .f = frame };
    WavpackFrameContext *s;
    GetByteContext gb;
    void *samples_l = NULL, *samples_r = NULL;
    int ret;
    int got_terms = 0, got_weights = 0, got_samples = 0,
        got_entropy = 0, got_bs = 0, got_float = 0, got_hybrid = 0;
    int i, j, id, size, ssize, weights, t;
    int bpp, chan = 0, chmask = 0, orig_bpp, sample_rate = 0;
    int multiblock;

    if (block_no >= wc->fdec_num && wv_alloc_frame_context(wc) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error creating frame decode context\n");
        return AVERROR_INVALIDDATA;
    }

    s = wc->fdec[block_no];
    if (!s) {
        av_log(avctx, AV_LOG_ERROR, "Context for block %d is not present\n",
               block_no);
        return AVERROR_INVALIDDATA;
    }

    memset(s->decorr, 0, MAX_TERMS * sizeof(Decorr));
    memset(s->ch, 0, sizeof(s->ch));
    s->extra_bits     = 0;
    s->and            = s->or = s->shift = 0;
    s->got_extra_bits = 0;

    bytestream2_init(&gb, buf, buf_size);

    s->samples = bytestream2_get_le32(&gb);
    if (s->samples != wc->samples) {
        av_log(avctx, AV_LOG_ERROR, "Mismatching number of samples in "
               "a sequence: %d and %d\n", wc->samples, s->samples);
        return AVERROR_INVALIDDATA;
    }
    s->frame_flags = bytestream2_get_le32(&gb);
    bpp            = av_get_bytes_per_sample(avctx->sample_fmt);
    orig_bpp       = ((s->frame_flags & 0x03) + 1) << 3;
    multiblock     = (s->frame_flags & WV_SINGLE_BLOCK) != WV_SINGLE_BLOCK;

    s->stereo         = !(s->frame_flags & WV_MONO);
    s->stereo_in      = (s->frame_flags & WV_FALSE_STEREO) ? 0 : s->stereo;
    s->joint          = s->frame_flags & WV_JOINT_STEREO;
    s->hybrid         = s->frame_flags & WV_HYBRID_MODE;
    s->hybrid_bitrate = s->frame_flags & WV_HYBRID_BITRATE;
    s->post_shift     = bpp * 8 - orig_bpp + ((s->frame_flags >> 13) & 0x1f);
    s->hybrid_maxclip = ((1LL << (orig_bpp - 1)) - 1);
    s->hybrid_minclip = ((-1LL << (orig_bpp - 1)));
    s->CRC            = bytestream2_get_le32(&gb);

    // parse metadata blocks
    while (bytestream2_get_bytes_left(&gb)) {
        id   = bytestream2_get_byte(&gb);
        size = bytestream2_get_byte(&gb);
        if (id & WP_IDF_LONG) {
            size |= (bytestream2_get_byte(&gb)) << 8;
            size |= (bytestream2_get_byte(&gb)) << 16;
        }
        size <<= 1; // size is specified in words
        ssize  = size;
        if (id & WP_IDF_ODD)
            size--;
        if (size < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Got incorrect block %02X with size %i\n", id, size);
            break;
        }
        if (bytestream2_get_bytes_left(&gb) < ssize) {
            av_log(avctx, AV_LOG_ERROR,
                   "Block size %i is out of bounds\n", size);
            break;
        }
        switch (id & WP_IDF_MASK) {
        case WP_ID_DECTERMS:
            if (size > MAX_TERMS) {
                av_log(avctx, AV_LOG_ERROR, "Too many decorrelation terms\n");
                s->terms = 0;
                bytestream2_skip(&gb, ssize);
                continue;
            }
            s->terms = size;
            for (i = 0; i < s->terms; i++) {
                uint8_t val = bytestream2_get_byte(&gb);
                s->decorr[s->terms - i - 1].value = (val & 0x1F) - 5;
                s->decorr[s->terms - i - 1].delta = val >> 5;
            }
            got_terms = 1;
            break;
        case WP_ID_DECWEIGHTS:
            if (!got_terms) {
                av_log(avctx, AV_LOG_ERROR, "No decorrelation terms met\n");
                continue;
            }
            weights = size >> s->stereo_in;
            if (weights > MAX_TERMS || weights > s->terms) {
                av_log(avctx, AV_LOG_ERROR, "Too many decorrelation weights\n");
                bytestream2_skip(&gb, ssize);
                continue;
            }
            for (i = 0; i < weights; i++) {
                t = (int8_t)bytestream2_get_byte(&gb);
                s->decorr[s->terms - i - 1].weightA = t << 3;
                if (s->decorr[s->terms - i - 1].weightA > 0)
                    s->decorr[s->terms - i - 1].weightA +=
                        (s->decorr[s->terms - i - 1].weightA + 64) >> 7;
                if (s->stereo_in) {
                    t = (int8_t)bytestream2_get_byte(&gb);
                    s->decorr[s->terms - i - 1].weightB = t << 3;
                    if (s->decorr[s->terms - i - 1].weightB > 0)
                        s->decorr[s->terms - i - 1].weightB +=
                            (s->decorr[s->terms - i - 1].weightB + 64) >> 7;
                }
            }
            got_weights = 1;
            break;
        case WP_ID_DECSAMPLES:
            if (!got_terms) {
                av_log(avctx, AV_LOG_ERROR, "No decorrelation terms met\n");
                continue;
            }
            t = 0;
            for (i = s->terms - 1; (i >= 0) && (t < size); i--) {
                if (s->decorr[i].value > 8) {
                    s->decorr[i].samplesA[0] =
                        wp_exp2(bytestream2_get_le16(&gb));
                    s->decorr[i].samplesA[1] =
                        wp_exp2(bytestream2_get_le16(&gb));

                    if (s->stereo_in) {
                        s->decorr[i].samplesB[0] =
                            wp_exp2(bytestream2_get_le16(&gb));
                        s->decorr[i].samplesB[1] =
                            wp_exp2(bytestream2_get_le16(&gb));
                        t += 4;
                    }
                    t += 4;
                } else if (s->decorr[i].value < 0) {
                    s->decorr[i].samplesA[0] =
                        wp_exp2(bytestream2_get_le16(&gb));
                    s->decorr[i].samplesB[0] =
                        wp_exp2(bytestream2_get_le16(&gb));
                    t += 4;
                } else {
                    for (j = 0; j < s->decorr[i].value; j++) {
                        s->decorr[i].samplesA[j] =
                            wp_exp2(bytestream2_get_le16(&gb));
                        if (s->stereo_in) {
                            s->decorr[i].samplesB[j] =
                                wp_exp2(bytestream2_get_le16(&gb));
                        }
                    }
                    t += s->decorr[i].value * 2 * (s->stereo_in + 1);
                }
            }
            got_samples = 1;
            break;
        case WP_ID_ENTROPY:
            if (size != 6 * (s->stereo_in + 1)) {
                av_log(avctx, AV_LOG_ERROR,
                       "Entropy vars size should be %i, got %i.\n",
                       6 * (s->stereo_in + 1), size);
                bytestream2_skip(&gb, ssize);
                continue;
            }
            for (j = 0; j <= s->stereo_in; j++)
                for (i = 0; i < 3; i++) {
                    s->ch[j].median[i] = wp_exp2(bytestream2_get_le16(&gb));
                }
            got_entropy = 1;
            break;
        case WP_ID_HYBRID:
            if (s->hybrid_bitrate) {
                for (i = 0; i <= s->stereo_in; i++) {
                    s->ch[i].slow_level = wp_exp2(bytestream2_get_le16(&gb));
                    size -= 2;
                }
            }
            for (i = 0; i < (s->stereo_in + 1); i++) {
                s->ch[i].bitrate_acc = bytestream2_get_le16(&gb) << 16;
                size -= 2;
            }
            if (size > 0) {
                for (i = 0; i < (s->stereo_in + 1); i++) {
                    s->ch[i].bitrate_delta =
                        wp_exp2((int16_t)bytestream2_get_le16(&gb));
                }
            } else {
                for (i = 0; i < (s->stereo_in + 1); i++)
                    s->ch[i].bitrate_delta = 0;
            }
            got_hybrid = 1;
            break;
        case WP_ID_INT32INFO: {
            uint8_t val[4];
            if (size != 4) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid INT32INFO, size = %i\n",
                       size);
                bytestream2_skip(&gb, ssize - 4);
                continue;
            }
            bytestream2_get_buffer(&gb, val, 4);
            if (val[0] > 32) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid INT32INFO, extra_bits = %d (> 32)\n", val[0]);
                continue;
            } else if (val[0]) {
                s->extra_bits = val[0];
            } else if (val[1]) {
                s->shift = val[1];
            } else if (val[2]) {
                s->and   = s->or = 1;
                s->shift = val[2];
            } else if (val[3]) {
                s->and   = 1;
                s->shift = val[3];
            }
            /* original WavPack decoder forces 32-bit lossy sound to be treated
             * as 24-bit one in order to have proper clipping */
            if (s->hybrid && bpp == 4 && s->post_shift < 8 && s->shift > 8) {
                s->post_shift      += 8;
                s->shift           -= 8;
                s->hybrid_maxclip >>= 8;
                s->hybrid_minclip >>= 8;
            }
            break;
        }
        case WP_ID_FLOATINFO:
            if (size != 4) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid FLOATINFO, size = %i\n", size);
                bytestream2_skip(&gb, ssize);
                continue;
            }
            s->float_flag    = bytestream2_get_byte(&gb);
            s->float_shift   = bytestream2_get_byte(&gb);
            s->float_max_exp = bytestream2_get_byte(&gb);
            got_float        = 1;
            bytestream2_skip(&gb, 1);
            break;
        case WP_ID_DATA:
            s->sc.offset = bytestream2_tell(&gb);
            s->sc.size   = size * 8;
            if ((ret = init_get_bits8(&s->gb, gb.buffer, size)) < 0)
                return ret;
            s->data_size = size * 8;
            bytestream2_skip(&gb, size);
            got_bs       = 1;
            break;
        case WP_ID_EXTRABITS:
            if (size <= 4) {
                av_log(avctx, AV_LOG_ERROR, "Invalid EXTRABITS, size = %i\n",
                       size);
                bytestream2_skip(&gb, size);
                continue;
            }
            s->extra_sc.offset = bytestream2_tell(&gb);
            s->extra_sc.size   = size * 8;
            if ((ret = init_get_bits8(&s->gb_extra_bits, gb.buffer, size)) < 0)
                return ret;
            s->crc_extra_bits  = get_bits_long(&s->gb_extra_bits, 32);
            bytestream2_skip(&gb, size);
            s->got_extra_bits  = 1;
            break;
        case WP_ID_CHANINFO:
            if (size <= 1) {
                av_log(avctx, AV_LOG_ERROR,
                       "Insufficient channel information\n");
                return AVERROR_INVALIDDATA;
            }
            chan = bytestream2_get_byte(&gb);
            switch (size - 2) {
            case 0:
                chmask = bytestream2_get_byte(&gb);
                break;
            case 1:
                chmask = bytestream2_get_le16(&gb);
                break;
            case 2:
                chmask = bytestream2_get_le24(&gb);
                break;
            case 3:
                chmask = bytestream2_get_le32(&gb);
                break;
            case 5:
                size = bytestream2_get_byte(&gb);
                if (avctx->channels != size)
                    av_log(avctx, AV_LOG_WARNING, "%i channels signalled"
                           " instead of %i.\n", size, avctx->channels);
                chan  |= (bytestream2_get_byte(&gb) & 0xF) << 8;
                chmask = bytestream2_get_le16(&gb);
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "Invalid channel info size %d\n",
                       size);
                chan   = avctx->channels;
                chmask = avctx->channel_layout;
            }
            break;
        case WP_ID_SAMPLE_RATE:
            if (size != 3) {
                av_log(avctx, AV_LOG_ERROR, "Invalid custom sample rate.\n");
                return AVERROR_INVALIDDATA;
            }
            sample_rate = bytestream2_get_le24(&gb);
            break;
        default:
            bytestream2_skip(&gb, size);
        }
        if (id & WP_IDF_ODD)
            bytestream2_skip(&gb, 1);
    }

    if (!got_terms) {
        av_log(avctx, AV_LOG_ERROR, "No block with decorrelation terms\n");
        return AVERROR_INVALIDDATA;
    }
    if (!got_weights) {
        av_log(avctx, AV_LOG_ERROR, "No block with decorrelation weights\n");
        return AVERROR_INVALIDDATA;
    }
    if (!got_samples) {
        av_log(avctx, AV_LOG_ERROR, "No block with decorrelation samples\n");
        return AVERROR_INVALIDDATA;
    }
    if (!got_entropy) {
        av_log(avctx, AV_LOG_ERROR, "No block with entropy info\n");
        return AVERROR_INVALIDDATA;
    }
    if (s->hybrid && !got_hybrid) {
        av_log(avctx, AV_LOG_ERROR, "Hybrid config not found\n");
        return AVERROR_INVALIDDATA;
    }
    if (!got_bs) {
        av_log(avctx, AV_LOG_ERROR, "Packed samples not found\n");
        return AVERROR_INVALIDDATA;
    }
    if (!got_float && avctx->sample_fmt == AV_SAMPLE_FMT_FLTP) {
        av_log(avctx, AV_LOG_ERROR, "Float information not found\n");
        return AVERROR_INVALIDDATA;
    }
    if (s->got_extra_bits && avctx->sample_fmt != AV_SAMPLE_FMT_FLTP) {
        const int size   = get_bits_left(&s->gb_extra_bits);
        const int wanted = s->samples * s->extra_bits << s->stereo_in;
        if (size < wanted) {
            av_log(avctx, AV_LOG_ERROR, "Too small EXTRABITS\n");
            s->got_extra_bits = 0;
        }
    }

    if (!wc->ch_offset) {
        int sr = (s->frame_flags >> 23) & 0xf;
        if (sr == 0xf) {
            if (!sample_rate) {
                av_log(avctx, AV_LOG_ERROR, "Custom sample rate missing.\n");
                return AVERROR_INVALIDDATA;
            }
            avctx->sample_rate = sample_rate;
        } else
            avctx->sample_rate = wv_rates[sr];

        if (multiblock) {
            if (chan)
                avctx->channels = chan;
            if (chmask)
                avctx->channel_layout = chmask;
        } else {
            avctx->channels       = s->stereo ? 2 : 1;
            avctx->channel_layout = s->stereo ? AV_CH_LAYOUT_STEREO :
                                                AV_CH_LAYOUT_MONO;
        }

        /* get output buffer */
        frame->nb_samples = s->samples + 1;
        if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
            return ret;
        frame->nb_samples = s->samples;
    }

    if (wc->ch_offset + s->stereo >= avctx->channels) {
        av_log(avctx, AV_LOG_WARNING, "Too many channels coded in a packet.\n");
        return (avctx->err_recognition & AV_EF_EXPLODE) ? AVERROR_INVALIDDATA : 0;
    }

    samples_l = frame->extended_data[wc->ch_offset];
    if (s->stereo)
        samples_r = frame->extended_data[wc->ch_offset + 1];

    wc->ch_offset += 1 + s->stereo;

    if (s->stereo_in) {
        ret = wv_unpack_stereo(s, &s->gb, samples_l, samples_r, avctx->sample_fmt);
        if (ret < 0)
            return ret;
    } else {
        ret = wv_unpack_mono(s, &s->gb, samples_l, avctx->sample_fmt);
        if (ret < 0)
            return ret;

        if (s->stereo)
            memcpy(samples_r, samples_l, bpp * s->samples);
    }

    return 0;
}
static void wavpack_decode_flush(AVCodecContext *avctx)
{
    WavpackContext *s = avctx->priv_data;
    int i;

    for (i = 0; i < s->fdec_num; i++)
        wv_reset_saved_context(s->fdec[i]);
}
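
/*
 * Decode one packet. Multichannel streams store at most two channels per
 * block, so a packet may contain several consecutive blocks; each one is
 * passed to wavpack_decode_block() and fills its channels of the output
 * frame.
 */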
static int wavpack_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
{
    WavpackContext *s  = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    AVFrame *frame     = data;
    int frame_size, ret, frame_flags;

    if (avpkt->size <= WV_HEADER_SIZE)
        return AVERROR_INVALIDDATA;

    s->block     = 0;
    s->ch_offset = 0;

    /* determine number of samples */
    s->samples  = AV_RL32(buf + 20);
    frame_flags = AV_RL32(buf + 24);
    if (s->samples <= 0 || s->samples > WV_MAX_SAMPLES) {
        av_log(avctx, AV_LOG_ERROR, "Invalid number of samples: %d\n",
               s->samples);
        return AVERROR_INVALIDDATA;
    }

    if (frame_flags & 0x80) {
        avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
    } else if ((frame_flags & 0x03) <= 1) {
        avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
    } else {
        avctx->sample_fmt          = AV_SAMPLE_FMT_S32P;
        avctx->bits_per_raw_sample = ((frame_flags & 0x03) + 1) << 3;
    }

    while (buf_size > 0) {
        if (buf_size <= WV_HEADER_SIZE)
            break;
        frame_size = AV_RL32(buf + 4) - 12;
        buf       += 20;
        buf_size  -= 20;
        if (frame_size <= 0 || frame_size > buf_size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Block %d has invalid size (size %d vs. %d bytes left)\n",
                   s->block, frame_size, buf_size);
            wavpack_decode_flush(avctx);
            return AVERROR_INVALIDDATA;
        }
        if ((ret = wavpack_decode_block(avctx, s->block,
                                        frame, buf, frame_size)) < 0) {
            wavpack_decode_flush(avctx);
            return ret;
        }
        s->block++;
        buf      += frame_size;
        buf_size -= frame_size;
    }

    if (s->ch_offset != avctx->channels) {
        av_log(avctx, AV_LOG_ERROR, "Not enough channels coded in a packet.\n");
        return AVERROR_INVALIDDATA;
    }

    *got_frame_ptr = 1;

    return avpkt->size;
}

AVCodec ff_wavpack_decoder = {
    .name             = "wavpack",
    .long_name        = NULL_IF_CONFIG_SMALL("WavPack"),
    .type             = AVMEDIA_TYPE_AUDIO,
    .id               = AV_CODEC_ID_WAVPACK,
    .priv_data_size   = sizeof(WavpackContext),
    .init             = wavpack_decode_init,
    .close            = wavpack_decode_end,
    .decode           = wavpack_decode_frame,
    .flush            = wavpack_decode_flush,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
    .capabilities     = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};