  1. /*
  2. * MPEG-4 ALS decoder
  3. * Copyright (c) 2009 Thilo Borgmann <thilo.borgmann _at_ mail.de>
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * MPEG-4 ALS decoder
  24. * @author Thilo Borgmann <thilo.borgmann _at_ mail.de>
  25. */
  26. #include <inttypes.h>
  27. #include "avcodec.h"
  28. #include "get_bits.h"
  29. #include "unary.h"
  30. #include "mpeg4audio.h"
  31. #include "bgmc.h"
  32. #include "bswapdsp.h"
  33. #include "internal.h"
  34. #include "mlz.h"
  35. #include "libavutil/samplefmt.h"
  36. #include "libavutil/crc.h"
  37. #include "libavutil/softfloat_ieee754.h"
  38. #include "libavutil/intfloat.h"
  39. #include "libavutil/intreadwrite.h"
  40. #include <stdint.h>
  41. /** Rice parameters and corresponding index offsets for decoding the
  42. * indices of scaled PARCOR values. The table chosen is set globally
  43. * by the encoder and stored in ALSSpecificConfig.
  44. */
  45. static const int8_t parcor_rice_table[3][20][2] = {
  46. { {-52, 4}, {-29, 5}, {-31, 4}, { 19, 4}, {-16, 4},
  47. { 12, 3}, { -7, 3}, { 9, 3}, { -5, 3}, { 6, 3},
  48. { -4, 3}, { 3, 3}, { -3, 2}, { 3, 2}, { -2, 2},
  49. { 3, 2}, { -1, 2}, { 2, 2}, { -1, 2}, { 2, 2} },
  50. { {-58, 3}, {-42, 4}, {-46, 4}, { 37, 5}, {-36, 4},
  51. { 29, 4}, {-29, 4}, { 25, 4}, {-23, 4}, { 20, 4},
  52. {-17, 4}, { 16, 4}, {-12, 4}, { 12, 3}, {-10, 4},
  53. { 7, 3}, { -4, 4}, { 3, 3}, { -1, 3}, { 1, 3} },
  54. { {-59, 3}, {-45, 5}, {-50, 4}, { 38, 4}, {-39, 4},
  55. { 32, 4}, {-30, 4}, { 25, 3}, {-23, 3}, { 20, 3},
  56. {-20, 3}, { 16, 3}, {-13, 3}, { 10, 3}, { -7, 3},
  57. { 3, 3}, { 0, 3}, { -1, 3}, { 2, 3}, { -1, 2} }
  58. };
  59. /** Scaled PARCOR values used for the first two PARCOR coefficients.
  60. * To be indexed by the Rice coded indices.
  61. * Generated by: parcor_scaled_values[i] = 32 + ((i * (i+1)) << 7) - (1 << 20)
  62. * Actual values are divided by 32 in order to be stored in 16 bits.
  63. */
  64. static const int16_t parcor_scaled_values[] = {
  65. -1048544 / 32, -1048288 / 32, -1047776 / 32, -1047008 / 32,
  66. -1045984 / 32, -1044704 / 32, -1043168 / 32, -1041376 / 32,
  67. -1039328 / 32, -1037024 / 32, -1034464 / 32, -1031648 / 32,
  68. -1028576 / 32, -1025248 / 32, -1021664 / 32, -1017824 / 32,
  69. -1013728 / 32, -1009376 / 32, -1004768 / 32, -999904 / 32,
  70. -994784 / 32, -989408 / 32, -983776 / 32, -977888 / 32,
  71. -971744 / 32, -965344 / 32, -958688 / 32, -951776 / 32,
  72. -944608 / 32, -937184 / 32, -929504 / 32, -921568 / 32,
  73. -913376 / 32, -904928 / 32, -896224 / 32, -887264 / 32,
  74. -878048 / 32, -868576 / 32, -858848 / 32, -848864 / 32,
  75. -838624 / 32, -828128 / 32, -817376 / 32, -806368 / 32,
  76. -795104 / 32, -783584 / 32, -771808 / 32, -759776 / 32,
  77. -747488 / 32, -734944 / 32, -722144 / 32, -709088 / 32,
  78. -695776 / 32, -682208 / 32, -668384 / 32, -654304 / 32,
  79. -639968 / 32, -625376 / 32, -610528 / 32, -595424 / 32,
  80. -580064 / 32, -564448 / 32, -548576 / 32, -532448 / 32,
  81. -516064 / 32, -499424 / 32, -482528 / 32, -465376 / 32,
  82. -447968 / 32, -430304 / 32, -412384 / 32, -394208 / 32,
  83. -375776 / 32, -357088 / 32, -338144 / 32, -318944 / 32,
  84. -299488 / 32, -279776 / 32, -259808 / 32, -239584 / 32,
  85. -219104 / 32, -198368 / 32, -177376 / 32, -156128 / 32,
  86. -134624 / 32, -112864 / 32, -90848 / 32, -68576 / 32,
  87. -46048 / 32, -23264 / 32, -224 / 32, 23072 / 32,
  88. 46624 / 32, 70432 / 32, 94496 / 32, 118816 / 32,
  89. 143392 / 32, 168224 / 32, 193312 / 32, 218656 / 32,
  90. 244256 / 32, 270112 / 32, 296224 / 32, 322592 / 32,
  91. 349216 / 32, 376096 / 32, 403232 / 32, 430624 / 32,
  92. 458272 / 32, 486176 / 32, 514336 / 32, 542752 / 32,
  93. 571424 / 32, 600352 / 32, 629536 / 32, 658976 / 32,
  94. 688672 / 32, 718624 / 32, 748832 / 32, 779296 / 32,
  95. 810016 / 32, 840992 / 32, 872224 / 32, 903712 / 32,
  96. 935456 / 32, 967456 / 32, 999712 / 32, 1032224 / 32
  97. };
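/* Worked example of the generation formula above:
 * i = 0   -> 32 + ((0 * 1) << 7)     - (1 << 20) = -1048544, stored as -1048544 / 32 = -32767
 * i = 127 -> 32 + ((127 * 128) << 7) - (1 << 20) =  1032224, stored as  1032224 / 32 =  32257
 */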
  98. /** Gain values of p(0) for long-term prediction.
  99. * To be indexed by the Rice coded indices.
  100. */
  101. static const uint8_t ltp_gain_values [4][4] = {
  102. { 0, 8, 16, 24},
  103. {32, 40, 48, 56},
  104. {64, 70, 76, 82},
  105. {88, 92, 96, 100}
  106. };
  107. /** Inter-channel weighting factors for multi-channel correlation.
  108. * To be indexed by the Rice coded indices.
  109. */
  110. static const int16_t mcc_weightings[] = {
  111. 204, 192, 179, 166, 153, 140, 128, 115,
  112. 102, 89, 76, 64, 51, 38, 25, 12,
  113. 0, -12, -25, -38, -51, -64, -76, -89,
  114. -102, -115, -128, -140, -153, -166, -179, -192
  115. };
  116. /** Tail codes used in arithmetic coding using block Gilbert-Moore codes.
  117. */
  118. static const uint8_t tail_code[16][6] = {
  119. { 74, 44, 25, 13, 7, 3},
  120. { 68, 42, 24, 13, 7, 3},
  121. { 58, 39, 23, 13, 7, 3},
  122. {126, 70, 37, 19, 10, 5},
  123. {132, 70, 37, 20, 10, 5},
  124. {124, 70, 38, 20, 10, 5},
  125. {120, 69, 37, 20, 11, 5},
  126. {116, 67, 37, 20, 11, 5},
  127. {108, 66, 36, 20, 10, 5},
  128. {102, 62, 36, 20, 10, 5},
  129. { 88, 58, 34, 19, 10, 5},
  130. {162, 89, 49, 25, 13, 7},
  131. {156, 87, 49, 26, 14, 7},
  132. {150, 86, 47, 26, 14, 7},
  133. {142, 84, 47, 26, 14, 7},
  134. {131, 79, 46, 26, 14, 7}
  135. };
  136. enum RA_Flag {
  137. RA_FLAG_NONE,
  138. RA_FLAG_FRAMES,
  139. RA_FLAG_HEADER
  140. };
  141. typedef struct ALSSpecificConfig {
  142. uint32_t samples; ///< number of samples, 0xFFFFFFFF if unknown
  143. int resolution; ///< 000 = 8-bit; 001 = 16-bit; 010 = 24-bit; 011 = 32-bit
  144. int floating; ///< 1 = IEEE 32-bit floating-point, 0 = integer
  145. int msb_first; ///< 1 = original CRC calculated on big-endian system, 0 = little-endian
  146. int frame_length; ///< frame length for each frame (last frame may differ)
  147. int ra_distance; ///< distance between RA frames (in frames, 0...255)
  148. enum RA_Flag ra_flag; ///< indicates where the size of ra units is stored
  149. int adapt_order; ///< adaptive order: 1 = on, 0 = off
  150. int coef_table; ///< table index of Rice code parameters
  151. int long_term_prediction; ///< long term prediction (LTP): 1 = on, 0 = off
  152. int max_order; ///< maximum prediction order (0..1023)
  153. int block_switching; ///< number of block switching levels
  154. int bgmc; ///< "Block Gilbert-Moore Code": 1 = on, 0 = off (Rice coding only)
  155. int sb_part; ///< sub-block partition
  156. int joint_stereo; ///< joint stereo: 1 = on, 0 = off
  157. int mc_coding; ///< extended inter-channel coding (multi channel coding): 1 = on, 0 = off
  158. int chan_config; ///< indicates that a chan_config_info field is present
  159. int chan_sort; ///< channel rearrangement: 1 = on, 0 = off
  160. int rlslms; ///< use "Recursive Least Square-Least Mean Square" predictor: 1 = on, 0 = off
  161. int chan_config_info; ///< mapping of channels to loudspeaker locations. Unused until setting channel configuration is implemented.
  162. int *chan_pos; ///< original channel positions
  163. int crc_enabled; ///< enable Cyclic Redundancy Checksum
  164. } ALSSpecificConfig;
  165. typedef struct ALSChannelData {
  166. int stop_flag;
  167. int master_channel;
  168. int time_diff_flag;
  169. int time_diff_sign;
  170. int time_diff_index;
  171. int weighting[6];
  172. } ALSChannelData;
  173. typedef struct ALSDecContext {
  174. AVCodecContext *avctx;
  175. ALSSpecificConfig sconf;
  176. GetBitContext gb;
  177. BswapDSPContext bdsp;
  178. const AVCRC *crc_table;
  179. uint32_t crc_org; ///< CRC value of the original input data
  180. uint32_t crc; ///< CRC value calculated from decoded data
  181. unsigned int cur_frame_length; ///< length of the current frame to decode
  182. unsigned int frame_id; ///< the frame ID / number of the current frame
  183. unsigned int js_switch; ///< if true, joint-stereo decoding is enforced
  184. unsigned int cs_switch; ///< if true, channel rearrangement is done
  185. unsigned int num_blocks; ///< number of blocks used in the current frame
  186. unsigned int s_max; ///< maximum Rice parameter allowed in entropy coding
187. uint8_t *bgmc_lut; ///< pointer to lookup tables used for BGMC
188. int *bgmc_lut_status; ///< pointer to lookup table status flags used for BGMC
  189. int ltp_lag_length; ///< number of bits used for ltp lag value
  190. int *const_block; ///< contains const_block flags for all channels
  191. unsigned int *shift_lsbs; ///< contains shift_lsbs flags for all channels
  192. unsigned int *opt_order; ///< contains opt_order flags for all channels
  193. int *store_prev_samples; ///< contains store_prev_samples flags for all channels
  194. int *use_ltp; ///< contains use_ltp flags for all channels
  195. int *ltp_lag; ///< contains ltp lag values for all channels
  196. int **ltp_gain; ///< gain values for ltp 5-tap filter for a channel
  197. int *ltp_gain_buffer; ///< contains all gain values for ltp 5-tap filter
  198. int32_t **quant_cof; ///< quantized parcor coefficients for a channel
  199. int32_t *quant_cof_buffer; ///< contains all quantized parcor coefficients
  200. int32_t **lpc_cof; ///< coefficients of the direct form prediction filter for a channel
  201. int32_t *lpc_cof_buffer; ///< contains all coefficients of the direct form prediction filter
202. int32_t *lpc_cof_reversed_buffer; ///< temporary buffer to set up a reversed version of lpc_cof_buffer
  203. ALSChannelData **chan_data; ///< channel data for multi-channel correlation
  204. ALSChannelData *chan_data_buffer; ///< contains channel data for all channels
  205. int *reverted_channels; ///< stores a flag for each reverted channel
  206. int32_t *prev_raw_samples; ///< contains unshifted raw samples from the previous block
  207. int32_t **raw_samples; ///< decoded raw samples for each channel
  208. int32_t *raw_buffer; ///< contains all decoded raw samples including carryover samples
  209. uint8_t *crc_buffer; ///< buffer of byte order corrected samples used for CRC check
  210. MLZ* mlz; ///< masked lz decompression structure
  211. SoftFloat_IEEE754 *acf; ///< contains common multiplier for all channels
  212. int *last_acf_mantissa; ///< contains the last acf mantissa data of common multiplier for all channels
  213. int *shift_value; ///< value by which the binary point is to be shifted for all channels
  214. int *last_shift_value; ///< contains last shift value for all channels
  215. int **raw_mantissa; ///< decoded mantissa bits of the difference signal
  216. unsigned char *larray; ///< buffer to store the output of masked lz decompression
  217. int *nbits; ///< contains the number of bits to read for masked lz decompression for all samples
  218. int highest_decoded_channel;
  219. } ALSDecContext;
  220. typedef struct ALSBlockData {
  221. unsigned int block_length; ///< number of samples within the block
  222. unsigned int ra_block; ///< if true, this is a random access block
  223. int *const_block; ///< if true, this is a constant value block
  224. int js_blocks; ///< true if this block contains a difference signal
  225. unsigned int *shift_lsbs; ///< shift of values for this block
  226. unsigned int *opt_order; ///< prediction order of this block
  227. int *store_prev_samples;///< if true, carryover samples have to be stored
  228. int *use_ltp; ///< if true, long-term prediction is used
  229. int *ltp_lag; ///< lag value for long-term prediction
  230. int *ltp_gain; ///< gain values for ltp 5-tap filter
  231. int32_t *quant_cof; ///< quantized parcor coefficients
  232. int32_t *lpc_cof; ///< coefficients of the direct form prediction
  233. int32_t *raw_samples; ///< decoded raw samples / residuals for this block
  234. int32_t *prev_raw_samples; ///< contains unshifted raw samples from the previous block
  235. int32_t *raw_other; ///< decoded raw samples of the other channel of a channel pair
  236. } ALSBlockData;
  237. static av_cold void dprint_specific_config(ALSDecContext *ctx)
  238. {
  239. #ifdef DEBUG
  240. AVCodecContext *avctx = ctx->avctx;
  241. ALSSpecificConfig *sconf = &ctx->sconf;
  242. ff_dlog(avctx, "resolution = %i\n", sconf->resolution);
  243. ff_dlog(avctx, "floating = %i\n", sconf->floating);
  244. ff_dlog(avctx, "frame_length = %i\n", sconf->frame_length);
  245. ff_dlog(avctx, "ra_distance = %i\n", sconf->ra_distance);
  246. ff_dlog(avctx, "ra_flag = %i\n", sconf->ra_flag);
  247. ff_dlog(avctx, "adapt_order = %i\n", sconf->adapt_order);
  248. ff_dlog(avctx, "coef_table = %i\n", sconf->coef_table);
  249. ff_dlog(avctx, "long_term_prediction = %i\n", sconf->long_term_prediction);
  250. ff_dlog(avctx, "max_order = %i\n", sconf->max_order);
  251. ff_dlog(avctx, "block_switching = %i\n", sconf->block_switching);
  252. ff_dlog(avctx, "bgmc = %i\n", sconf->bgmc);
  253. ff_dlog(avctx, "sb_part = %i\n", sconf->sb_part);
  254. ff_dlog(avctx, "joint_stereo = %i\n", sconf->joint_stereo);
  255. ff_dlog(avctx, "mc_coding = %i\n", sconf->mc_coding);
  256. ff_dlog(avctx, "chan_config = %i\n", sconf->chan_config);
  257. ff_dlog(avctx, "chan_sort = %i\n", sconf->chan_sort);
  258. ff_dlog(avctx, "RLSLMS = %i\n", sconf->rlslms);
  259. ff_dlog(avctx, "chan_config_info = %i\n", sconf->chan_config_info);
  260. #endif
  261. }
  262. /** Read an ALSSpecificConfig from a buffer into the output struct.
  263. */
  264. static av_cold int read_specific_config(ALSDecContext *ctx)
  265. {
  266. GetBitContext gb;
  267. uint64_t ht_size;
  268. int i, config_offset;
  269. MPEG4AudioConfig m4ac = {0};
  270. ALSSpecificConfig *sconf = &ctx->sconf;
  271. AVCodecContext *avctx = ctx->avctx;
  272. uint32_t als_id, header_size, trailer_size;
  273. int ret;
  274. if ((ret = init_get_bits8(&gb, avctx->extradata, avctx->extradata_size)) < 0)
  275. return ret;
  276. config_offset = avpriv_mpeg4audio_get_config2(&m4ac, avctx->extradata,
  277. avctx->extradata_size, 1, avctx);
  278. if (config_offset < 0)
  279. return AVERROR_INVALIDDATA;
  280. skip_bits_long(&gb, config_offset);
  281. if (get_bits_left(&gb) < (30 << 3))
  282. return AVERROR_INVALIDDATA;
  283. // read the fixed items
  284. als_id = get_bits_long(&gb, 32);
  285. avctx->sample_rate = m4ac.sample_rate;
  286. skip_bits_long(&gb, 32); // sample rate already known
  287. sconf->samples = get_bits_long(&gb, 32);
  288. avctx->channels = m4ac.channels;
  289. skip_bits(&gb, 16); // number of channels already known
  290. skip_bits(&gb, 3); // skip file_type
  291. sconf->resolution = get_bits(&gb, 3);
  292. sconf->floating = get_bits1(&gb);
  293. sconf->msb_first = get_bits1(&gb);
  294. sconf->frame_length = get_bits(&gb, 16) + 1;
  295. sconf->ra_distance = get_bits(&gb, 8);
  296. sconf->ra_flag = get_bits(&gb, 2);
  297. sconf->adapt_order = get_bits1(&gb);
  298. sconf->coef_table = get_bits(&gb, 2);
  299. sconf->long_term_prediction = get_bits1(&gb);
  300. sconf->max_order = get_bits(&gb, 10);
  301. sconf->block_switching = get_bits(&gb, 2);
  302. sconf->bgmc = get_bits1(&gb);
  303. sconf->sb_part = get_bits1(&gb);
  304. sconf->joint_stereo = get_bits1(&gb);
  305. sconf->mc_coding = get_bits1(&gb);
  306. sconf->chan_config = get_bits1(&gb);
  307. sconf->chan_sort = get_bits1(&gb);
  308. sconf->crc_enabled = get_bits1(&gb);
  309. sconf->rlslms = get_bits1(&gb);
  310. skip_bits(&gb, 5); // skip 5 reserved bits
  311. skip_bits1(&gb); // skip aux_data_enabled
  312. // check for ALSSpecificConfig struct
  313. if (als_id != MKBETAG('A','L','S','\0'))
  314. return AVERROR_INVALIDDATA;
  315. if (avctx->channels > FF_SANE_NB_CHANNELS) {
  316. avpriv_request_sample(avctx, "Huge number of channels");
  317. return AVERROR_PATCHWELCOME;
  318. }
  319. ctx->cur_frame_length = sconf->frame_length;
  320. // read channel config
  321. if (sconf->chan_config)
  322. sconf->chan_config_info = get_bits(&gb, 16);
  323. // TODO: use this to set avctx->channel_layout
  324. // read channel sorting
  325. if (sconf->chan_sort && avctx->channels > 1) {
  326. int chan_pos_bits = av_ceil_log2(avctx->channels);
  327. int bits_needed = avctx->channels * chan_pos_bits + 7;
  328. if (get_bits_left(&gb) < bits_needed)
  329. return AVERROR_INVALIDDATA;
  330. if (!(sconf->chan_pos = av_malloc_array(avctx->channels, sizeof(*sconf->chan_pos))))
  331. return AVERROR(ENOMEM);
  332. ctx->cs_switch = 1;
  333. for (i = 0; i < avctx->channels; i++) {
  334. sconf->chan_pos[i] = -1;
  335. }
  336. for (i = 0; i < avctx->channels; i++) {
  337. int idx;
  338. idx = get_bits(&gb, chan_pos_bits);
  339. if (idx >= avctx->channels || sconf->chan_pos[idx] != -1) {
  340. av_log(avctx, AV_LOG_WARNING, "Invalid channel reordering.\n");
  341. ctx->cs_switch = 0;
  342. break;
  343. }
  344. sconf->chan_pos[idx] = i;
  345. }
  346. align_get_bits(&gb);
  347. }
  348. // read fixed header and trailer sizes,
  349. // if size = 0xFFFFFFFF then there is no data field!
  350. if (get_bits_left(&gb) < 64)
  351. return AVERROR_INVALIDDATA;
  352. header_size = get_bits_long(&gb, 32);
  353. trailer_size = get_bits_long(&gb, 32);
  354. if (header_size == 0xFFFFFFFF)
  355. header_size = 0;
  356. if (trailer_size == 0xFFFFFFFF)
  357. trailer_size = 0;
  358. ht_size = ((int64_t)(header_size) + (int64_t)(trailer_size)) << 3;
  359. // skip the header and trailer data
  360. if (get_bits_left(&gb) < ht_size)
  361. return AVERROR_INVALIDDATA;
  362. if (ht_size > INT32_MAX)
  363. return AVERROR_PATCHWELCOME;
  364. skip_bits_long(&gb, ht_size);
  365. // initialize CRC calculation
  366. if (sconf->crc_enabled) {
  367. if (get_bits_left(&gb) < 32)
  368. return AVERROR_INVALIDDATA;
  369. if (avctx->err_recognition & (AV_EF_CRCCHECK|AV_EF_CAREFUL)) {
  370. ctx->crc_table = av_crc_get_table(AV_CRC_32_IEEE_LE);
  371. ctx->crc = 0xFFFFFFFF;
  372. ctx->crc_org = ~get_bits_long(&gb, 32);
  373. } else
  374. skip_bits_long(&gb, 32);
  375. }
  376. // no need to read the rest of ALSSpecificConfig (ra_unit_size & aux data)
  377. dprint_specific_config(ctx);
  378. return 0;
  379. }
  380. /** Check the ALSSpecificConfig for unsupported features.
  381. */
  382. static int check_specific_config(ALSDecContext *ctx)
  383. {
  384. ALSSpecificConfig *sconf = &ctx->sconf;
  385. int error = 0;
  386. // report unsupported feature and set error value
  387. #define MISSING_ERR(cond, str, errval) \
  388. { \
  389. if (cond) { \
  390. avpriv_report_missing_feature(ctx->avctx, \
  391. str); \
  392. error = errval; \
  393. } \
  394. }
  395. MISSING_ERR(sconf->rlslms, "Adaptive RLS-LMS prediction", AVERROR_PATCHWELCOME);
  396. return error;
  397. }
  398. /** Parse the bs_info field to extract the block partitioning used in
  399. * block switching mode, refer to ISO/IEC 14496-3, section 11.6.2.
  400. */
  401. static void parse_bs_info(const uint32_t bs_info, unsigned int n,
  402. unsigned int div, unsigned int **div_blocks,
  403. unsigned int *num_blocks)
  404. {
  405. if (n < 31 && ((bs_info << n) & 0x40000000)) {
  406. // if the level is valid and the investigated bit n is set
  407. // then recursively check both children at bits (2n+1) and (2n+2)
  408. n *= 2;
  409. div += 1;
  410. parse_bs_info(bs_info, n + 1, div, div_blocks, num_blocks);
  411. parse_bs_info(bs_info, n + 2, div, div_blocks, num_blocks);
  412. } else {
  413. // else the bit is not set or the last level has been reached
  414. // (bit implicitly not set)
  415. **div_blocks = div;
  416. (*div_blocks)++;
  417. (*num_blocks)++;
  418. }
  419. }
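/* Example: bs_info = 0x40000000 (only the root split bit set) descends one level and
 * emits two entries, giving div_blocks = {1, 1} and num_blocks = 2; get_block_sizes()
 * then maps these to two blocks of frame_length >> 1 samples each.
 */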
  420. /** Read and decode a Rice codeword.
  421. */
  422. static int32_t decode_rice(GetBitContext *gb, unsigned int k)
  423. {
  424. int max = get_bits_left(gb) - k;
  425. unsigned q = get_unary(gb, 0, max);
  426. int r = k ? get_bits1(gb) : !(q & 1);
  427. if (k > 1) {
  428. q <<= (k - 1);
  429. q += get_bits_long(gb, k - 1);
  430. } else if (!k) {
  431. q >>= 1;
  432. }
  433. return r ? q : ~q;
  434. }
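/* Example for k = 0: the unary prefix alone carries the value and the final
 * "r ? q : ~q" step maps 0, 1, 2, 3, 4, ... to 0, -1, 1, -2, 2, ...
 */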
  435. /** Convert PARCOR coefficient k to direct filter coefficient.
  436. */
  437. static void parcor_to_lpc(unsigned int k, const int32_t *par, int32_t *cof)
  438. {
  439. int i, j;
  440. for (i = 0, j = k - 1; i < j; i++, j--) {
  441. unsigned tmp1 = ((MUL64(par[k], cof[j]) + (1 << 19)) >> 20);
  442. cof[j] += ((MUL64(par[k], cof[i]) + (1 << 19)) >> 20);
  443. cof[i] += tmp1;
  444. }
  445. if (i == j)
  446. cof[i] += ((MUL64(par[k], cof[j]) + (1 << 19)) >> 20);
  447. cof[k] = par[k];
  448. }
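/* This is one step of the usual PARCOR-to-LPC step-up recursion in Q20 fixed point;
 * adding (1 << 19) before the ">> 20" rounds each Q20 product to nearest.
 */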
  449. /** Read block switching field if necessary and set actual block sizes.
450. * Also ensure that the block sizes of the last frame correspond to the
  451. * actual number of samples.
  452. */
  453. static void get_block_sizes(ALSDecContext *ctx, unsigned int *div_blocks,
  454. uint32_t *bs_info)
  455. {
  456. ALSSpecificConfig *sconf = &ctx->sconf;
  457. GetBitContext *gb = &ctx->gb;
  458. unsigned int *ptr_div_blocks = div_blocks;
  459. unsigned int b;
  460. if (sconf->block_switching) {
  461. unsigned int bs_info_len = 1 << (sconf->block_switching + 2);
  462. *bs_info = get_bits_long(gb, bs_info_len);
  463. *bs_info <<= (32 - bs_info_len);
  464. }
  465. ctx->num_blocks = 0;
  466. parse_bs_info(*bs_info, 0, 0, &ptr_div_blocks, &ctx->num_blocks);
  467. // The last frame may have an overdetermined block structure given in
  468. // the bitstream. In that case the defined block structure would need
  469. // more samples than available to be consistent.
  470. // The block structure is actually used but the block sizes are adapted
  471. // to fit the actual number of available samples.
  472. // Example: 5 samples, 2nd level block sizes: 2 2 2 2.
  473. // This results in the actual block sizes: 2 2 1 0.
  474. // This is not specified in 14496-3 but actually done by the reference
  475. // codec RM22 revision 2.
  476. // This appears to happen in case of an odd number of samples in the last
  477. // frame which is actually not allowed by the block length switching part
  478. // of 14496-3.
  479. // The ALS conformance files feature an odd number of samples in the last
  480. // frame.
  481. for (b = 0; b < ctx->num_blocks; b++)
  482. div_blocks[b] = ctx->sconf.frame_length >> div_blocks[b];
  483. if (ctx->cur_frame_length != ctx->sconf.frame_length) {
  484. unsigned int remaining = ctx->cur_frame_length;
  485. for (b = 0; b < ctx->num_blocks; b++) {
  486. if (remaining <= div_blocks[b]) {
  487. div_blocks[b] = remaining;
  488. ctx->num_blocks = b + 1;
  489. break;
  490. }
  491. remaining -= div_blocks[b];
  492. }
  493. }
  494. }
  495. /** Read the block data for a constant block
  496. */
  497. static int read_const_block_data(ALSDecContext *ctx, ALSBlockData *bd)
  498. {
  499. ALSSpecificConfig *sconf = &ctx->sconf;
  500. AVCodecContext *avctx = ctx->avctx;
  501. GetBitContext *gb = &ctx->gb;
  502. if (bd->block_length <= 0)
  503. return AVERROR_INVALIDDATA;
  504. *bd->raw_samples = 0;
  505. *bd->const_block = get_bits1(gb); // 1 = constant value, 0 = zero block (silence)
  506. bd->js_blocks = get_bits1(gb);
  507. // skip 5 reserved bits
  508. skip_bits(gb, 5);
  509. if (*bd->const_block) {
  510. unsigned int const_val_bits = sconf->floating ? 24 : avctx->bits_per_raw_sample;
  511. *bd->raw_samples = get_sbits_long(gb, const_val_bits);
  512. }
  513. // ensure constant block decoding by reusing this field
  514. *bd->const_block = 1;
  515. return 0;
  516. }
  517. /** Decode the block data for a constant block
  518. */
  519. static void decode_const_block_data(ALSDecContext *ctx, ALSBlockData *bd)
  520. {
  521. int smp = bd->block_length - 1;
  522. int32_t val = *bd->raw_samples;
  523. int32_t *dst = bd->raw_samples + 1;
  524. // write raw samples into buffer
  525. for (; smp; smp--)
  526. *dst++ = val;
  527. }
  528. /** Read the block data for a non-constant block
  529. */
  530. static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
  531. {
  532. ALSSpecificConfig *sconf = &ctx->sconf;
  533. AVCodecContext *avctx = ctx->avctx;
  534. GetBitContext *gb = &ctx->gb;
  535. unsigned int k;
  536. unsigned int s[8];
  537. unsigned int sx[8];
  538. unsigned int sub_blocks, log2_sub_blocks, sb_length;
  539. unsigned int start = 0;
  540. unsigned int opt_order;
  541. int sb;
  542. int32_t *quant_cof = bd->quant_cof;
  543. int32_t *current_res;
  544. // ensure variable block decoding by reusing this field
  545. *bd->const_block = 0;
  546. *bd->opt_order = 1;
  547. bd->js_blocks = get_bits1(gb);
  548. opt_order = *bd->opt_order;
  549. // determine the number of subblocks for entropy decoding
  550. if (!sconf->bgmc && !sconf->sb_part) {
  551. log2_sub_blocks = 0;
  552. } else {
  553. if (sconf->bgmc && sconf->sb_part)
  554. log2_sub_blocks = get_bits(gb, 2);
  555. else
  556. log2_sub_blocks = 2 * get_bits1(gb);
  557. }
  558. sub_blocks = 1 << log2_sub_blocks;
  559. // do not continue in case of a damaged stream since
  560. // block_length must be evenly divisible by sub_blocks
  561. if (bd->block_length & (sub_blocks - 1) || bd->block_length <= 0) {
  562. av_log(avctx, AV_LOG_WARNING,
  563. "Block length is not evenly divisible by the number of subblocks.\n");
  564. return AVERROR_INVALIDDATA;
  565. }
  566. sb_length = bd->block_length >> log2_sub_blocks;
  567. if (sconf->bgmc) {
  568. s[0] = get_bits(gb, 8 + (sconf->resolution > 1));
  569. for (k = 1; k < sub_blocks; k++)
  570. s[k] = s[k - 1] + decode_rice(gb, 2);
  571. for (k = 0; k < sub_blocks; k++) {
  572. sx[k] = s[k] & 0x0F;
  573. s [k] >>= 4;
  574. }
  575. } else {
  576. s[0] = get_bits(gb, 4 + (sconf->resolution > 1));
  577. for (k = 1; k < sub_blocks; k++)
  578. s[k] = s[k - 1] + decode_rice(gb, 0);
  579. }
  580. for (k = 1; k < sub_blocks; k++)
  581. if (s[k] > 32) {
  582. av_log(avctx, AV_LOG_ERROR, "k invalid for rice code.\n");
  583. return AVERROR_INVALIDDATA;
  584. }
  585. if (get_bits1(gb))
  586. *bd->shift_lsbs = get_bits(gb, 4) + 1;
  587. *bd->store_prev_samples = (bd->js_blocks && bd->raw_other) || *bd->shift_lsbs;
  588. if (!sconf->rlslms) {
  589. if (sconf->adapt_order && sconf->max_order) {
  590. int opt_order_length = av_ceil_log2(av_clip((bd->block_length >> 3) - 1,
  591. 2, sconf->max_order + 1));
  592. *bd->opt_order = get_bits(gb, opt_order_length);
  593. if (*bd->opt_order > sconf->max_order) {
  594. *bd->opt_order = sconf->max_order;
  595. av_log(avctx, AV_LOG_ERROR, "Predictor order too large.\n");
  596. return AVERROR_INVALIDDATA;
  597. }
  598. } else {
  599. *bd->opt_order = sconf->max_order;
  600. }
  601. opt_order = *bd->opt_order;
  602. if (opt_order) {
  603. int add_base;
  604. if (sconf->coef_table == 3) {
  605. add_base = 0x7F;
  606. // read coefficient 0
  607. quant_cof[0] = 32 * parcor_scaled_values[get_bits(gb, 7)];
  608. // read coefficient 1
  609. if (opt_order > 1)
  610. quant_cof[1] = -32 * parcor_scaled_values[get_bits(gb, 7)];
  611. // read coefficients 2 to opt_order
  612. for (k = 2; k < opt_order; k++)
  613. quant_cof[k] = get_bits(gb, 7);
  614. } else {
  615. int k_max;
  616. add_base = 1;
  617. // read coefficient 0 to 19
  618. k_max = FFMIN(opt_order, 20);
  619. for (k = 0; k < k_max; k++) {
  620. int rice_param = parcor_rice_table[sconf->coef_table][k][1];
  621. int offset = parcor_rice_table[sconf->coef_table][k][0];
  622. quant_cof[k] = decode_rice(gb, rice_param) + offset;
  623. if (quant_cof[k] < -64 || quant_cof[k] > 63) {
  624. av_log(avctx, AV_LOG_ERROR,
  625. "quant_cof %"PRId32" is out of range.\n",
  626. quant_cof[k]);
  627. return AVERROR_INVALIDDATA;
  628. }
  629. }
  630. // read coefficients 20 to 126
  631. k_max = FFMIN(opt_order, 127);
  632. for (; k < k_max; k++)
  633. quant_cof[k] = decode_rice(gb, 2) + (k & 1);
  634. // read coefficients 127 to opt_order
  635. for (; k < opt_order; k++)
  636. quant_cof[k] = decode_rice(gb, 1);
  637. quant_cof[0] = 32 * parcor_scaled_values[quant_cof[0] + 64];
  638. if (opt_order > 1)
  639. quant_cof[1] = -32 * parcor_scaled_values[quant_cof[1] + 64];
  640. }
  641. for (k = 2; k < opt_order; k++)
  642. quant_cof[k] = (quant_cof[k] * (1U << 14)) + (add_base << 13);
  643. }
  644. }
  645. // read LTP gain and lag values
  646. if (sconf->long_term_prediction) {
  647. *bd->use_ltp = get_bits1(gb);
  648. if (*bd->use_ltp) {
  649. int r, c;
  650. bd->ltp_gain[0] = decode_rice(gb, 1) * 8;
  651. bd->ltp_gain[1] = decode_rice(gb, 2) * 8;
  652. r = get_unary(gb, 0, 4);
  653. c = get_bits(gb, 2);
  654. if (r >= 4) {
  655. av_log(avctx, AV_LOG_ERROR, "r overflow\n");
  656. return AVERROR_INVALIDDATA;
  657. }
  658. bd->ltp_gain[2] = ltp_gain_values[r][c];
  659. bd->ltp_gain[3] = decode_rice(gb, 2) * 8;
  660. bd->ltp_gain[4] = decode_rice(gb, 1) * 8;
  661. *bd->ltp_lag = get_bits(gb, ctx->ltp_lag_length);
  662. *bd->ltp_lag += FFMAX(4, opt_order + 1);
  663. }
  664. }
  665. // read first value and residuals in case of a random access block
  666. if (bd->ra_block) {
  667. start = FFMIN(opt_order, 3);
  668. av_assert0(sb_length <= sconf->frame_length);
  669. if (sb_length <= start) {
670. // opt_order or sb_length may be corrupted; either way, this is unsupported and not well defined in the specification
  671. av_log(avctx, AV_LOG_ERROR, "Sub block length smaller or equal start\n");
  672. return AVERROR_PATCHWELCOME;
  673. }
  674. if (opt_order)
  675. bd->raw_samples[0] = decode_rice(gb, avctx->bits_per_raw_sample - 4);
  676. if (opt_order > 1)
  677. bd->raw_samples[1] = decode_rice(gb, FFMIN(s[0] + 3, ctx->s_max));
  678. if (opt_order > 2)
  679. bd->raw_samples[2] = decode_rice(gb, FFMIN(s[0] + 1, ctx->s_max));
  680. }
  681. // read all residuals
  682. if (sconf->bgmc) {
  683. int delta[8];
  684. unsigned int k [8];
  685. unsigned int b = av_clip((av_ceil_log2(bd->block_length) - 3) >> 1, 0, 5);
  686. // read most significant bits
  687. unsigned int high;
  688. unsigned int low;
  689. unsigned int value;
  690. int ret = ff_bgmc_decode_init(gb, &high, &low, &value);
  691. if (ret < 0)
  692. return ret;
  693. current_res = bd->raw_samples + start;
  694. for (sb = 0; sb < sub_blocks; sb++) {
  695. unsigned int sb_len = sb_length - (sb ? 0 : start);
  696. k [sb] = s[sb] > b ? s[sb] - b : 0;
  697. delta[sb] = 5 - s[sb] + k[sb];
  698. if (k[sb] >= 32)
  699. return AVERROR_INVALIDDATA;
  700. ff_bgmc_decode(gb, sb_len, current_res,
  701. delta[sb], sx[sb], &high, &low, &value, ctx->bgmc_lut, ctx->bgmc_lut_status);
  702. current_res += sb_len;
  703. }
  704. ff_bgmc_decode_end(gb);
  705. // read least significant bits and tails
  706. current_res = bd->raw_samples + start;
  707. for (sb = 0; sb < sub_blocks; sb++, start = 0) {
  708. unsigned int cur_tail_code = tail_code[sx[sb]][delta[sb]];
  709. unsigned int cur_k = k[sb];
  710. unsigned int cur_s = s[sb];
  711. for (; start < sb_length; start++) {
  712. int32_t res = *current_res;
  713. if (res == cur_tail_code) {
  714. unsigned int max_msb = (2 + (sx[sb] > 2) + (sx[sb] > 10))
  715. << (5 - delta[sb]);
  716. res = decode_rice(gb, cur_s);
  717. if (res >= 0) {
  718. res += (max_msb ) << cur_k;
  719. } else {
  720. res -= (max_msb - 1) << cur_k;
  721. }
  722. } else {
  723. if (res > cur_tail_code)
  724. res--;
  725. if (res & 1)
  726. res = -res;
  727. res >>= 1;
  728. if (cur_k) {
  729. res *= 1U << cur_k;
  730. res |= get_bits_long(gb, cur_k);
  731. }
  732. }
  733. *current_res++ = res;
  734. }
  735. }
  736. } else {
  737. current_res = bd->raw_samples + start;
  738. for (sb = 0; sb < sub_blocks; sb++, start = 0)
  739. for (; start < sb_length; start++)
  740. *current_res++ = decode_rice(gb, s[sb]);
  741. }
  742. return 0;
  743. }
  744. /** Decode the block data for a non-constant block
  745. */
  746. static int decode_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
  747. {
  748. ALSSpecificConfig *sconf = &ctx->sconf;
  749. unsigned int block_length = bd->block_length;
  750. unsigned int smp = 0;
  751. unsigned int k;
  752. int opt_order = *bd->opt_order;
  753. int sb;
  754. int64_t y;
  755. int32_t *quant_cof = bd->quant_cof;
  756. int32_t *lpc_cof = bd->lpc_cof;
  757. int32_t *raw_samples = bd->raw_samples;
  758. int32_t *raw_samples_end = bd->raw_samples + bd->block_length;
  759. int32_t *lpc_cof_reversed = ctx->lpc_cof_reversed_buffer;
  760. // reverse long-term prediction
  761. if (*bd->use_ltp) {
  762. int ltp_smp;
  763. for (ltp_smp = FFMAX(*bd->ltp_lag - 2, 0); ltp_smp < block_length; ltp_smp++) {
  764. int center = ltp_smp - *bd->ltp_lag;
  765. int begin = FFMAX(0, center - 2);
  766. int end = center + 3;
  767. int tab = 5 - (end - begin);
  768. int base;
  769. y = 1 << 6;
  770. for (base = begin; base < end; base++, tab++)
  771. y += (uint64_t)MUL64(bd->ltp_gain[tab], raw_samples[base]);
  772. raw_samples[ltp_smp] += y >> 7;
  773. }
  774. }
  775. // reconstruct all samples from residuals
  776. if (bd->ra_block) {
  777. for (smp = 0; smp < FFMIN(opt_order, block_length); smp++) {
  778. y = 1 << 19;
  779. for (sb = 0; sb < smp; sb++)
  780. y += (uint64_t)MUL64(lpc_cof[sb], raw_samples[-(sb + 1)]);
  781. *raw_samples++ -= y >> 20;
  782. parcor_to_lpc(smp, quant_cof, lpc_cof);
  783. }
  784. } else {
  785. for (k = 0; k < opt_order; k++)
  786. parcor_to_lpc(k, quant_cof, lpc_cof);
  787. // store previous samples in case that they have to be altered
  788. if (*bd->store_prev_samples)
  789. memcpy(bd->prev_raw_samples, raw_samples - sconf->max_order,
  790. sizeof(*bd->prev_raw_samples) * sconf->max_order);
  791. // reconstruct difference signal for prediction (joint-stereo)
  792. if (bd->js_blocks && bd->raw_other) {
  793. uint32_t *left, *right;
  794. if (bd->raw_other > raw_samples) { // D = R - L
  795. left = raw_samples;
  796. right = bd->raw_other;
  797. } else { // D = R - L
  798. left = bd->raw_other;
  799. right = raw_samples;
  800. }
  801. for (sb = -1; sb >= -sconf->max_order; sb--)
  802. raw_samples[sb] = right[sb] - left[sb];
  803. }
  804. // reconstruct shifted signal
  805. if (*bd->shift_lsbs)
  806. for (sb = -1; sb >= -sconf->max_order; sb--)
  807. raw_samples[sb] >>= *bd->shift_lsbs;
  808. }
  809. // reverse linear prediction coefficients for efficiency
  810. lpc_cof = lpc_cof + opt_order;
  811. for (sb = 0; sb < opt_order; sb++)
  812. lpc_cof_reversed[sb] = lpc_cof[-(sb + 1)];
  813. // reconstruct raw samples
  814. raw_samples = bd->raw_samples + smp;
  815. lpc_cof = lpc_cof_reversed + opt_order;
  816. for (; raw_samples < raw_samples_end; raw_samples++) {
  817. y = 1 << 19;
  818. for (sb = -opt_order; sb < 0; sb++)
  819. y += (uint64_t)MUL64(lpc_cof[sb], raw_samples[sb]);
  820. *raw_samples -= y >> 20;
  821. }
  822. raw_samples = bd->raw_samples;
  823. // restore previous samples in case that they have been altered
  824. if (*bd->store_prev_samples)
  825. memcpy(raw_samples - sconf->max_order, bd->prev_raw_samples,
  826. sizeof(*raw_samples) * sconf->max_order);
  827. return 0;
  828. }
  829. /** Read the block data.
  830. */
  831. static int read_block(ALSDecContext *ctx, ALSBlockData *bd)
  832. {
  833. int ret;
  834. GetBitContext *gb = &ctx->gb;
  835. ALSSpecificConfig *sconf = &ctx->sconf;
  836. *bd->shift_lsbs = 0;
  837. if (get_bits_left(gb) < 1)
  838. return AVERROR_INVALIDDATA;
  839. // read block type flag and read the samples accordingly
  840. if (get_bits1(gb)) {
  841. ret = read_var_block_data(ctx, bd);
  842. } else {
  843. ret = read_const_block_data(ctx, bd);
  844. }
  845. if (!sconf->mc_coding || ctx->js_switch)
  846. align_get_bits(gb);
  847. return ret;
  848. }
  849. /** Decode the block data.
  850. */
  851. static int decode_block(ALSDecContext *ctx, ALSBlockData *bd)
  852. {
  853. unsigned int smp;
  854. int ret = 0;
  855. // read block type flag and read the samples accordingly
  856. if (*bd->const_block)
  857. decode_const_block_data(ctx, bd);
  858. else
859. ret = decode_var_block_data(ctx, bd); // always returns 0
  860. if (ret < 0)
  861. return ret;
  862. // TODO: read RLSLMS extension data
  863. if (*bd->shift_lsbs)
  864. for (smp = 0; smp < bd->block_length; smp++)
  865. bd->raw_samples[smp] = (unsigned)bd->raw_samples[smp] << *bd->shift_lsbs;
  866. return 0;
  867. }
  868. /** Read and decode block data successively.
  869. */
  870. static int read_decode_block(ALSDecContext *ctx, ALSBlockData *bd)
  871. {
  872. int ret;
  873. if ((ret = read_block(ctx, bd)) < 0)
  874. return ret;
  875. return decode_block(ctx, bd);
  876. }
  877. /** Compute the number of samples left to decode for the current frame and
878. * set these samples to zero.
  879. */
  880. static void zero_remaining(unsigned int b, unsigned int b_max,
  881. const unsigned int *div_blocks, int32_t *buf)
  882. {
  883. unsigned int count = 0;
  884. while (b < b_max)
  885. count += div_blocks[b++];
  886. if (count)
  887. memset(buf, 0, sizeof(*buf) * count);
  888. }
  889. /** Decode blocks independently.
  890. */
  891. static int decode_blocks_ind(ALSDecContext *ctx, unsigned int ra_frame,
  892. unsigned int c, const unsigned int *div_blocks,
  893. unsigned int *js_blocks)
  894. {
  895. int ret;
  896. unsigned int b;
  897. ALSBlockData bd = { 0 };
  898. bd.ra_block = ra_frame;
  899. bd.const_block = ctx->const_block;
  900. bd.shift_lsbs = ctx->shift_lsbs;
  901. bd.opt_order = ctx->opt_order;
  902. bd.store_prev_samples = ctx->store_prev_samples;
  903. bd.use_ltp = ctx->use_ltp;
  904. bd.ltp_lag = ctx->ltp_lag;
  905. bd.ltp_gain = ctx->ltp_gain[0];
  906. bd.quant_cof = ctx->quant_cof[0];
  907. bd.lpc_cof = ctx->lpc_cof[0];
  908. bd.prev_raw_samples = ctx->prev_raw_samples;
  909. bd.raw_samples = ctx->raw_samples[c];
  910. for (b = 0; b < ctx->num_blocks; b++) {
  911. bd.block_length = div_blocks[b];
  912. if ((ret = read_decode_block(ctx, &bd)) < 0) {
  913. // damaged block, write zero for the rest of the frame
  914. zero_remaining(b, ctx->num_blocks, div_blocks, bd.raw_samples);
  915. return ret;
  916. }
  917. bd.raw_samples += div_blocks[b];
  918. bd.ra_block = 0;
  919. }
  920. return 0;
  921. }
  922. /** Decode blocks dependently.
  923. */
  924. static int decode_blocks(ALSDecContext *ctx, unsigned int ra_frame,
  925. unsigned int c, const unsigned int *div_blocks,
  926. unsigned int *js_blocks)
  927. {
  928. ALSSpecificConfig *sconf = &ctx->sconf;
  929. unsigned int offset = 0;
  930. unsigned int b;
  931. int ret;
  932. ALSBlockData bd[2] = { { 0 } };
  933. bd[0].ra_block = ra_frame;
  934. bd[0].const_block = ctx->const_block;
  935. bd[0].shift_lsbs = ctx->shift_lsbs;
  936. bd[0].opt_order = ctx->opt_order;
  937. bd[0].store_prev_samples = ctx->store_prev_samples;
  938. bd[0].use_ltp = ctx->use_ltp;
  939. bd[0].ltp_lag = ctx->ltp_lag;
  940. bd[0].ltp_gain = ctx->ltp_gain[0];
  941. bd[0].quant_cof = ctx->quant_cof[0];
  942. bd[0].lpc_cof = ctx->lpc_cof[0];
  943. bd[0].prev_raw_samples = ctx->prev_raw_samples;
  944. bd[0].js_blocks = *js_blocks;
  945. bd[1].ra_block = ra_frame;
  946. bd[1].const_block = ctx->const_block;
  947. bd[1].shift_lsbs = ctx->shift_lsbs;
  948. bd[1].opt_order = ctx->opt_order;
  949. bd[1].store_prev_samples = ctx->store_prev_samples;
  950. bd[1].use_ltp = ctx->use_ltp;
  951. bd[1].ltp_lag = ctx->ltp_lag;
  952. bd[1].ltp_gain = ctx->ltp_gain[0];
  953. bd[1].quant_cof = ctx->quant_cof[0];
  954. bd[1].lpc_cof = ctx->lpc_cof[0];
  955. bd[1].prev_raw_samples = ctx->prev_raw_samples;
  956. bd[1].js_blocks = *(js_blocks + 1);
  957. // decode all blocks
  958. for (b = 0; b < ctx->num_blocks; b++) {
  959. unsigned int s;
  960. bd[0].block_length = div_blocks[b];
  961. bd[1].block_length = div_blocks[b];
  962. bd[0].raw_samples = ctx->raw_samples[c ] + offset;
  963. bd[1].raw_samples = ctx->raw_samples[c + 1] + offset;
  964. bd[0].raw_other = bd[1].raw_samples;
  965. bd[1].raw_other = bd[0].raw_samples;
  966. if ((ret = read_decode_block(ctx, &bd[0])) < 0 ||
  967. (ret = read_decode_block(ctx, &bd[1])) < 0)
  968. goto fail;
  969. // reconstruct joint-stereo blocks
  970. if (bd[0].js_blocks) {
  971. if (bd[1].js_blocks)
  972. av_log(ctx->avctx, AV_LOG_WARNING, "Invalid channel pair.\n");
  973. for (s = 0; s < div_blocks[b]; s++)
  974. bd[0].raw_samples[s] = bd[1].raw_samples[s] - (unsigned)bd[0].raw_samples[s];
  975. } else if (bd[1].js_blocks) {
  976. for (s = 0; s < div_blocks[b]; s++)
  977. bd[1].raw_samples[s] = bd[1].raw_samples[s] + (unsigned)bd[0].raw_samples[s];
  978. }
  979. offset += div_blocks[b];
  980. bd[0].ra_block = 0;
  981. bd[1].ra_block = 0;
  982. }
  983. // store carryover raw samples,
984. // the other channel's raw samples are stored by the calling function.
  985. memmove(ctx->raw_samples[c] - sconf->max_order,
  986. ctx->raw_samples[c] - sconf->max_order + sconf->frame_length,
  987. sizeof(*ctx->raw_samples[c]) * sconf->max_order);
  988. return 0;
  989. fail:
  990. // damaged block, write zero for the rest of the frame
  991. zero_remaining(b, ctx->num_blocks, div_blocks, bd[0].raw_samples);
  992. zero_remaining(b, ctx->num_blocks, div_blocks, bd[1].raw_samples);
  993. return ret;
  994. }
  995. static inline int als_weighting(GetBitContext *gb, int k, int off)
  996. {
  997. int idx = av_clip(decode_rice(gb, k) + off,
  998. 0, FF_ARRAY_ELEMS(mcc_weightings) - 1);
  999. return mcc_weightings[idx];
  1000. }
  1001. /** Read the channel data.
  1002. */
  1003. static int read_channel_data(ALSDecContext *ctx, ALSChannelData *cd, int c)
  1004. {
  1005. GetBitContext *gb = &ctx->gb;
  1006. ALSChannelData *current = cd;
  1007. unsigned int channels = ctx->avctx->channels;
  1008. int entries = 0;
  1009. while (entries < channels && !(current->stop_flag = get_bits1(gb))) {
  1010. current->master_channel = get_bits_long(gb, av_ceil_log2(channels));
  1011. if (current->master_channel >= channels) {
  1012. av_log(ctx->avctx, AV_LOG_ERROR, "Invalid master channel.\n");
  1013. return AVERROR_INVALIDDATA;
  1014. }
  1015. if (current->master_channel != c) {
  1016. current->time_diff_flag = get_bits1(gb);
  1017. current->weighting[0] = als_weighting(gb, 1, 16);
  1018. current->weighting[1] = als_weighting(gb, 2, 14);
  1019. current->weighting[2] = als_weighting(gb, 1, 16);
  1020. if (current->time_diff_flag) {
  1021. current->weighting[3] = als_weighting(gb, 1, 16);
  1022. current->weighting[4] = als_weighting(gb, 1, 16);
  1023. current->weighting[5] = als_weighting(gb, 1, 16);
  1024. current->time_diff_sign = get_bits1(gb);
  1025. current->time_diff_index = get_bits(gb, ctx->ltp_lag_length - 3) + 3;
  1026. }
  1027. }
  1028. current++;
  1029. entries++;
  1030. }
  1031. if (entries == channels) {
  1032. av_log(ctx->avctx, AV_LOG_ERROR, "Damaged channel data.\n");
  1033. return AVERROR_INVALIDDATA;
  1034. }
  1035. align_get_bits(gb);
  1036. return 0;
  1037. }
  1038. /** Recursively reverts the inter-channel correlation for a block.
  1039. */
  1040. static int revert_channel_correlation(ALSDecContext *ctx, ALSBlockData *bd,
  1041. ALSChannelData **cd, int *reverted,
  1042. unsigned int offset, int c)
  1043. {
  1044. ALSChannelData *ch = cd[c];
  1045. unsigned int dep = 0;
  1046. unsigned int channels = ctx->avctx->channels;
  1047. unsigned int channel_size = ctx->sconf.frame_length + ctx->sconf.max_order;
  1048. if (reverted[c])
  1049. return 0;
  1050. reverted[c] = 1;
  1051. while (dep < channels && !ch[dep].stop_flag) {
  1052. revert_channel_correlation(ctx, bd, cd, reverted, offset,
  1053. ch[dep].master_channel);
  1054. dep++;
  1055. }
  1056. if (dep == channels) {
  1057. av_log(ctx->avctx, AV_LOG_WARNING, "Invalid channel correlation.\n");
  1058. return AVERROR_INVALIDDATA;
  1059. }
  1060. bd->const_block = ctx->const_block + c;
  1061. bd->shift_lsbs = ctx->shift_lsbs + c;
  1062. bd->opt_order = ctx->opt_order + c;
  1063. bd->store_prev_samples = ctx->store_prev_samples + c;
  1064. bd->use_ltp = ctx->use_ltp + c;
  1065. bd->ltp_lag = ctx->ltp_lag + c;
  1066. bd->ltp_gain = ctx->ltp_gain[c];
  1067. bd->lpc_cof = ctx->lpc_cof[c];
  1068. bd->quant_cof = ctx->quant_cof[c];
  1069. bd->raw_samples = ctx->raw_samples[c] + offset;
  1070. for (dep = 0; !ch[dep].stop_flag; dep++) {
  1071. ptrdiff_t smp;
  1072. ptrdiff_t begin = 1;
  1073. ptrdiff_t end = bd->block_length - 1;
  1074. int64_t y;
  1075. int32_t *master = ctx->raw_samples[ch[dep].master_channel] + offset;
  1076. if (ch[dep].master_channel == c)
  1077. continue;
  1078. if (ch[dep].time_diff_flag) {
  1079. int t = ch[dep].time_diff_index;
  1080. if (ch[dep].time_diff_sign) {
  1081. t = -t;
  1082. if (begin < t) {
  1083. av_log(ctx->avctx, AV_LOG_ERROR, "begin %"PTRDIFF_SPECIFIER" smaller than time diff index %d.\n", begin, t);
  1084. return AVERROR_INVALIDDATA;
  1085. }
  1086. begin -= t;
  1087. } else {
  1088. if (end < t) {
  1089. av_log(ctx->avctx, AV_LOG_ERROR, "end %"PTRDIFF_SPECIFIER" smaller than time diff index %d.\n", end, t);
  1090. return AVERROR_INVALIDDATA;
  1091. }
  1092. end -= t;
  1093. }
  1094. if (FFMIN(begin - 1, begin - 1 + t) < ctx->raw_buffer - master ||
  1095. FFMAX(end + 1, end + 1 + t) > ctx->raw_buffer + channels * channel_size - master) {
  1096. av_log(ctx->avctx, AV_LOG_ERROR,
  1097. "sample pointer range [%p, %p] not contained in raw_buffer [%p, %p].\n",
  1098. master + FFMIN(begin - 1, begin - 1 + t), master + FFMAX(end + 1, end + 1 + t),
  1099. ctx->raw_buffer, ctx->raw_buffer + channels * channel_size);
  1100. return AVERROR_INVALIDDATA;
  1101. }
  1102. for (smp = begin; smp < end; smp++) {
  1103. y = (1 << 6) +
  1104. MUL64(ch[dep].weighting[0], master[smp - 1 ]) +
  1105. MUL64(ch[dep].weighting[1], master[smp ]) +
  1106. MUL64(ch[dep].weighting[2], master[smp + 1 ]) +
  1107. MUL64(ch[dep].weighting[3], master[smp - 1 + t]) +
  1108. MUL64(ch[dep].weighting[4], master[smp + t]) +
  1109. MUL64(ch[dep].weighting[5], master[smp + 1 + t]);
  1110. bd->raw_samples[smp] += y >> 7;
  1111. }
  1112. } else {
  1113. if (begin - 1 < ctx->raw_buffer - master ||
  1114. end + 1 > ctx->raw_buffer + channels * channel_size - master) {
  1115. av_log(ctx->avctx, AV_LOG_ERROR,
  1116. "sample pointer range [%p, %p] not contained in raw_buffer [%p, %p].\n",
  1117. master + begin - 1, master + end + 1,
  1118. ctx->raw_buffer, ctx->raw_buffer + channels * channel_size);
  1119. return AVERROR_INVALIDDATA;
  1120. }
  1121. for (smp = begin; smp < end; smp++) {
  1122. y = (1 << 6) +
  1123. MUL64(ch[dep].weighting[0], master[smp - 1]) +
  1124. MUL64(ch[dep].weighting[1], master[smp ]) +
  1125. MUL64(ch[dep].weighting[2], master[smp + 1]);
  1126. bd->raw_samples[smp] += y >> 7;
  1127. }
  1128. }
  1129. }
  1130. return 0;
  1131. }
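/* Note: mcc_weightings[] holds Q7 factors (128 == 1.0), so the "(1 << 6) ... >> 7"
 * pattern above is a rounded Q7 weighted sum; the time-difference path applies the
 * 6-tap variant, the plain path the 3-tap one.
 */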
1132. /** Multiply two softfloats and handle the round-off.
  1133. */
  1134. static SoftFloat_IEEE754 multiply(SoftFloat_IEEE754 a, SoftFloat_IEEE754 b) {
  1135. uint64_t mantissa_temp;
  1136. uint64_t mask_64;
  1137. int cutoff_bit_count;
  1138. unsigned char last_2_bits;
  1139. unsigned int mantissa;
  1140. int32_t sign;
  1141. uint32_t return_val = 0;
  1142. int bit_count = 48;
  1143. sign = a.sign ^ b.sign;
  1144. // Multiply mantissa bits in a 64-bit register
  1145. mantissa_temp = (uint64_t)a.mant * (uint64_t)b.mant;
  1146. mask_64 = (uint64_t)0x1 << 47;
  1147. if (!mantissa_temp)
  1148. return FLOAT_0;
1149. // Determine the number of valid mantissa bits
  1150. while (!(mantissa_temp & mask_64) && mask_64) {
  1151. bit_count--;
  1152. mask_64 >>= 1;
  1153. }
  1154. // Round off
  1155. cutoff_bit_count = bit_count - 24;
  1156. if (cutoff_bit_count > 0) {
  1157. last_2_bits = (unsigned char)(((unsigned int)mantissa_temp >> (cutoff_bit_count - 1)) & 0x3 );
  1158. if ((last_2_bits == 0x3) || ((last_2_bits == 0x1) && ((unsigned int)mantissa_temp & ((0x1UL << (cutoff_bit_count - 1)) - 1)))) {
  1159. // Need to round up
  1160. mantissa_temp += (uint64_t)0x1 << cutoff_bit_count;
  1161. }
  1162. }
  1163. if (cutoff_bit_count >= 0) {
  1164. mantissa = (unsigned int)(mantissa_temp >> cutoff_bit_count);
  1165. } else {
  1166. mantissa = (unsigned int)(mantissa_temp <<-cutoff_bit_count);
  1167. }
  1168. // Need one more shift?
  1169. if (mantissa & 0x01000000ul) {
  1170. bit_count++;
  1171. mantissa >>= 1;
  1172. }
  1173. if (!sign) {
  1174. return_val = 0x80000000U;
  1175. }
  1176. return_val |= ((unsigned)av_clip(a.exp + b.exp + bit_count - 47, -126, 127) << 23) & 0x7F800000;
  1177. return_val |= mantissa;
  1178. return av_bits2sf_ieee754(return_val);
  1179. }
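/* The round-off above is round-to-nearest with ties-to-even: last_2_bits pairs the kept
 * LSB with the first dropped bit, and the mantissa is incremented either on 0x3 (a tie or
 * more with an odd LSB) or on 0x1 with a non-zero remainder below the dropped bit.
 */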
  1180. /** Read and decode the floating point sample data
  1181. */
  1182. static int read_diff_float_data(ALSDecContext *ctx, unsigned int ra_frame) {
  1183. AVCodecContext *avctx = ctx->avctx;
  1184. GetBitContext *gb = &ctx->gb;
  1185. SoftFloat_IEEE754 *acf = ctx->acf;
  1186. int *shift_value = ctx->shift_value;
  1187. int *last_shift_value = ctx->last_shift_value;
  1188. int *last_acf_mantissa = ctx->last_acf_mantissa;
  1189. int **raw_mantissa = ctx->raw_mantissa;
  1190. int *nbits = ctx->nbits;
  1191. unsigned char *larray = ctx->larray;
  1192. int frame_length = ctx->cur_frame_length;
  1193. SoftFloat_IEEE754 scale = av_int2sf_ieee754(0x1u, 23);
  1194. unsigned int partA_flag;
  1195. unsigned int highest_byte;
  1196. unsigned int shift_amp;
  1197. uint32_t tmp_32;
  1198. int use_acf;
  1199. int nchars;
  1200. int i;
  1201. int c;
  1202. long k;
  1203. long nbits_aligned;
  1204. unsigned long acc;
  1205. unsigned long j;
  1206. uint32_t sign;
  1207. uint32_t e;
  1208. uint32_t mantissa;
  1209. skip_bits_long(gb, 32); //num_bytes_diff_float
  1210. use_acf = get_bits1(gb);
  1211. if (ra_frame) {
  1212. memset(last_acf_mantissa, 0, avctx->channels * sizeof(*last_acf_mantissa));
  1213. memset(last_shift_value, 0, avctx->channels * sizeof(*last_shift_value) );
  1214. ff_mlz_flush_dict(ctx->mlz);
  1215. }
  1216. if (avctx->channels * 8 > get_bits_left(gb))
  1217. return AVERROR_INVALIDDATA;
  1218. for (c = 0; c < avctx->channels; ++c) {
  1219. if (use_acf) {
  1220. //acf_flag
  1221. if (get_bits1(gb)) {
  1222. tmp_32 = get_bits(gb, 23);
  1223. last_acf_mantissa[c] = tmp_32;
  1224. } else {
  1225. tmp_32 = last_acf_mantissa[c];
  1226. }
  1227. acf[c] = av_bits2sf_ieee754(tmp_32);
  1228. } else {
  1229. acf[c] = FLOAT_1;
  1230. }
  1231. highest_byte = get_bits(gb, 2);
  1232. partA_flag = get_bits1(gb);
  1233. shift_amp = get_bits1(gb);
  1234. if (shift_amp) {
  1235. shift_value[c] = get_bits(gb, 8);
  1236. last_shift_value[c] = shift_value[c];
  1237. } else {
  1238. shift_value[c] = last_shift_value[c];
  1239. }
  1240. if (partA_flag) {
  1241. if (!get_bits1(gb)) { //uncompressed
  1242. for (i = 0; i < frame_length; ++i) {
  1243. if (ctx->raw_samples[c][i] == 0) {
  1244. ctx->raw_mantissa[c][i] = get_bits_long(gb, 32);
  1245. }
  1246. }
  1247. } else { //compressed
  1248. nchars = 0;
  1249. for (i = 0; i < frame_length; ++i) {
  1250. if (ctx->raw_samples[c][i] == 0) {
  1251. nchars += 4;
  1252. }
  1253. }
  1254. tmp_32 = ff_mlz_decompression(ctx->mlz, gb, nchars, larray);
  1255. if(tmp_32 != nchars) {
  1256. av_log(ctx->avctx, AV_LOG_ERROR, "Error in MLZ decompression (%"PRId32", %d).\n", tmp_32, nchars);
  1257. return AVERROR_INVALIDDATA;
  1258. }
  1259. for (i = 0; i < frame_length; ++i) {
  1260. ctx->raw_mantissa[c][i] = AV_RB32(larray);
  1261. }
  1262. }
  1263. }
  1264. //decode part B
  1265. if (highest_byte) {
  1266. for (i = 0; i < frame_length; ++i) {
  1267. if (ctx->raw_samples[c][i] != 0) {
1268. // The following logic is taken from Tables 14.45 and 14.46 of the ISO spec
  1269. if (av_cmp_sf_ieee754(acf[c], FLOAT_1)) {
  1270. nbits[i] = 23 - av_log2(abs(ctx->raw_samples[c][i]));
  1271. } else {
  1272. nbits[i] = 23;
  1273. }
  1274. nbits[i] = FFMIN(nbits[i], highest_byte*8);
  1275. }
  1276. }
  1277. if (!get_bits1(gb)) { //uncompressed
  1278. for (i = 0; i < frame_length; ++i) {
  1279. if (ctx->raw_samples[c][i] != 0) {
  1280. raw_mantissa[c][i] = get_bitsz(gb, nbits[i]);
  1281. }
  1282. }
  1283. } else { //compressed
  1284. nchars = 0;
  1285. for (i = 0; i < frame_length; ++i) {
  1286. if (ctx->raw_samples[c][i]) {
  1287. nchars += (int) nbits[i] / 8;
  1288. if (nbits[i] & 7) {
  1289. ++nchars;
  1290. }
  1291. }
  1292. }
  1293. tmp_32 = ff_mlz_decompression(ctx->mlz, gb, nchars, larray);
  1294. if(tmp_32 != nchars) {
  1295. av_log(ctx->avctx, AV_LOG_ERROR, "Error in MLZ decompression (%"PRId32", %d).\n", tmp_32, nchars);
  1296. return AVERROR_INVALIDDATA;
  1297. }
  1298. j = 0;
  1299. for (i = 0; i < frame_length; ++i) {
  1300. if (ctx->raw_samples[c][i]) {
  1301. if (nbits[i] & 7) {
  1302. nbits_aligned = 8 * ((unsigned int)(nbits[i] / 8) + 1);
  1303. } else {
  1304. nbits_aligned = nbits[i];
  1305. }
  1306. acc = 0;
  1307. for (k = 0; k < nbits_aligned/8; ++k) {
  1308. acc = (acc << 8) + larray[j++];
  1309. }
  1310. acc >>= (nbits_aligned - nbits[i]);
  1311. raw_mantissa[c][i] = acc;
  1312. }
  1313. }
  1314. }
  1315. }
  1316. for (i = 0; i < frame_length; ++i) {
  1317. SoftFloat_IEEE754 pcm_sf = av_int2sf_ieee754(ctx->raw_samples[c][i], 0);
  1318. pcm_sf = av_div_sf_ieee754(pcm_sf, scale);
  1319. if (ctx->raw_samples[c][i] != 0) {
  1320. if (!av_cmp_sf_ieee754(acf[c], FLOAT_1)) {
  1321. pcm_sf = multiply(acf[c], pcm_sf);
  1322. }
  1323. sign = pcm_sf.sign;
  1324. e = pcm_sf.exp;
  1325. mantissa = (pcm_sf.mant | 0x800000) + raw_mantissa[c][i];
  1326. while(mantissa >= 0x1000000) {
  1327. e++;
  1328. mantissa >>= 1;
  1329. }
  1330. if (mantissa) e += (shift_value[c] - 127);
  1331. mantissa &= 0x007fffffUL;
  1332. tmp_32 = (sign << 31) | ((e + EXP_BIAS) << 23) | (mantissa);
  1333. ctx->raw_samples[c][i] = tmp_32;
  1334. } else {
  1335. ctx->raw_samples[c][i] = raw_mantissa[c][i] & 0x007fffffUL;
  1336. }
  1337. }
  1338. align_get_bits(gb);
  1339. }
  1340. return 0;
  1341. }
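/* Summary of the reconstruction above: the integer-coded sample is rescaled by 2^-23
 * ("scale"), combined with the channel's common multiplier acf[c] and shift_value[c],
 * and the transmitted mantissa bits then restore the original IEEE 754 bit pattern.
 */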
  1342. /** Read the frame data.
  1343. */
  1344. static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
  1345. {
  1346. ALSSpecificConfig *sconf = &ctx->sconf;
  1347. AVCodecContext *avctx = ctx->avctx;
  1348. GetBitContext *gb = &ctx->gb;
  1349. unsigned int div_blocks[32]; ///< block sizes.
  1350. unsigned int c;
  1351. unsigned int js_blocks[2];
  1352. uint32_t bs_info = 0;
  1353. int ret;
  1354. // skip the size of the ra unit if present in the frame
  1355. if (sconf->ra_flag == RA_FLAG_FRAMES && ra_frame)
  1356. skip_bits_long(gb, 32);
  1357. if (sconf->mc_coding && sconf->joint_stereo) {
  1358. ctx->js_switch = get_bits1(gb);
  1359. align_get_bits(gb);
  1360. }
  1361. if (!sconf->mc_coding || ctx->js_switch) {
  1362. int independent_bs = !sconf->joint_stereo;
  1363. for (c = 0; c < avctx->channels; c++) {
  1364. js_blocks[0] = 0;
  1365. js_blocks[1] = 0;
  1366. get_block_sizes(ctx, div_blocks, &bs_info);
  1367. // if joint_stereo and block_switching is set, independent decoding
  1368. // is signaled via the first bit of bs_info
  1369. if (sconf->joint_stereo && sconf->block_switching)
  1370. if (bs_info >> 31)
  1371. independent_bs = 2;
  1372. // if this is the last channel, it has to be decoded independently
  1373. if (c == avctx->channels - 1 || (c & 1))
  1374. independent_bs = 1;
  1375. if (independent_bs) {
  1376. ret = decode_blocks_ind(ctx, ra_frame, c,
  1377. div_blocks, js_blocks);
  1378. if (ret < 0)
  1379. return ret;
  1380. independent_bs--;
  1381. } else {
  1382. ret = decode_blocks(ctx, ra_frame, c, div_blocks, js_blocks);
  1383. if (ret < 0)
  1384. return ret;
  1385. c++;
  1386. }
  1387. // store carryover raw samples
  1388. memmove(ctx->raw_samples[c] - sconf->max_order,
  1389. ctx->raw_samples[c] - sconf->max_order + sconf->frame_length,
  1390. sizeof(*ctx->raw_samples[c]) * sconf->max_order);
  1391. ctx->highest_decoded_channel = c;
  1392. }
  1393. } else { // multi-channel coding
  1394. ALSBlockData bd = { 0 };
  1395. int b, ret;
  1396. int *reverted_channels = ctx->reverted_channels;
  1397. unsigned int offset = 0;
  1398. for (c = 0; c < avctx->channels; c++)
  1399. if (ctx->chan_data[c] < ctx->chan_data_buffer) {
  1400. av_log(ctx->avctx, AV_LOG_ERROR, "Invalid channel data.\n");
  1401. return AVERROR_INVALIDDATA;
  1402. }
  1403. memset(reverted_channels, 0, sizeof(*reverted_channels) * avctx->channels);
  1404. bd.ra_block = ra_frame;
  1405. bd.prev_raw_samples = ctx->prev_raw_samples;
  1406. get_block_sizes(ctx, div_blocks, &bs_info);
  1407. for (b = 0; b < ctx->num_blocks; b++) {
  1408. bd.block_length = div_blocks[b];
  1409. if (bd.block_length <= 0) {
  1410. av_log(ctx->avctx, AV_LOG_WARNING,
  1411. "Invalid block length %u in channel data!\n",
  1412. bd.block_length);
  1413. continue;
  1414. }
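/* Each block is processed in three passes over the channels: read the block
 * and multi-channel correlation data for every channel, revert the
 * inter-channel correlation, and finally run the regular per-block
 * reconstruction on the de-correlated data. */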
  1415. for (c = 0; c < avctx->channels; c++) {
  1416. bd.const_block = ctx->const_block + c;
  1417. bd.shift_lsbs = ctx->shift_lsbs + c;
  1418. bd.opt_order = ctx->opt_order + c;
  1419. bd.store_prev_samples = ctx->store_prev_samples + c;
  1420. bd.use_ltp = ctx->use_ltp + c;
  1421. bd.ltp_lag = ctx->ltp_lag + c;
  1422. bd.ltp_gain = ctx->ltp_gain[c];
  1423. bd.lpc_cof = ctx->lpc_cof[c];
  1424. bd.quant_cof = ctx->quant_cof[c];
  1425. bd.raw_samples = ctx->raw_samples[c] + offset;
  1426. bd.raw_other = NULL;
  1427. if ((ret = read_block(ctx, &bd)) < 0)
  1428. return ret;
  1429. if ((ret = read_channel_data(ctx, ctx->chan_data[c], c)) < 0)
  1430. return ret;
  1431. }
  1432. for (c = 0; c < avctx->channels; c++) {
  1433. ret = revert_channel_correlation(ctx, &bd, ctx->chan_data,
  1434. reverted_channels, offset, c);
  1435. if (ret < 0)
  1436. return ret;
  1437. }
  1438. for (c = 0; c < avctx->channels; c++) {
  1439. bd.const_block = ctx->const_block + c;
  1440. bd.shift_lsbs = ctx->shift_lsbs + c;
  1441. bd.opt_order = ctx->opt_order + c;
  1442. bd.store_prev_samples = ctx->store_prev_samples + c;
  1443. bd.use_ltp = ctx->use_ltp + c;
  1444. bd.ltp_lag = ctx->ltp_lag + c;
  1445. bd.ltp_gain = ctx->ltp_gain[c];
  1446. bd.lpc_cof = ctx->lpc_cof[c];
  1447. bd.quant_cof = ctx->quant_cof[c];
  1448. bd.raw_samples = ctx->raw_samples[c] + offset;
  1449. if ((ret = decode_block(ctx, &bd)) < 0)
  1450. return ret;
  1451. ctx->highest_decoded_channel = FFMAX(ctx->highest_decoded_channel, c);
  1452. }
  1453. memset(reverted_channels, 0, avctx->channels * sizeof(*reverted_channels));
  1454. offset += div_blocks[b];
  1455. bd.ra_block = 0;
  1456. }
  1457. // store carryover raw samples
  1458. for (c = 0; c < avctx->channels; c++)
  1459. memmove(ctx->raw_samples[c] - sconf->max_order,
  1460. ctx->raw_samples[c] - sconf->max_order + sconf->frame_length,
  1461. sizeof(*ctx->raw_samples[c]) * sconf->max_order);
  1462. }
  1463. if (sconf->floating) {
  1464. read_diff_float_data(ctx, ra_frame);
  1465. }
  1466. if (get_bits_left(gb) < 0) {
  1467. av_log(ctx->avctx, AV_LOG_ERROR, "Overread %d\n", -get_bits_left(gb));
  1468. return AVERROR_INVALIDDATA;
  1469. }
  1470. return 0;
  1471. }
  1472. /** Decode an ALS frame.
  1473. */
  1474. static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
  1475. AVPacket *avpkt)
  1476. {
  1477. ALSDecContext *ctx = avctx->priv_data;
  1478. AVFrame *frame = data;
  1479. ALSSpecificConfig *sconf = &ctx->sconf;
  1480. const uint8_t *buffer = avpkt->data;
  1481. int buffer_size = avpkt->size;
  1482. int invalid_frame, ret;
  1483. unsigned int c, sample, ra_frame, bytes_read, shift;
  1484. if ((ret = init_get_bits8(&ctx->gb, buffer, buffer_size)) < 0)
  1485. return ret;
  1486. // In the case that the distance between random access frames is set to zero
  1487. // (sconf->ra_distance == 0) no frame is treated as a random access frame.
  1488. // For the first frame, if prediction is used, all samples used from the
  1489. // previous frame are assumed to be zero.
  1490. ra_frame = sconf->ra_distance && !(ctx->frame_id % sconf->ra_distance);
  1491. // the last frame to decode might have a different length
  1492. if (sconf->samples != 0xFFFFFFFF)
  1493. ctx->cur_frame_length = FFMIN(sconf->samples - ctx->frame_id * (uint64_t) sconf->frame_length,
  1494. sconf->frame_length);
  1495. else
  1496. ctx->cur_frame_length = sconf->frame_length;
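// e.g. with sconf->samples == 1000 and frame_length == 400, frames 0 and 1
// hold 400 samples each and the last frame (frame_id == 2) holds 200.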
  1497. ctx->highest_decoded_channel = 0;
  1498. // decode the frame data
  1499. if ((invalid_frame = read_frame_data(ctx, ra_frame)) < 0)
  1500. av_log(ctx->avctx, AV_LOG_WARNING,
  1501. "Reading frame data failed. Skipping RA unit.\n");
  1502. if (ctx->highest_decoded_channel == 0)
  1503. return AVERROR_INVALIDDATA;
  1504. ctx->frame_id++;
  1505. /* get output buffer */
  1506. frame->nb_samples = ctx->cur_frame_length;
  1507. if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
  1508. return ret;
  1509. // transform decoded frame into output format
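// raw_step is the distance between two channel buffers inside raw_buffer;
// samples are shifted up from bits_per_raw_sample to the container width
// (16 or 32 bit) and, if cs_switch is set, channels are reordered via
// sconf->chan_pos.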
  1510. #define INTERLEAVE_OUTPUT(bps) \
  1511. { \
  1512. int##bps##_t *dest = (int##bps##_t*)frame->data[0]; \
  1513. int channels = avctx->channels; \
  1514. int32_t *raw_samples = ctx->raw_samples[0]; \
  1515. int raw_step = channels > 1 ? ctx->raw_samples[1] - raw_samples : 1; \
  1516. shift = bps - ctx->avctx->bits_per_raw_sample; \
  1517. if (!ctx->cs_switch) { \
  1518. for (sample = 0; sample < ctx->cur_frame_length; sample++) \
  1519. for (c = 0; c < channels; c++) \
  1520. *dest++ = raw_samples[c*raw_step + sample] * (1U << shift); \
  1521. } else { \
  1522. for (sample = 0; sample < ctx->cur_frame_length; sample++) \
  1523. for (c = 0; c < channels; c++) \
  1524. *dest++ = raw_samples[sconf->chan_pos[c]*raw_step + sample] * (1U << shift);\
  1525. } \
  1526. }
  1527. if (ctx->avctx->bits_per_raw_sample <= 16) {
  1528. INTERLEAVE_OUTPUT(16)
  1529. } else {
  1530. INTERLEAVE_OUTPUT(32)
  1531. }
  1532. // update CRC
  1533. if (sconf->crc_enabled && (avctx->err_recognition & (AV_EF_CRCCHECK|AV_EF_CAREFUL))) {
  1534. int swap = HAVE_BIGENDIAN != sconf->msb_first;
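/* The CRC covers the PCM data in the byte order given by sconf->msb_first,
 * so the interleaved output may have to be byte-swapped first; 24-bit
 * samples sit in 32-bit containers, hence only three bytes per sample
 * enter the checksum. */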
  1535. if (ctx->avctx->bits_per_raw_sample == 24) {
  1536. int32_t *src = (int32_t *)frame->data[0];
  1537. for (sample = 0;
  1538. sample < ctx->cur_frame_length * avctx->channels;
  1539. sample++) {
  1540. int32_t v;
  1541. if (swap)
  1542. v = av_bswap32(src[sample]);
  1543. else
  1544. v = src[sample];
  1545. if (!HAVE_BIGENDIAN)
  1546. v >>= 8;
  1547. ctx->crc = av_crc(ctx->crc_table, ctx->crc, (uint8_t*)(&v), 3);
  1548. }
  1549. } else {
  1550. uint8_t *crc_source;
  1551. if (swap) {
  1552. if (ctx->avctx->bits_per_raw_sample <= 16) {
  1553. int16_t *src = (int16_t*) frame->data[0];
  1554. int16_t *dest = (int16_t*) ctx->crc_buffer;
  1555. for (sample = 0;
  1556. sample < ctx->cur_frame_length * avctx->channels;
  1557. sample++)
  1558. *dest++ = av_bswap16(src[sample]);
  1559. } else {
  1560. ctx->bdsp.bswap_buf((uint32_t *) ctx->crc_buffer,
  1561. (uint32_t *) frame->data[0],
  1562. ctx->cur_frame_length * avctx->channels);
  1563. }
  1564. crc_source = ctx->crc_buffer;
  1565. } else {
  1566. crc_source = frame->data[0];
  1567. }
  1568. ctx->crc = av_crc(ctx->crc_table, ctx->crc, crc_source,
  1569. ctx->cur_frame_length * avctx->channels *
  1570. av_get_bytes_per_sample(avctx->sample_fmt));
  1571. }
  1572. // check CRC sums if this is the last frame
  1573. if (ctx->cur_frame_length != sconf->frame_length &&
  1574. ctx->crc_org != ctx->crc) {
  1575. av_log(avctx, AV_LOG_ERROR, "CRC error.\n");
  1576. if (avctx->err_recognition & AV_EF_EXPLODE)
  1577. return AVERROR_INVALIDDATA;
  1578. }
  1579. }
  1580. *got_frame_ptr = 1;
  1581. bytes_read = invalid_frame ? buffer_size :
  1582. (get_bits_count(&ctx->gb) + 7) >> 3;
  1583. return bytes_read;
  1584. }
  1585. /** Uninitialize the ALS decoder.
  1586. */
  1587. static av_cold int decode_end(AVCodecContext *avctx)
  1588. {
  1589. ALSDecContext *ctx = avctx->priv_data;
  1590. int i;
  1591. av_freep(&ctx->sconf.chan_pos);
  1592. ff_bgmc_end(&ctx->bgmc_lut, &ctx->bgmc_lut_status);
  1593. av_freep(&ctx->const_block);
  1594. av_freep(&ctx->shift_lsbs);
  1595. av_freep(&ctx->opt_order);
  1596. av_freep(&ctx->store_prev_samples);
  1597. av_freep(&ctx->use_ltp);
  1598. av_freep(&ctx->ltp_lag);
  1599. av_freep(&ctx->ltp_gain);
  1600. av_freep(&ctx->ltp_gain_buffer);
  1601. av_freep(&ctx->quant_cof);
  1602. av_freep(&ctx->lpc_cof);
  1603. av_freep(&ctx->quant_cof_buffer);
  1604. av_freep(&ctx->lpc_cof_buffer);
  1605. av_freep(&ctx->lpc_cof_reversed_buffer);
  1606. av_freep(&ctx->prev_raw_samples);
  1607. av_freep(&ctx->raw_samples);
  1608. av_freep(&ctx->raw_buffer);
  1609. av_freep(&ctx->chan_data);
  1610. av_freep(&ctx->chan_data_buffer);
  1611. av_freep(&ctx->reverted_channels);
  1612. av_freep(&ctx->crc_buffer);
  1613. if (ctx->mlz) {
  1614. av_freep(&ctx->mlz->dict);
  1615. av_freep(&ctx->mlz);
  1616. }
  1617. av_freep(&ctx->acf);
  1618. av_freep(&ctx->last_acf_mantissa);
  1619. av_freep(&ctx->shift_value);
  1620. av_freep(&ctx->last_shift_value);
  1621. if (ctx->raw_mantissa) {
  1622. for (i = 0; i < avctx->channels; i++) {
  1623. av_freep(&ctx->raw_mantissa[i]);
  1624. }
  1625. av_freep(&ctx->raw_mantissa);
  1626. }
  1627. av_freep(&ctx->larray);
  1628. av_freep(&ctx->nbits);
  1629. return 0;
  1630. }
  1631. /** Initialize the ALS decoder.
  1632. */
  1633. static av_cold int decode_init(AVCodecContext *avctx)
  1634. {
  1635. unsigned int c;
  1636. unsigned int channel_size;
  1637. int num_buffers, ret;
  1638. ALSDecContext *ctx = avctx->priv_data;
  1639. ALSSpecificConfig *sconf = &ctx->sconf;
  1640. ctx->avctx = avctx;
  1641. if (!avctx->extradata) {
  1642. av_log(avctx, AV_LOG_ERROR, "Missing required ALS extradata.\n");
  1643. return AVERROR_INVALIDDATA;
  1644. }
  1645. if ((ret = read_specific_config(ctx)) < 0) {
  1646. av_log(avctx, AV_LOG_ERROR, "Reading ALSSpecificConfig failed.\n");
  1647. goto fail;
  1648. }
  1649. if ((ret = check_specific_config(ctx)) < 0) {
  1650. goto fail;
  1651. }
  1652. if (sconf->bgmc) {
  1653. ret = ff_bgmc_init(avctx, &ctx->bgmc_lut, &ctx->bgmc_lut_status);
  1654. if (ret < 0)
  1655. goto fail;
  1656. }
  1657. if (sconf->floating) {
  1658. avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
  1659. avctx->bits_per_raw_sample = 32;
  1660. } else {
  1661. avctx->sample_fmt = sconf->resolution > 1
  1662. ? AV_SAMPLE_FMT_S32 : AV_SAMPLE_FMT_S16;
  1663. avctx->bits_per_raw_sample = (sconf->resolution + 1) * 8;
  1664. if (avctx->bits_per_raw_sample > 32) {
  1665. av_log(avctx, AV_LOG_ERROR, "Bits per raw sample %d larger than 32.\n",
  1666. avctx->bits_per_raw_sample);
  1667. ret = AVERROR_INVALIDDATA;
  1668. goto fail;
  1669. }
  1670. }
  1671. // set maximum Rice parameter for progressive decoding based on resolution
  1672. // This is not specified in 14496-3 but actually done by the reference
  1673. // codec RM22 revision 2.
  1674. ctx->s_max = sconf->resolution > 1 ? 31 : 15;
  1675. // set lag value for long-term prediction
  1676. ctx->ltp_lag_length = 8 + (avctx->sample_rate >= 96000) +
  1677. (avctx->sample_rate >= 192000);
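// ltp_lag_length is the bit width of the LTP lag field: 8 below 96 kHz,
// 9 from 96 kHz and 10 from 192 kHz upwards.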
  1678. // allocate quantized parcor coefficient buffer
  1679. num_buffers = sconf->mc_coding ? avctx->channels : 1;
  1680. if (num_buffers * (uint64_t)num_buffers > INT_MAX) // protect chan_data_buffer allocation
  1681. return AVERROR_INVALIDDATA;
  1682. ctx->quant_cof = av_malloc_array(num_buffers, sizeof(*ctx->quant_cof));
  1683. ctx->lpc_cof = av_malloc_array(num_buffers, sizeof(*ctx->lpc_cof));
  1684. ctx->quant_cof_buffer = av_malloc_array(num_buffers * sconf->max_order,
  1685. sizeof(*ctx->quant_cof_buffer));
  1686. ctx->lpc_cof_buffer = av_malloc_array(num_buffers * sconf->max_order,
  1687. sizeof(*ctx->lpc_cof_buffer));
  1688. ctx->lpc_cof_reversed_buffer = av_malloc_array(sconf->max_order,
  1689. sizeof(*ctx->lpc_cof_buffer));
  1690. if (!ctx->quant_cof || !ctx->lpc_cof ||
  1691. !ctx->quant_cof_buffer || !ctx->lpc_cof_buffer ||
  1692. !ctx->lpc_cof_reversed_buffer) {
  1693. av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
  1694. ret = AVERROR(ENOMEM);
  1695. goto fail;
  1696. }
  1697. // assign quantized parcor coefficient buffers
  1698. for (c = 0; c < num_buffers; c++) {
  1699. ctx->quant_cof[c] = ctx->quant_cof_buffer + c * sconf->max_order;
  1700. ctx->lpc_cof[c] = ctx->lpc_cof_buffer + c * sconf->max_order;
  1701. }
  1702. // allocate and assign lag and gain data buffer for ltp mode
  1703. ctx->const_block = av_malloc_array(num_buffers, sizeof(*ctx->const_block));
  1704. ctx->shift_lsbs = av_malloc_array(num_buffers, sizeof(*ctx->shift_lsbs));
  1705. ctx->opt_order = av_malloc_array(num_buffers, sizeof(*ctx->opt_order));
  1706. ctx->store_prev_samples = av_malloc_array(num_buffers, sizeof(*ctx->store_prev_samples));
  1707. ctx->use_ltp = av_mallocz_array(num_buffers, sizeof(*ctx->use_ltp));
  1708. ctx->ltp_lag = av_malloc_array(num_buffers, sizeof(*ctx->ltp_lag));
  1709. ctx->ltp_gain = av_malloc_array(num_buffers, sizeof(*ctx->ltp_gain));
  1710. ctx->ltp_gain_buffer = av_malloc_array(num_buffers * 5, sizeof(*ctx->ltp_gain_buffer));
  1711. if (!ctx->const_block || !ctx->shift_lsbs ||
  1712. !ctx->opt_order || !ctx->store_prev_samples ||
  1713. !ctx->use_ltp || !ctx->ltp_lag ||
  1714. !ctx->ltp_gain || !ctx->ltp_gain_buffer) {
  1715. av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
  1716. ret = AVERROR(ENOMEM);
  1717. goto fail;
  1718. }
  1719. for (c = 0; c < num_buffers; c++)
  1720. ctx->ltp_gain[c] = ctx->ltp_gain_buffer + c * 5;
  1721. // allocate and assign channel data buffer for mcc mode
  1722. if (sconf->mc_coding) {
  1723. ctx->chan_data_buffer = av_mallocz_array(num_buffers * num_buffers,
  1724. sizeof(*ctx->chan_data_buffer));
  1725. ctx->chan_data = av_mallocz_array(num_buffers,
  1726. sizeof(*ctx->chan_data));
  1727. ctx->reverted_channels = av_malloc_array(num_buffers,
  1728. sizeof(*ctx->reverted_channels));
  1729. if (!ctx->chan_data_buffer || !ctx->chan_data || !ctx->reverted_channels) {
  1730. av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
  1731. ret = AVERROR(ENOMEM);
  1732. goto fail;
  1733. }
  1734. for (c = 0; c < num_buffers; c++)
  1735. ctx->chan_data[c] = ctx->chan_data_buffer + c * num_buffers;
  1736. } else {
  1737. ctx->chan_data = NULL;
  1738. ctx->chan_data_buffer = NULL;
  1739. ctx->reverted_channels = NULL;
  1740. }
  1741. channel_size = sconf->frame_length + sconf->max_order;
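/* Each channel gets frame_length samples plus max_order history samples in
 * raw_buffer; raw_samples[c] is later pointed max_order entries into its
 * slice so that negative indices address the carryover samples of the
 * previous frame. */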
  1742. ctx->prev_raw_samples = av_malloc_array(sconf->max_order, sizeof(*ctx->prev_raw_samples));
  1743. ctx->raw_buffer = av_mallocz_array(avctx->channels * channel_size, sizeof(*ctx->raw_buffer));
  1744. ctx->raw_samples = av_malloc_array(avctx->channels, sizeof(*ctx->raw_samples));
  1745. if (sconf->floating) {
  1746. ctx->acf = av_malloc_array(avctx->channels, sizeof(*ctx->acf));
  1747. ctx->shift_value = av_malloc_array(avctx->channels, sizeof(*ctx->shift_value));
  1748. ctx->last_shift_value = av_malloc_array(avctx->channels, sizeof(*ctx->last_shift_value));
  1749. ctx->last_acf_mantissa = av_malloc_array(avctx->channels, sizeof(*ctx->last_acf_mantissa));
  1750. ctx->raw_mantissa = av_mallocz_array(avctx->channels, sizeof(*ctx->raw_mantissa));
  1751. ctx->larray = av_malloc_array(ctx->cur_frame_length * 4, sizeof(*ctx->larray));
  1752. ctx->nbits = av_malloc_array(ctx->cur_frame_length, sizeof(*ctx->nbits));
  1753. ctx->mlz = av_mallocz(sizeof(*ctx->mlz));
  1754. if (!ctx->mlz || !ctx->acf || !ctx->shift_value || !ctx->last_shift_value
1755. || !ctx->last_acf_mantissa || !ctx->raw_mantissa || !ctx->larray || !ctx->nbits) {
  1756. av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
  1757. ret = AVERROR(ENOMEM);
  1758. goto fail;
  1759. }
  1760. ff_mlz_init_dict(avctx, ctx->mlz);
  1761. ff_mlz_flush_dict(ctx->mlz);
  1762. for (c = 0; c < avctx->channels; ++c) {
  1763. ctx->raw_mantissa[c] = av_mallocz_array(ctx->cur_frame_length, sizeof(**ctx->raw_mantissa));
  1764. }
  1765. }
1766. // check the raw sample buffer allocations
1767. if (!ctx->prev_raw_samples || !ctx->raw_buffer || !ctx->raw_samples) {
  1768. av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
  1769. ret = AVERROR(ENOMEM);
  1770. goto fail;
  1771. }
  1772. // assign raw samples buffers
  1773. ctx->raw_samples[0] = ctx->raw_buffer + sconf->max_order;
  1774. for (c = 1; c < avctx->channels; c++)
  1775. ctx->raw_samples[c] = ctx->raw_samples[c - 1] + channel_size;
  1776. // allocate crc buffer
  1777. if (HAVE_BIGENDIAN != sconf->msb_first && sconf->crc_enabled &&
  1778. (avctx->err_recognition & (AV_EF_CRCCHECK|AV_EF_CAREFUL))) {
  1779. ctx->crc_buffer = av_malloc_array(ctx->cur_frame_length *
  1780. avctx->channels *
  1781. av_get_bytes_per_sample(avctx->sample_fmt),
  1782. sizeof(*ctx->crc_buffer));
  1783. if (!ctx->crc_buffer) {
  1784. av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
  1785. ret = AVERROR(ENOMEM);
  1786. goto fail;
  1787. }
  1788. }
  1789. ff_bswapdsp_init(&ctx->bdsp);
  1790. return 0;
  1791. fail:
  1792. return ret;
  1793. }
  1794. /** Flush (reset) the frame ID after seeking.
  1795. */
  1796. static av_cold void flush(AVCodecContext *avctx)
  1797. {
  1798. ALSDecContext *ctx = avctx->priv_data;
  1799. ctx->frame_id = 0;
  1800. }
  1801. AVCodec ff_als_decoder = {
  1802. .name = "als",
  1803. .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Audio Lossless Coding (ALS)"),
  1804. .type = AVMEDIA_TYPE_AUDIO,
  1805. .id = AV_CODEC_ID_MP4ALS,
  1806. .priv_data_size = sizeof(ALSDecContext),
  1807. .init = decode_init,
  1808. .close = decode_end,
  1809. .decode = decode_frame,
  1810. .flush = flush,
  1811. .capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
  1812. .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
  1813. };