  1. /*
  2. * MPEG-4 ALS decoder
  3. * Copyright (c) 2009 Thilo Borgmann <thilo.borgmann _at_ googlemail.com>
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * MPEG-4 ALS decoder
  24. * @author Thilo Borgmann <thilo.borgmann _at_ googlemail.com>
  25. */
  26. //#define DEBUG
  27. #include "avcodec.h"
  28. #include "get_bits.h"
  29. #include "unary.h"
  30. #include "mpeg4audio.h"
  31. #include "bytestream.h"
  32. #include "bgmc.h"
  33. #include "dsputil.h"
  34. #include "internal.h"
  35. #include "libavutil/samplefmt.h"
  36. #include "libavutil/crc.h"
  37. #include <stdint.h>
  38. /** Rice parameters and corresponding index offsets for decoding the
  39. * indices of scaled PARCOR values. The table chosen is set globally
  40. * by the encoder and stored in ALSSpecificConfig.
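* Each entry holds {offset, Rice parameter} for one coefficient index k;
* read_var_block_data() decodes the first 20 coefficients as
* quant_cof[k] = decode_rice(gb, parameter) + offset.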
  41. */
  42. static const int8_t parcor_rice_table[3][20][2] = {
  43. { {-52, 4}, {-29, 5}, {-31, 4}, { 19, 4}, {-16, 4},
  44. { 12, 3}, { -7, 3}, { 9, 3}, { -5, 3}, { 6, 3},
  45. { -4, 3}, { 3, 3}, { -3, 2}, { 3, 2}, { -2, 2},
  46. { 3, 2}, { -1, 2}, { 2, 2}, { -1, 2}, { 2, 2} },
  47. { {-58, 3}, {-42, 4}, {-46, 4}, { 37, 5}, {-36, 4},
  48. { 29, 4}, {-29, 4}, { 25, 4}, {-23, 4}, { 20, 4},
  49. {-17, 4}, { 16, 4}, {-12, 4}, { 12, 3}, {-10, 4},
  50. { 7, 3}, { -4, 4}, { 3, 3}, { -1, 3}, { 1, 3} },
  51. { {-59, 3}, {-45, 5}, {-50, 4}, { 38, 4}, {-39, 4},
  52. { 32, 4}, {-30, 4}, { 25, 3}, {-23, 3}, { 20, 3},
  53. {-20, 3}, { 16, 3}, {-13, 3}, { 10, 3}, { -7, 3},
  54. { 3, 3}, { 0, 3}, { -1, 3}, { 2, 3}, { -1, 2} }
  55. };
  56. /** Scaled PARCOR values used for the first two PARCOR coefficients.
  57. * To be indexed by the Rice coded indices.
  58. * Generated by: parcor_scaled_values[i] = 32 + ((i * (i+1)) << 7) - (1 << 20)
  59. * Actual values are divided by 32 in order to be stored in 16 bits.
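* They are scaled back up by a factor of 32 (negated for coefficient 1) when
* assigned to quant_cof[0] and quant_cof[1] in read_var_block_data().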
  60. */
  61. static const int16_t parcor_scaled_values[] = {
  62. -1048544 / 32, -1048288 / 32, -1047776 / 32, -1047008 / 32,
  63. -1045984 / 32, -1044704 / 32, -1043168 / 32, -1041376 / 32,
  64. -1039328 / 32, -1037024 / 32, -1034464 / 32, -1031648 / 32,
  65. -1028576 / 32, -1025248 / 32, -1021664 / 32, -1017824 / 32,
  66. -1013728 / 32, -1009376 / 32, -1004768 / 32, -999904 / 32,
  67. -994784 / 32, -989408 / 32, -983776 / 32, -977888 / 32,
  68. -971744 / 32, -965344 / 32, -958688 / 32, -951776 / 32,
  69. -944608 / 32, -937184 / 32, -929504 / 32, -921568 / 32,
  70. -913376 / 32, -904928 / 32, -896224 / 32, -887264 / 32,
  71. -878048 / 32, -868576 / 32, -858848 / 32, -848864 / 32,
  72. -838624 / 32, -828128 / 32, -817376 / 32, -806368 / 32,
  73. -795104 / 32, -783584 / 32, -771808 / 32, -759776 / 32,
  74. -747488 / 32, -734944 / 32, -722144 / 32, -709088 / 32,
  75. -695776 / 32, -682208 / 32, -668384 / 32, -654304 / 32,
  76. -639968 / 32, -625376 / 32, -610528 / 32, -595424 / 32,
  77. -580064 / 32, -564448 / 32, -548576 / 32, -532448 / 32,
  78. -516064 / 32, -499424 / 32, -482528 / 32, -465376 / 32,
  79. -447968 / 32, -430304 / 32, -412384 / 32, -394208 / 32,
  80. -375776 / 32, -357088 / 32, -338144 / 32, -318944 / 32,
  81. -299488 / 32, -279776 / 32, -259808 / 32, -239584 / 32,
  82. -219104 / 32, -198368 / 32, -177376 / 32, -156128 / 32,
  83. -134624 / 32, -112864 / 32, -90848 / 32, -68576 / 32,
  84. -46048 / 32, -23264 / 32, -224 / 32, 23072 / 32,
  85. 46624 / 32, 70432 / 32, 94496 / 32, 118816 / 32,
  86. 143392 / 32, 168224 / 32, 193312 / 32, 218656 / 32,
  87. 244256 / 32, 270112 / 32, 296224 / 32, 322592 / 32,
  88. 349216 / 32, 376096 / 32, 403232 / 32, 430624 / 32,
  89. 458272 / 32, 486176 / 32, 514336 / 32, 542752 / 32,
  90. 571424 / 32, 600352 / 32, 629536 / 32, 658976 / 32,
  91. 688672 / 32, 718624 / 32, 748832 / 32, 779296 / 32,
  92. 810016 / 32, 840992 / 32, 872224 / 32, 903712 / 32,
  93. 935456 / 32, 967456 / 32, 999712 / 32, 1032224 / 32
  94. };
  95. /** Gain values of p(0) for long-term prediction.
  96. * To be indexed by the Rice coded indices.
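* The row is selected by a unary code and the column by two bits when the
* center tap of the 5-tap LTP filter is read in read_var_block_data().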
  97. */
  98. static const uint8_t ltp_gain_values [4][4] = {
  99. { 0, 8, 16, 24},
  100. {32, 40, 48, 56},
  101. {64, 70, 76, 82},
  102. {88, 92, 96, 100}
  103. };
  104. /** Inter-channel weighting factors for multi-channel correlation.
  105. * To be indexed by the Rice coded indices.
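* The weights carry 7 fractional bits: revert_channel_correlation() rounds the
* weighted sum with (1 << 6) and shifts it right by 7.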
  106. */
  107. static const int16_t mcc_weightings[] = {
  108. 204, 192, 179, 166, 153, 140, 128, 115,
  109. 102, 89, 76, 64, 51, 38, 25, 12,
  110. 0, -12, -25, -38, -51, -64, -76, -89,
  111. -102, -115, -128, -140, -153, -166, -179, -192
  112. };
  113. /** Tail codes used in arithmetic coding using block Gilbert-Moore codes.
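* Indexed by [sx][delta]; a decoded MSB value equal to the tail code marks an
* escape that is resolved with an additional Rice code in read_var_block_data().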
  114. */
  115. static const uint8_t tail_code[16][6] = {
  116. { 74, 44, 25, 13, 7, 3},
  117. { 68, 42, 24, 13, 7, 3},
  118. { 58, 39, 23, 13, 7, 3},
  119. {126, 70, 37, 19, 10, 5},
  120. {132, 70, 37, 20, 10, 5},
  121. {124, 70, 38, 20, 10, 5},
  122. {120, 69, 37, 20, 11, 5},
  123. {116, 67, 37, 20, 11, 5},
  124. {108, 66, 36, 20, 10, 5},
  125. {102, 62, 36, 20, 10, 5},
  126. { 88, 58, 34, 19, 10, 5},
  127. {162, 89, 49, 25, 13, 7},
  128. {156, 87, 49, 26, 14, 7},
  129. {150, 86, 47, 26, 14, 7},
  130. {142, 84, 47, 26, 14, 7},
  131. {131, 79, 46, 26, 14, 7}
  132. };
  133. enum RA_Flag {
  134. RA_FLAG_NONE,
  135. RA_FLAG_FRAMES,
  136. RA_FLAG_HEADER
  137. };
  138. typedef struct {
  139. uint32_t samples; ///< number of samples, 0xFFFFFFFF if unknown
  140. int resolution; ///< 000 = 8-bit; 001 = 16-bit; 010 = 24-bit; 011 = 32-bit
  141. int floating; ///< 1 = IEEE 32-bit floating-point, 0 = integer
  142. int msb_first; ///< 1 = original CRC calculated on big-endian system, 0 = little-endian
  143. int frame_length; ///< frame length for each frame (last frame may differ)
  144. int ra_distance; ///< distance between RA frames (in frames, 0...255)
  145. enum RA_Flag ra_flag; ///< indicates where the size of ra units is stored
  146. int adapt_order; ///< adaptive order: 1 = on, 0 = off
  147. int coef_table; ///< table index of Rice code parameters
  148. int long_term_prediction; ///< long term prediction (LTP): 1 = on, 0 = off
  149. int max_order; ///< maximum prediction order (0..1023)
  150. int block_switching; ///< number of block switching levels
  151. int bgmc; ///< "Block Gilbert-Moore Code": 1 = on, 0 = off (Rice coding only)
  152. int sb_part; ///< sub-block partition
  153. int joint_stereo; ///< joint stereo: 1 = on, 0 = off
  154. int mc_coding; ///< extended inter-channel coding (multi channel coding): 1 = on, 0 = off
  155. int chan_config; ///< indicates that a chan_config_info field is present
  156. int chan_sort; ///< channel rearrangement: 1 = on, 0 = off
  157. int rlslms; ///< use "Recursive Least Square-Least Mean Square" predictor: 1 = on, 0 = off
  158. int chan_config_info; ///< mapping of channels to loudspeaker locations. Unused until setting channel configuration is implemented.
  159. int *chan_pos; ///< original channel positions
  160. int crc_enabled; ///< enable Cyclic Redundancy Checksum
  161. } ALSSpecificConfig;
  162. typedef struct {
  163. int stop_flag;
  164. int master_channel;
  165. int time_diff_flag;
  166. int time_diff_sign;
  167. int time_diff_index;
  168. int weighting[6];
  169. } ALSChannelData;
  170. typedef struct {
  171. AVCodecContext *avctx;
  172. AVFrame frame;
  173. ALSSpecificConfig sconf;
  174. GetBitContext gb;
  175. DSPContext dsp;
  176. const AVCRC *crc_table;
  177. uint32_t crc_org; ///< CRC value of the original input data
  178. uint32_t crc; ///< CRC value calculated from decoded data
  179. unsigned int cur_frame_length; ///< length of the current frame to decode
  180. unsigned int frame_id; ///< the frame ID / number of the current frame
  181. unsigned int js_switch; ///< if true, joint-stereo decoding is enforced
  182. unsigned int cs_switch; ///< if true, channel rearrangement is done
  183. unsigned int num_blocks; ///< number of blocks used in the current frame
  184. unsigned int s_max; ///< maximum Rice parameter allowed in entropy coding
185. uint8_t *bgmc_lut; ///< pointer to lookup tables used for BGMC
186. int *bgmc_lut_status; ///< pointer to lookup table status flags used for BGMC
  187. int ltp_lag_length; ///< number of bits used for ltp lag value
  188. int *const_block; ///< contains const_block flags for all channels
  189. unsigned int *shift_lsbs; ///< contains shift_lsbs flags for all channels
  190. unsigned int *opt_order; ///< contains opt_order flags for all channels
  191. int *store_prev_samples; ///< contains store_prev_samples flags for all channels
  192. int *use_ltp; ///< contains use_ltp flags for all channels
  193. int *ltp_lag; ///< contains ltp lag values for all channels
  194. int **ltp_gain; ///< gain values for ltp 5-tap filter for a channel
  195. int *ltp_gain_buffer; ///< contains all gain values for ltp 5-tap filter
  196. int32_t **quant_cof; ///< quantized parcor coefficients for a channel
  197. int32_t *quant_cof_buffer; ///< contains all quantized parcor coefficients
  198. int32_t **lpc_cof; ///< coefficients of the direct form prediction filter for a channel
  199. int32_t *lpc_cof_buffer; ///< contains all coefficients of the direct form prediction filter
200. int32_t *lpc_cof_reversed_buffer; ///< temporary buffer to set up a reversed version of lpc_cof_buffer
  201. ALSChannelData **chan_data; ///< channel data for multi-channel correlation
  202. ALSChannelData *chan_data_buffer; ///< contains channel data for all channels
  203. int *reverted_channels; ///< stores a flag for each reverted channel
  204. int32_t *prev_raw_samples; ///< contains unshifted raw samples from the previous block
  205. int32_t **raw_samples; ///< decoded raw samples for each channel
  206. int32_t *raw_buffer; ///< contains all decoded raw samples including carryover samples
  207. uint8_t *crc_buffer; ///< buffer of byte order corrected samples used for CRC check
  208. } ALSDecContext;
  209. typedef struct {
  210. unsigned int block_length; ///< number of samples within the block
  211. unsigned int ra_block; ///< if true, this is a random access block
  212. int *const_block; ///< if true, this is a constant value block
  213. int js_blocks; ///< true if this block contains a difference signal
  214. unsigned int *shift_lsbs; ///< shift of values for this block
  215. unsigned int *opt_order; ///< prediction order of this block
  216. int *store_prev_samples;///< if true, carryover samples have to be stored
  217. int *use_ltp; ///< if true, long-term prediction is used
  218. int *ltp_lag; ///< lag value for long-term prediction
  219. int *ltp_gain; ///< gain values for ltp 5-tap filter
  220. int32_t *quant_cof; ///< quantized parcor coefficients
  221. int32_t *lpc_cof; ///< coefficients of the direct form prediction
  222. int32_t *raw_samples; ///< decoded raw samples / residuals for this block
  223. int32_t *prev_raw_samples; ///< contains unshifted raw samples from the previous block
  224. int32_t *raw_other; ///< decoded raw samples of the other channel of a channel pair
  225. } ALSBlockData;
  226. static av_cold void dprint_specific_config(ALSDecContext *ctx)
  227. {
  228. #ifdef DEBUG
  229. AVCodecContext *avctx = ctx->avctx;
  230. ALSSpecificConfig *sconf = &ctx->sconf;
  231. av_dlog(avctx, "resolution = %i\n", sconf->resolution);
  232. av_dlog(avctx, "floating = %i\n", sconf->floating);
  233. av_dlog(avctx, "frame_length = %i\n", sconf->frame_length);
  234. av_dlog(avctx, "ra_distance = %i\n", sconf->ra_distance);
  235. av_dlog(avctx, "ra_flag = %i\n", sconf->ra_flag);
  236. av_dlog(avctx, "adapt_order = %i\n", sconf->adapt_order);
  237. av_dlog(avctx, "coef_table = %i\n", sconf->coef_table);
  238. av_dlog(avctx, "long_term_prediction = %i\n", sconf->long_term_prediction);
  239. av_dlog(avctx, "max_order = %i\n", sconf->max_order);
  240. av_dlog(avctx, "block_switching = %i\n", sconf->block_switching);
  241. av_dlog(avctx, "bgmc = %i\n", sconf->bgmc);
  242. av_dlog(avctx, "sb_part = %i\n", sconf->sb_part);
  243. av_dlog(avctx, "joint_stereo = %i\n", sconf->joint_stereo);
  244. av_dlog(avctx, "mc_coding = %i\n", sconf->mc_coding);
  245. av_dlog(avctx, "chan_config = %i\n", sconf->chan_config);
  246. av_dlog(avctx, "chan_sort = %i\n", sconf->chan_sort);
  247. av_dlog(avctx, "RLSLMS = %i\n", sconf->rlslms);
  248. av_dlog(avctx, "chan_config_info = %i\n", sconf->chan_config_info);
  249. #endif
  250. }
  251. /** Read an ALSSpecificConfig from a buffer into the output struct.
  252. */
  253. static av_cold int read_specific_config(ALSDecContext *ctx)
  254. {
  255. GetBitContext gb;
  256. uint64_t ht_size;
  257. int i, config_offset;
  258. MPEG4AudioConfig m4ac;
  259. ALSSpecificConfig *sconf = &ctx->sconf;
  260. AVCodecContext *avctx = ctx->avctx;
  261. uint32_t als_id, header_size, trailer_size;
  262. init_get_bits(&gb, avctx->extradata, avctx->extradata_size * 8);
  263. config_offset = avpriv_mpeg4audio_get_config(&m4ac, avctx->extradata,
  264. avctx->extradata_size * 8, 1);
  265. if (config_offset < 0)
  266. return -1;
  267. skip_bits_long(&gb, config_offset);
  268. if (get_bits_left(&gb) < (30 << 3))
  269. return -1;
  270. // read the fixed items
  271. als_id = get_bits_long(&gb, 32);
  272. avctx->sample_rate = m4ac.sample_rate;
  273. skip_bits_long(&gb, 32); // sample rate already known
  274. sconf->samples = get_bits_long(&gb, 32);
  275. avctx->channels = m4ac.channels;
  276. skip_bits(&gb, 16); // number of channels already known
  277. skip_bits(&gb, 3); // skip file_type
  278. sconf->resolution = get_bits(&gb, 3);
  279. sconf->floating = get_bits1(&gb);
  280. sconf->msb_first = get_bits1(&gb);
  281. sconf->frame_length = get_bits(&gb, 16) + 1;
  282. sconf->ra_distance = get_bits(&gb, 8);
  283. sconf->ra_flag = get_bits(&gb, 2);
  284. sconf->adapt_order = get_bits1(&gb);
  285. sconf->coef_table = get_bits(&gb, 2);
  286. sconf->long_term_prediction = get_bits1(&gb);
  287. sconf->max_order = get_bits(&gb, 10);
  288. sconf->block_switching = get_bits(&gb, 2);
  289. sconf->bgmc = get_bits1(&gb);
  290. sconf->sb_part = get_bits1(&gb);
  291. sconf->joint_stereo = get_bits1(&gb);
  292. sconf->mc_coding = get_bits1(&gb);
  293. sconf->chan_config = get_bits1(&gb);
  294. sconf->chan_sort = get_bits1(&gb);
  295. sconf->crc_enabled = get_bits1(&gb);
  296. sconf->rlslms = get_bits1(&gb);
  297. skip_bits(&gb, 5); // skip 5 reserved bits
  298. skip_bits1(&gb); // skip aux_data_enabled
  299. // check for ALSSpecificConfig struct
  300. if (als_id != MKBETAG('A','L','S','\0'))
  301. return -1;
  302. ctx->cur_frame_length = sconf->frame_length;
  303. // read channel config
  304. if (sconf->chan_config)
  305. sconf->chan_config_info = get_bits(&gb, 16);
  306. // TODO: use this to set avctx->channel_layout
  307. // read channel sorting
  308. if (sconf->chan_sort && avctx->channels > 1) {
  309. int chan_pos_bits = av_ceil_log2(avctx->channels);
  310. int bits_needed = avctx->channels * chan_pos_bits + 7;
  311. if (get_bits_left(&gb) < bits_needed)
  312. return -1;
  313. if (!(sconf->chan_pos = av_malloc(avctx->channels * sizeof(*sconf->chan_pos))))
  314. return AVERROR(ENOMEM);
  315. ctx->cs_switch = 1;
  316. for (i = 0; i < avctx->channels; i++) {
  317. int idx;
  318. idx = get_bits(&gb, chan_pos_bits);
  319. if (idx >= avctx->channels) {
  320. av_log(avctx, AV_LOG_WARNING, "Invalid channel reordering.\n");
  321. ctx->cs_switch = 0;
  322. break;
  323. }
  324. sconf->chan_pos[idx] = i;
  325. }
  326. align_get_bits(&gb);
  327. }
  328. // read fixed header and trailer sizes,
  329. // if size = 0xFFFFFFFF then there is no data field!
  330. if (get_bits_left(&gb) < 64)
  331. return -1;
  332. header_size = get_bits_long(&gb, 32);
  333. trailer_size = get_bits_long(&gb, 32);
  334. if (header_size == 0xFFFFFFFF)
  335. header_size = 0;
  336. if (trailer_size == 0xFFFFFFFF)
  337. trailer_size = 0;
  338. ht_size = ((int64_t)(header_size) + (int64_t)(trailer_size)) << 3;
  339. // skip the header and trailer data
  340. if (get_bits_left(&gb) < ht_size)
  341. return -1;
  342. if (ht_size > INT32_MAX)
  343. return -1;
  344. skip_bits_long(&gb, ht_size);
  345. // initialize CRC calculation
  346. if (sconf->crc_enabled) {
  347. if (get_bits_left(&gb) < 32)
  348. return -1;
  349. if (avctx->err_recognition & (AV_EF_CRCCHECK|AV_EF_CAREFUL)) {
  350. ctx->crc_table = av_crc_get_table(AV_CRC_32_IEEE_LE);
  351. ctx->crc = 0xFFFFFFFF;
  352. ctx->crc_org = ~get_bits_long(&gb, 32);
  353. } else
  354. skip_bits_long(&gb, 32);
  355. }
  356. // no need to read the rest of ALSSpecificConfig (ra_unit_size & aux data)
  357. dprint_specific_config(ctx);
  358. return 0;
  359. }
  360. /** Check the ALSSpecificConfig for unsupported features.
  361. */
  362. static int check_specific_config(ALSDecContext *ctx)
  363. {
  364. ALSSpecificConfig *sconf = &ctx->sconf;
  365. int error = 0;
  366. // report unsupported feature and set error value
  367. #define MISSING_ERR(cond, str, errval) \
  368. { \
  369. if (cond) { \
  370. av_log_missing_feature(ctx->avctx, str, 0); \
  371. error = errval; \
  372. } \
  373. }
  374. MISSING_ERR(sconf->floating, "Floating point decoding", AVERROR_PATCHWELCOME);
  375. MISSING_ERR(sconf->rlslms, "Adaptive RLS-LMS prediction", AVERROR_PATCHWELCOME);
  376. return error;
  377. }
  378. /** Parse the bs_info field to extract the block partitioning used in
  379. * block switching mode, refer to ISO/IEC 14496-3, section 11.6.2.
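* bs_info describes a binary tree: a set bit n means the block at that node is
* split in half, and both children (bits 2n+1 and 2n+2) are examined in turn.
* Each leaf emits its split depth, which get_block_sizes() later converts into
* a block length of frame_length >> depth.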
  380. */
  381. static void parse_bs_info(const uint32_t bs_info, unsigned int n,
  382. unsigned int div, unsigned int **div_blocks,
  383. unsigned int *num_blocks)
  384. {
  385. if (n < 31 && ((bs_info << n) & 0x40000000)) {
  386. // if the level is valid and the investigated bit n is set
  387. // then recursively check both children at bits (2n+1) and (2n+2)
  388. n *= 2;
  389. div += 1;
  390. parse_bs_info(bs_info, n + 1, div, div_blocks, num_blocks);
  391. parse_bs_info(bs_info, n + 2, div, div_blocks, num_blocks);
  392. } else {
  393. // else the bit is not set or the last level has been reached
  394. // (bit implicitly not set)
  395. **div_blocks = div;
  396. (*div_blocks)++;
  397. (*num_blocks)++;
  398. }
  399. }
  400. /** Read and decode a Rice codeword.
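* The codeword is a unary-coded quotient (bounded by the number of bits left
* in the buffer), a sign bit and k-1 remainder bits; negative values are
* returned as the one's complement ~q. For k == 0 the sign is folded into the
* least significant bit of the unary value instead.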
  401. */
  402. static int32_t decode_rice(GetBitContext *gb, unsigned int k)
  403. {
  404. int max = get_bits_left(gb) - k;
  405. int q = get_unary(gb, 0, max);
  406. int r = k ? get_bits1(gb) : !(q & 1);
  407. if (k > 1) {
  408. q <<= (k - 1);
  409. q += get_bits_long(gb, k - 1);
  410. } else if (!k) {
  411. q >>= 1;
  412. }
  413. return r ? q : ~q;
  414. }
  415. /** Convert PARCOR coefficient k to direct filter coefficient.
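* One step of the step-up recursion from reflection (PARCOR) coefficients to
* direct-form LPC coefficients; the products are rounded with (1 << 19) and
* shifted right by 20 to stay in the 20-bit fractional fixed-point format.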
  416. */
  417. static void parcor_to_lpc(unsigned int k, const int32_t *par, int32_t *cof)
  418. {
  419. int i, j;
  420. for (i = 0, j = k - 1; i < j; i++, j--) {
  421. int tmp1 = ((MUL64(par[k], cof[j]) + (1 << 19)) >> 20);
  422. cof[j] += ((MUL64(par[k], cof[i]) + (1 << 19)) >> 20);
  423. cof[i] += tmp1;
  424. }
  425. if (i == j)
  426. cof[i] += ((MUL64(par[k], cof[j]) + (1 << 19)) >> 20);
  427. cof[k] = par[k];
  428. }
  429. /** Read block switching field if necessary and set actual block sizes.
430. * Also ensure that the block sizes of the last frame correspond to the
  431. * actual number of samples.
  432. */
  433. static void get_block_sizes(ALSDecContext *ctx, unsigned int *div_blocks,
  434. uint32_t *bs_info)
  435. {
  436. ALSSpecificConfig *sconf = &ctx->sconf;
  437. GetBitContext *gb = &ctx->gb;
  438. unsigned int *ptr_div_blocks = div_blocks;
  439. unsigned int b;
  440. if (sconf->block_switching) {
  441. unsigned int bs_info_len = 1 << (sconf->block_switching + 2);
  442. *bs_info = get_bits_long(gb, bs_info_len);
  443. *bs_info <<= (32 - bs_info_len);
  444. }
  445. ctx->num_blocks = 0;
  446. parse_bs_info(*bs_info, 0, 0, &ptr_div_blocks, &ctx->num_blocks);
  447. // The last frame may have an overdetermined block structure given in
  448. // the bitstream. In that case the defined block structure would need
  449. // more samples than available to be consistent.
  450. // The block structure is actually used but the block sizes are adapted
  451. // to fit the actual number of available samples.
  452. // Example: 5 samples, 2nd level block sizes: 2 2 2 2.
  453. // This results in the actual block sizes: 2 2 1 0.
  454. // This is not specified in 14496-3 but actually done by the reference
  455. // codec RM22 revision 2.
  456. // This appears to happen in case of an odd number of samples in the last
  457. // frame which is actually not allowed by the block length switching part
  458. // of 14496-3.
  459. // The ALS conformance files feature an odd number of samples in the last
  460. // frame.
  461. for (b = 0; b < ctx->num_blocks; b++)
  462. div_blocks[b] = ctx->sconf.frame_length >> div_blocks[b];
  463. if (ctx->cur_frame_length != ctx->sconf.frame_length) {
  464. unsigned int remaining = ctx->cur_frame_length;
  465. for (b = 0; b < ctx->num_blocks; b++) {
  466. if (remaining <= div_blocks[b]) {
  467. div_blocks[b] = remaining;
  468. ctx->num_blocks = b + 1;
  469. break;
  470. }
  471. remaining -= div_blocks[b];
  472. }
  473. }
  474. }
  475. /** Read the block data for a constant block
  476. */
  477. static int read_const_block_data(ALSDecContext *ctx, ALSBlockData *bd)
  478. {
  479. ALSSpecificConfig *sconf = &ctx->sconf;
  480. AVCodecContext *avctx = ctx->avctx;
  481. GetBitContext *gb = &ctx->gb;
  482. if (bd->block_length <= 0)
  483. return AVERROR_INVALIDDATA;
  484. *bd->raw_samples = 0;
  485. *bd->const_block = get_bits1(gb); // 1 = constant value, 0 = zero block (silence)
  486. bd->js_blocks = get_bits1(gb);
  487. // skip 5 reserved bits
  488. skip_bits(gb, 5);
  489. if (*bd->const_block) {
  490. unsigned int const_val_bits = sconf->floating ? 24 : avctx->bits_per_raw_sample;
  491. *bd->raw_samples = get_sbits_long(gb, const_val_bits);
  492. }
  493. // ensure constant block decoding by reusing this field
  494. *bd->const_block = 1;
  495. return 0;
  496. }
  497. /** Decode the block data for a constant block
  498. */
  499. static void decode_const_block_data(ALSDecContext *ctx, ALSBlockData *bd)
  500. {
  501. int smp = bd->block_length - 1;
  502. int32_t val = *bd->raw_samples;
  503. int32_t *dst = bd->raw_samples + 1;
  504. // write raw samples into buffer
  505. for (; smp; smp--)
  506. *dst++ = val;
  507. }
  508. /** Read the block data for a non-constant block
  509. */
  510. static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
  511. {
  512. ALSSpecificConfig *sconf = &ctx->sconf;
  513. AVCodecContext *avctx = ctx->avctx;
  514. GetBitContext *gb = &ctx->gb;
  515. unsigned int k;
  516. unsigned int s[8];
  517. unsigned int sx[8];
  518. unsigned int sub_blocks, log2_sub_blocks, sb_length;
  519. unsigned int start = 0;
  520. unsigned int opt_order;
  521. int sb;
  522. int32_t *quant_cof = bd->quant_cof;
  523. int32_t *current_res;
  524. // ensure variable block decoding by reusing this field
  525. *bd->const_block = 0;
  526. *bd->opt_order = 1;
  527. bd->js_blocks = get_bits1(gb);
  528. opt_order = *bd->opt_order;
  529. // determine the number of subblocks for entropy decoding
  530. if (!sconf->bgmc && !sconf->sb_part) {
  531. log2_sub_blocks = 0;
  532. } else {
  533. if (sconf->bgmc && sconf->sb_part)
  534. log2_sub_blocks = get_bits(gb, 2);
  535. else
  536. log2_sub_blocks = 2 * get_bits1(gb);
  537. }
  538. sub_blocks = 1 << log2_sub_blocks;
  539. // do not continue in case of a damaged stream since
  540. // block_length must be evenly divisible by sub_blocks
  541. if (bd->block_length & (sub_blocks - 1)) {
  542. av_log(avctx, AV_LOG_WARNING,
  543. "Block length is not evenly divisible by the number of subblocks.\n");
  544. return -1;
  545. }
  546. sb_length = bd->block_length >> log2_sub_blocks;
  547. if (sconf->bgmc) {
  548. s[0] = get_bits(gb, 8 + (sconf->resolution > 1));
  549. for (k = 1; k < sub_blocks; k++)
  550. s[k] = s[k - 1] + decode_rice(gb, 2);
  551. for (k = 0; k < sub_blocks; k++) {
  552. sx[k] = s[k] & 0x0F;
  553. s [k] >>= 4;
  554. }
  555. } else {
  556. s[0] = get_bits(gb, 4 + (sconf->resolution > 1));
  557. for (k = 1; k < sub_blocks; k++)
  558. s[k] = s[k - 1] + decode_rice(gb, 0);
  559. }
  560. for (k = 1; k < sub_blocks; k++)
  561. if (s[k] > 32) {
  562. av_log(avctx, AV_LOG_ERROR, "k invalid for rice code.\n");
  563. return AVERROR_INVALIDDATA;
  564. }
  565. if (get_bits1(gb))
  566. *bd->shift_lsbs = get_bits(gb, 4) + 1;
  567. *bd->store_prev_samples = (bd->js_blocks && bd->raw_other) || *bd->shift_lsbs;
  568. if (!sconf->rlslms) {
  569. if (sconf->adapt_order) {
  570. int opt_order_length = av_ceil_log2(av_clip((bd->block_length >> 3) - 1,
  571. 2, sconf->max_order + 1));
  572. *bd->opt_order = get_bits(gb, opt_order_length);
  573. if (*bd->opt_order > sconf->max_order) {
  574. *bd->opt_order = sconf->max_order;
  575. av_log(avctx, AV_LOG_ERROR, "Predictor order too large.\n");
  576. return AVERROR_INVALIDDATA;
  577. }
  578. } else {
  579. *bd->opt_order = sconf->max_order;
  580. }
  581. opt_order = *bd->opt_order;
  582. if (opt_order) {
  583. int add_base;
  584. if (sconf->coef_table == 3) {
  585. add_base = 0x7F;
  586. // read coefficient 0
  587. quant_cof[0] = 32 * parcor_scaled_values[get_bits(gb, 7)];
  588. // read coefficient 1
  589. if (opt_order > 1)
  590. quant_cof[1] = -32 * parcor_scaled_values[get_bits(gb, 7)];
  591. // read coefficients 2 to opt_order
  592. for (k = 2; k < opt_order; k++)
  593. quant_cof[k] = get_bits(gb, 7);
  594. } else {
  595. int k_max;
  596. add_base = 1;
597. // read coefficients 0 to 19
  598. k_max = FFMIN(opt_order, 20);
  599. for (k = 0; k < k_max; k++) {
  600. int rice_param = parcor_rice_table[sconf->coef_table][k][1];
  601. int offset = parcor_rice_table[sconf->coef_table][k][0];
  602. quant_cof[k] = decode_rice(gb, rice_param) + offset;
  603. if (quant_cof[k] < -64 || quant_cof[k] > 63) {
  604. av_log(avctx, AV_LOG_ERROR, "quant_cof %d is out of range.\n", quant_cof[k]);
  605. return AVERROR_INVALIDDATA;
  606. }
  607. }
  608. // read coefficients 20 to 126
  609. k_max = FFMIN(opt_order, 127);
  610. for (; k < k_max; k++)
  611. quant_cof[k] = decode_rice(gb, 2) + (k & 1);
  612. // read coefficients 127 to opt_order
  613. for (; k < opt_order; k++)
  614. quant_cof[k] = decode_rice(gb, 1);
  615. quant_cof[0] = 32 * parcor_scaled_values[quant_cof[0] + 64];
  616. if (opt_order > 1)
  617. quant_cof[1] = -32 * parcor_scaled_values[quant_cof[1] + 64];
  618. }
  619. for (k = 2; k < opt_order; k++)
  620. quant_cof[k] = (quant_cof[k] << 14) + (add_base << 13);
  621. }
  622. }
  623. // read LTP gain and lag values
  624. if (sconf->long_term_prediction) {
  625. *bd->use_ltp = get_bits1(gb);
  626. if (*bd->use_ltp) {
  627. int r, c;
  628. bd->ltp_gain[0] = decode_rice(gb, 1) << 3;
  629. bd->ltp_gain[1] = decode_rice(gb, 2) << 3;
  630. r = get_unary(gb, 0, 3);
  631. c = get_bits(gb, 2);
  632. bd->ltp_gain[2] = ltp_gain_values[r][c];
  633. bd->ltp_gain[3] = decode_rice(gb, 2) << 3;
  634. bd->ltp_gain[4] = decode_rice(gb, 1) << 3;
  635. *bd->ltp_lag = get_bits(gb, ctx->ltp_lag_length);
  636. *bd->ltp_lag += FFMAX(4, opt_order + 1);
  637. }
  638. }
  639. // read first value and residuals in case of a random access block
  640. if (bd->ra_block) {
  641. if (opt_order)
  642. bd->raw_samples[0] = decode_rice(gb, avctx->bits_per_raw_sample - 4);
  643. if (opt_order > 1)
  644. bd->raw_samples[1] = decode_rice(gb, FFMIN(s[0] + 3, ctx->s_max));
  645. if (opt_order > 2)
  646. bd->raw_samples[2] = decode_rice(gb, FFMIN(s[0] + 1, ctx->s_max));
  647. start = FFMIN(opt_order, 3);
  648. }
  649. // read all residuals
  650. if (sconf->bgmc) {
  651. int delta[8];
  652. unsigned int k [8];
  653. unsigned int b = av_clip((av_ceil_log2(bd->block_length) - 3) >> 1, 0, 5);
  654. // read most significant bits
  655. unsigned int high;
  656. unsigned int low;
  657. unsigned int value;
  658. ff_bgmc_decode_init(gb, &high, &low, &value);
  659. current_res = bd->raw_samples + start;
  660. for (sb = 0; sb < sub_blocks; sb++) {
  661. unsigned int sb_len = sb_length - (sb ? 0 : start);
  662. k [sb] = s[sb] > b ? s[sb] - b : 0;
  663. delta[sb] = 5 - s[sb] + k[sb];
  664. ff_bgmc_decode(gb, sb_len, current_res,
  665. delta[sb], sx[sb], &high, &low, &value, ctx->bgmc_lut, ctx->bgmc_lut_status);
  666. current_res += sb_len;
  667. }
  668. ff_bgmc_decode_end(gb);
  669. // read least significant bits and tails
  670. current_res = bd->raw_samples + start;
  671. for (sb = 0; sb < sub_blocks; sb++, start = 0) {
  672. unsigned int cur_tail_code = tail_code[sx[sb]][delta[sb]];
  673. unsigned int cur_k = k[sb];
  674. unsigned int cur_s = s[sb];
  675. for (; start < sb_length; start++) {
  676. int32_t res = *current_res;
  677. if (res == cur_tail_code) {
  678. unsigned int max_msb = (2 + (sx[sb] > 2) + (sx[sb] > 10))
  679. << (5 - delta[sb]);
  680. res = decode_rice(gb, cur_s);
  681. if (res >= 0) {
  682. res += (max_msb ) << cur_k;
  683. } else {
  684. res -= (max_msb - 1) << cur_k;
  685. }
  686. } else {
  687. if (res > cur_tail_code)
  688. res--;
  689. if (res & 1)
  690. res = -res;
  691. res >>= 1;
  692. if (cur_k) {
  693. res <<= cur_k;
  694. res |= get_bits_long(gb, cur_k);
  695. }
  696. }
  697. *current_res++ = res;
  698. }
  699. }
  700. } else {
  701. current_res = bd->raw_samples + start;
  702. for (sb = 0; sb < sub_blocks; sb++, start = 0)
  703. for (; start < sb_length; start++)
  704. *current_res++ = decode_rice(gb, s[sb]);
  705. }
  706. if (!sconf->mc_coding || ctx->js_switch)
  707. align_get_bits(gb);
  708. return 0;
  709. }
  710. /** Decode the block data for a non-constant block
  711. */
  712. static int decode_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
  713. {
  714. ALSSpecificConfig *sconf = &ctx->sconf;
  715. unsigned int block_length = bd->block_length;
  716. unsigned int smp = 0;
  717. unsigned int k;
  718. int opt_order = *bd->opt_order;
  719. int sb;
  720. int64_t y;
  721. int32_t *quant_cof = bd->quant_cof;
  722. int32_t *lpc_cof = bd->lpc_cof;
  723. int32_t *raw_samples = bd->raw_samples;
  724. int32_t *raw_samples_end = bd->raw_samples + bd->block_length;
  725. int32_t *lpc_cof_reversed = ctx->lpc_cof_reversed_buffer;
  726. // reverse long-term prediction
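// The 5-tap LTP filter is centered ltp_lag samples back; its gains carry
// 7 fractional bits, so the weighted sum is rounded with (1 << 6) and
// shifted right by 7 before being added to the current residual.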
  727. if (*bd->use_ltp) {
  728. int ltp_smp;
  729. for (ltp_smp = FFMAX(*bd->ltp_lag - 2, 0); ltp_smp < block_length; ltp_smp++) {
  730. int center = ltp_smp - *bd->ltp_lag;
  731. int begin = FFMAX(0, center - 2);
  732. int end = center + 3;
  733. int tab = 5 - (end - begin);
  734. int base;
  735. y = 1 << 6;
  736. for (base = begin; base < end; base++, tab++)
  737. y += MUL64(bd->ltp_gain[tab], raw_samples[base]);
  738. raw_samples[ltp_smp] += y >> 7;
  739. }
  740. }
  741. // reconstruct all samples from residuals
  742. if (bd->ra_block) {
  743. for (smp = 0; smp < opt_order; smp++) {
  744. y = 1 << 19;
  745. for (sb = 0; sb < smp; sb++)
  746. y += MUL64(lpc_cof[sb], raw_samples[-(sb + 1)]);
  747. *raw_samples++ -= y >> 20;
  748. parcor_to_lpc(smp, quant_cof, lpc_cof);
  749. }
  750. } else {
  751. for (k = 0; k < opt_order; k++)
  752. parcor_to_lpc(k, quant_cof, lpc_cof);
753. // store previous samples in case they have to be altered
  754. if (*bd->store_prev_samples)
  755. memcpy(bd->prev_raw_samples, raw_samples - sconf->max_order,
  756. sizeof(*bd->prev_raw_samples) * sconf->max_order);
  757. // reconstruct difference signal for prediction (joint-stereo)
  758. if (bd->js_blocks && bd->raw_other) {
  759. int32_t *left, *right;
  760. if (bd->raw_other > raw_samples) { // D = R - L
  761. left = raw_samples;
  762. right = bd->raw_other;
  763. } else { // D = R - L
  764. left = bd->raw_other;
  765. right = raw_samples;
  766. }
  767. for (sb = -1; sb >= -sconf->max_order; sb--)
  768. raw_samples[sb] = right[sb] - left[sb];
  769. }
  770. // reconstruct shifted signal
  771. if (*bd->shift_lsbs)
  772. for (sb = -1; sb >= -sconf->max_order; sb--)
  773. raw_samples[sb] >>= *bd->shift_lsbs;
  774. }
  775. // reverse linear prediction coefficients for efficiency
  776. lpc_cof = lpc_cof + opt_order;
  777. for (sb = 0; sb < opt_order; sb++)
  778. lpc_cof_reversed[sb] = lpc_cof[-(sb + 1)];
  779. // reconstruct raw samples
  780. raw_samples = bd->raw_samples + smp;
  781. lpc_cof = lpc_cof_reversed + opt_order;
  782. for (; raw_samples < raw_samples_end; raw_samples++) {
  783. y = 1 << 19;
  784. for (sb = -opt_order; sb < 0; sb++)
  785. y += MUL64(lpc_cof[sb], raw_samples[sb]);
  786. *raw_samples -= y >> 20;
  787. }
  788. raw_samples = bd->raw_samples;
789. // restore previous samples in case they have been altered
  790. if (*bd->store_prev_samples)
  791. memcpy(raw_samples - sconf->max_order, bd->prev_raw_samples,
  792. sizeof(*raw_samples) * sconf->max_order);
  793. return 0;
  794. }
  795. /** Read the block data.
  796. */
  797. static int read_block(ALSDecContext *ctx, ALSBlockData *bd)
  798. {
  799. GetBitContext *gb = &ctx->gb;
  800. int ret;
  801. *bd->shift_lsbs = 0;
  802. // read block type flag and read the samples accordingly
  803. if (get_bits1(gb)) {
  804. if ((ret = read_var_block_data(ctx, bd)) < 0)
  805. return ret;
  806. } else {
  807. if ((ret = read_const_block_data(ctx, bd)) < 0)
  808. return ret;
  809. }
  810. return 0;
  811. }
  812. /** Decode the block data.
  813. */
  814. static int decode_block(ALSDecContext *ctx, ALSBlockData *bd)
  815. {
  816. unsigned int smp;
817. // decode the samples according to the block type determined in read_block()
  818. if (*bd->const_block)
  819. decode_const_block_data(ctx, bd);
  820. else if (decode_var_block_data(ctx, bd))
  821. return -1;
  822. // TODO: read RLSLMS extension data
  823. if (*bd->shift_lsbs)
  824. for (smp = 0; smp < bd->block_length; smp++)
  825. bd->raw_samples[smp] <<= *bd->shift_lsbs;
  826. return 0;
  827. }
  828. /** Read and decode block data successively.
  829. */
  830. static int read_decode_block(ALSDecContext *ctx, ALSBlockData *bd)
  831. {
  832. int ret;
  833. ret = read_block(ctx, bd);
  834. if (ret)
  835. return ret;
  836. ret = decode_block(ctx, bd);
  837. return ret;
  838. }
  839. /** Compute the number of samples left to decode for the current frame and
840. * set these samples to zero.
  841. */
  842. static void zero_remaining(unsigned int b, unsigned int b_max,
  843. const unsigned int *div_blocks, int32_t *buf)
  844. {
  845. unsigned int count = 0;
  846. while (b < b_max)
  847. count += div_blocks[b++];
  848. if (count)
  849. memset(buf, 0, sizeof(*buf) * count);
  850. }
  851. /** Decode blocks independently.
  852. */
  853. static int decode_blocks_ind(ALSDecContext *ctx, unsigned int ra_frame,
  854. unsigned int c, const unsigned int *div_blocks,
  855. unsigned int *js_blocks)
  856. {
  857. unsigned int b;
  858. ALSBlockData bd = { 0 };
  859. bd.ra_block = ra_frame;
  860. bd.const_block = ctx->const_block;
  861. bd.shift_lsbs = ctx->shift_lsbs;
  862. bd.opt_order = ctx->opt_order;
  863. bd.store_prev_samples = ctx->store_prev_samples;
  864. bd.use_ltp = ctx->use_ltp;
  865. bd.ltp_lag = ctx->ltp_lag;
  866. bd.ltp_gain = ctx->ltp_gain[0];
  867. bd.quant_cof = ctx->quant_cof[0];
  868. bd.lpc_cof = ctx->lpc_cof[0];
  869. bd.prev_raw_samples = ctx->prev_raw_samples;
  870. bd.raw_samples = ctx->raw_samples[c];
  871. for (b = 0; b < ctx->num_blocks; b++) {
  872. bd.block_length = div_blocks[b];
  873. if (read_decode_block(ctx, &bd)) {
  874. // damaged block, write zero for the rest of the frame
  875. zero_remaining(b, ctx->num_blocks, div_blocks, bd.raw_samples);
  876. return -1;
  877. }
  878. bd.raw_samples += div_blocks[b];
  879. bd.ra_block = 0;
  880. }
  881. return 0;
  882. }
  883. /** Decode blocks dependently.
  884. */
  885. static int decode_blocks(ALSDecContext *ctx, unsigned int ra_frame,
  886. unsigned int c, const unsigned int *div_blocks,
  887. unsigned int *js_blocks)
  888. {
  889. ALSSpecificConfig *sconf = &ctx->sconf;
  890. unsigned int offset = 0;
  891. unsigned int b;
  892. ALSBlockData bd[2] = { { 0 } };
  893. bd[0].ra_block = ra_frame;
  894. bd[0].const_block = ctx->const_block;
  895. bd[0].shift_lsbs = ctx->shift_lsbs;
  896. bd[0].opt_order = ctx->opt_order;
  897. bd[0].store_prev_samples = ctx->store_prev_samples;
  898. bd[0].use_ltp = ctx->use_ltp;
  899. bd[0].ltp_lag = ctx->ltp_lag;
  900. bd[0].ltp_gain = ctx->ltp_gain[0];
  901. bd[0].quant_cof = ctx->quant_cof[0];
  902. bd[0].lpc_cof = ctx->lpc_cof[0];
  903. bd[0].prev_raw_samples = ctx->prev_raw_samples;
  904. bd[0].js_blocks = *js_blocks;
  905. bd[1].ra_block = ra_frame;
  906. bd[1].const_block = ctx->const_block;
  907. bd[1].shift_lsbs = ctx->shift_lsbs;
  908. bd[1].opt_order = ctx->opt_order;
  909. bd[1].store_prev_samples = ctx->store_prev_samples;
  910. bd[1].use_ltp = ctx->use_ltp;
  911. bd[1].ltp_lag = ctx->ltp_lag;
  912. bd[1].ltp_gain = ctx->ltp_gain[0];
  913. bd[1].quant_cof = ctx->quant_cof[0];
  914. bd[1].lpc_cof = ctx->lpc_cof[0];
  915. bd[1].prev_raw_samples = ctx->prev_raw_samples;
  916. bd[1].js_blocks = *(js_blocks + 1);
  917. // decode all blocks
  918. for (b = 0; b < ctx->num_blocks; b++) {
  919. unsigned int s;
  920. bd[0].block_length = div_blocks[b];
  921. bd[1].block_length = div_blocks[b];
  922. bd[0].raw_samples = ctx->raw_samples[c ] + offset;
  923. bd[1].raw_samples = ctx->raw_samples[c + 1] + offset;
  924. bd[0].raw_other = bd[1].raw_samples;
  925. bd[1].raw_other = bd[0].raw_samples;
926. if (read_decode_block(ctx, &bd[0]) || read_decode_block(ctx, &bd[1])) {
  927. // damaged block, write zero for the rest of the frame
  928. zero_remaining(b, ctx->num_blocks, div_blocks, bd[0].raw_samples);
  929. zero_remaining(b, ctx->num_blocks, div_blocks, bd[1].raw_samples);
  930. return -1;
  931. }
  932. // reconstruct joint-stereo blocks
  933. if (bd[0].js_blocks) {
  934. if (bd[1].js_blocks)
  935. av_log(ctx->avctx, AV_LOG_WARNING, "Invalid channel pair.\n");
  936. for (s = 0; s < div_blocks[b]; s++)
  937. bd[0].raw_samples[s] = bd[1].raw_samples[s] - bd[0].raw_samples[s];
  938. } else if (bd[1].js_blocks) {
  939. for (s = 0; s < div_blocks[b]; s++)
  940. bd[1].raw_samples[s] = bd[1].raw_samples[s] + bd[0].raw_samples[s];
  941. }
  942. offset += div_blocks[b];
  943. bd[0].ra_block = 0;
  944. bd[1].ra_block = 0;
  945. }
  946. // store carryover raw samples,
947. // the other channel's raw samples are stored by the calling function.
  948. memmove(ctx->raw_samples[c] - sconf->max_order,
  949. ctx->raw_samples[c] - sconf->max_order + sconf->frame_length,
  950. sizeof(*ctx->raw_samples[c]) * sconf->max_order);
  951. return 0;
  952. }
  953. /** Read the channel data.
  954. */
  955. static int read_channel_data(ALSDecContext *ctx, ALSChannelData *cd, int c)
  956. {
  957. GetBitContext *gb = &ctx->gb;
  958. ALSChannelData *current = cd;
  959. unsigned int channels = ctx->avctx->channels;
  960. int entries = 0;
  961. while (entries < channels && !(current->stop_flag = get_bits1(gb))) {
  962. current->master_channel = get_bits_long(gb, av_ceil_log2(channels));
  963. if (current->master_channel >= channels) {
  964. av_log(ctx->avctx, AV_LOG_ERROR, "Invalid master channel.\n");
  965. return -1;
  966. }
  967. if (current->master_channel != c) {
  968. current->time_diff_flag = get_bits1(gb);
  969. current->weighting[0] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
  970. current->weighting[1] = mcc_weightings[av_clip(decode_rice(gb, 2) + 14, 0, 31)];
  971. current->weighting[2] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
  972. if (current->time_diff_flag) {
  973. current->weighting[3] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
  974. current->weighting[4] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
  975. current->weighting[5] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
  976. current->time_diff_sign = get_bits1(gb);
  977. current->time_diff_index = get_bits(gb, ctx->ltp_lag_length - 3) + 3;
  978. }
  979. }
  980. current++;
  981. entries++;
  982. }
  983. if (entries == channels) {
  984. av_log(ctx->avctx, AV_LOG_ERROR, "Damaged channel data.\n");
  985. return -1;
  986. }
  987. align_get_bits(gb);
  988. return 0;
  989. }
  990. /** Recursively reverts the inter-channel correlation for a block.
  991. */
  992. static int revert_channel_correlation(ALSDecContext *ctx, ALSBlockData *bd,
  993. ALSChannelData **cd, int *reverted,
  994. unsigned int offset, int c)
  995. {
  996. ALSChannelData *ch = cd[c];
  997. unsigned int dep = 0;
  998. unsigned int channels = ctx->avctx->channels;
  999. if (reverted[c])
  1000. return 0;
  1001. reverted[c] = 1;
  1002. while (dep < channels && !ch[dep].stop_flag) {
  1003. revert_channel_correlation(ctx, bd, cd, reverted, offset,
  1004. ch[dep].master_channel);
  1005. dep++;
  1006. }
  1007. if (dep == channels) {
  1008. av_log(ctx->avctx, AV_LOG_WARNING, "Invalid channel correlation.\n");
  1009. return -1;
  1010. }
  1011. bd->const_block = ctx->const_block + c;
  1012. bd->shift_lsbs = ctx->shift_lsbs + c;
  1013. bd->opt_order = ctx->opt_order + c;
  1014. bd->store_prev_samples = ctx->store_prev_samples + c;
  1015. bd->use_ltp = ctx->use_ltp + c;
  1016. bd->ltp_lag = ctx->ltp_lag + c;
  1017. bd->ltp_gain = ctx->ltp_gain[c];
  1018. bd->lpc_cof = ctx->lpc_cof[c];
  1019. bd->quant_cof = ctx->quant_cof[c];
  1020. bd->raw_samples = ctx->raw_samples[c] + offset;
  1021. dep = 0;
  1022. while (!ch[dep].stop_flag) {
  1023. unsigned int smp;
  1024. unsigned int begin = 1;
  1025. unsigned int end = bd->block_length - 1;
  1026. int64_t y;
  1027. int32_t *master = ctx->raw_samples[ch[dep].master_channel] + offset;
  1028. if (ch[dep].time_diff_flag) {
  1029. int t = ch[dep].time_diff_index;
  1030. if (ch[dep].time_diff_sign) {
  1031. t = -t;
  1032. begin -= t;
  1033. } else {
  1034. end -= t;
  1035. }
  1036. for (smp = begin; smp < end; smp++) {
  1037. y = (1 << 6) +
  1038. MUL64(ch[dep].weighting[0], master[smp - 1 ]) +
  1039. MUL64(ch[dep].weighting[1], master[smp ]) +
  1040. MUL64(ch[dep].weighting[2], master[smp + 1 ]) +
  1041. MUL64(ch[dep].weighting[3], master[smp - 1 + t]) +
  1042. MUL64(ch[dep].weighting[4], master[smp + t]) +
  1043. MUL64(ch[dep].weighting[5], master[smp + 1 + t]);
  1044. bd->raw_samples[smp] += y >> 7;
  1045. }
  1046. } else {
  1047. for (smp = begin; smp < end; smp++) {
  1048. y = (1 << 6) +
  1049. MUL64(ch[dep].weighting[0], master[smp - 1]) +
  1050. MUL64(ch[dep].weighting[1], master[smp ]) +
  1051. MUL64(ch[dep].weighting[2], master[smp + 1]);
  1052. bd->raw_samples[smp] += y >> 7;
  1053. }
  1054. }
  1055. dep++;
  1056. }
  1057. return 0;
  1058. }
  1059. /** Read the frame data.
  1060. */
  1061. static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
  1062. {
  1063. ALSSpecificConfig *sconf = &ctx->sconf;
  1064. AVCodecContext *avctx = ctx->avctx;
  1065. GetBitContext *gb = &ctx->gb;
  1066. unsigned int div_blocks[32]; ///< block sizes.
  1067. unsigned int c;
  1068. unsigned int js_blocks[2];
  1069. uint32_t bs_info = 0;
  1070. // skip the size of the ra unit if present in the frame
  1071. if (sconf->ra_flag == RA_FLAG_FRAMES && ra_frame)
  1072. skip_bits_long(gb, 32);
  1073. if (sconf->mc_coding && sconf->joint_stereo) {
  1074. ctx->js_switch = get_bits1(gb);
  1075. align_get_bits(gb);
  1076. }
  1077. if (!sconf->mc_coding || ctx->js_switch) {
  1078. int independent_bs = !sconf->joint_stereo;
  1079. for (c = 0; c < avctx->channels; c++) {
  1080. js_blocks[0] = 0;
  1081. js_blocks[1] = 0;
  1082. get_block_sizes(ctx, div_blocks, &bs_info);
  1083. // if joint_stereo and block_switching is set, independent decoding
  1084. // is signaled via the first bit of bs_info
  1085. if (sconf->joint_stereo && sconf->block_switching)
  1086. if (bs_info >> 31)
  1087. independent_bs = 2;
  1088. // if this is the last channel, it has to be decoded independently
  1089. if (c == avctx->channels - 1)
  1090. independent_bs = 1;
  1091. if (independent_bs) {
  1092. if (decode_blocks_ind(ctx, ra_frame, c, div_blocks, js_blocks))
  1093. return -1;
  1094. independent_bs--;
  1095. } else {
  1096. if (decode_blocks(ctx, ra_frame, c, div_blocks, js_blocks))
  1097. return -1;
  1098. c++;
  1099. }
  1100. // store carryover raw samples
  1101. memmove(ctx->raw_samples[c] - sconf->max_order,
  1102. ctx->raw_samples[c] - sconf->max_order + sconf->frame_length,
  1103. sizeof(*ctx->raw_samples[c]) * sconf->max_order);
  1104. }
  1105. } else { // multi-channel coding
  1106. ALSBlockData bd = { 0 };
  1107. int b, ret;
  1108. int *reverted_channels = ctx->reverted_channels;
  1109. unsigned int offset = 0;
  1110. for (c = 0; c < avctx->channels; c++)
  1111. if (ctx->chan_data[c] < ctx->chan_data_buffer) {
  1112. av_log(ctx->avctx, AV_LOG_ERROR, "Invalid channel data.\n");
  1113. return -1;
  1114. }
  1115. memset(reverted_channels, 0, sizeof(*reverted_channels) * avctx->channels);
  1116. bd.ra_block = ra_frame;
  1117. bd.prev_raw_samples = ctx->prev_raw_samples;
  1118. get_block_sizes(ctx, div_blocks, &bs_info);
  1119. for (b = 0; b < ctx->num_blocks; b++) {
  1120. bd.block_length = div_blocks[b];
  1121. for (c = 0; c < avctx->channels; c++) {
  1122. bd.const_block = ctx->const_block + c;
  1123. bd.shift_lsbs = ctx->shift_lsbs + c;
  1124. bd.opt_order = ctx->opt_order + c;
  1125. bd.store_prev_samples = ctx->store_prev_samples + c;
  1126. bd.use_ltp = ctx->use_ltp + c;
  1127. bd.ltp_lag = ctx->ltp_lag + c;
  1128. bd.ltp_gain = ctx->ltp_gain[c];
  1129. bd.lpc_cof = ctx->lpc_cof[c];
  1130. bd.quant_cof = ctx->quant_cof[c];
  1131. bd.raw_samples = ctx->raw_samples[c] + offset;
  1132. bd.raw_other = NULL;
  1133. if ((ret = read_block(ctx, &bd)) < 0)
  1134. return ret;
  1135. if ((ret = read_channel_data(ctx, ctx->chan_data[c], c)) < 0)
  1136. return ret;
  1137. }
  1138. for (c = 0; c < avctx->channels; c++)
  1139. if (revert_channel_correlation(ctx, &bd, ctx->chan_data,
  1140. reverted_channels, offset, c))
  1141. return -1;
  1142. for (c = 0; c < avctx->channels; c++) {
  1143. bd.const_block = ctx->const_block + c;
  1144. bd.shift_lsbs = ctx->shift_lsbs + c;
  1145. bd.opt_order = ctx->opt_order + c;
  1146. bd.store_prev_samples = ctx->store_prev_samples + c;
  1147. bd.use_ltp = ctx->use_ltp + c;
  1148. bd.ltp_lag = ctx->ltp_lag + c;
  1149. bd.ltp_gain = ctx->ltp_gain[c];
  1150. bd.lpc_cof = ctx->lpc_cof[c];
  1151. bd.quant_cof = ctx->quant_cof[c];
  1152. bd.raw_samples = ctx->raw_samples[c] + offset;
  1153. if ((ret = decode_block(ctx, &bd)) < 0)
  1154. return ret;
  1155. }
  1156. memset(reverted_channels, 0, avctx->channels * sizeof(*reverted_channels));
  1157. offset += div_blocks[b];
  1158. bd.ra_block = 0;
  1159. }
  1160. // store carryover raw samples
  1161. for (c = 0; c < avctx->channels; c++)
  1162. memmove(ctx->raw_samples[c] - sconf->max_order,
  1163. ctx->raw_samples[c] - sconf->max_order + sconf->frame_length,
  1164. sizeof(*ctx->raw_samples[c]) * sconf->max_order);
  1165. }
  1166. // TODO: read_diff_float_data
  1167. return 0;
  1168. }
  1169. /** Decode an ALS frame.
  1170. */
  1171. static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
  1172. AVPacket *avpkt)
  1173. {
  1174. ALSDecContext *ctx = avctx->priv_data;
  1175. ALSSpecificConfig *sconf = &ctx->sconf;
  1176. const uint8_t *buffer = avpkt->data;
  1177. int buffer_size = avpkt->size;
  1178. int invalid_frame, ret;
  1179. unsigned int c, sample, ra_frame, bytes_read, shift;
  1180. init_get_bits(&ctx->gb, buffer, buffer_size * 8);
  1181. // In the case that the distance between random access frames is set to zero
  1182. // (sconf->ra_distance == 0) no frame is treated as a random access frame.
  1183. // For the first frame, if prediction is used, all samples used from the
  1184. // previous frame are assumed to be zero.
  1185. ra_frame = sconf->ra_distance && !(ctx->frame_id % sconf->ra_distance);
  1186. // the last frame to decode might have a different length
  1187. if (sconf->samples != 0xFFFFFFFF)
  1188. ctx->cur_frame_length = FFMIN(sconf->samples - ctx->frame_id * (uint64_t) sconf->frame_length,
  1189. sconf->frame_length);
  1190. else
  1191. ctx->cur_frame_length = sconf->frame_length;
  1192. // decode the frame data
  1193. if ((invalid_frame = read_frame_data(ctx, ra_frame)) < 0)
  1194. av_log(ctx->avctx, AV_LOG_WARNING,
  1195. "Reading frame data failed. Skipping RA unit.\n");
  1196. ctx->frame_id++;
  1197. /* get output buffer */
  1198. ctx->frame.nb_samples = ctx->cur_frame_length;
  1199. if ((ret = ff_get_buffer(avctx, &ctx->frame)) < 0) {
  1200. av_log(avctx, AV_LOG_ERROR, "get_buffer() failed.\n");
  1201. return ret;
  1202. }
  1203. // transform decoded frame into output format
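// Samples are bits_per_raw_sample wide; shift them up so they occupy the
// most significant bits of the 16- or 32-bit output container.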
  1204. #define INTERLEAVE_OUTPUT(bps) \
  1205. { \
  1206. int##bps##_t *dest = (int##bps##_t*)ctx->frame.data[0]; \
  1207. shift = bps - ctx->avctx->bits_per_raw_sample; \
  1208. if (!ctx->cs_switch) { \
  1209. for (sample = 0; sample < ctx->cur_frame_length; sample++) \
  1210. for (c = 0; c < avctx->channels; c++) \
  1211. *dest++ = ctx->raw_samples[c][sample] << shift; \
  1212. } else { \
  1213. for (sample = 0; sample < ctx->cur_frame_length; sample++) \
  1214. for (c = 0; c < avctx->channels; c++) \
  1215. *dest++ = ctx->raw_samples[sconf->chan_pos[c]][sample] << shift; \
  1216. } \
  1217. }
  1218. if (ctx->avctx->bits_per_raw_sample <= 16) {
  1219. INTERLEAVE_OUTPUT(16)
  1220. } else {
  1221. INTERLEAVE_OUTPUT(32)
  1222. }
  1223. // update CRC
  1224. if (sconf->crc_enabled && (avctx->err_recognition & (AV_EF_CRCCHECK|AV_EF_CAREFUL))) {
  1225. int swap = HAVE_BIGENDIAN != sconf->msb_first;
  1226. if (ctx->avctx->bits_per_raw_sample == 24) {
  1227. int32_t *src = (int32_t *)ctx->frame.data[0];
  1228. for (sample = 0;
  1229. sample < ctx->cur_frame_length * avctx->channels;
  1230. sample++) {
  1231. int32_t v;
  1232. if (swap)
  1233. v = av_bswap32(src[sample]);
  1234. else
  1235. v = src[sample];
  1236. if (!HAVE_BIGENDIAN)
  1237. v >>= 8;
  1238. ctx->crc = av_crc(ctx->crc_table, ctx->crc, (uint8_t*)(&v), 3);
  1239. }
  1240. } else {
  1241. uint8_t *crc_source;
  1242. if (swap) {
  1243. if (ctx->avctx->bits_per_raw_sample <= 16) {
  1244. int16_t *src = (int16_t*) ctx->frame.data[0];
  1245. int16_t *dest = (int16_t*) ctx->crc_buffer;
  1246. for (sample = 0;
  1247. sample < ctx->cur_frame_length * avctx->channels;
  1248. sample++)
  1249. *dest++ = av_bswap16(src[sample]);
  1250. } else {
  1251. ctx->dsp.bswap_buf((uint32_t*)ctx->crc_buffer,
  1252. (uint32_t *)ctx->frame.data[0],
  1253. ctx->cur_frame_length * avctx->channels);
  1254. }
  1255. crc_source = ctx->crc_buffer;
  1256. } else {
  1257. crc_source = ctx->frame.data[0];
  1258. }
  1259. ctx->crc = av_crc(ctx->crc_table, ctx->crc, crc_source,
  1260. ctx->cur_frame_length * avctx->channels *
  1261. av_get_bytes_per_sample(avctx->sample_fmt));
  1262. }
  1263. // check CRC sums if this is the last frame
  1264. if (ctx->cur_frame_length != sconf->frame_length &&
  1265. ctx->crc_org != ctx->crc) {
  1266. av_log(avctx, AV_LOG_ERROR, "CRC error.\n");
  1267. }
  1268. }
  1269. *got_frame_ptr = 1;
  1270. *(AVFrame *)data = ctx->frame;
  1271. bytes_read = invalid_frame ? buffer_size :
  1272. (get_bits_count(&ctx->gb) + 7) >> 3;
  1273. return bytes_read;
  1274. }
  1275. /** Uninitialize the ALS decoder.
  1276. */
  1277. static av_cold int decode_end(AVCodecContext *avctx)
  1278. {
  1279. ALSDecContext *ctx = avctx->priv_data;
  1280. av_freep(&ctx->sconf.chan_pos);
  1281. ff_bgmc_end(&ctx->bgmc_lut, &ctx->bgmc_lut_status);
  1282. av_freep(&ctx->const_block);
  1283. av_freep(&ctx->shift_lsbs);
  1284. av_freep(&ctx->opt_order);
  1285. av_freep(&ctx->store_prev_samples);
  1286. av_freep(&ctx->use_ltp);
  1287. av_freep(&ctx->ltp_lag);
  1288. av_freep(&ctx->ltp_gain);
  1289. av_freep(&ctx->ltp_gain_buffer);
  1290. av_freep(&ctx->quant_cof);
  1291. av_freep(&ctx->lpc_cof);
  1292. av_freep(&ctx->quant_cof_buffer);
  1293. av_freep(&ctx->lpc_cof_buffer);
  1294. av_freep(&ctx->lpc_cof_reversed_buffer);
  1295. av_freep(&ctx->prev_raw_samples);
  1296. av_freep(&ctx->raw_samples);
  1297. av_freep(&ctx->raw_buffer);
  1298. av_freep(&ctx->chan_data);
  1299. av_freep(&ctx->chan_data_buffer);
  1300. av_freep(&ctx->reverted_channels);
  1301. av_freep(&ctx->crc_buffer);
  1302. return 0;
  1303. }
  1304. /** Initialize the ALS decoder.
  1305. */
  1306. static av_cold int decode_init(AVCodecContext *avctx)
  1307. {
  1308. unsigned int c;
  1309. unsigned int channel_size;
  1310. int num_buffers;
  1311. ALSDecContext *ctx = avctx->priv_data;
  1312. ALSSpecificConfig *sconf = &ctx->sconf;
  1313. ctx->avctx = avctx;
  1314. if (!avctx->extradata) {
  1315. av_log(avctx, AV_LOG_ERROR, "Missing required ALS extradata.\n");
  1316. return -1;
  1317. }
  1318. if (read_specific_config(ctx)) {
  1319. av_log(avctx, AV_LOG_ERROR, "Reading ALSSpecificConfig failed.\n");
  1320. decode_end(avctx);
  1321. return -1;
  1322. }
  1323. if (check_specific_config(ctx)) {
  1324. decode_end(avctx);
  1325. return -1;
  1326. }
  1327. if (sconf->bgmc)
  1328. ff_bgmc_init(avctx, &ctx->bgmc_lut, &ctx->bgmc_lut_status);
  1329. if (sconf->floating) {
  1330. avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
  1331. avctx->bits_per_raw_sample = 32;
  1332. } else {
  1333. avctx->sample_fmt = sconf->resolution > 1
  1334. ? AV_SAMPLE_FMT_S32 : AV_SAMPLE_FMT_S16;
  1335. avctx->bits_per_raw_sample = (sconf->resolution + 1) * 8;
  1336. }
  1337. // set maximum Rice parameter for progressive decoding based on resolution
  1338. // This is not specified in 14496-3 but actually done by the reference
  1339. // codec RM22 revision 2.
  1340. ctx->s_max = sconf->resolution > 1 ? 31 : 15;
  1341. // set lag value for long-term prediction
  1342. ctx->ltp_lag_length = 8 + (avctx->sample_rate >= 96000) +
  1343. (avctx->sample_rate >= 192000);
  1344. // allocate quantized parcor coefficient buffer
  1345. num_buffers = sconf->mc_coding ? avctx->channels : 1;
  1346. ctx->quant_cof = av_malloc(sizeof(*ctx->quant_cof) * num_buffers);
  1347. ctx->lpc_cof = av_malloc(sizeof(*ctx->lpc_cof) * num_buffers);
  1348. ctx->quant_cof_buffer = av_malloc(sizeof(*ctx->quant_cof_buffer) *
  1349. num_buffers * sconf->max_order);
  1350. ctx->lpc_cof_buffer = av_malloc(sizeof(*ctx->lpc_cof_buffer) *
  1351. num_buffers * sconf->max_order);
  1352. ctx->lpc_cof_reversed_buffer = av_malloc(sizeof(*ctx->lpc_cof_buffer) *
  1353. sconf->max_order);
  1354. if (!ctx->quant_cof || !ctx->lpc_cof ||
  1355. !ctx->quant_cof_buffer || !ctx->lpc_cof_buffer ||
  1356. !ctx->lpc_cof_reversed_buffer) {
  1357. av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
  1358. return AVERROR(ENOMEM);
  1359. }
  1360. // assign quantized parcor coefficient buffers
  1361. for (c = 0; c < num_buffers; c++) {
  1362. ctx->quant_cof[c] = ctx->quant_cof_buffer + c * sconf->max_order;
  1363. ctx->lpc_cof[c] = ctx->lpc_cof_buffer + c * sconf->max_order;
  1364. }
  1365. // allocate and assign lag and gain data buffer for ltp mode
  1366. ctx->const_block = av_malloc (sizeof(*ctx->const_block) * num_buffers);
  1367. ctx->shift_lsbs = av_malloc (sizeof(*ctx->shift_lsbs) * num_buffers);
  1368. ctx->opt_order = av_malloc (sizeof(*ctx->opt_order) * num_buffers);
  1369. ctx->store_prev_samples = av_malloc(sizeof(*ctx->store_prev_samples) * num_buffers);
  1370. ctx->use_ltp = av_mallocz(sizeof(*ctx->use_ltp) * num_buffers);
  1371. ctx->ltp_lag = av_malloc (sizeof(*ctx->ltp_lag) * num_buffers);
  1372. ctx->ltp_gain = av_malloc (sizeof(*ctx->ltp_gain) * num_buffers);
  1373. ctx->ltp_gain_buffer = av_malloc (sizeof(*ctx->ltp_gain_buffer) *
  1374. num_buffers * 5);
  1375. if (!ctx->const_block || !ctx->shift_lsbs ||
  1376. !ctx->opt_order || !ctx->store_prev_samples ||
  1377. !ctx->use_ltp || !ctx->ltp_lag ||
  1378. !ctx->ltp_gain || !ctx->ltp_gain_buffer) {
  1379. av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
  1380. decode_end(avctx);
  1381. return AVERROR(ENOMEM);
  1382. }
  1383. for (c = 0; c < num_buffers; c++)
  1384. ctx->ltp_gain[c] = ctx->ltp_gain_buffer + c * 5;
  1385. // allocate and assign channel data buffer for mcc mode
  1386. if (sconf->mc_coding) {
  1387. ctx->chan_data_buffer = av_malloc(sizeof(*ctx->chan_data_buffer) *
  1388. num_buffers * num_buffers);
  1389. ctx->chan_data = av_malloc(sizeof(*ctx->chan_data) *
  1390. num_buffers);
  1391. ctx->reverted_channels = av_malloc(sizeof(*ctx->reverted_channels) *
  1392. num_buffers);
  1393. if (!ctx->chan_data_buffer || !ctx->chan_data || !ctx->reverted_channels) {
  1394. av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
  1395. decode_end(avctx);
  1396. return AVERROR(ENOMEM);
  1397. }
  1398. for (c = 0; c < num_buffers; c++)
  1399. ctx->chan_data[c] = ctx->chan_data_buffer + c * num_buffers;
  1400. } else {
  1401. ctx->chan_data = NULL;
  1402. ctx->chan_data_buffer = NULL;
  1403. ctx->reverted_channels = NULL;
  1404. }
  1405. channel_size = sconf->frame_length + sconf->max_order;
  1406. ctx->prev_raw_samples = av_malloc (sizeof(*ctx->prev_raw_samples) * sconf->max_order);
1407. ctx->raw_buffer = av_mallocz(sizeof(*ctx->raw_buffer) * avctx->channels * channel_size);
1408. ctx->raw_samples = av_malloc (sizeof(*ctx->raw_samples) * avctx->channels);
1409. // allocate previous raw sample buffer
1410. if (!ctx->prev_raw_samples || !ctx->raw_buffer || !ctx->raw_samples) {
  1411. av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
  1412. decode_end(avctx);
  1413. return AVERROR(ENOMEM);
  1414. }
  1415. // assign raw samples buffers
  1416. ctx->raw_samples[0] = ctx->raw_buffer + sconf->max_order;
  1417. for (c = 1; c < avctx->channels; c++)
  1418. ctx->raw_samples[c] = ctx->raw_samples[c - 1] + channel_size;
  1419. // allocate crc buffer
  1420. if (HAVE_BIGENDIAN != sconf->msb_first && sconf->crc_enabled &&
  1421. (avctx->err_recognition & (AV_EF_CRCCHECK|AV_EF_CAREFUL))) {
  1422. ctx->crc_buffer = av_malloc(sizeof(*ctx->crc_buffer) *
  1423. ctx->cur_frame_length *
  1424. avctx->channels *
  1425. av_get_bytes_per_sample(avctx->sample_fmt));
  1426. if (!ctx->crc_buffer) {
  1427. av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
  1428. decode_end(avctx);
  1429. return AVERROR(ENOMEM);
  1430. }
  1431. }
  1432. ff_dsputil_init(&ctx->dsp, avctx);
  1433. avcodec_get_frame_defaults(&ctx->frame);
  1434. avctx->coded_frame = &ctx->frame;
  1435. return 0;
  1436. }
  1437. /** Flush (reset) the frame ID after seeking.
  1438. */
  1439. static av_cold void flush(AVCodecContext *avctx)
  1440. {
  1441. ALSDecContext *ctx = avctx->priv_data;
  1442. ctx->frame_id = 0;
  1443. }
  1444. AVCodec ff_als_decoder = {
  1445. .name = "als",
  1446. .type = AVMEDIA_TYPE_AUDIO,
  1447. .id = AV_CODEC_ID_MP4ALS,
  1448. .priv_data_size = sizeof(ALSDecContext),
  1449. .init = decode_init,
  1450. .close = decode_end,
  1451. .decode = decode_frame,
  1452. .flush = flush,
  1453. .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
  1454. .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Audio Lossless Coding (ALS)"),
  1455. };