  1. /*
  2. * Wmall compatible decoder
  3. * Copyright (c) 2007 Baptiste Coudurier, Benjamin Larsson, Ulion
  4. * Copyright (c) 2008 - 2011 Sascha Sommer, Benjamin Larsson
  5. * Copyright (c) 2011 Andreas Öman
  6. *
  7. * This file is part of FFmpeg.
  8. *
  9. * FFmpeg is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * FFmpeg is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with FFmpeg; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. /**
  24. * @file
  25. * @brief wmall decoder implementation
  26. * Wmall is an MDCT-based codec comparable to standard WMA or AAC.
  27. * The decoding therefore consists of the following steps:
  28. * - bitstream decoding
  29. * - reconstruction of per-channel data
  30. * - rescaling and inverse quantization
  31. * - IMDCT
  32. * windowing and overlap-add
  33. *
  34. * The compressed wmall bitstream is split into individual packets.
  35. * Every such packet contains one or more wma frames.
  36. * The compressed frames may have a variable length and frames may
  37. * cross packet boundaries.
  38. * Common to all wmall frames is the number of samples that are stored in
  39. * a frame.
  40. * The number of samples and a few other decode flags are stored
  41. * as extradata that has to be passed to the decoder.
  42. *
  43. * The wmall frames themselves are again split into a variable number of
  44. * subframes. Every subframe contains the data for 2^N time domain samples
  45. * where N varies between 7 and 12.
  46. *
  47. * Example wmall bitstream (in samples):
  48. *
  49. * ||   packet 0          || packet 1     || packet 2     packets
  50. * ---------------------------------------------------
  51. * || frame 0      || frame 1       || frame 2    ||    frames
  52. * ---------------------------------------------------
  53. * ||   |      |   ||   |   |   |   ||            ||    subframes of channel 0
  54. * ---------------------------------------------------
  55. * ||   |      |   ||   |   |   |   ||            ||    subframes of channel 1
  56. * ---------------------------------------------------
  57. *
  58. * The frame layouts for the individual channels of a wma frame do not need
  59. * to be the same.
  60. *
  61. * However, if the offsets and lengths of several subframes of a frame are the
  62. * same, the subframes of the channels can be grouped.
  63. * Every group may then use special coding techniques like M/S stereo coding
  64. * to improve the compression ratio. These channel transformations do not
  65. * need to be applied to a whole subframe. Instead, they can also work on
  66. * individual scale factor bands (see below).
  67. * The coefficients that carry the audio signal in the frequency domain
  68. * are transmitted as huffman-coded vectors with 4, 2 and 1 elements.
  69. * In addition to that, the encoder can switch to a runlevel coding scheme
  70. * by transmitting subframe_length / 128 zero coefficients.
  71. *
  72. * Before the audio signal can be converted to the time domain, the
  73. * coefficients have to be rescaled and inverse quantized.
  74. * A subframe is therefore split into several scale factor bands that get
  75. * scaled individually.
  76. * Scale factors are submitted for every frame but they might be shared
  77. * between the subframes of a channel. Scale factors are initially DPCM-coded.
  78. * Once scale factors are shared, the differences are transmitted as runlevel
  79. * codes.
  80. * Every subframe length and offset combination in the frame layout shares a
  81. * common quantization factor that can be adjusted for every channel by a
  82. * modifier.
  83. * After the inverse quantization, the coefficients get processed by an IMDCT.
  84. * The resulting values are then windowed with a sine window and the first half
  85. * of the values are added to the second half of the output from the previous
  86. * subframe in order to reconstruct the output samples.
  87. */
  88. #include "avcodec.h"
  89. #include "internal.h"
  90. #include "get_bits.h"
  91. #include "put_bits.h"
  92. #include "dsputil.h"
  93. #include "wma.h"
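/*
 * Editorial sketch (not part of the decoder; all names are hypothetical):
 * the windowing and overlap-add step described at the end of the header
 * comment above, assuming 'win' already holds a sine window of length
 * 'half_len' and 'prev_tail' holds the second half of the previous
 * subframe's output.
 */
static void example_overlap_add(float *out, const float *imdct_first_half,
                                const float *prev_tail, const float *win,
                                int half_len)
{
    int i;
    for (i = 0; i < half_len; i++)
        /* window the current values and add the overlapping tail of the
           previous subframe to reconstruct half_len output samples */
        out[i] = prev_tail[i] + imdct_first_half[i] * win[i];
}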
  94. /** current decoder limitations */
  95. #define WMALL_MAX_CHANNELS 8 ///< max number of handled channels
  96. #define MAX_SUBFRAMES 32 ///< max number of subframes per channel
  97. #define MAX_BANDS 29 ///< max number of scale factor bands
  98. #define MAX_FRAMESIZE 32768 ///< maximum compressed frame size
  99. #define WMALL_BLOCK_MIN_BITS 6 ///< log2 of min block size
  100. #define WMALL_BLOCK_MAX_BITS 12 ///< log2 of max block size
  101. #define WMALL_BLOCK_MAX_SIZE (1 << WMALL_BLOCK_MAX_BITS) ///< maximum block size
  102. #define WMALL_BLOCK_SIZES (WMALL_BLOCK_MAX_BITS - WMALL_BLOCK_MIN_BITS + 1) ///< possible block sizes
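/* With the limits above this yields 12 - 6 + 1 = 7 possible block sizes,
   i.e. blocks of 64 up to 4096 samples. */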
  103. #define VLCBITS 9
  104. #define SCALEVLCBITS 8
  105. #define VEC4MAXDEPTH ((HUFF_VEC4_MAXBITS+VLCBITS-1)/VLCBITS)
  106. #define VEC2MAXDEPTH ((HUFF_VEC2_MAXBITS+VLCBITS-1)/VLCBITS)
  107. #define VEC1MAXDEPTH ((HUFF_VEC1_MAXBITS+VLCBITS-1)/VLCBITS)
  108. #define SCALEMAXDEPTH ((HUFF_SCALE_MAXBITS+SCALEVLCBITS-1)/SCALEVLCBITS)
  109. #define SCALERLMAXDEPTH ((HUFF_SCALE_RL_MAXBITS+VLCBITS-1)/VLCBITS)
  110. static float sin64[33]; ///< sine table for decorrelation
  111. /**
  112. * @brief frame specific decoder context for a single channel
  113. */
  114. typedef struct {
  115. int16_t prev_block_len; ///< length of the previous block
  116. uint8_t transmit_coefs;
  117. uint8_t num_subframes;
  118. uint16_t subframe_len[MAX_SUBFRAMES]; ///< subframe length in samples
  119. uint16_t subframe_offset[MAX_SUBFRAMES]; ///< subframe positions in the current frame
  120. uint8_t cur_subframe; ///< current subframe number
  121. uint16_t decoded_samples; ///< number of already processed samples
  122. uint8_t grouped; ///< channel is part of a group
  123. int quant_step; ///< quantization step for the current subframe
  124. int8_t reuse_sf; ///< share scale factors between subframes
  125. int8_t scale_factor_step; ///< scaling step for the current subframe
  126. int max_scale_factor; ///< maximum scale factor for the current subframe
  127. int saved_scale_factors[2][MAX_BANDS]; ///< resampled and (previously) transmitted scale factor values
  128. int8_t scale_factor_idx; ///< index for the transmitted scale factor values (used for resampling)
  129. int* scale_factors; ///< pointer to the scale factor values used for decoding
  130. uint8_t table_idx; ///< index in sf_offsets for the scale factor reference block
  131. float* coeffs; ///< pointer to the subframe decode buffer
  132. uint16_t num_vec_coeffs; ///< number of vector coded coefficients
  133. DECLARE_ALIGNED(16, float, out)[WMALL_BLOCK_MAX_SIZE + WMALL_BLOCK_MAX_SIZE / 2]; ///< output buffer
  134. } WmallChannelCtx;
  135. /* XXX: probably we don't need subframe_config[],
  136. WmallChannelCtx holds all the necessary data. */
  137. /**
  138. * @brief channel group for channel transformations
  139. */
  140. typedef struct {
  141. uint8_t num_channels; ///< number of channels in the group
  142. int8_t transform; ///< transform on / off
  143. int8_t transform_band[MAX_BANDS]; ///< controls if the transform is enabled for a certain band
  144. float decorrelation_matrix[WMALL_MAX_CHANNELS*WMALL_MAX_CHANNELS];
  145. float* channel_data[WMALL_MAX_CHANNELS]; ///< transformation coefficients
  146. } WmallChannelGrp;
  147. /**
  148. * @brief main decoder context
  149. */
  150. typedef struct WmallDecodeCtx {
  151. /* generic decoder variables */
  152. AVCodecContext* avctx; ///< codec context for av_log
  153. DSPContext dsp; ///< accelerated DSP functions
  154. uint8_t frame_data[MAX_FRAMESIZE +
  155. FF_INPUT_BUFFER_PADDING_SIZE];///< compressed frame data
  156. PutBitContext pb; ///< context for filling the frame_data buffer
  157. FFTContext mdct_ctx[WMALL_BLOCK_SIZES]; ///< MDCT context per block size
  158. DECLARE_ALIGNED(16, float, tmp)[WMALL_BLOCK_MAX_SIZE]; ///< IMDCT output buffer
  159. float* windows[WMALL_BLOCK_SIZES]; ///< windows for the different block sizes
  160. /* frame size dependent frame information (set during initialization) */
  161. uint32_t decode_flags; ///< used compression features
  162. uint8_t len_prefix; ///< frame is prefixed with its length
  163. uint8_t dynamic_range_compression; ///< frame contains DRC data
  164. uint8_t bits_per_sample; ///< integer audio sample size for the unscaled IMDCT output (used to scale to [-1.0, 1.0])
  165. uint16_t samples_per_frame; ///< number of samples to output
  166. uint16_t log2_frame_size;
  167. int8_t num_channels; ///< number of channels in the stream (same as AVCodecContext.channels)
  168. int8_t lfe_channel; ///< lfe channel index
  169. uint8_t max_num_subframes;
  170. uint8_t subframe_len_bits; ///< number of bits used for the subframe length
  171. uint8_t max_subframe_len_bit; ///< flag indicating that the subframe is of maximum size when the first subframe length bit is 1
  172. uint16_t min_samples_per_subframe;
  173. int8_t num_sfb[WMALL_BLOCK_SIZES]; ///< scale factor bands per block size
  174. int16_t sfb_offsets[WMALL_BLOCK_SIZES][MAX_BANDS]; ///< scale factor band offsets (multiples of 4)
  175. int8_t sf_offsets[WMALL_BLOCK_SIZES][WMALL_BLOCK_SIZES][MAX_BANDS]; ///< scale factor resample matrix
  176. int16_t subwoofer_cutoffs[WMALL_BLOCK_SIZES]; ///< subwoofer cutoff values
  177. /* packet decode state */
  178. GetBitContext pgb; ///< bitstream reader context for the packet
  179. int next_packet_start; ///< start offset of the next wma packet in the demuxer packet
  180. uint8_t packet_offset; ///< frame offset in the packet
  181. uint8_t packet_sequence_number; ///< current packet number
  182. int num_saved_bits; ///< saved number of bits
  183. int frame_offset; ///< frame offset in the bit reservoir
  184. int subframe_offset; ///< subframe offset in the bit reservoir
  185. uint8_t packet_loss; ///< set in case of bitstream error
  186. uint8_t packet_done; ///< set when a packet is fully decoded
  187. /* frame decode state */
  188. uint32_t frame_num; ///< current frame number (not used for decoding)
  189. GetBitContext gb; ///< bitstream reader context
  190. int buf_bit_size; ///< buffer size in bits
  191. float* samples; ///< current samplebuffer pointer
  192. float* samples_end; ///< maximum samplebuffer pointer
  193. uint8_t drc_gain; ///< gain for the DRC tool
  194. int8_t skip_frame; ///< skip output step
  195. int8_t parsed_all_subframes; ///< all subframes decoded?
  196. /* subframe/block decode state */
  197. int16_t subframe_len; ///< current subframe length
  198. int8_t channels_for_cur_subframe; ///< number of channels that contain the subframe
  199. int8_t channel_indexes_for_cur_subframe[WMALL_MAX_CHANNELS];
  200. int8_t num_bands; ///< number of scale factor bands
  201. int8_t transmit_num_vec_coeffs; ///< number of vector coded coefficients is part of the bitstream
  202. int16_t* cur_sfb_offsets; ///< sfb offsets for the current block
  203. uint8_t table_idx; ///< index for the num_sfb, sfb_offsets, sf_offsets and subwoofer_cutoffs tables
  204. int8_t esc_len; ///< length of escaped coefficients
  205. uint8_t num_chgroups; ///< number of channel groups
  206. WmallChannelGrp chgroup[WMALL_MAX_CHANNELS]; ///< channel group information
  207. WmallChannelCtx channel[WMALL_MAX_CHANNELS]; ///< per channel data
  208. // WMA lossless
  209. uint8_t do_arith_coding;
  210. uint8_t do_ac_filter;
  211. uint8_t do_inter_ch_decorr;
  212. uint8_t do_mclms;
  213. uint8_t do_lpc;
  214. int8_t acfilter_order;
  215. int8_t acfilter_scaling;
  216. int acfilter_coeffs[16];
  217. int8_t mclms_order;
  218. int8_t mclms_scaling;
  219. int16_t mclms_coeffs[128];
  220. int16_t mclms_coeffs_cur[4];
  221. int mclms_prevvalues[64]; // FIXME: should be 32-bit / 16-bit depending on bit-depth
  222. int16_t mclms_updates[64];
  223. int mclms_recent;
  224. int movave_scaling;
  225. int quant_stepsize;
  226. struct {
  227. int order;
  228. int scaling;
  229. int coefsend;
  230. int bitsend;
  231. int16_t coefs[256];
  232. int lms_prevvalues[512]; // FIXME: see above
  233. int16_t lms_updates[512]; // and here too
  234. int recent;
  235. } cdlms[2][9]; /* XXX: Here, 2 is the max. no. of channels allowed,
  236. 9 is the maximum no. of filters per channel.
  237. Question is, why 2 if WMALL_MAX_CHANNELS == 8 */
  238. int cdlms_ttl[2];
  239. int bV3RTM;
  240. int is_channel_coded[2]; // XXX: same question as above applies here too (and below)
  241. int update_speed[2];
  242. int transient[2];
  243. int transient_pos[2];
  244. int seekable_tile;
  245. int ave_sum[2];
  246. int channel_residues[2][2048];
  247. int lpc_coefs[2][40];
  248. int lpc_order;
  249. int lpc_scaling;
  250. int lpc_intbits;
  251. int channel_coeffs[2][2048];
  252. } WmallDecodeCtx;
  253. #undef dprintf
  254. #define dprintf(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__)
  255. static int num_logged_tiles = 0;
  256. /**
  257. *@brief helper function to print the most important members of the context
  258. *@param s context
  259. */
  260. static void av_cold dump_context(WmallDecodeCtx *s)
  261. {
  262. #define PRINT(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %d\n", a, b);
  263. #define PRINT_HEX(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %x\n", a, b);
  264. PRINT("ed sample bit depth", s->bits_per_sample);
  265. PRINT_HEX("ed decode flags", s->decode_flags);
  266. PRINT("samples per frame", s->samples_per_frame);
  267. PRINT("log2 frame size", s->log2_frame_size);
  268. PRINT("max num subframes", s->max_num_subframes);
  269. PRINT("len prefix", s->len_prefix);
  270. PRINT("num channels", s->num_channels);
  271. }
  272. /**
  273. *@brief Uninitialize the decoder and free all resources.
  274. *@param avctx codec context
  275. *@return 0 on success, < 0 otherwise
  276. */
  277. static av_cold int decode_end(AVCodecContext *avctx)
  278. {
  279. WmallDecodeCtx *s = avctx->priv_data;
  280. int i;
  281. for (i = 0; i < WMALL_BLOCK_SIZES; i++)
  282. ff_mdct_end(&s->mdct_ctx[i]);
  283. return 0;
  284. }
  285. /**
  286. *@brief Initialize the decoder.
  287. *@param avctx codec context
  288. *@return 0 on success, -1 otherwise
  289. */
  290. static av_cold int decode_init(AVCodecContext *avctx)
  291. {
  292. WmallDecodeCtx *s = avctx->priv_data;
  293. uint8_t *edata_ptr = avctx->extradata;
  294. unsigned int channel_mask;
  295. int i;
  296. int log2_max_num_subframes;
  297. int num_possible_block_sizes;
  298. s->avctx = avctx;
  299. dsputil_init(&s->dsp, avctx);
  300. init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
  301. avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
  302. if (avctx->extradata_size >= 18) {
  303. s->decode_flags = AV_RL16(edata_ptr+14);
  304. channel_mask = AV_RL32(edata_ptr+2);
  305. s->bits_per_sample = AV_RL16(edata_ptr);
  306. /** dump the extradata */
  307. for (i = 0; i < avctx->extradata_size; i++)
  308. dprintf(avctx, "[%x] ", avctx->extradata[i]);
  309. dprintf(avctx, "\n");
  310. } else {
  311. av_log_ask_for_sample(avctx, "Unknown extradata size\n");
  312. return AVERROR_INVALIDDATA;
  313. }
  314. /** generic init */
  315. s->log2_frame_size = av_log2(avctx->block_align) + 4;
  316. /** frame info */
  317. s->skip_frame = 1; /* skip first frame */
  318. s->packet_loss = 1;
  319. s->len_prefix = (s->decode_flags & 0x40);
  320. /** get frame len */
  321. s->samples_per_frame = 1 << ff_wma_get_frame_len_bits(avctx->sample_rate,
  322. 3, s->decode_flags);
  323. /** init previous block len */
  324. for (i = 0; i < avctx->channels; i++)
  325. s->channel[i].prev_block_len = s->samples_per_frame;
  326. /** subframe info */
  327. log2_max_num_subframes = ((s->decode_flags & 0x38) >> 3);
  328. s->max_num_subframes = 1 << log2_max_num_subframes;
  329. s->max_subframe_len_bit = 0;
  330. s->subframe_len_bits = av_log2(log2_max_num_subframes) + 1;
  331. num_possible_block_sizes = log2_max_num_subframes + 1;
  332. s->min_samples_per_subframe = s->samples_per_frame / s->max_num_subframes;
  333. s->dynamic_range_compression = (s->decode_flags & 0x80);
  334. s->bV3RTM = s->decode_flags & 0x100;
  335. if (s->max_num_subframes > MAX_SUBFRAMES) {
  336. av_log(avctx, AV_LOG_ERROR, "invalid number of subframes %i\n",
  337. s->max_num_subframes);
  338. return AVERROR_INVALIDDATA;
  339. }
  340. s->num_channels = avctx->channels;
  341. /** extract lfe channel position */
  342. s->lfe_channel = -1;
  343. if (channel_mask & 8) {
  344. unsigned int mask;
  345. for (mask = 1; mask < 16; mask <<= 1) {
  346. if (channel_mask & mask)
  347. ++s->lfe_channel;
  348. }
  349. }
  350. if (s->num_channels < 0) {
  351. av_log(avctx, AV_LOG_ERROR, "invalid number of channels %d\n", s->num_channels);
  352. return AVERROR_INVALIDDATA;
  353. } else if (s->num_channels > WMALL_MAX_CHANNELS) {
  354. av_log_ask_for_sample(avctx, "unsupported number of channels\n");
  355. return AVERROR_PATCHWELCOME;
  356. }
  357. avctx->channel_layout = channel_mask;
  358. return 0;
  359. }
  360. /**
  361. *@brief Decode the subframe length.
  362. *@param s context
  363. *@param offset sample offset in the frame
  364. *@return decoded subframe length on success, < 0 in case of an error
  365. */
  366. static int decode_subframe_length(WmallDecodeCtx *s, int offset)
  367. {
  368. int frame_len_ratio;
  369. int subframe_len, len;
  370. /** no need to read from the bitstream when only one length is possible */
  371. if (offset == s->samples_per_frame - s->min_samples_per_subframe)
  372. return s->min_samples_per_subframe;
  373. len = av_log2(s->max_num_subframes - 1) + 1; // XXX: 5.3.3
  374. frame_len_ratio = get_bits(&s->gb, len); // XXX: tile_size_ratio
  375. subframe_len = s->min_samples_per_subframe * (frame_len_ratio + 1);
  376. /** sanity check the length */
  377. if (subframe_len < s->min_samples_per_subframe ||
  378. subframe_len > s->samples_per_frame) {
  379. av_log(s->avctx, AV_LOG_ERROR, "broken frame: subframe_len %i\n",
  380. subframe_len);
  381. return AVERROR_INVALIDDATA;
  382. }
  383. return subframe_len;
  384. }
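/*
 * Worked example with hypothetical values: for samples_per_frame = 2048 and
 * max_num_subframes = 16, min_samples_per_subframe is 128, the ratio field
 * is read with av_log2(16 - 1) + 1 = 4 bits, and the resulting subframe_len
 * of 128 * (frame_len_ratio + 1) is a multiple of 128 between 128 and 2048.
 */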
  385. /**
  386. *@brief Decode how the data in the frame is split into subframes.
  387. * Every WMA frame contains the encoded data for a fixed number of
  388. * samples per channel. The data for every channel might be split
  389. * into several subframes. This function will reconstruct the list of
  390. * subframes for every channel.
  391. *
  392. * If the subframes are not evenly split, the algorithm estimates the
  393. * channels with the lowest number of total samples.
  394. * Afterwards, for each of these channels a bit is read from the
  395. * bitstream that indicates if the channel contains a subframe with the
  396. * next subframe size that is going to be read from the bitstream or not.
  397. * If a channel contains such a subframe, the subframe size gets added to
  398. * the channel's subframe list.
  399. * The algorithm repeats these steps until the frame is properly divided
  400. * between the individual channels.
  401. *
  402. *@param s context
  403. *@return 0 on success, < 0 in case of an error
  404. */
  405. static int decode_tilehdr(WmallDecodeCtx *s) /* XXX: decode_tile_configuration() [Table 9] */
  406. {
  407. uint16_t num_samples[WMALL_MAX_CHANNELS]; /**< sum of samples for all currently known subframes of a channel */
  408. uint8_t contains_subframe[WMALL_MAX_CHANNELS]; /**< flag indicating if a channel contains the current subframe */
  409. int channels_for_cur_subframe = s->num_channels; /**< number of channels that contain the current subframe */
  410. int fixed_channel_layout = 0; /**< flag indicating that all channels use the same subframe offsets and sizes */
  411. int min_channel_len = 0; /**< smallest sum of samples (channels with this length will be processed first) */
  412. int c;
  413. /* Should never consume more than 3073 bits (256 iterations for the
  414. * while loop when the minimum amount of 128 samples is always subtracted
  415. * from the missing samples in the 8-channel case).
  416. * 1 + BLOCK_MAX_SIZE * MAX_CHANNELS / BLOCK_MIN_SIZE * (MAX_CHANNELS + 4)
  417. */
  418. /** reset tiling information */
  419. for (c = 0; c < s->num_channels; c++)
  420. s->channel[c].num_subframes = 0;
  421. memset(num_samples, 0, sizeof(num_samples));
  422. if (s->max_num_subframes == 1 || get_bits1(&s->gb)) // XXX: locate in the spec
  423. fixed_channel_layout = 1; // XXX: tile_aligned ?
  424. /** loop until the frame data is split between the subframes */
  425. do {
  426. int subframe_len;
  427. /** check which channels contain the subframe */
  428. for (c = 0; c < s->num_channels; c++) {
  429. if (num_samples[c] == min_channel_len) {
  430. if (fixed_channel_layout || channels_for_cur_subframe == 1 ||
  431. (min_channel_len == s->samples_per_frame - s->min_samples_per_subframe)) {
  432. contains_subframe[c] = 1;
  433. }
  434. else {
  435. contains_subframe[c] = get_bits1(&s->gb); // XXX: locate in the spec
  436. }
  437. } else
  438. contains_subframe[c] = 0;
  439. }
  440. /** get subframe length, subframe_len == 0 is not allowed */
  441. if ((subframe_len = decode_subframe_length(s, min_channel_len)) <= 0) //XXX: this reads tile_size_ratio
  442. return AVERROR_INVALIDDATA;
  443. /** add subframes to the individual channels and find new min_channel_len */
  444. min_channel_len += subframe_len;
  445. for (c = 0; c < s->num_channels; c++) {
  446. WmallChannelCtx* chan = &s->channel[c];
  447. if (contains_subframe[c]) {
  448. if (chan->num_subframes >= MAX_SUBFRAMES) {
  449. av_log(s->avctx, AV_LOG_ERROR,
  450. "broken frame: num subframes > 31\n");
  451. return AVERROR_INVALIDDATA;
  452. }
  453. chan->subframe_len[chan->num_subframes] = subframe_len;
  454. num_samples[c] += subframe_len;
  455. ++chan->num_subframes;
  456. if (num_samples[c] > s->samples_per_frame) {
  457. av_log(s->avctx, AV_LOG_ERROR, "broken frame: "
  458. "channel len(%d) > samples_per_frame(%d)\n",
  459. num_samples[c], s->samples_per_frame);
  460. return AVERROR_INVALIDDATA;
  461. }
  462. } else if (num_samples[c] <= min_channel_len) {
  463. if (num_samples[c] < min_channel_len) {
  464. channels_for_cur_subframe = 0;
  465. min_channel_len = num_samples[c];
  466. }
  467. ++channels_for_cur_subframe;
  468. }
  469. }
  470. } while (min_channel_len < s->samples_per_frame);
  471. for (c = 0; c < s->num_channels; c++) {
  472. int i;
  473. int offset = 0;
  474. for (i = 0; i < s->channel[c].num_subframes; i++) {
  475. s->channel[c].subframe_offset[i] = offset;
  476. offset += s->channel[c].subframe_len[i];
  477. }
  478. }
  479. return 0;
  480. }
  481. static int my_log2(unsigned int i)
  482. {
  483. unsigned int iLog2 = 0;
  484. while ((i >> iLog2) > 1)
  485. iLog2++;
  486. return iLog2;
  487. }
  488. /**
  489. *@brief Decode the AC filter order, scaling and coefficients.
  490. */
  491. static void decode_ac_filter(WmallDecodeCtx *s)
  492. {
  493. int i;
  494. s->acfilter_order = get_bits(&s->gb, 4) + 1;
  495. s->acfilter_scaling = get_bits(&s->gb, 4);
  496. for(i = 0; i < s->acfilter_order; i++) {
  497. s->acfilter_coeffs[i] = (s->acfilter_scaling ? get_bits(&s->gb, s->acfilter_scaling) : 0) + 1; /* avoid a zero-bit get_bits() call */
  498. }
  499. }
  500. /**
  501. *@brief Decode the MCLMS filter order, scaling and coefficients.
  502. */
  503. static void decode_mclms(WmallDecodeCtx *s)
  504. {
  505. s->mclms_order = (get_bits(&s->gb, 4) + 1) * 2;
  506. s->mclms_scaling = get_bits(&s->gb, 4);
  507. if(get_bits1(&s->gb)) {
  508. // mclms_send_coef
  509. int i;
  510. int send_coef_bits;
  511. int cbits = av_log2(s->mclms_scaling + 1);
  512. assert(cbits == my_log2(s->mclms_scaling + 1));
  513. if(1 << cbits < s->mclms_scaling + 1)
  514. cbits++;
  515. send_coef_bits = (cbits ? get_bits(&s->gb, cbits) : 0) + 2;
  516. for(i = 0; i < s->mclms_order * s->num_channels * s->num_channels; i++) {
  517. s->mclms_coeffs[i] = get_bits(&s->gb, send_coef_bits);
  518. }
  519. for(i = 0; i < s->num_channels; i++) {
  520. int c;
  521. for(c = 0; c < i; c++) {
  522. s->mclms_coeffs_cur[i * s->num_channels + c] = get_bits(&s->gb, send_coef_bits);
  523. }
  524. }
  525. }
  526. }
  527. /**
  528. *@brief Decode the per-channel CDLMS filter orders, scalings and coefficients.
  529. */
  530. static void decode_cdlms(WmallDecodeCtx *s)
  531. {
  532. int c, i;
  533. int cdlms_send_coef = get_bits1(&s->gb);
  534. for(c = 0; c < s->num_channels; c++) {
  535. s->cdlms_ttl[c] = get_bits(&s->gb, 3) + 1;
  536. for(i = 0; i < s->cdlms_ttl[c]; i++) {
  537. s->cdlms[c][i].order = (get_bits(&s->gb, 7) + 1) * 8;
  538. }
  539. for(i = 0; i < s->cdlms_ttl[c]; i++) {
  540. s->cdlms[c][i].scaling = get_bits(&s->gb, 4);
  541. }
  542. if(cdlms_send_coef) {
  543. for(i = 0; i < s->cdlms_ttl[c]; i++) {
  544. int cbits, shift_l, shift_r, j;
  545. cbits = av_log2(s->cdlms[c][i].order);
  546. if(1 << cbits < s->cdlms[c][i].order)
  547. cbits++;
  548. s->cdlms[c][i].coefsend = get_bits(&s->gb, cbits) + 1;
  549. cbits = av_log2(s->cdlms[c][i].scaling + 1);
  550. if(1 << cbits < s->cdlms[c][i].scaling + 1)
  551. cbits++;
  552. s->cdlms[c][i].bitsend = get_bits(&s->gb, cbits) + 2;
  553. shift_l = 32 - s->cdlms[c][i].bitsend;
  554. shift_r = 32 - 2 - s->cdlms[c][i].scaling;
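/* The shift pair below left-justifies the raw 'bitsend'-bit value in a
   32-bit word and arithmetically shifts it back down, which sign-extends
   the coefficient and applies a net shift of (scaling + 2 - bitsend) bits. */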
  555. for(j = 0; j < s->cdlms[c][i].coefsend; j++) {
  556. s->cdlms[c][i].coefs[j] =
  557. (get_bits(&s->gb, s->cdlms[c][i].bitsend) << shift_l) >> shift_r;
  558. }
  559. }
  560. }
  561. }
  562. }
  563. /**
  564. *@brief Decode the residues for one channel of the current tile.
  565. */
  566. static int decode_channel_residues(WmallDecodeCtx *s, int ch, int tile_size)
  567. {
  568. int i = 0;
  569. unsigned int ave_mean;
  570. s->transient[ch] = get_bits1(&s->gb);
  571. if(s->transient[ch])
  572. s->transient_pos[ch] = get_bits(&s->gb, av_log2(tile_size));
  573. if(s->seekable_tile) {
  574. ave_mean = get_bits(&s->gb, s->bits_per_sample);
  575. s->ave_sum[ch] = ave_mean << (s->movave_scaling + 1);
  576. // s->ave_sum[ch] *= 2;
  577. }
  578. if(s->seekable_tile) {
  579. if(s->do_inter_ch_decorr)
  580. s->channel_residues[ch][0] = get_sbits(&s->gb, s->bits_per_sample + 1);
  581. else
  582. s->channel_residues[ch][0] = get_sbits(&s->gb, s->bits_per_sample);
  583. i++;
  584. }
  585. av_log(0, 0, "%8d: ", num_logged_tiles++);
  586. for(; i < tile_size; i++) {
  587. int quo = 0, rem, rem_bits, residue;
  588. while(get_bits1(&s->gb))
  589. quo++;
  590. if(quo >= 32)
  591. quo += get_bits_long(&s->gb, get_bits(&s->gb, 5) + 1);
  592. ave_mean = (s->ave_sum[ch] + (1 << s->movave_scaling)) >> (s->movave_scaling + 1);
  593. rem_bits = av_ceil_log2(ave_mean);
  594. rem = rem_bits ? get_bits(&s->gb, rem_bits) : 0;
  595. residue = (quo << rem_bits) + rem;
  596. s->ave_sum[ch] = residue + s->ave_sum[ch] - (s->ave_sum[ch] >> s->movave_scaling);
  597. if(residue & 1)
  598. residue = -(residue >> 1) - 1;
  599. else
  600. residue = residue >> 1;
  601. s->channel_residues[ch][i] = residue;
  602. //if (num_logged_tiles < 1)
  603. av_log(0, 0, "%4d ", residue);
  604. // dprintf(s->avctx, "%5d: %5d %10d %12d %12d %5d %-16d %04x\n",i, quo, ave_mean, s->ave_sum[ch], rem, rem_bits, s->channel_residues[ch][i], show_bits(&s->gb, 16));
  605. }
  606. av_log(0, 0, "\n Tile size = %d\n", tile_size);
  607. return 0;
  608. }
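/*
 * Editorial sketch (illustrative only; not used by the decoder): the
 * Rice-style reconstruction performed in the loop above. The unary quotient
 * and the fixed-width remainder form an unsigned code word, and a zigzag
 * step folds it back into a signed residue, so folded values 0, 1, 2, 3, 4
 * map to residues 0, -1, 1, -2, 2.
 */
static int example_unfold_residue(unsigned quo, unsigned rem, int rem_bits)
{
    unsigned folded = (quo << rem_bits) + rem;
    return (folded & 1) ? -(int)(folded >> 1) - 1 : (int)(folded >> 1);
}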
  609. /**
  610. *@brief Decode the LPC order, scaling and coefficients.
  611. */
  612. static void
  613. decode_lpc(WmallDecodeCtx *s)
  614. {
  615. int ch, i, cbits;
  616. s->lpc_order = get_bits(&s->gb, 5) + 1;
  617. s->lpc_scaling = get_bits(&s->gb, 4);
  618. s->lpc_intbits = get_bits(&s->gb, 3) + 1;
  619. cbits = s->lpc_scaling + s->lpc_intbits;
  620. for(ch = 0; ch < s->num_channels; ch++) {
  621. for(i = 0; i < s->lpc_order; i++) {
  622. s->lpc_coefs[ch][i] = get_sbits(&s->gb, cbits);
  623. }
  624. }
  625. }
  626. static void clear_codec_buffers(WmallDecodeCtx *s)
  627. {
  628. int ich, ilms;
  629. memset(s->acfilter_coeffs, 0, 16 * sizeof(int));
  630. memset(s->lpc_coefs , 0, 40 * 2 * sizeof(int));
  631. memset(s->mclms_coeffs , 0, 128 * sizeof(int16_t));
  632. memset(s->mclms_coeffs_cur, 0, 4 * sizeof(int16_t));
  633. memset(s->mclms_prevvalues, 0, 64 * sizeof(int));
  634. memset(s->mclms_updates , 0, 64 * sizeof(int16_t));
  635. for (ich = 0; ich < s->num_channels; ich++) {
  636. for (ilms = 0; ilms < s->cdlms_ttl[ich]; ilms++) {
  637. memset(s->cdlms[ich][ilms].coefs , 0, 256 * sizeof(int16_t));
  638. memset(s->cdlms[ich][ilms].lms_prevvalues, 0, 512 * sizeof(int));
  639. memset(s->cdlms[ich][ilms].lms_updates , 0, 512 * sizeof(int16_t));
  640. }
  641. s->ave_sum[ich] = 0;
  642. }
  643. }
  644. static void reset_codec(WmallDecodeCtx *s)
  645. {
  646. int ich, ilms;
  647. s->mclms_recent = s->mclms_order * s->num_channels;
  648. for (ich = 0; ich < s->num_channels; ich++)
  649. for (ilms = 0; ilms < s->cdlms_ttl[ich]; ilms++)
  650. s->cdlms[ich][ilms].recent = s->cdlms[ich][ilms].order;
  651. }
  652. static int lms_predict(WmallDecodeCtx *s, int ich, int ilms)
  653. {
  654. int32_t pred = 0, icoef; // pred must start at zero, it is accumulated below
  655. int recent = s->cdlms[ich][ilms].recent;
  656. for (icoef = 0; icoef < s->cdlms[ich][ilms].order; icoef++)
  657. pred += s->cdlms[ich][ilms].coefs[icoef] *
  658. s->cdlms[ich][ilms].lms_prevvalues[icoef + recent];
  659. pred += (1 << (s->cdlms[ich][ilms].scaling - 1));
  660. /* XXX: Table 29 has:
  661. iPred >= cdlms[iCh][ilms].scaling;
  662. seems to me like a missing > */
  663. pred >>= s->cdlms[ich][ilms].scaling;
  664. return pred;
  665. }
  666. static void lms_update(WmallDecodeCtx *s, int ich, int ilms, int32_t input, int32_t pred)
  667. {
  668. int icoef;
  669. int recent = s->cdlms[ich][ilms].recent;
  670. int range = 1 << (s->bits_per_sample - 1);
  671. int bps = s->bits_per_sample > 16 ? 4 : 2; // bytes per sample
  672. if (input > pred) {
  673. for (icoef = 0; icoef < s->cdlms[ich][ilms].order; icoef++)
  674. s->cdlms[ich][ilms].coefs[icoef] +=
  675. s->cdlms[ich][ilms].lms_updates[icoef + recent];
  676. } else {
  677. for (icoef = 0; icoef < s->cdlms[ich][ilms].order; icoef++)
  678. s->cdlms[ich][ilms].coefs[icoef] -=
  679. s->cdlms[ich][ilms].lms_updates[icoef]; // XXX: [icoef + recent] ?
  680. }
  681. s->cdlms[ich][ilms].recent--;
  682. s->cdlms[ich][ilms].lms_prevvalues[recent] = av_clip(input, -range, range - 1);
  683. if (input > pred)
  684. s->cdlms[ich][ilms].lms_updates[recent] = s->update_speed[ich];
  685. else if (input < pred)
  686. s->cdlms[ich][ilms].lms_updates[recent] = -s->update_speed[ich];
  687. /* XXX: spec says:
  688. cdlms[iCh][ilms].updates[iRecent + cdlms[iCh][ilms].order >> 4] >>= 2;
  689. lms_updates[iCh][ilms][iRecent + cdlms[iCh][ilms].order >> 3] >>= 1;
  690. The question is: are cdlms[iCh][ilms].updates[] and lms_updates[][][] two
  691. separate buffers? Here I've assumed that the two are the same, which makes
  692. more sense to me.
  693. */
  694. s->cdlms[ich][ilms].lms_updates[recent + s->cdlms[ich][ilms].order >> 4] >>= 2;
  695. s->cdlms[ich][ilms].lms_updates[recent + s->cdlms[ich][ilms].order >> 3] >>= 1;
  696. /* XXX: recent + (s->cdlms[ich][ilms].order >> 4) ? */
  697. if (s->cdlms[ich][ilms].recent == 0) {
  698. /* XXX: These memcpy()s will probably fail if a fixed 32-bit buffer is used;
  699. follow kshishkov's suggestion of using a union. */
  700. memcpy(s->cdlms[ich][ilms].lms_prevvalues + s->cdlms[ich][ilms].order,
  701. s->cdlms[ich][ilms].lms_prevvalues,
  702. bps * s->cdlms[ich][ilms].order);
  703. memcpy(s->cdlms[ich][ilms].lms_updates + s->cdlms[ich][ilms].order,
  704. s->cdlms[ich][ilms].lms_updates,
  705. bps * s->cdlms[ich][ilms].order);
  706. s->cdlms[ich][ilms].recent = s->cdlms[ich][ilms].order;
  707. }
  708. }
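/*
 * Editorial sketch (illustrative only; all names are hypothetical): the
 * predict/update pair above without the ring-buffer bookkeeping. The
 * prediction is a rounded, scaled dot product of the coefficients with the
 * sample history, and each coefficient is then moved by its update term,
 * mirroring the add/subtract choice made in lms_update().
 */
static int example_lms_step(int *coefs, const int *updates, const int *history,
                            int order, int scaling, int input)
{
    int i;
    int64_t pred = scaling ? 1LL << (scaling - 1) : 0; /* rounding offset */
    for (i = 0; i < order; i++)
        pred += (int64_t)coefs[i] * history[i];
    pred >>= scaling;
    for (i = 0; i < order; i++) {
        if (input > pred)
            coefs[i] += updates[i];
        else
            coefs[i] -= updates[i];
    }
    return (int)pred;
}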
  709. static void use_high_update_speed(WmallDecodeCtx *s, int ich)
  710. {
  711. int ilms, recent, icoef;
  712. s->update_speed[ich] = 16;
  713. for (ilms = s->cdlms_ttl[ich] - 1; ilms >= 0; ilms--) { // cdlms_ttl is a count; start at the last valid index
  714. recent = s->cdlms[ich][ilms].recent;
  715. if (s->bV3RTM) {
  716. for (icoef = 0; icoef < s->cdlms[ich][ilms].order; icoef++)
  717. s->cdlms[ich][ilms].lms_updates[icoef + recent] *= 2;
  718. } else {
  719. for (icoef = 0; icoef < s->cdlms[ich][ilms].order; icoef++)
  720. s->cdlms[ich][ilms].lms_updates[icoef] *= 2;
  721. }
  722. }
  723. }
  724. static void use_normal_update_speed(WmallDecodeCtx *s, int ich)
  725. {
  726. int ilms, recent, icoef;
  727. s->update_speed[ich] = 8;
  728. for (ilms = s->cdlms_ttl[ich] - 1; ilms >= 0; ilms--) { // cdlms_ttl is a count; start at the last valid index
  729. recent = s->cdlms[ich][ilms].recent;
  730. if (s->bV3RTM) {
  731. for (icoef = 0; icoef < s->cdlms[ich][ilms].order; icoef++)
  732. s->cdlms[ich][ilms].lms_updates[icoef + recent] /= 2;
  733. } else {
  734. for (icoef = 0; icoef < s->cdlms[ich][ilms].order; icoef++)
  735. s->cdlms[ich][ilms].lms_updates[icoef] /= 2;
  736. }
  737. }
  738. }
  739. /**
  740. *@brief Decode a single subframe (block).
  741. *@param s codec context
  742. *@return 0 on success, < 0 when decoding failed
  743. */
  744. static int decode_subframe(WmallDecodeCtx *s)
  745. {
  746. int offset = s->samples_per_frame;
  747. int subframe_len = s->samples_per_frame;
  748. int i;
  749. int total_samples = s->samples_per_frame * s->num_channels;
  750. int rawpcm_tile;
  751. int padding_zeroes;
  752. s->subframe_offset = get_bits_count(&s->gb);
  753. /** reset channel context and find the next block offset and size
  754. == the next block of the channel with the smallest number of
  755. decoded samples
  756. */
  757. for (i = 0; i < s->num_channels; i++) {
  758. s->channel[i].grouped = 0;
  759. if (offset > s->channel[i].decoded_samples) {
  760. offset = s->channel[i].decoded_samples;
  761. subframe_len =
  762. s->channel[i].subframe_len[s->channel[i].cur_subframe];
  763. }
  764. }
  765. /** get a list of all channels that contain the estimated block */
  766. s->channels_for_cur_subframe = 0;
  767. for (i = 0; i < s->num_channels; i++) {
  768. const int cur_subframe = s->channel[i].cur_subframe;
  769. /** subtract already processed samples */
  770. total_samples -= s->channel[i].decoded_samples;
  771. /** and count if there are multiple subframes that match our profile */
  772. if (offset == s->channel[i].decoded_samples &&
  773. subframe_len == s->channel[i].subframe_len[cur_subframe]) {
  774. total_samples -= s->channel[i].subframe_len[cur_subframe];
  775. s->channel[i].decoded_samples +=
  776. s->channel[i].subframe_len[cur_subframe];
  777. s->channel_indexes_for_cur_subframe[s->channels_for_cur_subframe] = i;
  778. ++s->channels_for_cur_subframe;
  779. }
  780. }
  781. /** check if the frame will be complete after processing the
  782. estimated block */
  783. if (!total_samples)
  784. s->parsed_all_subframes = 1;
  785. s->seekable_tile = get_bits1(&s->gb);
  786. if(s->seekable_tile) {
  787. clear_codec_buffers(s);
  788. s->do_arith_coding = get_bits1(&s->gb);
  789. if(s->do_arith_coding) {
  790. dprintf(s->avctx, "do_arith_coding == 1");
  791. abort();
  792. }
  793. s->do_ac_filter = get_bits1(&s->gb);
  794. s->do_inter_ch_decorr = get_bits1(&s->gb);
  795. s->do_mclms = get_bits1(&s->gb);
  796. if(s->do_ac_filter)
  797. decode_ac_filter(s);
  798. if(s->do_mclms)
  799. decode_mclms(s);
  800. decode_cdlms(s);
  801. s->movave_scaling = get_bits(&s->gb, 3);
  802. s->quant_stepsize = get_bits(&s->gb, 8) + 1;
  803. reset_codec(s);
  804. }
  805. rawpcm_tile = get_bits1(&s->gb);
  806. for(i = 0; i < s->num_channels; i++) {
  807. s->is_channel_coded[i] = 1;
  808. }
  809. if(!rawpcm_tile) {
  810. for(i = 0; i < s->num_channels; i++) {
  811. s->is_channel_coded[i] = get_bits1(&s->gb);
  812. }
  813. if(s->bV3RTM) {
  814. // LPC
  815. s->do_lpc = get_bits1(&s->gb);
  816. if(s->do_lpc) {
  817. decode_lpc(s);
  818. }
  819. } else {
  820. s->do_lpc = 0;
  821. }
  822. }
  823. if(get_bits1(&s->gb)) {
  824. padding_zeroes = get_bits(&s->gb, 5);
  825. } else {
  826. padding_zeroes = 0;
  827. }
  828. if(rawpcm_tile) {
  829. int bits = s->bits_per_sample - padding_zeroes;
  830. int j;
  831. dprintf(s->avctx, "RAWPCM %d bits per sample. total %d bits, remain=%d\n", bits,
  832. bits * s->num_channels * subframe_len, get_bits_count(&s->gb));
  833. for(i = 0; i < s->num_channels; i++) {
  834. for(j = 0; j < subframe_len; j++) {
  835. s->channel_coeffs[i][j] = get_sbits(&s->gb, bits);
  836. // dprintf(s->avctx, "PCM[%d][%d] = 0x%04x\n", i, j, s->channel_coeffs[i][j]);
  837. }
  838. }
  839. } else {
  840. for(i = 0; i < s->num_channels; i++)
  841. if(s->is_channel_coded[i])
  842. decode_channel_residues(s, i, subframe_len);
  843. }
  844. /** handled one subframe */
  845. for (i = 0; i < s->channels_for_cur_subframe; i++) {
  846. int c = s->channel_indexes_for_cur_subframe[i];
  847. if (s->channel[c].cur_subframe >= s->channel[c].num_subframes) {
  848. av_log(s->avctx, AV_LOG_ERROR, "broken subframe\n");
  849. return AVERROR_INVALIDDATA;
  850. }
  851. ++s->channel[c].cur_subframe; // XXX: 6.4
  852. }
  853. return 0;
  854. }
  855. /**
  856. *@brief Decode one WMA frame.
  857. *@param s codec context
  858. *@return 0 if the trailer bit indicates that this is the last frame,
  859. * 1 if there are additional frames
  860. */
  861. static int decode_frame(WmallDecodeCtx *s)
  862. {
  863. GetBitContext* gb = &s->gb;
  864. int more_frames = 0;
  865. int len = 0;
  866. int i;
  867. /** check for potential output buffer overflow */
  868. if (s->num_channels * s->samples_per_frame > s->samples_end - s->samples) {
  869. /** return an error if no frame could be decoded at all */
  870. av_log(s->avctx, AV_LOG_ERROR,
  871. "not enough space for the output samples\n");
  872. s->packet_loss = 1;
  873. return 0;
  874. }
  875. /** get frame length */
  876. if (s->len_prefix)
  877. len = get_bits(gb, s->log2_frame_size); // XXX: compressed_frame_size_bits [Table 8]
  878. /** decode tile information */
  879. if (decode_tilehdr(s)) { // should include decode_tile_configuration() [Table 9]
  880. s->packet_loss = 1;
  881. return 0;
  882. }
  883. /** read drc info */
  884. if (s->dynamic_range_compression) {
  885. s->drc_gain = get_bits(gb, 8); // XXX: drc_frame_scale_factor [Table 8]
  886. }
  887. /** no idea what these are for, might be the number of samples
  888. that need to be skipped at the beginning or end of a stream */
  889. if (get_bits1(gb)) {
  890. int skip;
  891. /** usually true for the first frame */
  892. if (get_bits1(gb)) {
  893. skip = get_bits(gb, av_log2(s->samples_per_frame * 2));
  894. dprintf(s->avctx, "start skip: %i\n", skip);
  895. }
  896. /** sometimes true for the last frame */
  897. if (get_bits1(gb)) {
  898. skip = get_bits(gb, av_log2(s->samples_per_frame * 2));
  899. dprintf(s->avctx, "end skip: %i\n", skip);
  900. }
  901. }
  902. /** reset subframe states */
  903. s->parsed_all_subframes = 0;
  904. for (i = 0; i < s->num_channels; i++) {
  905. s->channel[i].decoded_samples = 0;
  906. s->channel[i].cur_subframe = 0;
  907. s->channel[i].reuse_sf = 0;
  908. }
  909. /** decode all subframes */
  910. while (!s->parsed_all_subframes) {
  911. if (decode_subframe(s) < 0) {
  912. s->packet_loss = 1;
  913. return 0;
  914. }
  915. }
  916. dprintf(s->avctx, "Frame done\n");
  917. if (s->skip_frame) {
  918. s->skip_frame = 0;
  919. } else
  920. s->samples += s->num_channels * s->samples_per_frame;
  921. if (s->len_prefix) {
  922. if (len != (get_bits_count(gb) - s->frame_offset) + 2) {
  923. /** FIXME: not sure if this is always an error */
  924. av_log(s->avctx, AV_LOG_ERROR,
  925. "frame[%i] would have to skip %i bits\n", s->frame_num,
  926. len - (get_bits_count(gb) - s->frame_offset) - 1);
  927. s->packet_loss = 1;
  928. return 0;
  929. }
  930. /** skip the rest of the frame data */
  931. skip_bits_long(gb, len - (get_bits_count(gb) - s->frame_offset) - 1);
  932. } else {
  933. /*
  934. while (get_bits_count(gb) < s->num_saved_bits && get_bits1(gb) == 0) {
  935. dprintf(s->avctx, "skip1\n");
  936. }
  937. */
  938. }
  939. /** decode trailer bit */
  940. more_frames = get_bits1(gb);
  941. ++s->frame_num;
  942. return more_frames;
  943. }
  944. /**
  945. *@brief Calculate remaining input buffer length.
  946. *@param s codec context
  947. *@param gb bitstream reader context
  948. *@return remaining size in bits
  949. */
  950. static int remaining_bits(WmallDecodeCtx *s, GetBitContext *gb)
  951. {
  952. return s->buf_bit_size - get_bits_count(gb);
  953. }
  954. /**
  955. *@brief Fill the bit reservoir with a (partial) frame.
  956. *@param s codec context
  957. *@param gb bitstream reader context
  958. *@param len length of the partial frame
  959. *@param append decides whether to reset the buffer or not
  960. */
  961. static void save_bits(WmallDecodeCtx *s, GetBitContext* gb, int len,
  962. int append)
  963. {
  964. int buflen;
  965. /** when the frame data does not need to be concatenated, the input buffer
  966. is reset and additional bits from the previous frame are copied
  967. and skipped later so that a fast byte copy is possible */
  968. if (!append) {
  969. s->frame_offset = get_bits_count(gb) & 7;
  970. s->num_saved_bits = s->frame_offset;
  971. init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
  972. }
  973. buflen = (s->num_saved_bits + len + 8) >> 3;
  974. if (len <= 0 || buflen > MAX_FRAMESIZE) {
  975. av_log_ask_for_sample(s->avctx, "input buffer too small\n");
  976. s->packet_loss = 1;
  977. return;
  978. }
  979. s->num_saved_bits += len;
  980. if (!append) {
  981. avpriv_copy_bits(&s->pb, gb->buffer + (get_bits_count(gb) >> 3),
  982. s->num_saved_bits);
  983. } else {
  984. int align = 8 - (get_bits_count(gb) & 7);
  985. align = FFMIN(align, len);
  986. put_bits(&s->pb, align, get_bits(gb, align));
  987. len -= align;
  988. avpriv_copy_bits(&s->pb, gb->buffer + (get_bits_count(gb) >> 3), len);
  989. }
  990. skip_bits_long(gb, len);
  991. {
  992. PutBitContext tmp = s->pb;
  993. flush_put_bits(&tmp);
  994. }
  995. init_get_bits(&s->gb, s->frame_data, s->num_saved_bits);
  996. skip_bits(&s->gb, s->frame_offset);
  997. }
  998. /**
  999. *@brief Decode a single WMA packet.
  1000. *@param avctx codec context
  1001. *@param data the output buffer
  1002. *@param data_size number of bytes that were written to the output buffer
  1003. *@param avpkt input packet
  1004. *@return number of bytes that were read from the input buffer
  1005. */
  1006. static int decode_packet(AVCodecContext *avctx,
  1007. void *data, int *data_size, AVPacket* avpkt)
  1008. {
  1009. WmallDecodeCtx *s = avctx->priv_data;
  1010. GetBitContext* gb = &s->pgb;
  1011. const uint8_t* buf = avpkt->data;
  1012. int buf_size = avpkt->size;
  1013. int num_bits_prev_frame;
  1014. int packet_sequence_number;
  1015. s->samples = data;
  1016. s->samples_end = (float*)((int8_t*)data + *data_size);
  1017. *data_size = 0;
  1018. if (s->packet_done || s->packet_loss) {
  1019. s->packet_done = 0;
  1020. /** sanity check for the buffer length */
  1021. if (buf_size < avctx->block_align)
  1022. return 0;
  1023. s->next_packet_start = buf_size - avctx->block_align;
  1024. buf_size = avctx->block_align;
  1025. s->buf_bit_size = buf_size << 3;
  1026. /** parse packet header */
  1027. init_get_bits(gb, buf, s->buf_bit_size);
  1028. packet_sequence_number = get_bits(gb, 4);
  1029. int seekable_frame_in_packet = get_bits1(gb);
  1030. int spliced_packet = get_bits1(gb);
  1031. /** get number of bits that need to be added to the previous frame */
  1032. num_bits_prev_frame = get_bits(gb, s->log2_frame_size);
  1033. /** check for packet loss */
  1034. if (!s->packet_loss &&
  1035. ((s->packet_sequence_number + 1) & 0xF) != packet_sequence_number) {
  1036. s->packet_loss = 1;
  1037. av_log(avctx, AV_LOG_ERROR, "Packet loss detected! seq %x vs %x\n",
  1038. s->packet_sequence_number, packet_sequence_number);
  1039. }
  1040. s->packet_sequence_number = packet_sequence_number;
  1041. if (num_bits_prev_frame > 0) {
  1042. int remaining_packet_bits = s->buf_bit_size - get_bits_count(gb);
  1043. if (num_bits_prev_frame >= remaining_packet_bits) {
  1044. num_bits_prev_frame = remaining_packet_bits;
  1045. s->packet_done = 1;
  1046. }
  1047. /** append the previous frame data to the remaining data from the
  1048. previous packet to create a full frame */
  1049. save_bits(s, gb, num_bits_prev_frame, 1);
  1050. /** decode the cross packet frame if it is valid */
  1051. if (!s->packet_loss)
  1052. decode_frame(s);
  1053. } else if (s->num_saved_bits - s->frame_offset) {
  1054. dprintf(avctx, "ignoring %x previously saved bits\n",
  1055. s->num_saved_bits - s->frame_offset);
  1056. }
  1057. if (s->packet_loss) {
  1058. /** reset number of saved bits so that the decoder
  1059. does not start to decode incomplete frames in the
  1060. s->len_prefix == 0 case */
  1061. s->num_saved_bits = 0;
  1062. s->packet_loss = 0;
  1063. }
  1064. } else {
  1065. int frame_size;
  1066. s->buf_bit_size = (avpkt->size - s->next_packet_start) << 3;
  1067. init_get_bits(gb, avpkt->data, s->buf_bit_size);
  1068. skip_bits(gb, s->packet_offset);
  1069. if (s->len_prefix && remaining_bits(s, gb) > s->log2_frame_size &&
  1070. (frame_size = show_bits(gb, s->log2_frame_size)) &&
  1071. frame_size <= remaining_bits(s, gb)) {
  1072. save_bits(s, gb, frame_size, 0);
  1073. s->packet_done = !decode_frame(s);
  1074. } else if (!s->len_prefix
  1075. && s->num_saved_bits > get_bits_count(&s->gb)) {
  1076. /** when the frames do not have a length prefix, we don't know
  1077. the compressed length of the individual frames
  1078. however, we know what part of a new packet belongs to the
  1079. previous frame
  1080. therefore we save the incoming packet first, then we append
  1081. the "previous frame" data from the next packet so that
  1082. we get a buffer that only contains full frames */
  1083. s->packet_done = !decode_frame(s);
  1084. } else {
  1085. s->packet_done = 1;
  1086. }
  1087. }
  1088. if (s->packet_done && !s->packet_loss &&
  1089. remaining_bits(s, gb) > 0) {
  1090. /** save the rest of the data so that it can be decoded
  1091. with the next packet */
  1092. save_bits(s, gb, remaining_bits(s, gb), 0);
  1093. }
  1094. *data_size = 0; // (int8_t *)s->samples - (int8_t *)data;
  1095. s->packet_offset = get_bits_count(gb) & 7;
  1096. return (s->packet_loss) ? AVERROR_INVALIDDATA : get_bits_count(gb) >> 3;
  1097. }
  1098. /**
  1099. *@brief Clear decoder buffers (for seeking).
  1100. *@param avctx codec context
  1101. */
  1102. static void flush(AVCodecContext *avctx)
  1103. {
  1104. WmallDecodeCtx *s = avctx->priv_data;
  1105. int i;
  1106. /** reset output buffer as a part of it is used during the windowing of a
  1107. new frame */
  1108. for (i = 0; i < s->num_channels; i++)
  1109. memset(s->channel[i].out, 0, s->samples_per_frame *
  1110. sizeof(*s->channel[i].out));
  1111. s->packet_loss = 1;
  1112. }
  1113. /**
  1114. *@brief wmall decoder
  1115. */
  1116. AVCodec ff_wmalossless_decoder = {
  1117. "wmalossless",
  1118. AVMEDIA_TYPE_AUDIO,
  1119. CODEC_ID_WMALOSSLESS,
  1120. sizeof(WmallDecodeCtx),
  1121. decode_init,
  1122. NULL,
  1123. decode_end,
  1124. decode_packet,
  1125. .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_EXPERIMENTAL,
  1126. .flush= flush,
  1127. .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Lossless"),
  1128. };