You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

1898 lines
62KB

  1. /*
  2. * MPEG Audio decoder
  3. * Copyright (c) 2001, 2002 Fabrice Bellard
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * MPEG Audio decoder
  24. */
  25. #include "libavutil/attributes.h"
  26. #include "libavutil/avassert.h"
  27. #include "libavutil/channel_layout.h"
  28. #include "libavutil/crc.h"
  29. #include "libavutil/float_dsp.h"
  30. #include "libavutil/libm.h"
  31. #include "libavutil/mem_internal.h"
  32. #include "libavutil/thread.h"
  33. #include "avcodec.h"
  34. #include "get_bits.h"
  35. #include "internal.h"
  36. #include "mathops.h"
  37. #include "mpegaudiodsp.h"
  38. /*
  39. * TODO:
  40. * - test lsf / mpeg25 extensively.
  41. */
  42. #include "mpegaudio.h"
  43. #include "mpegaudiodecheader.h"
  44. #define BACKSTEP_SIZE 512
  45. #define EXTRABYTES 24
  46. #define LAST_BUF_SIZE 2 * BACKSTEP_SIZE + EXTRABYTES
/* layer 3 "granule": per-channel, per-granule side info plus the
 * 576 decoded spectral coefficients */
typedef struct GranuleDef {
    uint8_t scfsi;
    int part2_3_length;          /* size in bits of scale factors + huffman data */
    int big_values;              /* number of coefficient pairs coded with the pair tables */
    int global_gain;
    int scalefac_compress;
    uint8_t block_type;          /* 2 means short (windowed) blocks */
    uint8_t switch_point;        /* mixed blocks: lowest bands use long windows */
    int table_select[3];         /* huffman table index for each big_values region */
    int subblock_gain[3];        /* per-window gain, short blocks only */
    uint8_t scalefac_scale;      /* extra left shift applied to scale factors */
    uint8_t count1table_select;  /* which quad huffman table covers the count1 region */
    int region_size[3]; /* number of huffman codes in each region */
    int preflag;                 /* selects the row of ff_mpa_pretab */
    int short_start, long_end; /* long/short band indexes */
    uint8_t scale_factors[40];
    DECLARE_ALIGNED(16, INTFLOAT, sb_hybrid)[SBLIMIT * 18]; /* 576 samples */
} GranuleDef;
typedef struct MPADecodeContext {
    MPA_DECODE_HEADER        /* common parsed-header fields (see mpegaudiodecheader.h) */
    /* bytes carried over between packets — presumably the layer 3 bit
     * reservoir backstep data; sized by BACKSTEP_SIZE */
    uint8_t last_buf[LAST_BUF_SIZE];
    int last_buf_size;
    int extrasize;
    /* next header (used in free format parsing) */
    uint32_t free_format_next_header;
    GetBitContext gb;        /* active bitstream reader */
    GetBitContext in_gb;     /* secondary reader; swapped into gb by switch_buffer() */
    DECLARE_ALIGNED(32, MPA_INT, synth_buf)[MPA_MAX_CHANNELS][512 * 2];
    int synth_buf_offset[MPA_MAX_CHANNELS];
    DECLARE_ALIGNED(32, INTFLOAT, sb_samples)[MPA_MAX_CHANNELS][36][SBLIMIT];
    INTFLOAT mdct_buf[MPA_MAX_CHANNELS][SBLIMIT * 18]; /* previous samples, for layer 3 MDCT */
    GranuleDef granules[2][2]; /* Used in Layer 3 */
    int adu_mode; ///< 0 for standard mp3, 1 for adu formatted mp3
    int dither_state;
    int err_recognition;     /* copy of avctx->err_recognition (set in decode_init) */
    AVCodecContext* avctx;
    MPADSPContext mpadsp;
    void (*butterflies_float)(float *av_restrict v1, float *av_restrict v2, int len);
    AVFrame *frame;
    uint32_t crc;            /* CRC value read from the frame, verified in handle_crc() */
} MPADecodeContext;

/* size in bytes of an MPEG audio frame header */
#define HEADER_SIZE 4
  90. #include "mpegaudiodata.h"
  91. #include "mpegaudio_tablegen.h"
/* intensity stereo coef table (filled in decode_init_static) */
static INTFLOAT is_table_lsf[2][2][16];

/* [i][j]: 2^(-j/3) * FRAC_ONE * 2^(i+2) / (2^(i+2) - 1) */
static int32_t scale_factor_mult[15][3];

/* mult table for layer 2 group quantization */
/* the three columns are v * 2^0, v * 2^(-1/3) and v * 2^(-2/3) */
#define SCALE_GEN(v) \
{ FIXR_OLD(1.0 * (v)), FIXR_OLD(0.7937005259 * (v)), FIXR_OLD(0.6299605249 * (v)) }

static const int32_t scale_factor_mult2[3][3] = {
    SCALE_GEN(4.0 / 3.0), /* 3 steps */
    SCALE_GEN(4.0 / 5.0), /* 5 steps */
    SCALE_GEN(4.0 / 9.0), /* 9 steps */
};
  104. /**
  105. * Convert region offsets to region sizes and truncate
  106. * size to big_values.
  107. */
  108. static void region_offset2size(GranuleDef *g)
  109. {
  110. int i, k, j = 0;
  111. g->region_size[2] = 576 / 2;
  112. for (i = 0; i < 3; i++) {
  113. k = FFMIN(g->region_size[i], g->big_values);
  114. g->region_size[i] = k - j;
  115. j = k;
  116. }
  117. }
  118. static void init_short_region(MPADecodeContext *s, GranuleDef *g)
  119. {
  120. if (g->block_type == 2) {
  121. if (s->sample_rate_index != 8)
  122. g->region_size[0] = (36 / 2);
  123. else
  124. g->region_size[0] = (72 / 2);
  125. } else {
  126. if (s->sample_rate_index <= 2)
  127. g->region_size[0] = (36 / 2);
  128. else if (s->sample_rate_index != 8)
  129. g->region_size[0] = (54 / 2);
  130. else
  131. g->region_size[0] = (108 / 2);
  132. }
  133. g->region_size[1] = (576 / 2);
  134. }
  135. static void init_long_region(MPADecodeContext *s, GranuleDef *g,
  136. int ra1, int ra2)
  137. {
  138. int l;
  139. g->region_size[0] = ff_band_index_long[s->sample_rate_index][ra1 + 1];
  140. /* should not overflow */
  141. l = FFMIN(ra1 + ra2 + 2, 22);
  142. g->region_size[1] = ff_band_index_long[s->sample_rate_index][ l];
  143. }
  144. static void compute_band_indexes(MPADecodeContext *s, GranuleDef *g)
  145. {
  146. if (g->block_type == 2) {
  147. if (g->switch_point) {
  148. if(s->sample_rate_index == 8)
  149. avpriv_request_sample(s->avctx, "switch point in 8khz");
  150. /* if switched mode, we handle the 36 first samples as
  151. long blocks. For 8000Hz, we handle the 72 first
  152. exponents as long blocks */
  153. if (s->sample_rate_index <= 2)
  154. g->long_end = 8;
  155. else
  156. g->long_end = 6;
  157. g->short_start = 3;
  158. } else {
  159. g->long_end = 0;
  160. g->short_start = 0;
  161. }
  162. } else {
  163. g->short_start = 13;
  164. g->long_end = 22;
  165. }
  166. }
/* layer 1 unscaling */
/* n = number of bits of the mantissa minus 1 */
static inline int l1_unscale(int n, int mant, int scale_factor)
{
    int shift, mod;
    int64_t val;

    /* ff_scale_factor_modshift packs the shift in the upper bits and the
     * fractional-scale index (0..3) in the low 2 bits */
    shift = ff_scale_factor_modshift[scale_factor];
    mod   = shift & 3;
    shift >>= 2;
    /* re-center the mantissa around zero before scaling */
    val = MUL64((int)(mant + (-1U << n) + 1), scale_factor_mult[n-1][mod]);
    shift += n;
    /* NOTE: at this point, 1 <= shift <= 21 + 15 */
    /* round-to-nearest right shift back to the output precision */
    return (int)((val + (1LL << (shift - 1))) >> shift);
}
/* Layer 2 unscaling for grouped quantization; steps is the number of
 * quantization steps (3, 5 or 9 — steps >> 2 selects the table row). */
static inline int l2_unscale_group(int steps, int mant, int scale_factor)
{
    int shift, mod, val;

    shift = ff_scale_factor_modshift[scale_factor];
    mod   = shift & 3;           /* fractional scale index (0..3) */
    shift >>= 2;
    /* re-center the mantissa, then apply the 2^(-mod/3) multiplier */
    val = (mant - (steps >> 1)) * scale_factor_mult2[steps >> 2][mod];
    /* NOTE: at this point, 0 <= shift <= 21 */
    if (shift > 0)
        val = (val + (1 << (shift - 1))) >> shift;   /* rounded shift */
    return val;
}
/* compute value^(4/3) * 2^(exponent/4). It normalized to FRAC_BITS */
static inline int l3_unscale(int value, int exponent)
{
    unsigned int m;
    int e;

    /* table lookup of value^(4/3), split as mantissa and exponent;
     * exponent & 3 selects the 2^(frac/4) variant */
    e = ff_table_4_3_exp  [4 * value + (exponent & 3)];
    m = ff_table_4_3_value[4 * value + (exponent & 3)];
    e -= exponent >> 2;
#ifdef DEBUG
    if(e < 1)
        av_log(NULL, AV_LOG_WARNING, "l3_unscale: e is %d\n", e);
#endif
    /* unsigned compare also catches negative e; result underflows to 0 */
    if (e > (SUINT)31)
        return 0;
    m = (m + ((1U << e) >> 1)) >> e;   /* rounded right shift by e */

    return m;
}
/* One-time initialization of the static tables shared by all decoder
 * instances; invoked through ff_thread_once() from decode_init(). */
static av_cold void decode_init_static(void)
{
    int i, j;

    /* scale factor multiply for layer 1 */
    for (i = 0; i < 15; i++) {
        int n, norm;
        n = i + 2;
        /* norm compensates for the (2^n - 1) quantizer range */
        norm = ((INT64_C(1) << n) * FRAC_ONE) / ((1 << n) - 1);
        /* columns: 2^0, 2^(-1/3), 2^(-2/3), each doubled */
        scale_factor_mult[i][0] = MULLx(norm, FIXR(1.0          * 2.0), FRAC_BITS);
        scale_factor_mult[i][1] = MULLx(norm, FIXR(0.7937005259 * 2.0), FRAC_BITS);
        scale_factor_mult[i][2] = MULLx(norm, FIXR(0.6299605249 * 2.0), FRAC_BITS);
        ff_dlog(NULL, "%d: norm=%x s=%"PRIx32" %"PRIx32" %"PRIx32"\n", i,
                (unsigned)norm,
                scale_factor_mult[i][0],
                scale_factor_mult[i][1],
                scale_factor_mult[i][2]);
    }

    /* compute n ^ (4/3) and store it in mantissa/exp format */
    mpegaudio_tableinit();

    /* intensity stereo tables for the LSF (MPEG-2) case; the parity of
     * the intensity position selects which channel keeps unit gain */
    for (i = 0; i < 16; i++) {
        double f;
        int e, k;

        for (j = 0; j < 2; j++) {
            e = -(j + 1) * ((i + 1) >> 1);
            f = exp2(e / 4.0);
            k = i & 1;
            is_table_lsf[j][k ^ 1][i] = FIXR(f);
            is_table_lsf[j][k    ][i] = FIXR(1.0);
            ff_dlog(NULL, "is_table_lsf %d %d: %f %f\n",
                    i, j, (float) is_table_lsf[j][0][i],
                    (float) is_table_lsf[j][1][i]);
        }
    }

    RENAME(ff_mpa_synth_init)();
    ff_mpegaudiodec_common_init_static();
}
/* Per-instance decoder initialization: DSP setup, sample format
 * selection and one-time static table initialization.
 * Returns 0 on success or AVERROR(ENOMEM). */
static av_cold int decode_init(AVCodecContext * avctx)
{
    static AVOnce init_static_once = AV_ONCE_INIT;
    MPADecodeContext *s = avctx->priv_data;

    s->avctx = avctx;

#if USE_FLOATS
    {
        AVFloatDSPContext *fdsp;
        fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
        if (!fdsp)
            return AVERROR(ENOMEM);
        /* only this function pointer is kept; the DSP context itself can
         * be freed immediately */
        s->butterflies_float = fdsp->butterflies_float;
        av_free(fdsp);
    }
#endif

    ff_mpadsp_init(&s->mpadsp);

    /* use OUT_FMT only when the caller requested it and the codec is not
     * MP3ON4; otherwise fall back to the planar variant OUT_FMT_P */
    if (avctx->request_sample_fmt == OUT_FMT &&
        avctx->codec_id != AV_CODEC_ID_MP3ON4)
        avctx->sample_fmt = OUT_FMT;
    else
        avctx->sample_fmt = OUT_FMT_P;
    s->err_recognition = avctx->err_recognition;

    if (avctx->codec_id == AV_CODEC_ID_MP3ADU)
        s->adu_mode = 1;

    ff_thread_once(&init_static_once, decode_init_static);

    return 0;
}
/* cosine-derived constants used by imdct12() below */
#define C3 FIXHR(0.86602540378443864676/2)
#define C4 FIXHR(0.70710678118654752439/2) //0.5 / cos(pi*(9)/36)
#define C5 FIXHR(0.51763809020504152469/2) //0.5 / cos(pi*(5)/36)
#define C6 FIXHR(1.93185165257813657349/4) //0.5 / cos(pi*(15)/36)
/* 12 points IMDCT. We compute it "by hand" by factorizing obvious
   cases. */
static void imdct12(INTFLOAT *out, SUINTFLOAT *in)
{
    SUINTFLOAT in0, in1, in2, in3, in4, in5, t1, t2;

    /* inputs are read with stride 3; each term is pre-summed with its
     * predecessor */
    in0  = in[0*3];
    in1  = in[1*3] + in[0*3];
    in2  = in[2*3] + in[1*3];
    in3  = in[3*3] + in[2*3];
    in4  = in[4*3] + in[3*3];
    in5  = in[5*3] + in[4*3];
    in5 += in3;
    in3 += in1;

    in2  = MULH3(in2, C3, 2);
    in3  = MULH3(in3, C3, 4);

    t1 = in0 - in4;
    t2 = MULH3(in1 - in5, C4, 2);

    /* each output value is written to two mirrored positions */
    out[ 7] =
    out[10] = t1 + t2;
    out[ 1] =
    out[ 4] = t1 - t2;

    in0 += SHR(in4, 1);
    in4  = in0 + in2;
    in5 += 2*in1;
    in1  = MULH3(in5 + in3, C5, 1);
    out[ 8] =
    out[ 9] = in4 + in1;
    out[ 2] =
    out[ 3] = in4 - in1;

    in0 -= in2;
    in5  = MULH3(in5 - in3, C6, 2);
    out[ 0] =
    out[ 5] = in0 - in5;
    out[ 6] =
    out[11] = in0 + in5;
}
/* Verify the frame CRC16 (when present and CRC checking is enabled).
 * sec_len is the length in bits of the protected section, which starts
 * at the byte following the stored CRC (buf[6]); the last two header
 * bytes (buf[2..3]) are also covered. Returns 0, or
 * AVERROR_INVALIDDATA on mismatch with AV_EF_EXPLODE set. */
static int handle_crc(MPADecodeContext *s, int sec_len)
{
    if (s->error_protection && (s->err_recognition & AV_EF_CRCCHECK)) {
        /* gb.buffer points past the 4-byte header; step back to it */
        const uint8_t *buf = s->gb.buffer - HEADER_SIZE;
        int sec_byte_len = sec_len >> 3;
        int sec_rem_bits = sec_len & 7;
        const AVCRC *crc_tab = av_crc_get_table(AV_CRC_16_ANSI);
        uint8_t tmp_buf[4];
        uint32_t crc_val = av_crc(crc_tab, UINT16_MAX, &buf[2], 2);
        crc_val = av_crc(crc_tab, crc_val, &buf[6], sec_byte_len);

        /* fold in the trailing partial byte plus the stored CRC itself */
        AV_WB32(tmp_buf,
                ((buf[6 + sec_byte_len] & (0xFF00 >> sec_rem_bits)) << 24) +
                ((s->crc << 16) >> sec_rem_bits));

        crc_val = av_crc(crc_tab, crc_val, tmp_buf, 3);

        /* a correct stream yields a zero residual */
        if (crc_val) {
            av_log(s->avctx, AV_LOG_ERROR, "CRC mismatch %X!\n", crc_val);
            if (s->err_recognition & AV_EF_EXPLODE)
                return AVERROR_INVALIDDATA;
        }
    }
    return 0;
}
/* return the number of decoded frames */
static int mp_decode_layer1(MPADecodeContext *s)
{
    int bound, i, v, n, ch, j, mant;
    uint8_t allocation[MPA_MAX_CHANNELS][SBLIMIT];
    uint8_t scale_factors[MPA_MAX_CHANNELS][SBLIMIT];
    int ret;

    /* the protected section is 16 bytes for mono, 32 for stereo */
    ret = handle_crc(s, (s->nb_channels == 1) ? 8*16 : 8*32);
    if (ret < 0)
        return ret;

    /* in joint stereo, subbands at or above 'bound' share one allocation */
    if (s->mode == MPA_JSTEREO)
        bound = (s->mode_ext + 1) * 4;
    else
        bound = SBLIMIT;

    /* allocation bits */
    for (i = 0; i < bound; i++) {
        for (ch = 0; ch < s->nb_channels; ch++) {
            allocation[ch][i] = get_bits(&s->gb, 4);
        }
    }
    for (i = bound; i < SBLIMIT; i++)
        allocation[0][i] = get_bits(&s->gb, 4);

    /* scale factors */
    for (i = 0; i < bound; i++) {
        for (ch = 0; ch < s->nb_channels; ch++) {
            if (allocation[ch][i])
                scale_factors[ch][i] = get_bits(&s->gb, 6);
        }
    }
    for (i = bound; i < SBLIMIT; i++) {
        if (allocation[0][i]) {
            /* jointly coded subbands still carry one scale factor per channel */
            scale_factors[0][i] = get_bits(&s->gb, 6);
            scale_factors[1][i] = get_bits(&s->gb, 6);
        }
    }

    /* compute samples: 12 sample groups per frame */
    for (j = 0; j < 12; j++) {
        for (i = 0; i < bound; i++) {
            for (ch = 0; ch < s->nb_channels; ch++) {
                n = allocation[ch][i];
                if (n) {
                    mant = get_bits(&s->gb, n + 1);
                    v = l1_unscale(n, mant, scale_factors[ch][i]);
                } else {
                    v = 0;
                }
                s->sb_samples[ch][j][i] = v;
            }
        }
        for (i = bound; i < SBLIMIT; i++) {
            n = allocation[0][i];
            if (n) {
                /* a single mantissa is rescaled for both channels */
                mant = get_bits(&s->gb, n + 1);
                v = l1_unscale(n, mant, scale_factors[0][i]);
                s->sb_samples[0][j][i] = v;
                v = l1_unscale(n, mant, scale_factors[1][i]);
                s->sb_samples[1][j][i] = v;
            } else {
                s->sb_samples[0][j][i] = 0;
                s->sb_samples[1][j][i] = 0;
            }
        }
    }
    return 12;
}
/* Decode one layer 2 frame: bit allocation, scale factors, then the
 * quantized samples. Returns the number of decoded sample groups (36)
 * or a negative error code from the CRC check. */
static int mp_decode_layer2(MPADecodeContext *s)
{
    int sblimit; /* number of used subbands */
    const unsigned char *alloc_table;
    int table, bit_alloc_bits, i, j, ch, bound, v;
    unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT];
    unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT];
    unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3], *sf;
    int scale, qindex, bits, steps, k, l, m, b;
    int ret;

    /* select decoding table */
    table = ff_mpa_l2_select_table(s->bit_rate / 1000, s->nb_channels,
                                   s->sample_rate, s->lsf);
    sblimit     = ff_mpa_sblimit_table[table];
    alloc_table = ff_mpa_alloc_tables[table];

    /* in joint stereo, subbands at or above 'bound' share one allocation */
    if (s->mode == MPA_JSTEREO)
        bound = (s->mode_ext + 1) * 4;
    else
        bound = sblimit;

    ff_dlog(s->avctx, "bound=%d sblimit=%d\n", bound, sblimit);

    /* sanity check */
    if (bound > sblimit)
        bound = sblimit;

    /* parse bit allocation; j walks the variable-width alloc_table */
    j = 0;
    for (i = 0; i < bound; i++) {
        bit_alloc_bits = alloc_table[j];
        for (ch = 0; ch < s->nb_channels; ch++)
            bit_alloc[ch][i] = get_bits(&s->gb, bit_alloc_bits);
        j += 1 << bit_alloc_bits;
    }
    for (i = bound; i < sblimit; i++) {
        /* one shared allocation for both channels */
        bit_alloc_bits = alloc_table[j];
        v = get_bits(&s->gb, bit_alloc_bits);
        bit_alloc[0][i] = v;
        bit_alloc[1][i] = v;
        j += 1 << bit_alloc_bits;
    }

    /* scale codes */
    for (i = 0; i < sblimit; i++) {
        for (ch = 0; ch < s->nb_channels; ch++) {
            if (bit_alloc[ch][i])
                scale_code[ch][i] = get_bits(&s->gb, 2);
        }
    }

    /* protected section ends here (everything after the 16-bit CRC) */
    ret = handle_crc(s, get_bits_count(&s->gb) - 16);
    if (ret < 0)
        return ret;

    /* scale factors: the 2-bit scale code says how the three per-part
     * factors are shared */
    for (i = 0; i < sblimit; i++) {
        for (ch = 0; ch < s->nb_channels; ch++) {
            if (bit_alloc[ch][i]) {
                sf = scale_factors[ch][i];
                switch (scale_code[ch][i]) {
                default:
                case 0:     /* three distinct factors */
                    sf[0] = get_bits(&s->gb, 6);
                    sf[1] = get_bits(&s->gb, 6);
                    sf[2] = get_bits(&s->gb, 6);
                    break;
                case 2:     /* one factor for all three parts */
                    sf[0] = get_bits(&s->gb, 6);
                    sf[1] = sf[0];
                    sf[2] = sf[0];
                    break;
                case 1:     /* middle part reuses the first factor */
                    sf[0] = get_bits(&s->gb, 6);
                    sf[2] = get_bits(&s->gb, 6);
                    sf[1] = sf[0];
                    break;
                case 3:     /* middle part reuses the last factor */
                    sf[0] = get_bits(&s->gb, 6);
                    sf[2] = get_bits(&s->gb, 6);
                    sf[1] = sf[2];
                    break;
                }
            }
        }
    }

    /* samples: 3 parts (k) of 4 groups (l) of 3 samples each */
    for (k = 0; k < 3; k++) {
        for (l = 0; l < 12; l += 3) {
            j = 0;
            for (i = 0; i < bound; i++) {
                bit_alloc_bits = alloc_table[j];
                for (ch = 0; ch < s->nb_channels; ch++) {
                    b = bit_alloc[ch][i];
                    if (b) {
                        scale  = scale_factors[ch][i][k];
                        qindex = alloc_table[j+b];
                        bits   = ff_mpa_quant_bits[qindex];
                        if (bits < 0) {
                            int v2;
                            /* 3 values at the same time (grouped coding) */
                            v  = get_bits(&s->gb, -bits);
                            v2 = ff_division_tabs[qindex][v];
                            steps = ff_mpa_quant_steps[qindex];

                            s->sb_samples[ch][k * 12 + l + 0][i] =
                                l2_unscale_group(steps,  v2       & 15, scale);
                            s->sb_samples[ch][k * 12 + l + 1][i] =
                                l2_unscale_group(steps, (v2 >> 4) & 15, scale);
                            s->sb_samples[ch][k * 12 + l + 2][i] =
                                l2_unscale_group(steps,  v2 >> 8      , scale);
                        } else {
                            for (m = 0; m < 3; m++) {
                                v = get_bits(&s->gb, bits);
                                v = l1_unscale(bits - 1, v, scale);
                                s->sb_samples[ch][k * 12 + l + m][i] = v;
                            }
                        }
                    } else {
                        s->sb_samples[ch][k * 12 + l + 0][i] = 0;
                        s->sb_samples[ch][k * 12 + l + 1][i] = 0;
                        s->sb_samples[ch][k * 12 + l + 2][i] = 0;
                    }
                }
                /* next subband in alloc table */
                j += 1 << bit_alloc_bits;
            }
            /* XXX: find a way to avoid this duplication of code */
            for (i = bound; i < sblimit; i++) {
                bit_alloc_bits = alloc_table[j];
                b = bit_alloc[0][i];
                if (b) {
                    int mant, scale0, scale1;
                    scale0 = scale_factors[0][i][k];
                    scale1 = scale_factors[1][i][k];
                    qindex = alloc_table[j + b];
                    bits = ff_mpa_quant_bits[qindex];
                    if (bits < 0) {
                        /* 3 values at the same time; here the grouped code
                         * is split by division instead of a lookup table */
                        v = get_bits(&s->gb, -bits);
                        steps = ff_mpa_quant_steps[qindex];
                        mant = v % steps;
                        v = v / steps;
                        s->sb_samples[0][k * 12 + l + 0][i] =
                            l2_unscale_group(steps, mant, scale0);
                        s->sb_samples[1][k * 12 + l + 0][i] =
                            l2_unscale_group(steps, mant, scale1);
                        mant = v % steps;
                        v = v / steps;
                        s->sb_samples[0][k * 12 + l + 1][i] =
                            l2_unscale_group(steps, mant, scale0);
                        s->sb_samples[1][k * 12 + l + 1][i] =
                            l2_unscale_group(steps, mant, scale1);
                        s->sb_samples[0][k * 12 + l + 2][i] =
                            l2_unscale_group(steps, v, scale0);
                        s->sb_samples[1][k * 12 + l + 2][i] =
                            l2_unscale_group(steps, v, scale1);
                    } else {
                        for (m = 0; m < 3; m++) {
                            mant = get_bits(&s->gb, bits);
                            s->sb_samples[0][k * 12 + l + m][i] =
                                l1_unscale(bits - 1, mant, scale0);
                            s->sb_samples[1][k * 12 + l + m][i] =
                                l1_unscale(bits - 1, mant, scale1);
                        }
                    }
                } else {
                    s->sb_samples[0][k * 12 + l + 0][i] = 0;
                    s->sb_samples[0][k * 12 + l + 1][i] = 0;
                    s->sb_samples[0][k * 12 + l + 2][i] = 0;
                    s->sb_samples[1][k * 12 + l + 0][i] = 0;
                    s->sb_samples[1][k * 12 + l + 1][i] = 0;
                    s->sb_samples[1][k * 12 + l + 2][i] = 0;
                }
                /* next subband in alloc table */
                j += 1 << bit_alloc_bits;
            }
            /* fill remaining samples to zero */
            for (i = sblimit; i < SBLIMIT; i++) {
                for (ch = 0; ch < s->nb_channels; ch++) {
                    s->sb_samples[ch][k * 12 + l + 0][i] = 0;
                    s->sb_samples[ch][k * 12 + l + 1][i] = 0;
                    s->sb_samples[ch][k * 12 + l + 2][i] = 0;
                }
            }
        }
    }
    return 3 * 12;
}
/* SPLIT(dst, sf, n): dst = sf % n and sf = sf / n for n in {3,4,5,6}
 * (division done via reciprocal multiplication); any other n gives
 * dst = 0 and leaves sf unchanged. */
#define SPLIT(dst,sf,n)                 \
    if (n == 3) {                       \
        int m = (sf * 171) >> 9;        \
        dst = sf - 3 * m;               \
        sf  = m;                        \
    } else if (n == 4) {                \
        dst = sf & 3;                   \
        sf >>= 2;                       \
    } else if (n == 5) {                \
        int m = (sf * 205) >> 10;       \
        dst = sf - 5 * m;               \
        sf  = m;                        \
    } else if (n == 6) {                \
        int m = (sf * 171) >> 10;       \
        dst = sf - 6 * m;               \
        sf  = m;                        \
    } else {                            \
        dst = 0;                        \
    }

/* Unpack a mixed-radix scale factor value sf into four slen fields:
 * slen[3] in radix n3, slen[2] in n2, slen[1] in n1, remainder in slen[0]. */
static av_always_inline void lsf_sf_expand(int *slen, int sf, int n1, int n2,
                                           int n3)
{
    SPLIT(slen[3], sf, n3)
    SPLIT(slen[2], sf, n2)
    SPLIT(slen[1], sf, n1)
    slen[0] = sf;
}
/* Expand the granule's scale factors into one exponent per spectral
 * coefficient, written sequentially into exponents[]. */
static void exponents_from_scale_factors(MPADecodeContext *s, GranuleDef *g,
                                         int16_t *exponents)
{
    const uint8_t *bstab, *pretab;
    int len, i, j, k, l, v0, shift, gain, gains[3];
    int16_t *exp_ptr;

    exp_ptr = exponents;
    gain    = g->global_gain - 210;
    shift   = g->scalefac_scale + 1;   /* scale factors count single or double steps */

    /* long bands: preemphasis table added when preflag is set */
    bstab  = ff_band_size_long[s->sample_rate_index];
    pretab = ff_mpa_pretab[g->preflag];
    for (i = 0; i < g->long_end; i++) {
        v0 = gain - ((g->scale_factors[i] + pretab[i]) << shift) + 400;
        len = bstab[i];
        for (j = len; j > 0; j--)
            *exp_ptr++ = v0;
    }

    /* short bands: three windows per band, each with its own subblock gain */
    if (g->short_start < 13) {
        bstab    = ff_band_size_short[s->sample_rate_index];
        gains[0] = gain - (g->subblock_gain[0] << 3);
        gains[1] = gain - (g->subblock_gain[1] << 3);
        gains[2] = gain - (g->subblock_gain[2] << 3);
        k        = g->long_end;
        for (i = g->short_start; i < 13; i++) {
            len = bstab[i];
            for (l = 0; l < 3; l++) {
                v0 = gains[l] - (g->scale_factors[k++] << shift) + 400;
                for (j = len; j > 0; j--)
                    *exp_ptr++ = v0;
            }
        }
    }
}
/* When the reader has consumed all the carried-over data, switch s->gb
 * over to the pending reader in_gb and rebase *pos, *end_pos and
 * *end_pos2 to positions within the new buffer. */
static void switch_buffer(MPADecodeContext *s, int *pos, int *end_pos,
                          int *end_pos2)
{
    if (s->in_gb.buffer && *pos >= s->gb.size_in_bits - s->extrasize * 8) {
        s->gb           = s->in_gb;
        s->in_gb.buffer = NULL;
        s->extrasize    = 0;
        /* the old buffer must have ended on a byte boundary */
        av_assert2((get_bits_count(&s->gb) & 7) == 0);
        /* carry any overread (*pos past *end_pos) into the new buffer */
        skip_bits_long(&s->gb, *pos - *end_pos);
        *end_pos2 =
        *end_pos  = *end_pos2 + get_bits_count(&s->gb) - *pos;
        *pos      = get_bits_count(&s->gb);
    }
}
/* Following is an optimized code for
            INTFLOAT v = *src
            if(get_bits1(&s->gb))
                v = -v;
            *dst = v;
*/
#if USE_FLOATS
/* flip the IEEE float sign bit directly with the read bit */
#define READ_FLIP_SIGN(dst,src)                     \
    v = AV_RN32A(src) ^ (get_bits1(&s->gb) << 31);  \
    AV_WN32A(dst, v);
#else
/* branchless two's-complement negate: v is 0 or -1 (all ones) */
#define READ_FLIP_SIGN(dst,src)     \
    v      = -get_bits1(&s->gb);    \
    *(dst) = (*(src) ^ v) - v;
#endif
/* Decode the huffman-coded spectral data of one granule into
 * g->sb_hybrid[0..575]. end_pos2 is the bit position where the
 * granule's part2_3 data ends; end_pos additionally clamps to the end
 * of the currently readable buffer. */
static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
                          int16_t *exponents, int end_pos2)
{
    int s_index;
    int i;
    int last_pos, bits_left;
    VLC *vlc;
    int end_pos = FFMIN(end_pos2, s->gb.size_in_bits - s->extrasize * 8);

    /* low frequencies (called big values) */
    s_index = 0;
    for (i = 0; i < 3; i++) {
        int j, k, l, linbits;
        j = g->region_size[i];
        if (j == 0)
            continue;
        /* select vlc table */
        k       = g->table_select[i];
        l       = ff_mpa_huff_data[k][0];
        linbits = ff_mpa_huff_data[k][1];
        vlc     = &ff_huff_vlc[l];

        if (!l) {
            /* table 0 carries no codes: the whole region is zero */
            memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * 2 * j);
            s_index += 2 * j;
            continue;
        }

        /* read huffcode and compute each couple */
        for (; j > 0; j--) {
            int exponent, x, y;
            int v;
            int pos = get_bits_count(&s->gb);

            if (pos >= end_pos){
                switch_buffer(s, &pos, &end_pos, &end_pos2);
                if (pos >= end_pos)
                    break;
            }
            y = get_vlc2(&s->gb, vlc->table, 7, 3);

            if (!y) {
                g->sb_hybrid[s_index    ] =
                g->sb_hybrid[s_index + 1] = 0;
                s_index += 2;
                continue;
            }

            exponent= exponents[s_index];

            ff_dlog(s->avctx, "region=%d n=%d y=%d exp=%d\n",
                    i, g->region_size[i] - j, y, exponent);
            if (y & 16) {
                /* escape flag: x and/or y may need linbits extension */
                x = y >> 5;
                y = y & 0x0f;
                if (x < 15) {
                    READ_FLIP_SIGN(g->sb_hybrid + s_index, RENAME(expval_table)[exponent] + x)
                } else {
                    x += get_bitsz(&s->gb, linbits);
                    v  = l3_unscale(x, exponent);
                    if (get_bits1(&s->gb))
                        v = -v;
                    g->sb_hybrid[s_index] = v;
                }
                if (y < 15) {
                    READ_FLIP_SIGN(g->sb_hybrid + s_index + 1, RENAME(expval_table)[exponent] + y)
                } else {
                    y += get_bitsz(&s->gb, linbits);
                    v  = l3_unscale(y, exponent);
                    if (get_bits1(&s->gb))
                        v = -v;
                    g->sb_hybrid[s_index + 1] = v;
                }
            } else {
                /* no escape: at most one of the two values is non-zero */
                x = y >> 5;
                y = y & 0x0f;
                x += y;
                if (x < 15) {
                    READ_FLIP_SIGN(g->sb_hybrid + s_index + !!y, RENAME(expval_table)[exponent] + x)
                } else {
                    x += get_bitsz(&s->gb, linbits);
                    v  = l3_unscale(x, exponent);
                    if (get_bits1(&s->gb))
                        v = -v;
                    g->sb_hybrid[s_index+!!y] = v;
                }
                g->sb_hybrid[s_index + !y] = 0;
            }
            s_index += 2;
        }
    }

    /* high frequencies: quadruples decoded from the count1 tables */
    vlc = &ff_huff_quad_vlc[g->count1table_select];
    last_pos = 0;
    while (s_index <= 572) {
        int pos, code;
        pos = get_bits_count(&s->gb);
        if (pos >= end_pos) {
            if (pos > end_pos2 && last_pos) {
                /* some encoders generate an incorrect size for this
                   part. We must go back into the data */
                s_index -= 4;
                skip_bits_long(&s->gb, last_pos - pos);
                av_log(s->avctx, AV_LOG_INFO, "overread, skip %d enddists: %d %d\n", last_pos - pos, end_pos-pos, end_pos2-pos);
                if(s->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT))
                    s_index=0;
                break;
            }
            switch_buffer(s, &pos, &end_pos, &end_pos2);
            if (pos >= end_pos)
                break;
        }
        last_pos = pos;

        code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1);
        ff_dlog(s->avctx, "t=%d code=%d\n", g->count1table_select, code);
        g->sb_hybrid[s_index + 0] =
        g->sb_hybrid[s_index + 1] =
        g->sb_hybrid[s_index + 2] =
        g->sb_hybrid[s_index + 3] = 0;
        /* each set bit in code marks a non-zero coefficient */
        while (code) {
            static const int idxtab[16] = { 3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0 };
            int v;
            int pos = s_index + idxtab[code];
            /* clear the highest set bit, then read its sign and store */
            code   ^= 8 >> idxtab[code];
            READ_FLIP_SIGN(g->sb_hybrid + pos, RENAME(exp_table)+exponents[pos])
        }
        s_index += 4;
    }

    /* skip extension bits */
    bits_left = end_pos2 - get_bits_count(&s->gb);
    if (bits_left < 0 && (s->err_recognition & (AV_EF_BUFFER|AV_EF_COMPLIANT))) {
        av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
        s_index=0;
    } else if (bits_left > 0 && (s->err_recognition & (AV_EF_BUFFER|AV_EF_AGGRESSIVE))) {
        av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
        s_index = 0;
    }
    /* zero everything that was not decoded (or all, after an error) */
    memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * (576 - s_index));
    skip_bits_long(&s->gb, bits_left);

    i = get_bits_count(&s->gb);
    switch_buffer(s, &i, &end_pos, &end_pos2);

    return 0;
}
  806. /* Reorder short blocks from bitstream order to interleaved order. It
  807. would be faster to do it in parsing, but the code would be far more
  808. complicated */
  809. static void reorder_block(MPADecodeContext *s, GranuleDef *g)
  810. {
  811. int i, j, len;
  812. INTFLOAT *ptr, *dst, *ptr1;
  813. INTFLOAT tmp[576];
  814. if (g->block_type != 2)
  815. return;
  816. if (g->switch_point) {
  817. if (s->sample_rate_index != 8)
  818. ptr = g->sb_hybrid + 36;
  819. else
  820. ptr = g->sb_hybrid + 72;
  821. } else {
  822. ptr = g->sb_hybrid;
  823. }
  824. for (i = g->short_start; i < 13; i++) {
  825. len = ff_band_size_short[s->sample_rate_index][i];
  826. ptr1 = ptr;
  827. dst = tmp;
  828. for (j = len; j > 0; j--) {
  829. *dst++ = ptr[0*len];
  830. *dst++ = ptr[1*len];
  831. *dst++ = ptr[2*len];
  832. ptr++;
  833. }
  834. ptr += 2 * len;
  835. memcpy(ptr1, tmp, len * 3 * sizeof(*ptr1));
  836. }
  837. }
#define ISQRT2 FIXR(0.70710678118654752440)

/* Undo joint stereo coding (intensity and/or mid/side) on a granule
 * pair; g0 is the first channel, g1 the second. With intensity stereo
 * the spectrum is walked from the top down, and i-stereo stops at the
 * first non-zero band found in channel 1. */
static void compute_stereo(MPADecodeContext *s, GranuleDef *g0, GranuleDef *g1)
{
    int i, j, k, l;
    int sf_max, sf, len, non_zero_found;
    INTFLOAT *tab0, *tab1, v1, v2;
    const INTFLOAT (*is_tab)[16];
    SUINTFLOAT tmp0, tmp1;
    int non_zero_found_short[3];

    /* intensity stereo */
    if (s->mode_ext & MODE_EXT_I_STEREO) {
        if (!s->lsf) {
            is_tab = is_table;
            sf_max = 7;
        } else {
            /* MPEG-2 LSF uses a different table, selected by the parity
             * of scalefac_compress */
            is_tab = is_table_lsf[g1->scalefac_compress & 1];
            sf_max = 16;
        }

        tab0 = g0->sb_hybrid + 576;
        tab1 = g1->sb_hybrid + 576;

        non_zero_found_short[0] = 0;
        non_zero_found_short[1] = 0;
        non_zero_found_short[2] = 0;
        k = (13 - g1->short_start) * 3 + g1->long_end - 3;
        for (i = 12; i >= g1->short_start; i--) {
            /* for last band, use previous scale factor */
            if (i != 11)
                k -= 3;
            len = ff_band_size_short[s->sample_rate_index][i];
            for (l = 2; l >= 0; l--) {
                tab0 -= len;
                tab1 -= len;
                if (!non_zero_found_short[l]) {
                    /* test if non zero band. if so, stop doing i-stereo */
                    for (j = 0; j < len; j++) {
                        if (tab1[j] != 0) {
                            non_zero_found_short[l] = 1;
                            goto found1;
                        }
                    }
                    sf = g1->scale_factors[k + l];
                    if (sf >= sf_max)
                        goto found1;

                    /* i-stereo: channel 0 carries the signal, the scale
                     * factor encodes the left/right intensity ratio */
                    v1 = is_tab[0][sf];
                    v2 = is_tab[1][sf];
                    for (j = 0; j < len; j++) {
                        tmp0 = tab0[j];
                        tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
                        tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
                    }
                } else {
found1:
                    if (s->mode_ext & MODE_EXT_MS_STEREO) {
                        /* lower part of the spectrum : do ms stereo
                           if enabled */
                        for (j = 0; j < len; j++) {
                            tmp0 = tab0[j];
                            tmp1 = tab1[j];
                            tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
                            tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
                        }
                    }
                }
            }
        }

        non_zero_found = non_zero_found_short[0] |
                         non_zero_found_short[1] |
                         non_zero_found_short[2];

        for (i = g1->long_end - 1;i >= 0;i--) {
            len   = ff_band_size_long[s->sample_rate_index][i];
            tab0 -= len;
            tab1 -= len;
            /* test if non zero band. if so, stop doing i-stereo */
            if (!non_zero_found) {
                for (j = 0; j < len; j++) {
                    if (tab1[j] != 0) {
                        non_zero_found = 1;
                        goto found2;
                    }
                }
                /* for last band, use previous scale factor */
                k  = (i == 21) ? 20 : i;
                sf = g1->scale_factors[k];
                if (sf >= sf_max)
                    goto found2;
                v1 = is_tab[0][sf];
                v2 = is_tab[1][sf];
                for (j = 0; j < len; j++) {
                    tmp0 = tab0[j];
                    tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
                    tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
                }
            } else {
found2:
                if (s->mode_ext & MODE_EXT_MS_STEREO) {
                    /* lower part of the spectrum : do ms stereo
                       if enabled */
                    for (j = 0; j < len; j++) {
                        tmp0 = tab0[j];
                        tmp1 = tab1[j];
                        tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
                        tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
                    }
                }
            }
        }
    } else if (s->mode_ext & MODE_EXT_MS_STEREO) {
        /* ms stereo ONLY */
        /* NOTE: the 1/sqrt(2) normalization factor is included in the
           global gain */
#if USE_FLOATS
        s->butterflies_float(g0->sb_hybrid, g1->sb_hybrid, 576);
#else
        tab0 = g0->sb_hybrid;
        tab1 = g1->sb_hybrid;
        for (i = 0; i < 576; i++) {
            tmp0    = tab0[i];
            tmp1    = tab1[i];
            tab0[i] = tmp0 + tmp1;
            tab1[i] = tmp0 - tmp1;
        }
#endif
    }
}
  962. #if USE_FLOATS
  963. #if HAVE_MIPSFPU
  964. # include "mips/compute_antialias_float.h"
  965. #endif /* HAVE_MIPSFPU */
  966. #else
  967. #if HAVE_MIPSDSP
  968. # include "mips/compute_antialias_fixed.h"
  969. #endif /* HAVE_MIPSDSP */
  970. #endif /* USE_FLOATS */
#ifndef compute_antialias
#if USE_FLOATS
/* Alias-reduction butterfly (float build): rotate the pair of samples
 * straddling a subband boundary by the cos/sin pair in csa_table[j]. */
#define AA(j) do { \
        float tmp0 = ptr[-1-j]; \
        float tmp1 = ptr[ j]; \
        ptr[-1-j] = tmp0 * csa_table[j][0] - tmp1 * csa_table[j][1]; \
        ptr[ j]   = tmp0 * csa_table[j][1] + tmp1 * csa_table[j][0]; \
} while (0)
#else
/* Alias-reduction butterfly (fixed-point build): same rotation done with
 * three multiplies; csa_table[j][2]/[3] presumably hold the difference/sum
 * of the cos/sin coefficients (see csa_table init — TODO confirm).
 * Results are scaled up by 4 to restore the working precision. */
#define AA(j) do { \
        SUINT tmp0 = ptr[-1-j]; \
        SUINT tmp1 = ptr[ j]; \
        SUINT tmp2 = MULH(tmp0 + tmp1, csa_table[j][0]); \
        ptr[-1-j] = 4 * (tmp2 - MULH(tmp1, csa_table[j][2])); \
        ptr[ j]   = 4 * (tmp2 + MULH(tmp0, csa_table[j][3])); \
} while (0)
#endif

/**
 * Apply the layer-3 alias-reduction butterflies across the subband
 * boundaries of one granule's hybrid coefficients (g->sb_hybrid),
 * in place. Used only when no platform-specific replacement macro
 * (MIPS) overrides compute_antialias.
 */
static void compute_antialias(MPADecodeContext *s, GranuleDef *g)
{
    INTFLOAT *ptr;
    int n, i;

    /* we antialias only "long" bands */
    if (g->block_type == 2) {
        if (!g->switch_point)
            return;
        /* XXX: check this for 8000Hz case */
        n = 1;
    } else {
        n = SBLIMIT - 1;
    }

    /* eight butterflies around each of the n subband boundaries */
    ptr = g->sb_hybrid + 18;
    for (i = n; i > 0; i--) {
        AA(0);
        AA(1);
        AA(2);
        AA(3);
        AA(4);
        AA(5);
        AA(6);
        AA(7);

        ptr += 18;
    }
}
#endif /* compute_antialias */
/**
 * Hybrid filterbank back-end: run the 36-point (long block) and 12-point
 * (short block) IMDCTs over one granule's coefficients and write 18 rows
 * of 32 subband samples into sb_samples, keeping the overlap-add state
 * in mdct_buf between granules.
 */
static void compute_imdct(MPADecodeContext *s, GranuleDef *g,
                          INTFLOAT *sb_samples, INTFLOAT *mdct_buf)
{
    INTFLOAT *win, *out_ptr, *ptr, *buf, *ptr1;
    INTFLOAT out2[12];
    int i, j, mdct_long_end, sblimit;

    /* find last non zero block */
    ptr  = g->sb_hybrid + 576;
    ptr1 = g->sb_hybrid + 2 * 18;
    while (ptr >= ptr1) {
        int32_t *p;
        ptr -= 6;
        p = (int32_t*)ptr;
        /* works for floats too: any non-zero bit pattern is non-zero */
        if (p[0] | p[1] | p[2] | p[3] | p[4] | p[5])
            break;
    }
    sblimit = ((ptr - g->sb_hybrid) / 18) + 1;

    if (g->block_type == 2) {
        /* XXX: check for 8000 Hz */
        if (g->switch_point)
            mdct_long_end = 2;  /* first two subbands keep long windows */
        else
            mdct_long_end = 0;
    } else {
        mdct_long_end = sblimit;
    }

    s->mpadsp.RENAME(imdct36_blocks)(sb_samples, mdct_buf, g->sb_hybrid,
                                     mdct_long_end, g->switch_point,
                                     g->block_type);

    /* overlap buffer is stored interleaved in groups of 4 subbands,
     * hence the stride-4 indexing below */
    buf = mdct_buf + 4*18*(mdct_long_end >> 2) + (mdct_long_end & 3);
    ptr = g->sb_hybrid + 18 * mdct_long_end;

    /* remaining subbands: three 12-point IMDCTs per subband, windowed and
     * overlap-added with the previous granule's tail kept in buf */
    for (j = mdct_long_end; j < sblimit; j++) {
        /* select frequency inversion */
        win     = RENAME(ff_mdct_win)[2 + (4 & -(j & 1))];
        out_ptr = sb_samples + j;

        for (i = 0; i < 6; i++) {
            *out_ptr = buf[4*i];
            out_ptr += SBLIMIT;
        }
        imdct12(out2, ptr + 0);
        for (i = 0; i < 6; i++) {
            *out_ptr = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*1)];
            buf[4*(i + 6*2)] = MULH3(out2[i + 6], win[i + 6], 1);
            out_ptr += SBLIMIT;
        }
        imdct12(out2, ptr + 1);
        for (i = 0; i < 6; i++) {
            *out_ptr = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*2)];
            buf[4*(i + 6*0)] = MULH3(out2[i + 6], win[i + 6], 1);
            out_ptr += SBLIMIT;
        }
        imdct12(out2, ptr + 2);
        for (i = 0; i < 6; i++) {
            buf[4*(i + 6*0)] = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*0)];
            buf[4*(i + 6*1)] = MULH3(out2[i + 6], win[i + 6], 1);
            buf[4*(i + 6*2)] = 0;
        }
        ptr += 18;
        /* advance to the next slot of the interleaved overlap buffer */
        buf += (j&3) != 3 ? 1 : (4*18-3);
    }

    /* zero bands */
    for (j = sblimit; j < SBLIMIT; j++) {
        /* overlap */
        out_ptr = sb_samples + j;
        for (i = 0; i < 18; i++) {
            *out_ptr = buf[4*i];
            buf[4*i] = 0;
            out_ptr += SBLIMIT;
        }
        buf += (j&3) != 3 ? 1 : (4*18-3);
    }
}
/* main layer3 decoding function: parses side info, manages the bit
 * reservoir (main_data_begin back-pointer), decodes scale factors and
 * Huffman data, then runs stereo processing, antialiasing and the IMDCT.
 * Returns nb_granules * 18 (rows of subband samples per channel) or a
 * negative error code. */
static int mp_decode_layer3(MPADecodeContext *s)
{
    int nb_granules, main_data_begin;
    int gr, ch, blocksplit_flag, i, j, k, n, bits_pos;
    GranuleDef *g;
    int16_t exponents[576]; //FIXME try INTFLOAT
    int ret;

    /* read side info */
    if (s->lsf) {
        /* MPEG-2 LSF: one granule, 8-bit main_data_begin */
        ret = handle_crc(s, ((s->nb_channels == 1) ? 8*9 : 8*17));
        main_data_begin = get_bits(&s->gb, 8);
        skip_bits(&s->gb, s->nb_channels);  /* private bits */
        nb_granules = 1;
    } else {
        /* MPEG-1: two granules, 9-bit main_data_begin */
        ret = handle_crc(s, ((s->nb_channels == 1) ? 8*17 : 8*32));
        main_data_begin = get_bits(&s->gb, 9);
        if (s->nb_channels == 2)
            skip_bits(&s->gb, 3);   /* private bits */
        else
            skip_bits(&s->gb, 5);
        nb_granules = 2;
        for (ch = 0; ch < s->nb_channels; ch++) {
            s->granules[ch][0].scfsi = 0;/* all scale factors are transmitted */
            s->granules[ch][1].scfsi = get_bits(&s->gb, 4);
        }
    }
    if (ret < 0)
        return ret;

    /* per-granule, per-channel side information */
    for (gr = 0; gr < nb_granules; gr++) {
        for (ch = 0; ch < s->nb_channels; ch++) {
            ff_dlog(s->avctx, "gr=%d ch=%d: side_info\n", gr, ch);
            g = &s->granules[ch][gr];
            g->part2_3_length = get_bits(&s->gb, 12);
            g->big_values = get_bits(&s->gb, 9);
            if (g->big_values > 288) {
                av_log(s->avctx, AV_LOG_ERROR, "big_values too big\n");
                return AVERROR_INVALIDDATA;
            }

            g->global_gain = get_bits(&s->gb, 8);
            /* if MS stereo only is selected, we precompute the
               1/sqrt(2) renormalization factor */
            if ((s->mode_ext & (MODE_EXT_MS_STEREO | MODE_EXT_I_STEREO)) ==
                MODE_EXT_MS_STEREO)
                g->global_gain -= 2;
            if (s->lsf)
                g->scalefac_compress = get_bits(&s->gb, 9);
            else
                g->scalefac_compress = get_bits(&s->gb, 4);
            blocksplit_flag = get_bits1(&s->gb);
            if (blocksplit_flag) {
                /* non-normal window: short or mixed blocks */
                g->block_type = get_bits(&s->gb, 2);
                if (g->block_type == 0) {
                    av_log(s->avctx, AV_LOG_ERROR, "invalid block type\n");
                    return AVERROR_INVALIDDATA;
                }
                g->switch_point = get_bits1(&s->gb);
                for (i = 0; i < 2; i++)
                    g->table_select[i] = get_bits(&s->gb, 5);
                for (i = 0; i < 3; i++)
                    g->subblock_gain[i] = get_bits(&s->gb, 3);
                init_short_region(s, g);
            } else {
                int region_address1, region_address2;
                g->block_type = 0;
                g->switch_point = 0;
                for (i = 0; i < 3; i++)
                    g->table_select[i] = get_bits(&s->gb, 5);
                /* compute huffman coded region sizes */
                region_address1 = get_bits(&s->gb, 4);
                region_address2 = get_bits(&s->gb, 3);
                ff_dlog(s->avctx, "region1=%d region2=%d\n",
                        region_address1, region_address2);
                init_long_region(s, g, region_address1, region_address2);
            }
            region_offset2size(g);
            compute_band_indexes(s, g);

            g->preflag = 0;
            if (!s->lsf)
                g->preflag = get_bits1(&s->gb);
            g->scalefac_scale     = get_bits1(&s->gb);
            g->count1table_select = get_bits1(&s->gb);
            ff_dlog(s->avctx, "block_type=%d switch_point=%d\n",
                    g->block_type, g->switch_point);
        }
    }

    if (!s->adu_mode) {
        int skip;
        const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb) >> 3);
        /* number of bytes of this frame kept around for the next frame's
         * bit reservoir, clipped to the room left in last_buf */
        s->extrasize = av_clip((get_bits_left(&s->gb) >> 3) - s->extrasize, 0,
                               FFMAX(0, LAST_BUF_SIZE - s->last_buf_size));
        av_assert1((get_bits_count(&s->gb) & 7) == 0);
        /* now we get bits from the main_data_begin offset */
        ff_dlog(s->avctx, "seekback:%d, lastbuf:%d\n",
                main_data_begin, s->last_buf_size);

        memcpy(s->last_buf + s->last_buf_size, ptr, s->extrasize);
        /* switch the bit reader onto the reservoir buffer; the original
         * packet reader is parked in in_gb until restored below or by
         * mp_decode_frame() */
        s->in_gb = s->gb;
        init_get_bits(&s->gb, s->last_buf, (s->last_buf_size + s->extrasize) * 8);
        s->last_buf_size <<= 3;
        /* if the reservoir does not reach back far enough, emit silent
         * granules instead of decoding garbage */
        for (gr = 0; gr < nb_granules && (s->last_buf_size >> 3) < main_data_begin; gr++) {
            for (ch = 0; ch < s->nb_channels; ch++) {
                g = &s->granules[ch][gr];
                s->last_buf_size += g->part2_3_length;
                memset(g->sb_hybrid, 0, sizeof(g->sb_hybrid));
                compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
            }
        }
        skip = s->last_buf_size - 8 * main_data_begin;
        if (skip >= s->gb.size_in_bits - s->extrasize * 8 && s->in_gb.buffer) {
            /* main data starts inside the current packet: go back to the
             * packet reader directly */
            skip_bits_long(&s->in_gb, skip - s->gb.size_in_bits + s->extrasize * 8);
            s->gb = s->in_gb;
            s->in_gb.buffer = NULL;
            s->extrasize = 0;
        } else {
            skip_bits_long(&s->gb, skip);
        }
    } else {
        /* ADU mode: every frame is self-contained, no reservoir */
        gr = 0;
        s->extrasize = 0;
    }

    for (; gr < nb_granules; gr++) {
        for (ch = 0; ch < s->nb_channels; ch++) {
            g = &s->granules[ch][gr];
            bits_pos = get_bits_count(&s->gb);

            if (!s->lsf) {
                uint8_t *sc;
                int slen, slen1, slen2;

                /* MPEG-1 scale factors */
                slen1 = ff_slen_table[0][g->scalefac_compress];
                slen2 = ff_slen_table[1][g->scalefac_compress];
                ff_dlog(s->avctx, "slen1=%d slen2=%d\n", slen1, slen2);
                if (g->block_type == 2) {
                    /* short/mixed blocks: scfsi does not apply */
                    n = g->switch_point ? 17 : 18;
                    j = 0;
                    if (slen1) {
                        for (i = 0; i < n; i++)
                            g->scale_factors[j++] = get_bits(&s->gb, slen1);
                    } else {
                        for (i = 0; i < n; i++)
                            g->scale_factors[j++] = 0;
                    }
                    if (slen2) {
                        for (i = 0; i < 18; i++)
                            g->scale_factors[j++] = get_bits(&s->gb, slen2);
                        for (i = 0; i < 3; i++)
                            g->scale_factors[j++] = 0;
                    } else {
                        for (i = 0; i < 21; i++)
                            g->scale_factors[j++] = 0;
                    }
                } else {
                    /* long blocks: scfsi selects per-group reuse of the
                     * first granule's scale factors */
                    sc = s->granules[ch][0].scale_factors;
                    j = 0;
                    for (k = 0; k < 4; k++) {
                        n = k == 0 ? 6 : 5;
                        if ((g->scfsi & (0x8 >> k)) == 0) {
                            slen = (k < 2) ? slen1 : slen2;
                            if (slen) {
                                for (i = 0; i < n; i++)
                                    g->scale_factors[j++] = get_bits(&s->gb, slen);
                            } else {
                                for (i = 0; i < n; i++)
                                    g->scale_factors[j++] = 0;
                            }
                        } else {
                            /* simply copy from last granule */
                            for (i = 0; i < n; i++) {
                                g->scale_factors[j] = sc[j];
                                j++;
                            }
                        }
                    }
                    g->scale_factors[j++] = 0;
                }
            } else {
                int tindex, tindex2, slen[4], sl, sf;

                /* LSF scale factors */
                if (g->block_type == 2)
                    tindex = g->switch_point ? 2 : 1;
                else
                    tindex = 0;
                sf = g->scalefac_compress;
                if ((s->mode_ext & MODE_EXT_I_STEREO) && ch == 1) {
                    /* intensity stereo case */
                    sf >>= 1;
                    if (sf < 180) {
                        lsf_sf_expand(slen, sf, 6, 6, 0);
                        tindex2 = 3;
                    } else if (sf < 244) {
                        lsf_sf_expand(slen, sf - 180, 4, 4, 0);
                        tindex2 = 4;
                    } else {
                        lsf_sf_expand(slen, sf - 244, 3, 0, 0);
                        tindex2 = 5;
                    }
                } else {
                    /* normal case */
                    if (sf < 400) {
                        lsf_sf_expand(slen, sf, 5, 4, 4);
                        tindex2 = 0;
                    } else if (sf < 500) {
                        lsf_sf_expand(slen, sf - 400, 5, 4, 0);
                        tindex2 = 1;
                    } else {
                        lsf_sf_expand(slen, sf - 500, 3, 0, 0);
                        tindex2 = 2;
                        g->preflag = 1;
                    }
                }

                j = 0;
                for (k = 0; k < 4; k++) {
                    n = ff_lsf_nsf_table[tindex2][tindex][k];
                    sl = slen[k];
                    if (sl) {
                        for (i = 0; i < n; i++)
                            g->scale_factors[j++] = get_bits(&s->gb, sl);
                    } else {
                        for (i = 0; i < n; i++)
                            g->scale_factors[j++] = 0;
                    }
                }
                /* XXX: should compute exact size */
                for (; j < 40; j++)
                    g->scale_factors[j] = 0;
            }

            exponents_from_scale_factors(s, g, exponents);

            /* read Huffman coded residue */
            huffman_decode(s, g, exponents, bits_pos + g->part2_3_length);
        } /* ch */

        if (s->mode == MPA_JSTEREO)
            compute_stereo(s, &s->granules[0][gr], &s->granules[1][gr]);

        for (ch = 0; ch < s->nb_channels; ch++) {
            g = &s->granules[ch][gr];
            reorder_block(s, g);
            compute_antialias(s, g);
            compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
        }
    } /* gr */
    /* huffman_decode() may legitimately rewind before position 0; clamp */
    if (get_bits_count(&s->gb) < 0)
        skip_bits_long(&s->gb, -get_bits_count(&s->gb));
    return nb_granules * 18;
}
/**
 * Decode one MPEG audio frame starting at buf (header included) and run
 * the synthesis filterbank. When samples is NULL, an output buffer is
 * acquired on s->frame instead.
 *
 * @return number of bytes of PCM produced, or a negative error code
 */
static int mp_decode_frame(MPADecodeContext *s, OUT_INT **samples,
                           const uint8_t *buf, int buf_size)
{
    int i, nb_frames, ch, ret;
    OUT_INT *samples_ptr;

    init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE) * 8);

    if (s->error_protection)
        s->crc = get_bits(&s->gb, 16);

    switch(s->layer) {
    case 1:
        s->avctx->frame_size = 384;
        nb_frames = mp_decode_layer1(s);
        break;
    case 2:
        s->avctx->frame_size = 1152;
        nb_frames = mp_decode_layer2(s);
        break;
    case 3:
        s->avctx->frame_size = s->lsf ? 576 : 1152;
        /* fall through: layer 3 is handled by the default case */
    default:
        nb_frames = mp_decode_layer3(s);

        s->last_buf_size=0;
        if (s->in_gb.buffer) {
            /* the bit reader was switched to last_buf during layer 3
             * decoding; salvage its unread tail for the next frame and
             * restore the packet reader */
            align_get_bits(&s->gb);
            i = (get_bits_left(&s->gb) >> 3) - s->extrasize;
            if (i >= 0 && i <= BACKSTEP_SIZE) {
                memmove(s->last_buf, s->gb.buffer + (get_bits_count(&s->gb) >> 3), i);
                s->last_buf_size=i;
            } else
                av_log(s->avctx, AV_LOG_ERROR, "invalid old backstep %d\n", i);
            s->gb            = s->in_gb;
            s->in_gb.buffer  = NULL;
            s->extrasize     = 0;
        }

        align_get_bits(&s->gb);
        av_assert1((get_bits_count(&s->gb) & 7) == 0);
        /* append this frame's unread bytes to last_buf so the next
         * frame's main_data_begin back-pointer can reach them */
        i = (get_bits_left(&s->gb) >> 3) - s->extrasize;
        if (i < 0 || i > BACKSTEP_SIZE || nb_frames < 0) {
            if (i < 0)
                av_log(s->avctx, AV_LOG_ERROR, "invalid new backstep %d\n", i);
            i = FFMIN(BACKSTEP_SIZE, buf_size - HEADER_SIZE);
        }
        av_assert1(i <= buf_size - HEADER_SIZE && i >= 0);
        memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i);
        s->last_buf_size += i;
    }

    if(nb_frames < 0)
        return nb_frames;

    /* get output buffer */
    if (!samples) {
        av_assert0(s->frame);
        s->frame->nb_samples = s->avctx->frame_size;
        if ((ret = ff_get_buffer(s->avctx, s->frame, 0)) < 0)
            return ret;
        samples = (OUT_INT **)s->frame->extended_data;
    }

    /* apply the synthesis filter */
    for (ch = 0; ch < s->nb_channels; ch++) {
        int sample_stride;
        if (s->avctx->sample_fmt == OUT_FMT_P) {
            /* planar output: one buffer per channel */
            samples_ptr   = samples[ch];
            sample_stride = 1;
        } else {
            /* interleaved output */
            samples_ptr   = samples[0] + ch;
            sample_stride = s->nb_channels;
        }
        for (i = 0; i < nb_frames; i++) {
            RENAME(ff_mpa_synth_filter)(&s->mpadsp, s->synth_buf[ch],
                                        &(s->synth_buf_offset[ch]),
                                        RENAME(ff_mpa_synth_window),
                                        &s->dither_state, samples_ptr,
                                        sample_stride, s->sb_samples[ch][i]);
            samples_ptr += 32 * sample_stride;
        }
    }

    return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels;
}
  1406. static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr,
  1407. AVPacket *avpkt)
  1408. {
  1409. const uint8_t *buf = avpkt->data;
  1410. int buf_size = avpkt->size;
  1411. MPADecodeContext *s = avctx->priv_data;
  1412. uint32_t header;
  1413. int ret;
  1414. int skipped = 0;
  1415. while(buf_size && !*buf){
  1416. buf++;
  1417. buf_size--;
  1418. skipped++;
  1419. }
  1420. if (buf_size < HEADER_SIZE)
  1421. return AVERROR_INVALIDDATA;
  1422. header = AV_RB32(buf);
  1423. if (header >> 8 == AV_RB32("TAG") >> 8) {
  1424. av_log(avctx, AV_LOG_DEBUG, "discarding ID3 tag\n");
  1425. return buf_size + skipped;
  1426. }
  1427. ret = avpriv_mpegaudio_decode_header((MPADecodeHeader *)s, header);
  1428. if (ret < 0) {
  1429. av_log(avctx, AV_LOG_ERROR, "Header missing\n");
  1430. return AVERROR_INVALIDDATA;
  1431. } else if (ret == 1) {
  1432. /* free format: prepare to compute frame size */
  1433. s->frame_size = -1;
  1434. return AVERROR_INVALIDDATA;
  1435. }
  1436. /* update codec info */
  1437. avctx->channels = s->nb_channels;
  1438. avctx->channel_layout = s->nb_channels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
  1439. if (!avctx->bit_rate)
  1440. avctx->bit_rate = s->bit_rate;
  1441. if (s->frame_size <= 0) {
  1442. av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
  1443. return AVERROR_INVALIDDATA;
  1444. } else if (s->frame_size < buf_size) {
  1445. av_log(avctx, AV_LOG_DEBUG, "incorrect frame size - multiple frames in buffer?\n");
  1446. buf_size= s->frame_size;
  1447. }
  1448. s->frame = data;
  1449. ret = mp_decode_frame(s, NULL, buf, buf_size);
  1450. if (ret >= 0) {
  1451. s->frame->nb_samples = avctx->frame_size;
  1452. *got_frame_ptr = 1;
  1453. avctx->sample_rate = s->sample_rate;
  1454. //FIXME maybe move the other codec info stuff from above here too
  1455. } else {
  1456. av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n");
  1457. /* Only return an error if the bad frame makes up the whole packet or
  1458. * the error is related to buffer management.
  1459. * If there is more data in the packet, just consume the bad frame
  1460. * instead of returning an error, which would discard the whole
  1461. * packet. */
  1462. *got_frame_ptr = 0;
  1463. if (buf_size == avpkt->size || ret != AVERROR_INVALIDDATA)
  1464. return ret;
  1465. }
  1466. s->frame_size = 0;
  1467. return buf_size + skipped;
  1468. }
  1469. static void mp_flush(MPADecodeContext *ctx)
  1470. {
  1471. memset(ctx->synth_buf, 0, sizeof(ctx->synth_buf));
  1472. memset(ctx->mdct_buf, 0, sizeof(ctx->mdct_buf));
  1473. ctx->last_buf_size = 0;
  1474. ctx->dither_state = 0;
  1475. }
  1476. static void flush(AVCodecContext *avctx)
  1477. {
  1478. mp_flush(avctx->priv_data);
  1479. }
  1480. #if CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER
  1481. static int decode_frame_adu(AVCodecContext *avctx, void *data,
  1482. int *got_frame_ptr, AVPacket *avpkt)
  1483. {
  1484. const uint8_t *buf = avpkt->data;
  1485. int buf_size = avpkt->size;
  1486. MPADecodeContext *s = avctx->priv_data;
  1487. uint32_t header;
  1488. int len, ret;
  1489. int av_unused out_size;
  1490. len = buf_size;
  1491. // Discard too short frames
  1492. if (buf_size < HEADER_SIZE) {
  1493. av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
  1494. return AVERROR_INVALIDDATA;
  1495. }
  1496. if (len > MPA_MAX_CODED_FRAME_SIZE)
  1497. len = MPA_MAX_CODED_FRAME_SIZE;
  1498. // Get header and restore sync word
  1499. header = AV_RB32(buf) | 0xffe00000;
  1500. ret = avpriv_mpegaudio_decode_header((MPADecodeHeader *)s, header);
  1501. if (ret < 0) {
  1502. av_log(avctx, AV_LOG_ERROR, "Invalid frame header\n");
  1503. return ret;
  1504. }
  1505. /* update codec info */
  1506. avctx->sample_rate = s->sample_rate;
  1507. avctx->channels = s->nb_channels;
  1508. avctx->channel_layout = s->nb_channels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
  1509. if (!avctx->bit_rate)
  1510. avctx->bit_rate = s->bit_rate;
  1511. s->frame_size = len;
  1512. s->frame = data;
  1513. ret = mp_decode_frame(s, NULL, buf, buf_size);
  1514. if (ret < 0) {
  1515. av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n");
  1516. return ret;
  1517. }
  1518. *got_frame_ptr = 1;
  1519. return buf_size;
  1520. }
  1521. #endif /* CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER */
#if CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER

/**
 * Context for MP3On4 decoder: multichannel audio carried as several
 * independent mono/stereo MP3 sub-streams per block.
 */
typedef struct MP3On4DecodeContext {
    int frames;          ///< number of mp3 frames per block (number of mp3 decoder instances)
    int syncword;        ///< syncword patch
    const uint8_t *coff; ///< channel offsets in output buffer
    MPADecodeContext *mp3decctx[5]; ///< MPADecodeContext for every decoder instance
} MP3On4DecodeContext;

#include "mpeg4audio.h"

/* Next 3 arrays are indexed by channel config number (passed via codecdata) */

/* number of mp3 decoder instances */
static const uint8_t mp3Frames[8] = { 0, 1, 1, 2, 3, 3, 4, 5 };

/* offsets into output buffer, assume output order is FL FR C LFE BL BR SL SR */
static const uint8_t chan_offset[8][5] = {
    { 0 },
    { 0 },             // C
    { 0 },             // FLR
    { 2, 0 },          // C FLR
    { 2, 0, 3 },       // C FLR BS
    { 2, 0, 3 },       // C FLR BLRS
    { 2, 0, 4, 3 },    // C FLR BLRS LFE
    { 2, 0, 6, 4, 3 }, // C FLR BLRS BLR LFE
};

/* mp3on4 channel layouts */
static const int16_t chan_layout[8] = {
    0,
    AV_CH_LAYOUT_MONO,
    AV_CH_LAYOUT_STEREO,
    AV_CH_LAYOUT_SURROUND,
    AV_CH_LAYOUT_4POINT0,
    AV_CH_LAYOUT_5POINT0,
    AV_CH_LAYOUT_5POINT1,
    AV_CH_LAYOUT_7POINT1
};
  1558. static av_cold int decode_close_mp3on4(AVCodecContext * avctx)
  1559. {
  1560. MP3On4DecodeContext *s = avctx->priv_data;
  1561. int i;
  1562. for (i = 0; i < s->frames; i++)
  1563. av_freep(&s->mp3decctx[i]);
  1564. return 0;
  1565. }
/**
 * Initialize the MP3On4 wrapper: parse the MPEG-4 audio config from
 * extradata, size the per-stream decoder array, and set up one
 * MPADecodeContext per sub-stream.
 */
static av_cold int decode_init_mp3on4(AVCodecContext * avctx)
{
    MP3On4DecodeContext *s = avctx->priv_data;
    MPEG4AudioConfig cfg;
    int i, ret;

    if ((avctx->extradata_size < 2) || !avctx->extradata) {
        av_log(avctx, AV_LOG_ERROR, "Codec extradata missing or too short.\n");
        return AVERROR_INVALIDDATA;
    }

    avpriv_mpeg4audio_get_config2(&cfg, avctx->extradata,
                                  avctx->extradata_size, 1, avctx);
    if (!cfg.chan_config || cfg.chan_config > 7) {
        av_log(avctx, AV_LOG_ERROR, "Invalid channel config number.\n");
        return AVERROR_INVALIDDATA;
    }
    s->frames = mp3Frames[cfg.chan_config];
    s->coff   = chan_offset[cfg.chan_config];
    avctx->channels       = ff_mpeg4audio_channels[cfg.chan_config];
    avctx->channel_layout = chan_layout[cfg.chan_config];

    /* 11-bit sync mask (0xffe00000) for low sample rates, 12-bit
     * (0xfff00000) otherwise; used to rebuild sub-frame headers */
    if (cfg.sample_rate < 16000)
        s->syncword = 0xffe00000;
    else
        s->syncword = 0xfff00000;

    /* Init the first mp3 decoder in standard way, so that all tables get builded
     * We replace avctx->priv_data with the context of the first decoder so that
     * decode_init() does not have to be changed.
     * Other decoders will be initialized here copying data from the first context
     */
    // Allocate zeroed memory for the first decoder context
    s->mp3decctx[0] = av_mallocz(sizeof(MPADecodeContext));
    if (!s->mp3decctx[0])
        return AVERROR(ENOMEM);
    // Put decoder context in place to make init_decode() happy
    avctx->priv_data = s->mp3decctx[0];
    ret = decode_init(avctx);
    // Restore mp3on4 context pointer
    avctx->priv_data = s;
    if (ret < 0)
        return ret;
    s->mp3decctx[0]->adu_mode = 1; // Set adu mode

    /* Create a separate codec/context for each frame (first is already ok).
     * Each frame is 1 or 2 channels - up to 5 frames allowed
     */
    for (i = 1; i < s->frames; i++) {
        s->mp3decctx[i] = av_mallocz(sizeof(MPADecodeContext));
        if (!s->mp3decctx[i])
            return AVERROR(ENOMEM);
        s->mp3decctx[i]->adu_mode = 1;
        s->mp3decctx[i]->avctx = avctx;
        /* share the DSP tables built by decode_init() above */
        s->mp3decctx[i]->mpadsp = s->mp3decctx[0]->mpadsp;
        s->mp3decctx[i]->butterflies_float = s->mp3decctx[0]->butterflies_float;
    }

    return 0;
}
  1620. static void flush_mp3on4(AVCodecContext *avctx)
  1621. {
  1622. int i;
  1623. MP3On4DecodeContext *s = avctx->priv_data;
  1624. for (i = 0; i < s->frames; i++)
  1625. mp_flush(s->mp3decctx[i]);
  1626. }
/**
 * Decode one MP3On4 block: a sequence of size-prefixed MP3 sub-frames,
 * one per decoder instance, each routed to its channel offset in the
 * planar output frame.
 *
 * @return number of input bytes consumed, or a negative error code
 */
static int decode_frame_mp3on4(AVCodecContext *avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame         = data;
    const uint8_t *buf     = avpkt->data;
    int buf_size           = avpkt->size;
    MP3On4DecodeContext *s = avctx->priv_data;
    MPADecodeContext *m;
    int fsize, len = buf_size, out_size = 0;
    uint32_t header;
    OUT_INT **out_samples;
    OUT_INT *outptr[2];
    int fr, ch, ret;

    /* get output buffer */
    frame->nb_samples = MPA_FRAME_SIZE;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;
    out_samples = (OUT_INT **)frame->extended_data;

    // Discard too short frames
    if (buf_size < HEADER_SIZE)
        return AVERROR_INVALIDDATA;

    avctx->bit_rate = 0;

    ch = 0;
    for (fr = 0; fr < s->frames; fr++) {
        /* each sub-frame starts with its size in the top 12 bits */
        fsize = AV_RB16(buf) >> 4;
        fsize = FFMIN3(fsize, len, MPA_MAX_CODED_FRAME_SIZE);
        m     = s->mp3decctx[fr];
        av_assert1(m);

        if (fsize < HEADER_SIZE) {
            av_log(avctx, AV_LOG_ERROR, "Frame size smaller than header size\n");
            return AVERROR_INVALIDDATA;
        }
        header = (AV_RB32(buf) & 0x000fffff) | s->syncword; // patch header

        ret = avpriv_mpegaudio_decode_header((MPADecodeHeader *)m, header);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Bad header, discard block\n");
            return AVERROR_INVALIDDATA;
        }

        if (ch + m->nb_channels > avctx->channels ||
            s->coff[fr] + m->nb_channels > avctx->channels) {
            av_log(avctx, AV_LOG_ERROR, "frame channel count exceeds codec "
                                        "channel count\n");
            return AVERROR_INVALIDDATA;
        }
        ch += m->nb_channels;

        /* route this sub-frame's channels to their output planes */
        outptr[0] = out_samples[s->coff[fr]];
        if (m->nb_channels > 1)
            outptr[1] = out_samples[s->coff[fr] + 1];

        if ((ret = mp_decode_frame(m, outptr, buf, fsize)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "failed to decode channel %d\n", ch);
            memset(outptr[0], 0, MPA_FRAME_SIZE*sizeof(OUT_INT));
            if (m->nb_channels > 1)
                memset(outptr[1], 0, MPA_FRAME_SIZE*sizeof(OUT_INT));
            /* account for a full (silent) sub-frame and keep going */
            ret = m->nb_channels * MPA_FRAME_SIZE*sizeof(OUT_INT);
        }

        out_size += ret;
        buf      += fsize;
        len      -= fsize;

        avctx->bit_rate += m->bit_rate;
    }
    if (ch != avctx->channels) {
        av_log(avctx, AV_LOG_ERROR, "failed to decode all channels\n");
        return AVERROR_INVALIDDATA;
    }

    /* update codec info */
    avctx->sample_rate = s->mp3decctx[0]->sample_rate;

    frame->nb_samples = out_size / (avctx->channels * sizeof(OUT_INT));
    *got_frame_ptr    = 1;

    return buf_size;
}
#endif /* CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER */