  1. /*
  2. * MPEG Audio decoder
  3. * Copyright (c) 2001, 2002 Fabrice Bellard
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * MPEG Audio decoder
  24. */
  25. #include "libavutil/attributes.h"
  26. #include "libavutil/avassert.h"
  27. #include "libavutil/channel_layout.h"
  28. #include "libavutil/crc.h"
  29. #include "libavutil/float_dsp.h"
  30. #include "libavutil/libm.h"
  31. #include "avcodec.h"
  32. #include "get_bits.h"
  33. #include "internal.h"
  34. #include "mathops.h"
  35. #include "mpegaudiodsp.h"
  36. /*
  37. * TODO:
  38. * - test lsf / mpeg25 extensively.
  39. */
  40. #include "mpegaudio.h"
  41. #include "mpegaudiodecheader.h"
  42. #define BACKSTEP_SIZE 512
  43. #define EXTRABYTES 24
  44. #define LAST_BUF_SIZE (2 * BACKSTEP_SIZE + EXTRABYTES)
  45. /* layer 3 "granule" */
  46. typedef struct GranuleDef {
  47. uint8_t scfsi;
  48. int part2_3_length;
  49. int big_values;
  50. int global_gain;
  51. int scalefac_compress;
  52. uint8_t block_type;
  53. uint8_t switch_point;
  54. int table_select[3];
  55. int subblock_gain[3];
  56. uint8_t scalefac_scale;
  57. uint8_t count1table_select;
  58. int region_size[3]; /* number of huffman codes in each region */
  59. int preflag;
  60. int short_start, long_end; /* long/short band indexes */
  61. uint8_t scale_factors[40];
  62. DECLARE_ALIGNED(16, INTFLOAT, sb_hybrid)[SBLIMIT * 18]; /* 576 samples */
  63. } GranuleDef;
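/* Per-stream decoder state. last_buf holds bytes carried over from previous
   frames so that Layer III main_data_begin can point back into them (the
   "bit reservoir"); in_gb saves the current frame's bit reader while bits
   are being pulled from that buffer. */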
  64. typedef struct MPADecodeContext {
  65. MPA_DECODE_HEADER
  66. uint8_t last_buf[LAST_BUF_SIZE];
  67. int last_buf_size;
  68. int extrasize;
  69. /* next header (used in free format parsing) */
  70. uint32_t free_format_next_header;
  71. GetBitContext gb;
  72. GetBitContext in_gb;
  73. DECLARE_ALIGNED(32, MPA_INT, synth_buf)[MPA_MAX_CHANNELS][512 * 2];
  74. int synth_buf_offset[MPA_MAX_CHANNELS];
  75. DECLARE_ALIGNED(32, INTFLOAT, sb_samples)[MPA_MAX_CHANNELS][36][SBLIMIT];
  76. INTFLOAT mdct_buf[MPA_MAX_CHANNELS][SBLIMIT * 18]; /* previous samples, for layer 3 MDCT */
  77. GranuleDef granules[2][2]; /* Used in Layer 3 */
  78. int adu_mode; ///< 0 for standard mp3, 1 for adu formatted mp3
  79. int dither_state;
  80. int err_recognition;
  81. AVCodecContext* avctx;
  82. MPADSPContext mpadsp;
  83. AVFloatDSPContext *fdsp;
  84. AVFrame *frame;
  85. } MPADecodeContext;
  86. #define HEADER_SIZE 4
  87. #include "mpegaudiodata.h"
  88. #include "mpegaudiodectab.h"
  89. /* vlc structure for decoding layer 3 huffman tables */
  90. static VLC huff_vlc[16];
  91. static VLC_TYPE huff_vlc_tables[
  92. 0 + 128 + 128 + 128 + 130 + 128 + 154 + 166 +
  93. 142 + 204 + 190 + 170 + 542 + 460 + 662 + 414
  94. ][2];
  95. static const int huff_vlc_tables_sizes[16] = {
  96. 0, 128, 128, 128, 130, 128, 154, 166,
  97. 142, 204, 190, 170, 542, 460, 662, 414
  98. };
  99. static VLC huff_quad_vlc[2];
  100. static VLC_TYPE huff_quad_vlc_tables[128+16][2];
  101. static const int huff_quad_vlc_tables_sizes[2] = { 128, 16 };
  102. /* computed from band_size_long */
  103. static uint16_t band_index_long[9][23];
  104. #include "mpegaudio_tablegen.h"
  105. /* intensity stereo coef table */
  106. static INTFLOAT is_table[2][16];
  107. static INTFLOAT is_table_lsf[2][2][16];
  108. static INTFLOAT csa_table[8][4];
  109. static int16_t division_tab3[1<<6 ];
  110. static int16_t division_tab5[1<<8 ];
  111. static int16_t division_tab9[1<<11];
  112. static int16_t * const division_tabs[4] = {
  113. division_tab3, division_tab5, NULL, division_tab9
  114. };
  115. /* lower 2 bits: modulo 3, higher bits: shift */
  116. static uint16_t scale_factor_modshift[64];
  117. /* [i][j]: 2^(-j/3) * FRAC_ONE * 2^(i+2) / (2^(i+2) - 1) */
  118. static int32_t scale_factor_mult[15][3];
  119. /* mult table for layer 2 group quantization */
  120. #define SCALE_GEN(v) \
  121. { FIXR_OLD(1.0 * (v)), FIXR_OLD(0.7937005259 * (v)), FIXR_OLD(0.6299605249 * (v)) }
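/* the three columns are v * 2^(0/3), v * 2^(-1/3) and v * 2^(-2/3)
   (0.7937005259 = 2^(-1/3), 0.6299605249 = 2^(-2/3)) */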
  122. static const int32_t scale_factor_mult2[3][3] = {
  123. SCALE_GEN(4.0 / 3.0), /* 3 steps */
  124. SCALE_GEN(4.0 / 5.0), /* 5 steps */
  125. SCALE_GEN(4.0 / 9.0), /* 9 steps */
  126. };
  127. /**
  128. * Convert region offsets to region sizes and truncate
  129. * size to big_values.
  130. */
  131. static void region_offset2size(GranuleDef *g)
  132. {
  133. int i, k, j = 0;
  134. g->region_size[2] = 576 / 2;
  135. for (i = 0; i < 3; i++) {
  136. k = FFMIN(g->region_size[i], g->big_values);
  137. g->region_size[i] = k - j;
  138. j = k;
  139. }
  140. }
  141. static void init_short_region(MPADecodeContext *s, GranuleDef *g)
  142. {
  143. if (g->block_type == 2) {
  144. if (s->sample_rate_index != 8)
  145. g->region_size[0] = (36 / 2);
  146. else
  147. g->region_size[0] = (72 / 2);
  148. } else {
  149. if (s->sample_rate_index <= 2)
  150. g->region_size[0] = (36 / 2);
  151. else if (s->sample_rate_index != 8)
  152. g->region_size[0] = (54 / 2);
  153. else
  154. g->region_size[0] = (108 / 2);
  155. }
  156. g->region_size[1] = (576 / 2);
  157. }
  158. static void init_long_region(MPADecodeContext *s, GranuleDef *g,
  159. int ra1, int ra2)
  160. {
  161. int l;
  162. g->region_size[0] = band_index_long[s->sample_rate_index][ra1 + 1] >> 1;
  163. /* should not overflow */
  164. l = FFMIN(ra1 + ra2 + 2, 22);
  165. g->region_size[1] = band_index_long[s->sample_rate_index][ l] >> 1;
  166. }
  167. static void compute_band_indexes(MPADecodeContext *s, GranuleDef *g)
  168. {
  169. if (g->block_type == 2) {
  170. if (g->switch_point) {
  171. if(s->sample_rate_index == 8)
  172. avpriv_request_sample(s->avctx, "switch point in 8khz");
  173. /* in switched mode, the first 36 samples are handled as
  174. long blocks; for 8000 Hz, the first 72 exponents are
  175. handled as long blocks */
  176. if (s->sample_rate_index <= 2)
  177. g->long_end = 8;
  178. else
  179. g->long_end = 6;
  180. g->short_start = 3;
  181. } else {
  182. g->long_end = 0;
  183. g->short_start = 0;
  184. }
  185. } else {
  186. g->short_start = 13;
  187. g->long_end = 22;
  188. }
  189. }
  190. /* layer 1 unscaling */
  191. /* n = number of bits of the mantissa minus 1 */
  192. static inline int l1_unscale(int n, int mant, int scale_factor)
  193. {
  194. int shift, mod;
  195. int64_t val;
  196. shift = scale_factor_modshift[scale_factor];
  197. mod = shift & 3;
  198. shift >>= 2;
  199. val = MUL64((int)(mant + (-1U << n) + 1), scale_factor_mult[n-1][mod]);
  200. shift += n;
  201. /* NOTE: at this point, 1 <= shift <= 21 + 15 */
  202. return (int)((val + (1LL << (shift - 1))) >> shift);
  203. }
  204. static inline int l2_unscale_group(int steps, int mant, int scale_factor)
  205. {
  206. int shift, mod, val;
  207. shift = scale_factor_modshift[scale_factor];
  208. mod = shift & 3;
  209. shift >>= 2;
  210. val = (mant - (steps >> 1)) * scale_factor_mult2[steps >> 2][mod];
  211. /* NOTE: at this point, 0 <= shift <= 21 */
  212. if (shift > 0)
  213. val = (val + (1 << (shift - 1))) >> shift;
  214. return val;
  215. }
  216. /* compute value^(4/3) * 2^(exponent/4). It is normalized to FRAC_BITS */
  217. static inline int l3_unscale(int value, int exponent)
  218. {
  219. unsigned int m;
  220. int e;
  221. e = table_4_3_exp [4 * value + (exponent & 3)];
  222. m = table_4_3_value[4 * value + (exponent & 3)];
  223. e -= exponent >> 2;
  224. #ifdef DEBUG
  225. if(e < 1)
  226. av_log(NULL, AV_LOG_WARNING, "l3_unscale: e is %d\n", e);
  227. #endif
  228. if (e > (SUINT)31)
  229. return 0;
  230. m = (m + ((1U << e)>>1)) >> e;
  231. return m;
  232. }
  233. static av_cold void decode_init_static(void)
  234. {
  235. int i, j, k;
  236. int offset;
  237. /* scale factors table for layer 1/2 */
  238. for (i = 0; i < 64; i++) {
  239. int shift, mod;
  240. /* 1.0 (i = 3) is normalized to 2 ^ FRAC_BITS */
  241. shift = i / 3;
  242. mod = i % 3;
  243. scale_factor_modshift[i] = mod | (shift << 2);
  244. }
  245. /* scale factor multiply for layer 1 */
  246. for (i = 0; i < 15; i++) {
  247. int n, norm;
  248. n = i + 2;
  249. norm = ((INT64_C(1) << n) * FRAC_ONE) / ((1 << n) - 1);
  250. scale_factor_mult[i][0] = MULLx(norm, FIXR(1.0 * 2.0), FRAC_BITS);
  251. scale_factor_mult[i][1] = MULLx(norm, FIXR(0.7937005259 * 2.0), FRAC_BITS);
  252. scale_factor_mult[i][2] = MULLx(norm, FIXR(0.6299605249 * 2.0), FRAC_BITS);
  253. ff_dlog(NULL, "%d: norm=%x s=%"PRIx32" %"PRIx32" %"PRIx32"\n", i,
  254. (unsigned)norm,
  255. scale_factor_mult[i][0],
  256. scale_factor_mult[i][1],
  257. scale_factor_mult[i][2]);
  258. }
  259. RENAME(ff_mpa_synth_init)(RENAME(ff_mpa_synth_window));
  260. /* huffman decode tables */
  261. offset = 0;
  262. for (i = 1; i < 16; i++) {
  263. const HuffTable *h = &mpa_huff_tables[i];
  264. int xsize, x, y;
  265. uint8_t tmp_bits [512] = { 0 };
  266. uint16_t tmp_codes[512] = { 0 };
  267. xsize = h->xsize;
  268. j = 0;
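/* pack each (x,y) pair into one VLC symbol: x goes in the upper bits, y in
   the low 4 bits, and bit 4 is set when both are non-zero; this matches the
   unpacking done in huffman_decode() below */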
  269. for (x = 0; x < xsize; x++) {
  270. for (y = 0; y < xsize; y++) {
  271. tmp_bits [(x << 5) | y | ((x&&y)<<4)]= h->bits [j ];
  272. tmp_codes[(x << 5) | y | ((x&&y)<<4)]= h->codes[j++];
  273. }
  274. }
  275. /* XXX: fail test */
  276. huff_vlc[i].table = huff_vlc_tables+offset;
  277. huff_vlc[i].table_allocated = huff_vlc_tables_sizes[i];
  278. init_vlc(&huff_vlc[i], 7, 512,
  279. tmp_bits, 1, 1, tmp_codes, 2, 2,
  280. INIT_VLC_USE_NEW_STATIC);
  281. offset += huff_vlc_tables_sizes[i];
  282. }
  283. av_assert0(offset == FF_ARRAY_ELEMS(huff_vlc_tables));
  284. offset = 0;
  285. for (i = 0; i < 2; i++) {
  286. huff_quad_vlc[i].table = huff_quad_vlc_tables+offset;
  287. huff_quad_vlc[i].table_allocated = huff_quad_vlc_tables_sizes[i];
  288. init_vlc(&huff_quad_vlc[i], i == 0 ? 7 : 4, 16,
  289. mpa_quad_bits[i], 1, 1, mpa_quad_codes[i], 1, 1,
  290. INIT_VLC_USE_NEW_STATIC);
  291. offset += huff_quad_vlc_tables_sizes[i];
  292. }
  293. av_assert0(offset == FF_ARRAY_ELEMS(huff_quad_vlc_tables));
  294. for (i = 0; i < 9; i++) {
  295. k = 0;
  296. for (j = 0; j < 22; j++) {
  297. band_index_long[i][j] = k;
  298. k += band_size_long[i][j];
  299. }
  300. band_index_long[i][22] = k;
  301. }
  302. /* compute n ^ (4/3) and store it in mantissa/exp format */
  303. mpegaudio_tableinit();
  304. for (i = 0; i < 4; i++) {
  305. if (ff_mpa_quant_bits[i] < 0) {
  306. for (j = 0; j < (1 << (-ff_mpa_quant_bits[i]+1)); j++) {
  307. int val1, val2, val3, steps;
  308. int val = j;
  309. steps = ff_mpa_quant_steps[i];
  310. val1 = val % steps;
  311. val /= steps;
  312. val2 = val % steps;
  313. val3 = val / steps;
  314. division_tabs[i][j] = val1 + (val2 << 4) + (val3 << 8);
  315. }
  316. }
  317. }
  318. for (i = 0; i < 7; i++) {
  319. float f;
  320. INTFLOAT v;
  321. if (i != 6) {
  322. f = tan((double)i * M_PI / 12.0);
  323. v = FIXR(f / (1.0 + f));
  324. } else {
  325. v = FIXR(1.0);
  326. }
  327. is_table[0][ i] = v;
  328. is_table[1][6 - i] = v;
  329. }
  330. /* invalid values */
  331. for (i = 7; i < 16; i++)
  332. is_table[0][i] = is_table[1][i] = 0.0;
  333. for (i = 0; i < 16; i++) {
  334. double f;
  335. int e, k;
  336. for (j = 0; j < 2; j++) {
  337. e = -(j + 1) * ((i + 1) >> 1);
  338. f = exp2(e / 4.0);
  339. k = i & 1;
  340. is_table_lsf[j][k ^ 1][i] = FIXR(f);
  341. is_table_lsf[j][k ][i] = FIXR(1.0);
  342. ff_dlog(NULL, "is_table_lsf %d %d: %f %f\n",
  343. i, j, (float) is_table_lsf[j][0][i],
  344. (float) is_table_lsf[j][1][i]);
  345. }
  346. }
  347. for (i = 0; i < 8; i++) {
  348. double ci, cs, ca;
  349. ci = ci_table[i];
  350. cs = 1.0 / sqrt(1.0 + ci * ci);
  351. ca = cs * ci;
  352. #if !USE_FLOATS
  353. csa_table[i][0] = FIXHR(cs/4);
  354. csa_table[i][1] = FIXHR(ca/4);
  355. csa_table[i][2] = FIXHR(ca/4) + FIXHR(cs/4);
  356. csa_table[i][3] = FIXHR(ca/4) - FIXHR(cs/4);
  357. #else
  358. csa_table[i][0] = cs;
  359. csa_table[i][1] = ca;
  360. csa_table[i][2] = ca + cs;
  361. csa_table[i][3] = ca - cs;
  362. #endif
  363. }
  364. }
  365. #if USE_FLOATS
  366. static av_cold int decode_close(AVCodecContext * avctx)
  367. {
  368. MPADecodeContext *s = avctx->priv_data;
  369. av_freep(&s->fdsp);
  370. return 0;
  371. }
  372. #endif
  373. static av_cold int decode_init(AVCodecContext * avctx)
  374. {
  375. static int initialized_tables = 0;
  376. MPADecodeContext *s = avctx->priv_data;
  377. if (!initialized_tables) {
  378. decode_init_static();
  379. initialized_tables = 1;
  380. }
  381. s->avctx = avctx;
  382. #if USE_FLOATS
  383. s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
  384. if (!s->fdsp)
  385. return AVERROR(ENOMEM);
  386. #endif
  387. ff_mpadsp_init(&s->mpadsp);
  388. if (avctx->request_sample_fmt == OUT_FMT &&
  389. avctx->codec_id != AV_CODEC_ID_MP3ON4)
  390. avctx->sample_fmt = OUT_FMT;
  391. else
  392. avctx->sample_fmt = OUT_FMT_P;
  393. s->err_recognition = avctx->err_recognition;
  394. if (avctx->codec_id == AV_CODEC_ID_MP3ADU)
  395. s->adu_mode = 1;
  396. return 0;
  397. }
  398. #define C3 FIXHR(0.86602540378443864676/2)
  399. #define C4 FIXHR(0.70710678118654752439/2) //0.5 / cos(pi*(9)/36)
  400. #define C5 FIXHR(0.51763809020504152469/2) //0.5 / cos(pi*(3)/36)
  401. #define C6 FIXHR(1.93185165257813657349/4) //0.5 / cos(pi*(15)/36)
  402. /* 12-point IMDCT. We compute it "by hand" by factorizing obvious
  403. cases. */
  404. static void imdct12(INTFLOAT *out, SUINTFLOAT *in)
  405. {
  406. SUINTFLOAT in0, in1, in2, in3, in4, in5, t1, t2;
  407. in0 = in[0*3];
  408. in1 = in[1*3] + in[0*3];
  409. in2 = in[2*3] + in[1*3];
  410. in3 = in[3*3] + in[2*3];
  411. in4 = in[4*3] + in[3*3];
  412. in5 = in[5*3] + in[4*3];
  413. in5 += in3;
  414. in3 += in1;
  415. in2 = MULH3(in2, C3, 2);
  416. in3 = MULH3(in3, C3, 4);
  417. t1 = in0 - in4;
  418. t2 = MULH3(in1 - in5, C4, 2);
  419. out[ 7] =
  420. out[10] = t1 + t2;
  421. out[ 1] =
  422. out[ 4] = t1 - t2;
  423. in0 += SHR(in4, 1);
  424. in4 = in0 + in2;
  425. in5 += 2*in1;
  426. in1 = MULH3(in5 + in3, C5, 1);
  427. out[ 8] =
  428. out[ 9] = in4 + in1;
  429. out[ 2] =
  430. out[ 3] = in4 - in1;
  431. in0 -= in2;
  432. in5 = MULH3(in5 - in3, C6, 2);
  433. out[ 0] =
  434. out[ 5] = in0 - in5;
  435. out[ 6] =
  436. out[11] = in0 + in5;
  437. }
  438. /* return the number of decoded frames */
  439. static int mp_decode_layer1(MPADecodeContext *s)
  440. {
  441. int bound, i, v, n, ch, j, mant;
  442. uint8_t allocation[MPA_MAX_CHANNELS][SBLIMIT];
  443. uint8_t scale_factors[MPA_MAX_CHANNELS][SBLIMIT];
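/* in joint stereo, subbands below 'bound' are coded per channel; from 'bound'
   upward a single set of samples is shared between the channels and only the
   scale factors differ (intensity stereo) */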
  444. if (s->mode == MPA_JSTEREO)
  445. bound = (s->mode_ext + 1) * 4;
  446. else
  447. bound = SBLIMIT;
  448. /* allocation bits */
  449. for (i = 0; i < bound; i++) {
  450. for (ch = 0; ch < s->nb_channels; ch++) {
  451. allocation[ch][i] = get_bits(&s->gb, 4);
  452. }
  453. }
  454. for (i = bound; i < SBLIMIT; i++)
  455. allocation[0][i] = get_bits(&s->gb, 4);
  456. /* scale factors */
  457. for (i = 0; i < bound; i++) {
  458. for (ch = 0; ch < s->nb_channels; ch++) {
  459. if (allocation[ch][i])
  460. scale_factors[ch][i] = get_bits(&s->gb, 6);
  461. }
  462. }
  463. for (i = bound; i < SBLIMIT; i++) {
  464. if (allocation[0][i]) {
  465. scale_factors[0][i] = get_bits(&s->gb, 6);
  466. scale_factors[1][i] = get_bits(&s->gb, 6);
  467. }
  468. }
  469. /* compute samples */
  470. for (j = 0; j < 12; j++) {
  471. for (i = 0; i < bound; i++) {
  472. for (ch = 0; ch < s->nb_channels; ch++) {
  473. n = allocation[ch][i];
  474. if (n) {
  475. mant = get_bits(&s->gb, n + 1);
  476. v = l1_unscale(n, mant, scale_factors[ch][i]);
  477. } else {
  478. v = 0;
  479. }
  480. s->sb_samples[ch][j][i] = v;
  481. }
  482. }
  483. for (i = bound; i < SBLIMIT; i++) {
  484. n = allocation[0][i];
  485. if (n) {
  486. mant = get_bits(&s->gb, n + 1);
  487. v = l1_unscale(n, mant, scale_factors[0][i]);
  488. s->sb_samples[0][j][i] = v;
  489. v = l1_unscale(n, mant, scale_factors[1][i]);
  490. s->sb_samples[1][j][i] = v;
  491. } else {
  492. s->sb_samples[0][j][i] = 0;
  493. s->sb_samples[1][j][i] = 0;
  494. }
  495. }
  496. }
  497. return 12;
  498. }
  499. static int mp_decode_layer2(MPADecodeContext *s)
  500. {
  501. int sblimit; /* number of used subbands */
  502. const unsigned char *alloc_table;
  503. int table, bit_alloc_bits, i, j, ch, bound, v;
  504. unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT];
  505. unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT];
  506. unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3], *sf;
  507. int scale, qindex, bits, steps, k, l, m, b;
  508. /* select decoding table */
  509. table = ff_mpa_l2_select_table(s->bit_rate / 1000, s->nb_channels,
  510. s->sample_rate, s->lsf);
  511. sblimit = ff_mpa_sblimit_table[table];
  512. alloc_table = ff_mpa_alloc_tables[table];
  513. if (s->mode == MPA_JSTEREO)
  514. bound = (s->mode_ext + 1) * 4;
  515. else
  516. bound = sblimit;
  517. ff_dlog(s->avctx, "bound=%d sblimit=%d\n", bound, sblimit);
  518. /* sanity check */
  519. if (bound > sblimit)
  520. bound = sblimit;
  521. /* parse bit allocation */
  522. j = 0;
  523. for (i = 0; i < bound; i++) {
  524. bit_alloc_bits = alloc_table[j];
  525. for (ch = 0; ch < s->nb_channels; ch++)
  526. bit_alloc[ch][i] = get_bits(&s->gb, bit_alloc_bits);
  527. j += 1 << bit_alloc_bits;
  528. }
  529. for (i = bound; i < sblimit; i++) {
  530. bit_alloc_bits = alloc_table[j];
  531. v = get_bits(&s->gb, bit_alloc_bits);
  532. bit_alloc[0][i] = v;
  533. bit_alloc[1][i] = v;
  534. j += 1 << bit_alloc_bits;
  535. }
  536. /* scale codes */
  537. for (i = 0; i < sblimit; i++) {
  538. for (ch = 0; ch < s->nb_channels; ch++) {
  539. if (bit_alloc[ch][i])
  540. scale_code[ch][i] = get_bits(&s->gb, 2);
  541. }
  542. }
  543. /* scale factors */
  544. for (i = 0; i < sblimit; i++) {
  545. for (ch = 0; ch < s->nb_channels; ch++) {
  546. if (bit_alloc[ch][i]) {
  547. sf = scale_factors[ch][i];
  548. switch (scale_code[ch][i]) {
  549. default:
  550. case 0:
  551. sf[0] = get_bits(&s->gb, 6);
  552. sf[1] = get_bits(&s->gb, 6);
  553. sf[2] = get_bits(&s->gb, 6);
  554. break;
  555. case 2:
  556. sf[0] = get_bits(&s->gb, 6);
  557. sf[1] = sf[0];
  558. sf[2] = sf[0];
  559. break;
  560. case 1:
  561. sf[0] = get_bits(&s->gb, 6);
  562. sf[2] = get_bits(&s->gb, 6);
  563. sf[1] = sf[0];
  564. break;
  565. case 3:
  566. sf[0] = get_bits(&s->gb, 6);
  567. sf[2] = get_bits(&s->gb, 6);
  568. sf[1] = sf[2];
  569. break;
  570. }
  571. }
  572. }
  573. }
  574. /* samples */
  575. for (k = 0; k < 3; k++) {
  576. for (l = 0; l < 12; l += 3) {
  577. j = 0;
  578. for (i = 0; i < bound; i++) {
  579. bit_alloc_bits = alloc_table[j];
  580. for (ch = 0; ch < s->nb_channels; ch++) {
  581. b = bit_alloc[ch][i];
  582. if (b) {
  583. scale = scale_factors[ch][i][k];
  584. qindex = alloc_table[j+b];
  585. bits = ff_mpa_quant_bits[qindex];
  586. if (bits < 0) {
  587. int v2;
  588. /* 3 values at the same time */
  589. v = get_bits(&s->gb, -bits);
  590. v2 = division_tabs[qindex][v];
  591. steps = ff_mpa_quant_steps[qindex];
  592. s->sb_samples[ch][k * 12 + l + 0][i] =
  593. l2_unscale_group(steps, v2 & 15, scale);
  594. s->sb_samples[ch][k * 12 + l + 1][i] =
  595. l2_unscale_group(steps, (v2 >> 4) & 15, scale);
  596. s->sb_samples[ch][k * 12 + l + 2][i] =
  597. l2_unscale_group(steps, v2 >> 8 , scale);
  598. } else {
  599. for (m = 0; m < 3; m++) {
  600. v = get_bits(&s->gb, bits);
  601. v = l1_unscale(bits - 1, v, scale);
  602. s->sb_samples[ch][k * 12 + l + m][i] = v;
  603. }
  604. }
  605. } else {
  606. s->sb_samples[ch][k * 12 + l + 0][i] = 0;
  607. s->sb_samples[ch][k * 12 + l + 1][i] = 0;
  608. s->sb_samples[ch][k * 12 + l + 2][i] = 0;
  609. }
  610. }
  611. /* next subband in alloc table */
  612. j += 1 << bit_alloc_bits;
  613. }
  614. /* XXX: find a way to avoid this duplication of code */
  615. for (i = bound; i < sblimit; i++) {
  616. bit_alloc_bits = alloc_table[j];
  617. b = bit_alloc[0][i];
  618. if (b) {
  619. int mant, scale0, scale1;
  620. scale0 = scale_factors[0][i][k];
  621. scale1 = scale_factors[1][i][k];
  622. qindex = alloc_table[j+b];
  623. bits = ff_mpa_quant_bits[qindex];
  624. if (bits < 0) {
  625. /* 3 values at the same time */
  626. v = get_bits(&s->gb, -bits);
  627. steps = ff_mpa_quant_steps[qindex];
  628. mant = v % steps;
  629. v = v / steps;
  630. s->sb_samples[0][k * 12 + l + 0][i] =
  631. l2_unscale_group(steps, mant, scale0);
  632. s->sb_samples[1][k * 12 + l + 0][i] =
  633. l2_unscale_group(steps, mant, scale1);
  634. mant = v % steps;
  635. v = v / steps;
  636. s->sb_samples[0][k * 12 + l + 1][i] =
  637. l2_unscale_group(steps, mant, scale0);
  638. s->sb_samples[1][k * 12 + l + 1][i] =
  639. l2_unscale_group(steps, mant, scale1);
  640. s->sb_samples[0][k * 12 + l + 2][i] =
  641. l2_unscale_group(steps, v, scale0);
  642. s->sb_samples[1][k * 12 + l + 2][i] =
  643. l2_unscale_group(steps, v, scale1);
  644. } else {
  645. for (m = 0; m < 3; m++) {
  646. mant = get_bits(&s->gb, bits);
  647. s->sb_samples[0][k * 12 + l + m][i] =
  648. l1_unscale(bits - 1, mant, scale0);
  649. s->sb_samples[1][k * 12 + l + m][i] =
  650. l1_unscale(bits - 1, mant, scale1);
  651. }
  652. }
  653. } else {
  654. s->sb_samples[0][k * 12 + l + 0][i] = 0;
  655. s->sb_samples[0][k * 12 + l + 1][i] = 0;
  656. s->sb_samples[0][k * 12 + l + 2][i] = 0;
  657. s->sb_samples[1][k * 12 + l + 0][i] = 0;
  658. s->sb_samples[1][k * 12 + l + 1][i] = 0;
  659. s->sb_samples[1][k * 12 + l + 2][i] = 0;
  660. }
  661. /* next subband in alloc table */
  662. j += 1 << bit_alloc_bits;
  663. }
  664. /* fill remaining samples to zero */
  665. for (i = sblimit; i < SBLIMIT; i++) {
  666. for (ch = 0; ch < s->nb_channels; ch++) {
  667. s->sb_samples[ch][k * 12 + l + 0][i] = 0;
  668. s->sb_samples[ch][k * 12 + l + 1][i] = 0;
  669. s->sb_samples[ch][k * 12 + l + 2][i] = 0;
  670. }
  671. }
  672. }
  673. }
  674. return 3 * 12;
  675. }
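/* SPLIT extracts one mixed-radix digit from the LSF scalefac_compress value;
   the multiply/shift pairs are reciprocal approximations of division by 3, 5
   and 6 (171/512 ~ 1/3, 205/1024 ~ 1/5, 171/1024 ~ 1/6) */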
  676. #define SPLIT(dst,sf,n) \
  677. if (n == 3) { \
  678. int m = (sf * 171) >> 9; \
  679. dst = sf - 3 * m; \
  680. sf = m; \
  681. } else if (n == 4) { \
  682. dst = sf & 3; \
  683. sf >>= 2; \
  684. } else if (n == 5) { \
  685. int m = (sf * 205) >> 10; \
  686. dst = sf - 5 * m; \
  687. sf = m; \
  688. } else if (n == 6) { \
  689. int m = (sf * 171) >> 10; \
  690. dst = sf - 6 * m; \
  691. sf = m; \
  692. } else { \
  693. dst = 0; \
  694. }
  695. static av_always_inline void lsf_sf_expand(int *slen, int sf, int n1, int n2,
  696. int n3)
  697. {
  698. SPLIT(slen[3], sf, n3)
  699. SPLIT(slen[2], sf, n2)
  700. SPLIT(slen[1], sf, n1)
  701. slen[0] = sf;
  702. }
  703. static void exponents_from_scale_factors(MPADecodeContext *s, GranuleDef *g,
  704. int16_t *exponents)
  705. {
  706. const uint8_t *bstab, *pretab;
  707. int len, i, j, k, l, v0, shift, gain, gains[3];
  708. int16_t *exp_ptr;
  709. exp_ptr = exponents;
  710. gain = g->global_gain - 210;
  711. shift = g->scalefac_scale + 1;
  712. bstab = band_size_long[s->sample_rate_index];
  713. pretab = mpa_pretab[g->preflag];
  714. for (i = 0; i < g->long_end; i++) {
  715. v0 = gain - ((g->scale_factors[i] + pretab[i]) << shift) + 400;
  716. len = bstab[i];
  717. for (j = len; j > 0; j--)
  718. *exp_ptr++ = v0;
  719. }
  720. if (g->short_start < 13) {
  721. bstab = band_size_short[s->sample_rate_index];
  722. gains[0] = gain - (g->subblock_gain[0] << 3);
  723. gains[1] = gain - (g->subblock_gain[1] << 3);
  724. gains[2] = gain - (g->subblock_gain[2] << 3);
  725. k = g->long_end;
  726. for (i = g->short_start; i < 13; i++) {
  727. len = bstab[i];
  728. for (l = 0; l < 3; l++) {
  729. v0 = gains[l] - (g->scale_factors[k++] << shift) + 400;
  730. for (j = len; j > 0; j--)
  731. *exp_ptr++ = v0;
  732. }
  733. }
  734. }
  735. }
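/* Called when the bit reader crosses the end of the spliced reservoir data:
   switch from the last_buf reader back to the current frame's bitstream
   (saved in in_gb) and rebase the position and end-of-data counters. */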
  736. static void switch_buffer(MPADecodeContext *s, int *pos, int *end_pos,
  737. int *end_pos2)
  738. {
  739. if (s->in_gb.buffer && *pos >= s->gb.size_in_bits - s->extrasize * 8) {
  740. s->gb = s->in_gb;
  741. s->in_gb.buffer = NULL;
  742. s->extrasize = 0;
  743. av_assert2((get_bits_count(&s->gb) & 7) == 0);
  744. skip_bits_long(&s->gb, *pos - *end_pos);
  745. *end_pos2 =
  746. *end_pos = *end_pos2 + get_bits_count(&s->gb) - *pos;
  747. *pos = get_bits_count(&s->gb);
  748. }
  749. }
  750. /* The following is optimized code for:
  751. INTFLOAT v = *src;
  752. if(get_bits1(&s->gb))
  753. v = -v;
  754. *dst = v;
  755. */
  756. #if USE_FLOATS
  757. #define READ_FLIP_SIGN(dst,src) \
  758. v = AV_RN32A(src) ^ (get_bits1(&s->gb) << 31); \
  759. AV_WN32A(dst, v);
  760. #else
  761. #define READ_FLIP_SIGN(dst,src) \
  762. v = -get_bits1(&s->gb); \
  763. *(dst) = (*(src) ^ v) - v;
  764. #endif
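/* the float variant flips the IEEE sign bit directly; the fixed-point variant
   uses v = -bit (0 or all ones), so (x ^ v) - v yields x or -x */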
  765. static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
  766. int16_t *exponents, int end_pos2)
  767. {
  768. int s_index;
  769. int i;
  770. int last_pos, bits_left;
  771. VLC *vlc;
  772. int end_pos = FFMIN(end_pos2, s->gb.size_in_bits - s->extrasize * 8);
  773. /* low frequencies (called big values) */
  774. s_index = 0;
  775. for (i = 0; i < 3; i++) {
  776. int j, k, l, linbits;
  777. j = g->region_size[i];
  778. if (j == 0)
  779. continue;
  780. /* select vlc table */
  781. k = g->table_select[i];
  782. l = mpa_huff_data[k][0];
  783. linbits = mpa_huff_data[k][1];
  784. vlc = &huff_vlc[l];
  785. if (!l) {
  786. memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * 2 * j);
  787. s_index += 2 * j;
  788. continue;
  789. }
  790. /* read huffcode and compute each couple */
  791. for (; j > 0; j--) {
  792. int exponent, x, y;
  793. int v;
  794. int pos = get_bits_count(&s->gb);
  795. if (pos >= end_pos){
  796. switch_buffer(s, &pos, &end_pos, &end_pos2);
  797. if (pos >= end_pos)
  798. break;
  799. }
  800. y = get_vlc2(&s->gb, vlc->table, 7, 3);
  801. if (!y) {
  802. g->sb_hybrid[s_index ] =
  803. g->sb_hybrid[s_index+1] = 0;
  804. s_index += 2;
  805. continue;
  806. }
  807. exponent= exponents[s_index];
  808. ff_dlog(s->avctx, "region=%d n=%d y=%d exp=%d\n",
  809. i, g->region_size[i] - j, y, exponent);
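/* a magnitude of 15 is an escape code: 'linbits' extra bits extend the value */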
  810. if (y & 16) {
  811. x = y >> 5;
  812. y = y & 0x0f;
  813. if (x < 15) {
  814. READ_FLIP_SIGN(g->sb_hybrid + s_index, RENAME(expval_table)[exponent] + x)
  815. } else {
  816. x += get_bitsz(&s->gb, linbits);
  817. v = l3_unscale(x, exponent);
  818. if (get_bits1(&s->gb))
  819. v = -v;
  820. g->sb_hybrid[s_index] = v;
  821. }
  822. if (y < 15) {
  823. READ_FLIP_SIGN(g->sb_hybrid + s_index + 1, RENAME(expval_table)[exponent] + y)
  824. } else {
  825. y += get_bitsz(&s->gb, linbits);
  826. v = l3_unscale(y, exponent);
  827. if (get_bits1(&s->gb))
  828. v = -v;
  829. g->sb_hybrid[s_index+1] = v;
  830. }
  831. } else {
  832. x = y >> 5;
  833. y = y & 0x0f;
  834. x += y;
  835. if (x < 15) {
  836. READ_FLIP_SIGN(g->sb_hybrid + s_index + !!y, RENAME(expval_table)[exponent] + x)
  837. } else {
  838. x += get_bitsz(&s->gb, linbits);
  839. v = l3_unscale(x, exponent);
  840. if (get_bits1(&s->gb))
  841. v = -v;
  842. g->sb_hybrid[s_index+!!y] = v;
  843. }
  844. g->sb_hybrid[s_index + !y] = 0;
  845. }
  846. s_index += 2;
  847. }
  848. }
  849. /* high frequencies */
  850. vlc = &huff_quad_vlc[g->count1table_select];
  851. last_pos = 0;
  852. while (s_index <= 572) {
  853. int pos, code;
  854. pos = get_bits_count(&s->gb);
  855. if (pos >= end_pos) {
  856. if (pos > end_pos2 && last_pos) {
  857. /* some encoders generate an incorrect size for this
  858. part. We must go back into the data */
  859. s_index -= 4;
  860. skip_bits_long(&s->gb, last_pos - pos);
  861. av_log(s->avctx, AV_LOG_INFO, "overread, skip %d enddists: %d %d\n", last_pos - pos, end_pos-pos, end_pos2-pos);
  862. if(s->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT))
  863. s_index=0;
  864. break;
  865. }
  866. switch_buffer(s, &pos, &end_pos, &end_pos2);
  867. if (pos >= end_pos)
  868. break;
  869. }
  870. last_pos = pos;
  871. code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1);
  872. ff_dlog(s->avctx, "t=%d code=%d\n", g->count1table_select, code);
  873. g->sb_hybrid[s_index+0] =
  874. g->sb_hybrid[s_index+1] =
  875. g->sb_hybrid[s_index+2] =
  876. g->sb_hybrid[s_index+3] = 0;
  877. while (code) {
  878. static const int idxtab[16] = { 3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0 };
  879. int v;
  880. int pos = s_index + idxtab[code];
  881. code ^= 8 >> idxtab[code];
  882. READ_FLIP_SIGN(g->sb_hybrid + pos, RENAME(exp_table)+exponents[pos])
  883. }
  884. s_index += 4;
  885. }
  886. /* skip extension bits */
  887. bits_left = end_pos2 - get_bits_count(&s->gb);
  888. if (bits_left < 0 && (s->err_recognition & (AV_EF_BUFFER|AV_EF_COMPLIANT))) {
  889. av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
  890. s_index=0;
  891. } else if (bits_left > 0 && (s->err_recognition & (AV_EF_BUFFER|AV_EF_AGGRESSIVE))) {
  892. av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
  893. s_index = 0;
  894. }
  895. memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * (576 - s_index));
  896. skip_bits_long(&s->gb, bits_left);
  897. i = get_bits_count(&s->gb);
  898. switch_buffer(s, &i, &end_pos, &end_pos2);
  899. return 0;
  900. }
  901. /* Reorder short blocks from bitstream order to interleaved order. It
  902. would be faster to do it in parsing, but the code would be far more
  903. complicated */
  904. static void reorder_block(MPADecodeContext *s, GranuleDef *g)
  905. {
  906. int i, j, len;
  907. INTFLOAT *ptr, *dst, *ptr1;
  908. INTFLOAT tmp[576];
  909. if (g->block_type != 2)
  910. return;
  911. if (g->switch_point) {
  912. if (s->sample_rate_index != 8)
  913. ptr = g->sb_hybrid + 36;
  914. else
  915. ptr = g->sb_hybrid + 72;
  916. } else {
  917. ptr = g->sb_hybrid;
  918. }
  919. for (i = g->short_start; i < 13; i++) {
  920. len = band_size_short[s->sample_rate_index][i];
  921. ptr1 = ptr;
  922. dst = tmp;
  923. for (j = len; j > 0; j--) {
  924. *dst++ = ptr[0*len];
  925. *dst++ = ptr[1*len];
  926. *dst++ = ptr[2*len];
  927. ptr++;
  928. }
  929. ptr += 2 * len;
  930. memcpy(ptr1, tmp, len * 3 * sizeof(*ptr1));
  931. }
  932. }
  933. #define ISQRT2 FIXR(0.70710678118654752440)
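/* Joint-stereo reconstruction: intensity stereo rebuilds left/right from the
   shared spectrum using the is_table ratios, working downward from the top
   band until a non-zero right-channel band is found; MS stereo converts
   mid/side back to left/right with a 1/sqrt(2) butterfly. */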
  934. static void compute_stereo(MPADecodeContext *s, GranuleDef *g0, GranuleDef *g1)
  935. {
  936. int i, j, k, l;
  937. int sf_max, sf, len, non_zero_found;
  938. INTFLOAT (*is_tab)[16], *tab0, *tab1, v1, v2;
  939. SUINTFLOAT tmp0, tmp1;
  940. int non_zero_found_short[3];
  941. /* intensity stereo */
  942. if (s->mode_ext & MODE_EXT_I_STEREO) {
  943. if (!s->lsf) {
  944. is_tab = is_table;
  945. sf_max = 7;
  946. } else {
  947. is_tab = is_table_lsf[g1->scalefac_compress & 1];
  948. sf_max = 16;
  949. }
  950. tab0 = g0->sb_hybrid + 576;
  951. tab1 = g1->sb_hybrid + 576;
  952. non_zero_found_short[0] = 0;
  953. non_zero_found_short[1] = 0;
  954. non_zero_found_short[2] = 0;
  955. k = (13 - g1->short_start) * 3 + g1->long_end - 3;
  956. for (i = 12; i >= g1->short_start; i--) {
  957. /* for last band, use previous scale factor */
  958. if (i != 11)
  959. k -= 3;
  960. len = band_size_short[s->sample_rate_index][i];
  961. for (l = 2; l >= 0; l--) {
  962. tab0 -= len;
  963. tab1 -= len;
  964. if (!non_zero_found_short[l]) {
  965. /* test if non zero band. if so, stop doing i-stereo */
  966. for (j = 0; j < len; j++) {
  967. if (tab1[j] != 0) {
  968. non_zero_found_short[l] = 1;
  969. goto found1;
  970. }
  971. }
  972. sf = g1->scale_factors[k + l];
  973. if (sf >= sf_max)
  974. goto found1;
  975. v1 = is_tab[0][sf];
  976. v2 = is_tab[1][sf];
  977. for (j = 0; j < len; j++) {
  978. tmp0 = tab0[j];
  979. tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
  980. tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
  981. }
  982. } else {
  983. found1:
  984. if (s->mode_ext & MODE_EXT_MS_STEREO) {
  985. /* lower part of the spectrum : do ms stereo
  986. if enabled */
  987. for (j = 0; j < len; j++) {
  988. tmp0 = tab0[j];
  989. tmp1 = tab1[j];
  990. tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
  991. tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
  992. }
  993. }
  994. }
  995. }
  996. }
  997. non_zero_found = non_zero_found_short[0] |
  998. non_zero_found_short[1] |
  999. non_zero_found_short[2];
  1000. for (i = g1->long_end - 1;i >= 0;i--) {
  1001. len = band_size_long[s->sample_rate_index][i];
  1002. tab0 -= len;
  1003. tab1 -= len;
  1004. /* test if non zero band. if so, stop doing i-stereo */
  1005. if (!non_zero_found) {
  1006. for (j = 0; j < len; j++) {
  1007. if (tab1[j] != 0) {
  1008. non_zero_found = 1;
  1009. goto found2;
  1010. }
  1011. }
  1012. /* for last band, use previous scale factor */
  1013. k = (i == 21) ? 20 : i;
  1014. sf = g1->scale_factors[k];
  1015. if (sf >= sf_max)
  1016. goto found2;
  1017. v1 = is_tab[0][sf];
  1018. v2 = is_tab[1][sf];
  1019. for (j = 0; j < len; j++) {
  1020. tmp0 = tab0[j];
  1021. tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
  1022. tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
  1023. }
  1024. } else {
  1025. found2:
  1026. if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1027. /* lower part of the spectrum : do ms stereo
  1028. if enabled */
  1029. for (j = 0; j < len; j++) {
  1030. tmp0 = tab0[j];
  1031. tmp1 = tab1[j];
  1032. tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
  1033. tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
  1034. }
  1035. }
  1036. }
  1037. }
  1038. } else if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1039. /* ms stereo ONLY */
  1040. /* NOTE: the 1/sqrt(2) normalization factor is included in the
  1041. global gain */
  1042. #if USE_FLOATS
  1043. s->fdsp->butterflies_float(g0->sb_hybrid, g1->sb_hybrid, 576);
  1044. #else
  1045. tab0 = g0->sb_hybrid;
  1046. tab1 = g1->sb_hybrid;
  1047. for (i = 0; i < 576; i++) {
  1048. tmp0 = tab0[i];
  1049. tmp1 = tab1[i];
  1050. tab0[i] = tmp0 + tmp1;
  1051. tab1[i] = tmp0 - tmp1;
  1052. }
  1053. #endif
  1054. }
  1055. }
  1056. #if USE_FLOATS
  1057. #if HAVE_MIPSFPU
  1058. # include "mips/compute_antialias_float.h"
  1059. #endif /* HAVE_MIPSFPU */
  1060. #else
  1061. #if HAVE_MIPSDSP
  1062. # include "mips/compute_antialias_fixed.h"
  1063. #endif /* HAVE_MIPSDSP */
  1064. #endif /* USE_FLOATS */
  1065. #ifndef compute_antialias
  1066. #if USE_FLOATS
  1067. #define AA(j) do { \
  1068. float tmp0 = ptr[-1-j]; \
  1069. float tmp1 = ptr[ j]; \
  1070. ptr[-1-j] = tmp0 * csa_table[j][0] - tmp1 * csa_table[j][1]; \
  1071. ptr[ j] = tmp0 * csa_table[j][1] + tmp1 * csa_table[j][0]; \
  1072. } while (0)
  1073. #else
  1074. #define AA(j) do { \
  1075. SUINT tmp0 = ptr[-1-j]; \
  1076. SUINT tmp1 = ptr[ j]; \
  1077. SUINT tmp2 = MULH(tmp0 + tmp1, csa_table[j][0]); \
  1078. ptr[-1-j] = 4 * (tmp2 - MULH(tmp1, csa_table[j][2])); \
  1079. ptr[ j] = 4 * (tmp2 + MULH(tmp0, csa_table[j][3])); \
  1080. } while (0)
  1081. #endif
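/* Alias reduction: butterfly the 8 samples on either side of each subband
   boundary using the csa_table coefficients; only long blocks (or the long
   part of a switch-point block) are processed. */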
  1082. static void compute_antialias(MPADecodeContext *s, GranuleDef *g)
  1083. {
  1084. INTFLOAT *ptr;
  1085. int n, i;
  1086. /* we antialias only "long" bands */
  1087. if (g->block_type == 2) {
  1088. if (!g->switch_point)
  1089. return;
  1090. /* XXX: check this for 8000Hz case */
  1091. n = 1;
  1092. } else {
  1093. n = SBLIMIT - 1;
  1094. }
  1095. ptr = g->sb_hybrid + 18;
  1096. for (i = n; i > 0; i--) {
  1097. AA(0);
  1098. AA(1);
  1099. AA(2);
  1100. AA(3);
  1101. AA(4);
  1102. AA(5);
  1103. AA(6);
  1104. AA(7);
  1105. ptr += 18;
  1106. }
  1107. }
  1108. #endif /* compute_antialias */
  1109. static void compute_imdct(MPADecodeContext *s, GranuleDef *g,
  1110. INTFLOAT *sb_samples, INTFLOAT *mdct_buf)
  1111. {
  1112. INTFLOAT *win, *out_ptr, *ptr, *buf, *ptr1;
  1113. INTFLOAT out2[12];
  1114. int i, j, mdct_long_end, sblimit;
  1115. /* find last non zero block */
  1116. ptr = g->sb_hybrid + 576;
  1117. ptr1 = g->sb_hybrid + 2 * 18;
  1118. while (ptr >= ptr1) {
  1119. int32_t *p;
  1120. ptr -= 6;
  1121. p = (int32_t*)ptr;
  1122. if (p[0] | p[1] | p[2] | p[3] | p[4] | p[5])
  1123. break;
  1124. }
  1125. sblimit = ((ptr - g->sb_hybrid) / 18) + 1;
  1126. if (g->block_type == 2) {
  1127. /* XXX: check for 8000 Hz */
  1128. if (g->switch_point)
  1129. mdct_long_end = 2;
  1130. else
  1131. mdct_long_end = 0;
  1132. } else {
  1133. mdct_long_end = sblimit;
  1134. }
  1135. s->mpadsp.RENAME(imdct36_blocks)(sb_samples, mdct_buf, g->sb_hybrid,
  1136. mdct_long_end, g->switch_point,
  1137. g->block_type);
  1138. buf = mdct_buf + 4*18*(mdct_long_end >> 2) + (mdct_long_end & 3);
  1139. ptr = g->sb_hybrid + 18 * mdct_long_end;
  1140. for (j = mdct_long_end; j < sblimit; j++) {
  1141. /* select frequency inversion */
  1142. win = RENAME(ff_mdct_win)[2 + (4 & -(j & 1))];
  1143. out_ptr = sb_samples + j;
  1144. for (i = 0; i < 6; i++) {
  1145. *out_ptr = buf[4*i];
  1146. out_ptr += SBLIMIT;
  1147. }
  1148. imdct12(out2, ptr + 0);
  1149. for (i = 0; i < 6; i++) {
  1150. *out_ptr = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*1)];
  1151. buf[4*(i + 6*2)] = MULH3(out2[i + 6], win[i + 6], 1);
  1152. out_ptr += SBLIMIT;
  1153. }
  1154. imdct12(out2, ptr + 1);
  1155. for (i = 0; i < 6; i++) {
  1156. *out_ptr = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*2)];
  1157. buf[4*(i + 6*0)] = MULH3(out2[i + 6], win[i + 6], 1);
  1158. out_ptr += SBLIMIT;
  1159. }
  1160. imdct12(out2, ptr + 2);
  1161. for (i = 0; i < 6; i++) {
  1162. buf[4*(i + 6*0)] = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*0)];
  1163. buf[4*(i + 6*1)] = MULH3(out2[i + 6], win[i + 6], 1);
  1164. buf[4*(i + 6*2)] = 0;
  1165. }
  1166. ptr += 18;
  1167. buf += (j&3) != 3 ? 1 : (4*18-3);
  1168. }
  1169. /* zero bands */
  1170. for (j = sblimit; j < SBLIMIT; j++) {
  1171. /* overlap */
  1172. out_ptr = sb_samples + j;
  1173. for (i = 0; i < 18; i++) {
  1174. *out_ptr = buf[4*i];
  1175. buf[4*i] = 0;
  1176. out_ptr += SBLIMIT;
  1177. }
  1178. buf += (j&3) != 3 ? 1 : (4*18-3);
  1179. }
  1180. }
  1181. /* main layer3 decoding function */
  1182. static int mp_decode_layer3(MPADecodeContext *s)
  1183. {
  1184. int nb_granules, main_data_begin;
  1185. int gr, ch, blocksplit_flag, i, j, k, n, bits_pos;
  1186. GranuleDef *g;
  1187. int16_t exponents[576]; //FIXME try INTFLOAT
  1188. /* read side info */
  1189. if (s->lsf) {
  1190. main_data_begin = get_bits(&s->gb, 8);
  1191. skip_bits(&s->gb, s->nb_channels);
  1192. nb_granules = 1;
  1193. } else {
  1194. main_data_begin = get_bits(&s->gb, 9);
  1195. if (s->nb_channels == 2)
  1196. skip_bits(&s->gb, 3);
  1197. else
  1198. skip_bits(&s->gb, 5);
  1199. nb_granules = 2;
  1200. for (ch = 0; ch < s->nb_channels; ch++) {
  1201. s->granules[ch][0].scfsi = 0;/* all scale factors are transmitted */
  1202. s->granules[ch][1].scfsi = get_bits(&s->gb, 4);
  1203. }
  1204. }
  1205. for (gr = 0; gr < nb_granules; gr++) {
  1206. for (ch = 0; ch < s->nb_channels; ch++) {
  1207. ff_dlog(s->avctx, "gr=%d ch=%d: side_info\n", gr, ch);
  1208. g = &s->granules[ch][gr];
  1209. g->part2_3_length = get_bits(&s->gb, 12);
  1210. g->big_values = get_bits(&s->gb, 9);
  1211. if (g->big_values > 288) {
  1212. av_log(s->avctx, AV_LOG_ERROR, "big_values too big\n");
  1213. return AVERROR_INVALIDDATA;
  1214. }
  1215. g->global_gain = get_bits(&s->gb, 8);
  1216. /* if MS stereo only is selected, we precompute the
  1217. 1/sqrt(2) renormalization factor */
  1218. if ((s->mode_ext & (MODE_EXT_MS_STEREO | MODE_EXT_I_STEREO)) ==
  1219. MODE_EXT_MS_STEREO)
  1220. g->global_gain -= 2;
  1221. if (s->lsf)
  1222. g->scalefac_compress = get_bits(&s->gb, 9);
  1223. else
  1224. g->scalefac_compress = get_bits(&s->gb, 4);
  1225. blocksplit_flag = get_bits1(&s->gb);
  1226. if (blocksplit_flag) {
  1227. g->block_type = get_bits(&s->gb, 2);
  1228. if (g->block_type == 0) {
  1229. av_log(s->avctx, AV_LOG_ERROR, "invalid block type\n");
  1230. return AVERROR_INVALIDDATA;
  1231. }
  1232. g->switch_point = get_bits1(&s->gb);
  1233. for (i = 0; i < 2; i++)
  1234. g->table_select[i] = get_bits(&s->gb, 5);
  1235. for (i = 0; i < 3; i++)
  1236. g->subblock_gain[i] = get_bits(&s->gb, 3);
  1237. init_short_region(s, g);
  1238. } else {
  1239. int region_address1, region_address2;
  1240. g->block_type = 0;
  1241. g->switch_point = 0;
  1242. for (i = 0; i < 3; i++)
  1243. g->table_select[i] = get_bits(&s->gb, 5);
  1244. /* compute huffman coded region sizes */
  1245. region_address1 = get_bits(&s->gb, 4);
  1246. region_address2 = get_bits(&s->gb, 3);
  1247. ff_dlog(s->avctx, "region1=%d region2=%d\n",
  1248. region_address1, region_address2);
  1249. init_long_region(s, g, region_address1, region_address2);
  1250. }
  1251. region_offset2size(g);
  1252. compute_band_indexes(s, g);
  1253. g->preflag = 0;
  1254. if (!s->lsf)
  1255. g->preflag = get_bits1(&s->gb);
  1256. g->scalefac_scale = get_bits1(&s->gb);
  1257. g->count1table_select = get_bits1(&s->gb);
  1258. ff_dlog(s->avctx, "block_type=%d switch_point=%d\n",
  1259. g->block_type, g->switch_point);
  1260. }
  1261. }
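/* Layer III bit reservoir: the main data for this frame may start up to
   main_data_begin bytes back in previously buffered frames, so splice
   last_buf (old bytes) with the current frame before reading scale factors
   and Huffman data. ADU streams are self-contained and skip this step. */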
  1262. if (!s->adu_mode) {
  1263. int skip;
  1264. const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3);
  1265. s->extrasize = av_clip((get_bits_left(&s->gb) >> 3) - s->extrasize, 0,
  1266. FFMAX(0, LAST_BUF_SIZE - s->last_buf_size));
  1267. av_assert1((get_bits_count(&s->gb) & 7) == 0);
  1268. /* now we get bits from the main_data_begin offset */
  1269. ff_dlog(s->avctx, "seekback:%d, lastbuf:%d\n",
  1270. main_data_begin, s->last_buf_size);
  1271. memcpy(s->last_buf + s->last_buf_size, ptr, s->extrasize);
  1272. s->in_gb = s->gb;
  1273. init_get_bits(&s->gb, s->last_buf, (s->last_buf_size + s->extrasize) * 8);
  1274. s->last_buf_size <<= 3;
  1275. for (gr = 0; gr < nb_granules && (s->last_buf_size >> 3) < main_data_begin; gr++) {
  1276. for (ch = 0; ch < s->nb_channels; ch++) {
  1277. g = &s->granules[ch][gr];
  1278. s->last_buf_size += g->part2_3_length;
  1279. memset(g->sb_hybrid, 0, sizeof(g->sb_hybrid));
  1280. compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
  1281. }
  1282. }
  1283. skip = s->last_buf_size - 8 * main_data_begin;
  1284. if (skip >= s->gb.size_in_bits - s->extrasize * 8 && s->in_gb.buffer) {
  1285. skip_bits_long(&s->in_gb, skip - s->gb.size_in_bits + s->extrasize * 8);
  1286. s->gb = s->in_gb;
  1287. s->in_gb.buffer = NULL;
  1288. s->extrasize = 0;
  1289. } else {
  1290. skip_bits_long(&s->gb, skip);
  1291. }
  1292. } else {
  1293. gr = 0;
  1294. s->extrasize = 0;
  1295. }
  1296. for (; gr < nb_granules; gr++) {
  1297. for (ch = 0; ch < s->nb_channels; ch++) {
  1298. g = &s->granules[ch][gr];
  1299. bits_pos = get_bits_count(&s->gb);
  1300. if (!s->lsf) {
  1301. uint8_t *sc;
  1302. int slen, slen1, slen2;
  1303. /* MPEG-1 scale factors */
  1304. slen1 = slen_table[0][g->scalefac_compress];
  1305. slen2 = slen_table[1][g->scalefac_compress];
  1306. ff_dlog(s->avctx, "slen1=%d slen2=%d\n", slen1, slen2);
  1307. if (g->block_type == 2) {
  1308. n = g->switch_point ? 17 : 18;
  1309. j = 0;
  1310. if (slen1) {
  1311. for (i = 0; i < n; i++)
  1312. g->scale_factors[j++] = get_bits(&s->gb, slen1);
  1313. } else {
  1314. for (i = 0; i < n; i++)
  1315. g->scale_factors[j++] = 0;
  1316. }
  1317. if (slen2) {
  1318. for (i = 0; i < 18; i++)
  1319. g->scale_factors[j++] = get_bits(&s->gb, slen2);
  1320. for (i = 0; i < 3; i++)
  1321. g->scale_factors[j++] = 0;
  1322. } else {
  1323. for (i = 0; i < 21; i++)
  1324. g->scale_factors[j++] = 0;
  1325. }
  1326. } else {
  1327. sc = s->granules[ch][0].scale_factors;
  1328. j = 0;
  1329. for (k = 0; k < 4; k++) {
  1330. n = k == 0 ? 6 : 5;
  1331. if ((g->scfsi & (0x8 >> k)) == 0) {
  1332. slen = (k < 2) ? slen1 : slen2;
  1333. if (slen) {
  1334. for (i = 0; i < n; i++)
  1335. g->scale_factors[j++] = get_bits(&s->gb, slen);
  1336. } else {
  1337. for (i = 0; i < n; i++)
  1338. g->scale_factors[j++] = 0;
  1339. }
  1340. } else {
  1341. /* simply copy from last granule */
  1342. for (i = 0; i < n; i++) {
  1343. g->scale_factors[j] = sc[j];
  1344. j++;
  1345. }
  1346. }
  1347. }
  1348. g->scale_factors[j++] = 0;
  1349. }
  1350. } else {
  1351. int tindex, tindex2, slen[4], sl, sf;
  1352. /* LSF scale factors */
  1353. if (g->block_type == 2)
  1354. tindex = g->switch_point ? 2 : 1;
  1355. else
  1356. tindex = 0;
  1357. sf = g->scalefac_compress;
  1358. if ((s->mode_ext & MODE_EXT_I_STEREO) && ch == 1) {
  1359. /* intensity stereo case */
  1360. sf >>= 1;
  1361. if (sf < 180) {
  1362. lsf_sf_expand(slen, sf, 6, 6, 0);
  1363. tindex2 = 3;
  1364. } else if (sf < 244) {
  1365. lsf_sf_expand(slen, sf - 180, 4, 4, 0);
  1366. tindex2 = 4;
  1367. } else {
  1368. lsf_sf_expand(slen, sf - 244, 3, 0, 0);
  1369. tindex2 = 5;
  1370. }
  1371. } else {
  1372. /* normal case */
  1373. if (sf < 400) {
  1374. lsf_sf_expand(slen, sf, 5, 4, 4);
  1375. tindex2 = 0;
  1376. } else if (sf < 500) {
  1377. lsf_sf_expand(slen, sf - 400, 5, 4, 0);
  1378. tindex2 = 1;
  1379. } else {
  1380. lsf_sf_expand(slen, sf - 500, 3, 0, 0);
  1381. tindex2 = 2;
  1382. g->preflag = 1;
  1383. }
  1384. }
  1385. j = 0;
  1386. for (k = 0; k < 4; k++) {
  1387. n = lsf_nsf_table[tindex2][tindex][k];
  1388. sl = slen[k];
  1389. if (sl) {
  1390. for (i = 0; i < n; i++)
  1391. g->scale_factors[j++] = get_bits(&s->gb, sl);
  1392. } else {
  1393. for (i = 0; i < n; i++)
  1394. g->scale_factors[j++] = 0;
  1395. }
  1396. }
  1397. /* XXX: should compute exact size */
  1398. for (; j < 40; j++)
  1399. g->scale_factors[j] = 0;
  1400. }
  1401. exponents_from_scale_factors(s, g, exponents);
  1402. /* read Huffman coded residue */
  1403. huffman_decode(s, g, exponents, bits_pos + g->part2_3_length);
  1404. } /* ch */
  1405. if (s->mode == MPA_JSTEREO)
  1406. compute_stereo(s, &s->granules[0][gr], &s->granules[1][gr]);
  1407. for (ch = 0; ch < s->nb_channels; ch++) {
  1408. g = &s->granules[ch][gr];
  1409. reorder_block(s, g);
  1410. compute_antialias(s, g);
  1411. compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
  1412. }
  1413. } /* gr */
  1414. if (get_bits_count(&s->gb) < 0)
  1415. skip_bits_long(&s->gb, -get_bits_count(&s->gb));
  1416. return nb_granules * 18;
  1417. }
  1418. static int mp_decode_frame(MPADecodeContext *s, OUT_INT **samples,
  1419. const uint8_t *buf, int buf_size)
  1420. {
  1421. int i, nb_frames, ch, ret;
  1422. OUT_INT *samples_ptr;
  1423. init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE) * 8);
  1424. if (s->error_protection) {
  1425. uint16_t crc = get_bits(&s->gb, 16);
  1426. if (s->err_recognition & AV_EF_CRCCHECK) {
  1427. const int sec_len = s->lsf ? ((s->nb_channels == 1) ? 9 : 17) :
  1428. ((s->nb_channels == 1) ? 17 : 32);
  1429. const AVCRC *crc_tab = av_crc_get_table(AV_CRC_16_ANSI);
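/* as computed here, the frame CRC covers the last two header bytes (buf[2..3])
   plus sec_len bytes of side information starting at buf[6] */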
  1430. uint32_t crc_cal = av_crc(crc_tab, UINT16_MAX, &buf[2], 2);
  1431. crc_cal = av_crc(crc_tab, crc_cal, &buf[6], sec_len);
  1432. if (av_bswap16(crc) ^ crc_cal) {
  1433. av_log(s->avctx, AV_LOG_ERROR, "CRC mismatch!\n");
  1434. if (s->err_recognition & AV_EF_EXPLODE)
  1435. return AVERROR_INVALIDDATA;
  1436. }
  1437. }
  1438. }
  1439. switch(s->layer) {
  1440. case 1:
  1441. s->avctx->frame_size = 384;
  1442. nb_frames = mp_decode_layer1(s);
  1443. break;
  1444. case 2:
  1445. s->avctx->frame_size = 1152;
  1446. nb_frames = mp_decode_layer2(s);
  1447. break;
  1448. case 3:
  1449. s->avctx->frame_size = s->lsf ? 576 : 1152;
  1450. default:
  1451. nb_frames = mp_decode_layer3(s);
  1452. s->last_buf_size=0;
  1453. if (s->in_gb.buffer) {
  1454. align_get_bits(&s->gb);
  1455. i = (get_bits_left(&s->gb) >> 3) - s->extrasize;
  1456. if (i >= 0 && i <= BACKSTEP_SIZE) {
  1457. memmove(s->last_buf, s->gb.buffer + (get_bits_count(&s->gb)>>3), i);
  1458. s->last_buf_size=i;
  1459. } else
  1460. av_log(s->avctx, AV_LOG_ERROR, "invalid old backstep %d\n", i);
  1461. s->gb = s->in_gb;
  1462. s->in_gb.buffer = NULL;
  1463. s->extrasize = 0;
  1464. }
  1465. align_get_bits(&s->gb);
  1466. av_assert1((get_bits_count(&s->gb) & 7) == 0);
  1467. i = (get_bits_left(&s->gb) >> 3) - s->extrasize;
  1468. if (i < 0 || i > BACKSTEP_SIZE || nb_frames < 0) {
  1469. if (i < 0)
  1470. av_log(s->avctx, AV_LOG_ERROR, "invalid new backstep %d\n", i);
  1471. i = FFMIN(BACKSTEP_SIZE, buf_size - HEADER_SIZE);
  1472. }
  1473. av_assert1(i <= buf_size - HEADER_SIZE && i >= 0);
  1474. memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i);
  1475. s->last_buf_size += i;
  1476. }
  1477. if(nb_frames < 0)
  1478. return nb_frames;
  1479. /* get output buffer */
  1480. if (!samples) {
  1481. av_assert0(s->frame);
  1482. s->frame->nb_samples = s->avctx->frame_size;
  1483. if ((ret = ff_get_buffer(s->avctx, s->frame, 0)) < 0)
  1484. return ret;
  1485. samples = (OUT_INT **)s->frame->extended_data;
  1486. }
  1487. /* apply the synthesis filter */
  1488. for (ch = 0; ch < s->nb_channels; ch++) {
  1489. int sample_stride;
  1490. if (s->avctx->sample_fmt == OUT_FMT_P) {
  1491. samples_ptr = samples[ch];
  1492. sample_stride = 1;
  1493. } else {
  1494. samples_ptr = samples[0] + ch;
  1495. sample_stride = s->nb_channels;
  1496. }
  1497. for (i = 0; i < nb_frames; i++) {
  1498. RENAME(ff_mpa_synth_filter)(&s->mpadsp, s->synth_buf[ch],
  1499. &(s->synth_buf_offset[ch]),
  1500. RENAME(ff_mpa_synth_window),
  1501. &s->dither_state, samples_ptr,
  1502. sample_stride, s->sb_samples[ch][i]);
  1503. samples_ptr += 32 * sample_stride;
  1504. }
  1505. }
  1506. return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels;
  1507. }
  1508. static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr,
  1509. AVPacket *avpkt)
  1510. {
  1511. const uint8_t *buf = avpkt->data;
  1512. int buf_size = avpkt->size;
  1513. MPADecodeContext *s = avctx->priv_data;
  1514. uint32_t header;
  1515. int ret;
  1516. int skipped = 0;
  1517. while(buf_size && !*buf){
  1518. buf++;
  1519. buf_size--;
  1520. skipped++;
  1521. }
  1522. if (buf_size < HEADER_SIZE)
  1523. return AVERROR_INVALIDDATA;
  1524. header = AV_RB32(buf);
  1525. if (header>>8 == AV_RB32("TAG")>>8) {
  1526. av_log(avctx, AV_LOG_DEBUG, "discarding ID3 tag\n");
  1527. return buf_size + skipped;
  1528. }
  1529. ret = avpriv_mpegaudio_decode_header((MPADecodeHeader *)s, header);
  1530. if (ret < 0) {
  1531. av_log(avctx, AV_LOG_ERROR, "Header missing\n");
  1532. return AVERROR_INVALIDDATA;
  1533. } else if (ret == 1) {
  1534. /* free format: prepare to compute frame size */
  1535. s->frame_size = -1;
  1536. return AVERROR_INVALIDDATA;
  1537. }
  1538. /* update codec info */
  1539. avctx->channels = s->nb_channels;
  1540. avctx->channel_layout = s->nb_channels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
  1541. if (!avctx->bit_rate)
  1542. avctx->bit_rate = s->bit_rate;
  1543. if (s->frame_size <= 0) {
  1544. av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
  1545. return AVERROR_INVALIDDATA;
  1546. } else if (s->frame_size < buf_size) {
  1547. av_log(avctx, AV_LOG_DEBUG, "incorrect frame size - multiple frames in buffer?\n");
  1548. buf_size= s->frame_size;
  1549. }
  1550. s->frame = data;
  1551. ret = mp_decode_frame(s, NULL, buf, buf_size);
  1552. if (ret >= 0) {
  1553. s->frame->nb_samples = avctx->frame_size;
  1554. *got_frame_ptr = 1;
  1555. avctx->sample_rate = s->sample_rate;
  1556. //FIXME maybe move the other codec info stuff from above here too
  1557. } else {
  1558. av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n");
  1559. /* Only return an error if the bad frame makes up the whole packet or
  1560. * the error is related to buffer management.
  1561. * If there is more data in the packet, just consume the bad frame
  1562. * instead of returning an error, which would discard the whole
  1563. * packet. */
  1564. *got_frame_ptr = 0;
  1565. if (buf_size == avpkt->size || ret != AVERROR_INVALIDDATA)
  1566. return ret;
  1567. }
  1568. s->frame_size = 0;
  1569. return buf_size + skipped;
  1570. }
  1571. static void mp_flush(MPADecodeContext *ctx)
  1572. {
  1573. memset(ctx->synth_buf, 0, sizeof(ctx->synth_buf));
  1574. memset(ctx->mdct_buf, 0, sizeof(ctx->mdct_buf));
  1575. ctx->last_buf_size = 0;
  1576. ctx->dither_state = 0;
  1577. }
  1578. static void flush(AVCodecContext *avctx)
  1579. {
  1580. mp_flush(avctx->priv_data);
  1581. }
  1582. #if CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER
  1583. static int decode_frame_adu(AVCodecContext *avctx, void *data,
  1584. int *got_frame_ptr, AVPacket *avpkt)
  1585. {
  1586. const uint8_t *buf = avpkt->data;
  1587. int buf_size = avpkt->size;
  1588. MPADecodeContext *s = avctx->priv_data;
  1589. uint32_t header;
  1590. int len, ret;
  1591. int av_unused out_size;
  1592. len = buf_size;
  1593. // Discard frames that are too short
  1594. if (buf_size < HEADER_SIZE) {
  1595. av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
  1596. return AVERROR_INVALIDDATA;
  1597. }
  1598. if (len > MPA_MAX_CODED_FRAME_SIZE)
  1599. len = MPA_MAX_CODED_FRAME_SIZE;
  1600. // Get header and restore sync word
  1601. header = AV_RB32(buf) | 0xffe00000;
  1602. ret = avpriv_mpegaudio_decode_header((MPADecodeHeader *)s, header);
  1603. if (ret < 0) {
  1604. av_log(avctx, AV_LOG_ERROR, "Invalid frame header\n");
  1605. return ret;
  1606. }
  1607. /* update codec info */
  1608. avctx->sample_rate = s->sample_rate;
  1609. avctx->channels = s->nb_channels;
  1610. avctx->channel_layout = s->nb_channels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
  1611. if (!avctx->bit_rate)
  1612. avctx->bit_rate = s->bit_rate;
  1613. s->frame_size = len;
  1614. s->frame = data;
  1615. ret = mp_decode_frame(s, NULL, buf, buf_size);
  1616. if (ret < 0) {
  1617. av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n");
  1618. return ret;
  1619. }
  1620. *got_frame_ptr = 1;
  1621. return buf_size;
  1622. }
  1623. #endif /* CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER */
  1624. #if CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER
  1625. /**
  1626. * Context for MP3On4 decoder
  1627. */
  1628. typedef struct MP3On4DecodeContext {
  1629. int frames; ///< number of mp3 frames per block (number of mp3 decoder instances)
  1630. int syncword; ///< syncword patch
  1631. const uint8_t *coff; ///< channel offsets in output buffer
  1632. MPADecodeContext *mp3decctx[5]; ///< MPADecodeContext for every decoder instance
  1633. } MP3On4DecodeContext;
  1634. #include "mpeg4audio.h"
  1635. /* The next 3 arrays are indexed by the channel config number (passed via codec extradata) */
  1636. /* number of mp3 decoder instances */
  1637. static const uint8_t mp3Frames[8] = { 0, 1, 1, 2, 3, 3, 4, 5 };
  1638. /* offsets into output buffer, assume output order is FL FR C LFE BL BR SL SR */
  1639. static const uint8_t chan_offset[8][5] = {
  1640. { 0 },
  1641. { 0 }, // C
  1642. { 0 }, // FLR
  1643. { 2, 0 }, // C FLR
  1644. { 2, 0, 3 }, // C FLR BS
  1645. { 2, 0, 3 }, // C FLR BLRS
  1646. { 2, 0, 4, 3 }, // C FLR BLRS LFE
  1647. { 2, 0, 6, 4, 3 }, // C FLR BLRS BLR LFE
  1648. };
  1649. /* mp3on4 channel layouts */
  1650. static const int16_t chan_layout[8] = {
  1651. 0,
  1652. AV_CH_LAYOUT_MONO,
  1653. AV_CH_LAYOUT_STEREO,
  1654. AV_CH_LAYOUT_SURROUND,
  1655. AV_CH_LAYOUT_4POINT0,
  1656. AV_CH_LAYOUT_5POINT0,
  1657. AV_CH_LAYOUT_5POINT1,
  1658. AV_CH_LAYOUT_7POINT1
  1659. };
  1660. static av_cold int decode_close_mp3on4(AVCodecContext * avctx)
  1661. {
  1662. MP3On4DecodeContext *s = avctx->priv_data;
  1663. int i;
  1664. if (s->mp3decctx[0])
  1665. av_freep(&s->mp3decctx[0]->fdsp);
  1666. for (i = 0; i < s->frames; i++)
  1667. av_freep(&s->mp3decctx[i]);
  1668. return 0;
  1669. }
  1670. static av_cold int decode_init_mp3on4(AVCodecContext * avctx)
  1671. {
  1672. MP3On4DecodeContext *s = avctx->priv_data;
  1673. MPEG4AudioConfig cfg;
  1674. int i, ret;
  1675. if ((avctx->extradata_size < 2) || !avctx->extradata) {
  1676. av_log(avctx, AV_LOG_ERROR, "Codec extradata missing or too short.\n");
  1677. return AVERROR_INVALIDDATA;
  1678. }
  1679. avpriv_mpeg4audio_get_config2(&cfg, avctx->extradata,
  1680. avctx->extradata_size, 1, avctx);
  1681. if (!cfg.chan_config || cfg.chan_config > 7) {
  1682. av_log(avctx, AV_LOG_ERROR, "Invalid channel config number.\n");
  1683. return AVERROR_INVALIDDATA;
  1684. }
  1685. s->frames = mp3Frames[cfg.chan_config];
  1686. s->coff = chan_offset[cfg.chan_config];
  1687. avctx->channels = ff_mpeg4audio_channels[cfg.chan_config];
  1688. avctx->channel_layout = chan_layout[cfg.chan_config];
  1689. if (cfg.sample_rate < 16000)
  1690. s->syncword = 0xffe00000;
  1691. else
  1692. s->syncword = 0xfff00000;
  1693. /* Init the first mp3 decoder in the standard way, so that all tables get built.
  1694. * We replace avctx->priv_data with the context of the first decoder so that
  1695. * decode_init() does not have to be changed.
  1696. * Other decoders will be initialized here, copying data from the first context.
  1697. */
  1698. // Allocate zeroed memory for the first decoder context
  1699. s->mp3decctx[0] = av_mallocz(sizeof(MPADecodeContext));
  1700. if (!s->mp3decctx[0])
  1701. goto alloc_fail;
  1702. // Put decoder context in place to make init_decode() happy
  1703. avctx->priv_data = s->mp3decctx[0];
  1704. ret = decode_init(avctx);
  1705. // Restore mp3on4 context pointer
  1706. avctx->priv_data = s;
  1707. if (ret < 0) {
  1708. decode_close_mp3on4(avctx);
  1709. return ret;
  1710. }
  1711. s->mp3decctx[0]->adu_mode = 1; // Set adu mode
  1712. /* Create a separate codec/context for each frame (first is already ok).
  1713. * Each frame is 1 or 2 channels - up to 5 frames allowed
  1714. */
  1715. for (i = 1; i < s->frames; i++) {
  1716. s->mp3decctx[i] = av_mallocz(sizeof(MPADecodeContext));
  1717. if (!s->mp3decctx[i])
  1718. goto alloc_fail;
  1719. s->mp3decctx[i]->adu_mode = 1;
  1720. s->mp3decctx[i]->avctx = avctx;
  1721. s->mp3decctx[i]->mpadsp = s->mp3decctx[0]->mpadsp;
  1722. s->mp3decctx[i]->fdsp = s->mp3decctx[0]->fdsp;
  1723. }
  1724. return 0;
  1725. alloc_fail:
  1726. decode_close_mp3on4(avctx);
  1727. return AVERROR(ENOMEM);
  1728. }
  1729. static void flush_mp3on4(AVCodecContext *avctx)
  1730. {
  1731. int i;
  1732. MP3On4DecodeContext *s = avctx->priv_data;
  1733. for (i = 0; i < s->frames; i++)
  1734. mp_flush(s->mp3decctx[i]);
  1735. }
  1736. static int decode_frame_mp3on4(AVCodecContext *avctx, void *data,
  1737. int *got_frame_ptr, AVPacket *avpkt)
  1738. {
  1739. AVFrame *frame = data;
  1740. const uint8_t *buf = avpkt->data;
  1741. int buf_size = avpkt->size;
  1742. MP3On4DecodeContext *s = avctx->priv_data;
  1743. MPADecodeContext *m;
  1744. int fsize, len = buf_size, out_size = 0;
  1745. uint32_t header;
  1746. OUT_INT **out_samples;
  1747. OUT_INT *outptr[2];
  1748. int fr, ch, ret;
  1749. /* get output buffer */
  1750. frame->nb_samples = MPA_FRAME_SIZE;
  1751. if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
  1752. return ret;
  1753. out_samples = (OUT_INT **)frame->extended_data;
  1754. // Discard frames that are too short
  1755. if (buf_size < HEADER_SIZE)
  1756. return AVERROR_INVALIDDATA;
  1757. avctx->bit_rate = 0;
  1758. ch = 0;
  1759. for (fr = 0; fr < s->frames; fr++) {
  1760. fsize = AV_RB16(buf) >> 4;
  1761. fsize = FFMIN3(fsize, len, MPA_MAX_CODED_FRAME_SIZE);
  1762. m = s->mp3decctx[fr];
  1763. av_assert1(m);
  1764. if (fsize < HEADER_SIZE) {
  1765. av_log(avctx, AV_LOG_ERROR, "Frame size smaller than header size\n");
  1766. return AVERROR_INVALIDDATA;
  1767. }
  1768. header = (AV_RB32(buf) & 0x000fffff) | s->syncword; // patch header
  1769. ret = avpriv_mpegaudio_decode_header((MPADecodeHeader *)m, header);
  1770. if (ret < 0) {
  1771. av_log(avctx, AV_LOG_ERROR, "Bad header, discard block\n");
  1772. return AVERROR_INVALIDDATA;
  1773. }
  1774. if (ch + m->nb_channels > avctx->channels ||
  1775. s->coff[fr] + m->nb_channels > avctx->channels) {
  1776. av_log(avctx, AV_LOG_ERROR, "frame channel count exceeds codec "
  1777. "channel count\n");
  1778. return AVERROR_INVALIDDATA;
  1779. }
  1780. ch += m->nb_channels;
  1781. outptr[0] = out_samples[s->coff[fr]];
  1782. if (m->nb_channels > 1)
  1783. outptr[1] = out_samples[s->coff[fr] + 1];
  1784. if ((ret = mp_decode_frame(m, outptr, buf, fsize)) < 0) {
  1785. av_log(avctx, AV_LOG_ERROR, "failed to decode channel %d\n", ch);
  1786. memset(outptr[0], 0, MPA_FRAME_SIZE*sizeof(OUT_INT));
  1787. if (m->nb_channels > 1)
  1788. memset(outptr[1], 0, MPA_FRAME_SIZE*sizeof(OUT_INT));
  1789. ret = m->nb_channels * MPA_FRAME_SIZE*sizeof(OUT_INT);
  1790. }
  1791. out_size += ret;
  1792. buf += fsize;
  1793. len -= fsize;
  1794. avctx->bit_rate += m->bit_rate;
  1795. }
  1796. if (ch != avctx->channels) {
  1797. av_log(avctx, AV_LOG_ERROR, "failed to decode all channels\n");
  1798. return AVERROR_INVALIDDATA;
  1799. }
  1800. /* update codec info */
  1801. avctx->sample_rate = s->mp3decctx[0]->sample_rate;
  1802. frame->nb_samples = out_size / (avctx->channels * sizeof(OUT_INT));
  1803. *got_frame_ptr = 1;
  1804. return buf_size;
  1805. }
  1806. #endif /* CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER */