  1. /*
  2. * MPEG Audio decoder
  3. * Copyright (c) 2001, 2002 Fabrice Bellard
  4. *
  5. * This file is part of Libav.
  6. *
  7. * Libav is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * Libav is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with Libav; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * MPEG Audio decoder
  24. */
  25. #include "libavutil/attributes.h"
  26. #include "libavutil/avassert.h"
  27. #include "libavutil/channel_layout.h"
  28. #include "libavutil/float_dsp.h"
  29. #include "avcodec.h"
  30. #include "get_bits.h"
  31. #include "internal.h"
  32. #include "mathops.h"
  33. #include "mpegaudiodsp.h"
  34. /*
  35. * TODO:
  36. * - test lsf / mpeg25 extensively.
  37. */
  38. #include "mpegaudio.h"
  39. #include "mpegaudiodecheader.h"
  40. #define BACKSTEP_SIZE 512
  41. #define EXTRABYTES 24
  42. #define LAST_BUF_SIZE (2 * BACKSTEP_SIZE + EXTRABYTES)
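/* last_buf acts as the Layer III bit reservoir: main_data bytes from previous
   frames are appended here so that main_data_begin can point back to bits
   transmitted before the current frame header. */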
  43. /* layer 3 "granule" */
  44. typedef struct GranuleDef {
  45. uint8_t scfsi;
  46. int part2_3_length;
  47. int big_values;
  48. int global_gain;
  49. int scalefac_compress;
  50. uint8_t block_type;
  51. uint8_t switch_point;
  52. int table_select[3];
  53. int subblock_gain[3];
  54. uint8_t scalefac_scale;
  55. uint8_t count1table_select;
  56. int region_size[3]; /* number of huffman codes in each region */
  57. int preflag;
  58. int short_start, long_end; /* long/short band indexes */
  59. uint8_t scale_factors[40];
  60. DECLARE_ALIGNED(16, INTFLOAT, sb_hybrid)[SBLIMIT * 18]; /* 576 samples */
  61. } GranuleDef;
  62. typedef struct MPADecodeContext {
  63. MPA_DECODE_HEADER
  64. uint8_t last_buf[LAST_BUF_SIZE];
  65. int last_buf_size;
  66. /* next header (used in free format parsing) */
  67. uint32_t free_format_next_header;
  68. GetBitContext gb;
  69. GetBitContext in_gb;
  70. DECLARE_ALIGNED(32, MPA_INT, synth_buf)[MPA_MAX_CHANNELS][512 * 2];
  71. int synth_buf_offset[MPA_MAX_CHANNELS];
  72. DECLARE_ALIGNED(32, INTFLOAT, sb_samples)[MPA_MAX_CHANNELS][36][SBLIMIT];
  73. INTFLOAT mdct_buf[MPA_MAX_CHANNELS][SBLIMIT * 18]; /* previous samples, for layer 3 MDCT */
  74. GranuleDef granules[2][2]; /* Used in Layer 3 */
  75. int adu_mode; ///< 0 for standard mp3, 1 for adu formatted mp3
  76. int dither_state;
  77. int err_recognition;
  78. AVCodecContext* avctx;
  79. MPADSPContext mpadsp;
  80. AVFloatDSPContext fdsp;
  81. AVFrame *frame;
  82. } MPADecodeContext;
  83. #define HEADER_SIZE 4
  84. #include "mpegaudiodata.h"
  85. #include "mpegaudiodectab.h"
  86. /* vlc structure for decoding layer 3 huffman tables */
  87. static VLC huff_vlc[16];
  88. static VLC_TYPE huff_vlc_tables[
  89. 0 + 128 + 128 + 128 + 130 + 128 + 154 + 166 +
  90. 142 + 204 + 190 + 170 + 542 + 460 + 662 + 414
  91. ][2];
  92. static const int huff_vlc_tables_sizes[16] = {
  93. 0, 128, 128, 128, 130, 128, 154, 166,
  94. 142, 204, 190, 170, 542, 460, 662, 414
  95. };
  96. static VLC huff_quad_vlc[2];
  97. static VLC_TYPE huff_quad_vlc_tables[128+16][2];
  98. static const int huff_quad_vlc_tables_sizes[2] = { 128, 16 };
  99. /* computed from band_size_long */
  100. static uint16_t band_index_long[9][23];
  101. #include "mpegaudio_tablegen.h"
  102. /* intensity stereo coef table */
  103. static INTFLOAT is_table[2][16];
  104. static INTFLOAT is_table_lsf[2][2][16];
  105. static INTFLOAT csa_table[8][4];
  106. static int16_t division_tab3[1<<6 ];
  107. static int16_t division_tab5[1<<8 ];
  108. static int16_t division_tab9[1<<11];
  109. static int16_t * const division_tabs[4] = {
  110. division_tab3, division_tab5, NULL, division_tab9
  111. };
  112. /* lower 2 bits: modulo 3, higher bits: shift */
  113. static uint16_t scale_factor_modshift[64];
  114. /* [i][j]: 2^(-j/3) * FRAC_ONE * 2^(i+2) / (2^(i+2) - 1) */
  115. static int32_t scale_factor_mult[15][3];
  116. /* mult table for layer 2 group quantization */
  117. #define SCALE_GEN(v) \
  118. { FIXR_OLD(1.0 * (v)), FIXR_OLD(0.7937005259 * (v)), FIXR_OLD(0.6299605249 * (v)) }
  119. static const int32_t scale_factor_mult2[3][3] = {
  120. SCALE_GEN(4.0 / 3.0), /* 3 steps */
  121. SCALE_GEN(4.0 / 5.0), /* 5 steps */
  122. SCALE_GEN(4.0 / 9.0), /* 9 steps */
  123. };
  124. /**
  125. * Convert region offsets to region sizes and truncate
  126. * size to big_values.
  127. */
  128. static void region_offset2size(GranuleDef *g)
  129. {
  130. int i, k, j = 0;
  131. g->region_size[2] = 576 / 2;
  132. for (i = 0; i < 3; i++) {
  133. k = FFMIN(g->region_size[i], g->big_values);
  134. g->region_size[i] = k - j;
  135. j = k;
  136. }
  137. }
  138. static void init_short_region(MPADecodeContext *s, GranuleDef *g)
  139. {
  140. if (g->block_type == 2) {
  141. if (s->sample_rate_index != 8)
  142. g->region_size[0] = (36 / 2);
  143. else
  144. g->region_size[0] = (72 / 2);
  145. } else {
  146. if (s->sample_rate_index <= 2)
  147. g->region_size[0] = (36 / 2);
  148. else if (s->sample_rate_index != 8)
  149. g->region_size[0] = (54 / 2);
  150. else
  151. g->region_size[0] = (108 / 2);
  152. }
  153. g->region_size[1] = (576 / 2);
  154. }
  155. static void init_long_region(MPADecodeContext *s, GranuleDef *g,
  156. int ra1, int ra2)
  157. {
  158. int l;
  159. g->region_size[0] = band_index_long[s->sample_rate_index][ra1 + 1] >> 1;
  160. /* should not overflow */
  161. l = FFMIN(ra1 + ra2 + 2, 22);
  162. g->region_size[1] = band_index_long[s->sample_rate_index][ l] >> 1;
  163. }
  164. static void compute_band_indexes(MPADecodeContext *s, GranuleDef *g)
  165. {
  166. if (g->block_type == 2) {
  167. if (g->switch_point) {
  168. /* in switched mode, the first 36 samples are handled as
  169. long blocks. For 8000 Hz, the first 72 exponents are
  170. handled as long blocks */
  171. if (s->sample_rate_index <= 2)
  172. g->long_end = 8;
  173. else
  174. g->long_end = 6;
  175. g->short_start = 3;
  176. } else {
  177. g->long_end = 0;
  178. g->short_start = 0;
  179. }
  180. } else {
  181. g->short_start = 13;
  182. g->long_end = 22;
  183. }
  184. }
  185. /* layer 1 unscaling */
  186. /* n = number of bits of the mantissa minus 1 */
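/* The quantized value is recentered around zero (mant - 2^n + 1) and multiplied
   by a scale factor that is split into a power-of-two shift and a 2^(-mod/3)
   fractional part (see scale_factor_modshift / scale_factor_mult); the result
   is rounded before the final right shift. */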
  187. static inline int l1_unscale(int n, int mant, int scale_factor)
  188. {
  189. int shift, mod;
  190. int64_t val;
  191. shift = scale_factor_modshift[scale_factor];
  192. mod = shift & 3;
  193. shift >>= 2;
  194. val = MUL64(mant + (-1 << n) + 1, scale_factor_mult[n-1][mod]);
  195. shift += n;
  196. /* NOTE: at this point, 1 <= shift <= 21 + 15 */
  197. return (int)((val + (1LL << (shift - 1))) >> shift);
  198. }
  199. static inline int l2_unscale_group(int steps, int mant, int scale_factor)
  200. {
  201. int shift, mod, val;
  202. shift = scale_factor_modshift[scale_factor];
  203. mod = shift & 3;
  204. shift >>= 2;
  205. val = (mant - (steps >> 1)) * scale_factor_mult2[steps >> 2][mod];
  206. /* NOTE: at this point, 0 <= shift <= 21 */
  207. if (shift > 0)
  208. val = (val + (1 << (shift - 1))) >> shift;
  209. return val;
  210. }
  211. /* compute value^(4/3) * 2^(exponent/4). It is normalized to FRAC_BITS */
  212. static inline int l3_unscale(int value, int exponent)
  213. {
  214. unsigned int m;
  215. int e;
  216. e = table_4_3_exp [4 * value + (exponent & 3)];
  217. m = table_4_3_value[4 * value + (exponent & 3)];
  218. e -= exponent >> 2;
  219. assert(e >= 1);
  220. if (e > 31)
  221. return 0;
  222. m = (m + (1 << (e - 1))) >> e;
  223. return m;
  224. }
  225. static av_cold void decode_init_static(void)
  226. {
  227. int i, j, k;
  228. int offset;
  229. /* scale factors table for layer 1/2 */
  230. for (i = 0; i < 64; i++) {
  231. int shift, mod;
  232. /* 1.0 (i = 3) is normalized to 2 ^ FRAC_BITS */
  233. shift = i / 3;
  234. mod = i % 3;
  235. scale_factor_modshift[i] = mod | (shift << 2);
  236. }
  237. /* scale factor multiply for layer 1 */
  238. for (i = 0; i < 15; i++) {
  239. int n, norm;
  240. n = i + 2;
  241. norm = ((INT64_C(1) << n) * FRAC_ONE) / ((1 << n) - 1);
  242. scale_factor_mult[i][0] = MULLx(norm, FIXR(1.0 * 2.0), FRAC_BITS);
  243. scale_factor_mult[i][1] = MULLx(norm, FIXR(0.7937005259 * 2.0), FRAC_BITS);
  244. scale_factor_mult[i][2] = MULLx(norm, FIXR(0.6299605249 * 2.0), FRAC_BITS);
  245. av_dlog(NULL, "%d: norm=%x s=%x %x %x\n", i, norm,
  246. scale_factor_mult[i][0],
  247. scale_factor_mult[i][1],
  248. scale_factor_mult[i][2]);
  249. }
  250. RENAME(ff_mpa_synth_init)(RENAME(ff_mpa_synth_window));
  251. /* huffman decode tables */
  252. offset = 0;
  253. for (i = 1; i < 16; i++) {
  254. const HuffTable *h = &mpa_huff_tables[i];
  255. int xsize, x, y;
  256. uint8_t tmp_bits [512] = { 0 };
  257. uint16_t tmp_codes[512] = { 0 };
  258. xsize = h->xsize;
  259. j = 0;
  260. for (x = 0; x < xsize; x++) {
  261. for (y = 0; y < xsize; y++) {
  262. tmp_bits [(x << 5) | y | ((x&&y)<<4)]= h->bits [j ];
  263. tmp_codes[(x << 5) | y | ((x&&y)<<4)]= h->codes[j++];
  264. }
  265. }
  266. /* XXX: fail test */
  267. huff_vlc[i].table = huff_vlc_tables+offset;
  268. huff_vlc[i].table_allocated = huff_vlc_tables_sizes[i];
  269. init_vlc(&huff_vlc[i], 7, 512,
  270. tmp_bits, 1, 1, tmp_codes, 2, 2,
  271. INIT_VLC_USE_NEW_STATIC);
  272. offset += huff_vlc_tables_sizes[i];
  273. }
  274. assert(offset == FF_ARRAY_ELEMS(huff_vlc_tables));
  275. offset = 0;
  276. for (i = 0; i < 2; i++) {
  277. huff_quad_vlc[i].table = huff_quad_vlc_tables+offset;
  278. huff_quad_vlc[i].table_allocated = huff_quad_vlc_tables_sizes[i];
  279. init_vlc(&huff_quad_vlc[i], i == 0 ? 7 : 4, 16,
  280. mpa_quad_bits[i], 1, 1, mpa_quad_codes[i], 1, 1,
  281. INIT_VLC_USE_NEW_STATIC);
  282. offset += huff_quad_vlc_tables_sizes[i];
  283. }
  284. assert(offset == FF_ARRAY_ELEMS(huff_quad_vlc_tables));
  285. for (i = 0; i < 9; i++) {
  286. k = 0;
  287. for (j = 0; j < 22; j++) {
  288. band_index_long[i][j] = k;
  289. k += band_size_long[i][j];
  290. }
  291. band_index_long[i][22] = k;
  292. }
  293. /* compute n ^ (4/3) and store it in mantissa/exp format */
  294. mpegaudio_tableinit();
  295. for (i = 0; i < 4; i++) {
  296. if (ff_mpa_quant_bits[i] < 0) {
  297. for (j = 0; j < (1 << (-ff_mpa_quant_bits[i]+1)); j++) {
  298. int val1, val2, val3, steps;
  299. int val = j;
  300. steps = ff_mpa_quant_steps[i];
  301. val1 = val % steps;
  302. val /= steps;
  303. val2 = val % steps;
  304. val3 = val / steps;
  305. division_tabs[i][j] = val1 + (val2 << 4) + (val3 << 8);
  306. }
  307. }
  308. }
  309. for (i = 0; i < 7; i++) {
  310. float f;
  311. INTFLOAT v;
  312. if (i != 6) {
  313. f = tan((double)i * M_PI / 12.0);
  314. v = FIXR(f / (1.0 + f));
  315. } else {
  316. v = FIXR(1.0);
  317. }
  318. is_table[0][ i] = v;
  319. is_table[1][6 - i] = v;
  320. }
  321. /* invalid values */
  322. for (i = 7; i < 16; i++)
  323. is_table[0][i] = is_table[1][i] = 0.0;
  324. for (i = 0; i < 16; i++) {
  325. double f;
  326. int e, k;
  327. for (j = 0; j < 2; j++) {
  328. e = -(j + 1) * ((i + 1) >> 1);
  329. f = pow(2.0, e / 4.0);
  330. k = i & 1;
  331. is_table_lsf[j][k ^ 1][i] = FIXR(f);
  332. is_table_lsf[j][k ][i] = FIXR(1.0);
  333. av_dlog(NULL, "is_table_lsf %d %d: %f %f\n",
  334. i, j, (float) is_table_lsf[j][0][i],
  335. (float) is_table_lsf[j][1][i]);
  336. }
  337. }
  338. for (i = 0; i < 8; i++) {
  339. float ci, cs, ca;
  340. ci = ci_table[i];
  341. cs = 1.0 / sqrt(1.0 + ci * ci);
  342. ca = cs * ci;
  343. #if !CONFIG_FLOAT
  344. csa_table[i][0] = FIXHR(cs/4);
  345. csa_table[i][1] = FIXHR(ca/4);
  346. csa_table[i][2] = FIXHR(ca/4) + FIXHR(cs/4);
  347. csa_table[i][3] = FIXHR(ca/4) - FIXHR(cs/4);
  348. #else
  349. csa_table[i][0] = cs;
  350. csa_table[i][1] = ca;
  351. csa_table[i][2] = ca + cs;
  352. csa_table[i][3] = ca - cs;
  353. #endif
  354. }
  355. }
  356. static av_cold int decode_init(AVCodecContext * avctx)
  357. {
  358. static int initialized_tables = 0;
  359. MPADecodeContext *s = avctx->priv_data;
  360. if (!initialized_tables) {
  361. decode_init_static();
  362. initialized_tables = 1;
  363. }
  364. s->avctx = avctx;
  365. avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
  366. ff_mpadsp_init(&s->mpadsp);
  367. if (avctx->request_sample_fmt == OUT_FMT &&
  368. avctx->codec_id != AV_CODEC_ID_MP3ON4)
  369. avctx->sample_fmt = OUT_FMT;
  370. else
  371. avctx->sample_fmt = OUT_FMT_P;
  372. s->err_recognition = avctx->err_recognition;
  373. if (avctx->codec_id == AV_CODEC_ID_MP3ADU)
  374. s->adu_mode = 1;
  375. return 0;
  376. }
  377. #define C3 FIXHR(0.86602540378443864676/2)
  378. #define C4 FIXHR(0.70710678118654752439/2) //0.5 / cos(pi*(9)/36)
  379. #define C5 FIXHR(0.51763809020504152469/2) //0.5 / cos(pi*(5)/36)
  380. #define C6 FIXHR(1.93185165257813657349/4) //0.5 / cos(pi*(15)/36)
  381. /* 12-point IMDCT. We compute it "by hand" by factoring the obvious
  382. cases. */
  383. static void imdct12(INTFLOAT *out, INTFLOAT *in)
  384. {
  385. INTFLOAT in0, in1, in2, in3, in4, in5, t1, t2;
  386. in0 = in[0*3];
  387. in1 = in[1*3] + in[0*3];
  388. in2 = in[2*3] + in[1*3];
  389. in3 = in[3*3] + in[2*3];
  390. in4 = in[4*3] + in[3*3];
  391. in5 = in[5*3] + in[4*3];
  392. in5 += in3;
  393. in3 += in1;
  394. in2 = MULH3(in2, C3, 2);
  395. in3 = MULH3(in3, C3, 4);
  396. t1 = in0 - in4;
  397. t2 = MULH3(in1 - in5, C4, 2);
  398. out[ 7] =
  399. out[10] = t1 + t2;
  400. out[ 1] =
  401. out[ 4] = t1 - t2;
  402. in0 += SHR(in4, 1);
  403. in4 = in0 + in2;
  404. in5 += 2*in1;
  405. in1 = MULH3(in5 + in3, C5, 1);
  406. out[ 8] =
  407. out[ 9] = in4 + in1;
  408. out[ 2] =
  409. out[ 3] = in4 - in1;
  410. in0 -= in2;
  411. in5 = MULH3(in5 - in3, C6, 2);
  412. out[ 0] =
  413. out[ 5] = in0 - in5;
  414. out[ 6] =
  415. out[11] = in0 + in5;
  416. }
  417. /* return the number of decoded frames */
  418. static int mp_decode_layer1(MPADecodeContext *s)
  419. {
  420. int bound, i, v, n, ch, j, mant;
  421. uint8_t allocation[MPA_MAX_CHANNELS][SBLIMIT];
  422. uint8_t scale_factors[MPA_MAX_CHANNELS][SBLIMIT];
  423. if (s->mode == MPA_JSTEREO)
  424. bound = (s->mode_ext + 1) * 4;
  425. else
  426. bound = SBLIMIT;
  427. /* allocation bits */
  428. for (i = 0; i < bound; i++) {
  429. for (ch = 0; ch < s->nb_channels; ch++) {
  430. allocation[ch][i] = get_bits(&s->gb, 4);
  431. }
  432. }
  433. for (i = bound; i < SBLIMIT; i++)
  434. allocation[0][i] = get_bits(&s->gb, 4);
  435. /* scale factors */
  436. for (i = 0; i < bound; i++) {
  437. for (ch = 0; ch < s->nb_channels; ch++) {
  438. if (allocation[ch][i])
  439. scale_factors[ch][i] = get_bits(&s->gb, 6);
  440. }
  441. }
  442. for (i = bound; i < SBLIMIT; i++) {
  443. if (allocation[0][i]) {
  444. scale_factors[0][i] = get_bits(&s->gb, 6);
  445. scale_factors[1][i] = get_bits(&s->gb, 6);
  446. }
  447. }
  448. /* compute samples */
  449. for (j = 0; j < 12; j++) {
  450. for (i = 0; i < bound; i++) {
  451. for (ch = 0; ch < s->nb_channels; ch++) {
  452. n = allocation[ch][i];
  453. if (n) {
  454. mant = get_bits(&s->gb, n + 1);
  455. v = l1_unscale(n, mant, scale_factors[ch][i]);
  456. } else {
  457. v = 0;
  458. }
  459. s->sb_samples[ch][j][i] = v;
  460. }
  461. }
  462. for (i = bound; i < SBLIMIT; i++) {
  463. n = allocation[0][i];
  464. if (n) {
  465. mant = get_bits(&s->gb, n + 1);
  466. v = l1_unscale(n, mant, scale_factors[0][i]);
  467. s->sb_samples[0][j][i] = v;
  468. v = l1_unscale(n, mant, scale_factors[1][i]);
  469. s->sb_samples[1][j][i] = v;
  470. } else {
  471. s->sb_samples[0][j][i] = 0;
  472. s->sb_samples[1][j][i] = 0;
  473. }
  474. }
  475. }
  476. return 12;
  477. }
  478. static int mp_decode_layer2(MPADecodeContext *s)
  479. {
  480. int sblimit; /* number of used subbands */
  481. const unsigned char *alloc_table;
  482. int table, bit_alloc_bits, i, j, ch, bound, v;
  483. unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT];
  484. unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT];
  485. unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3], *sf;
  486. int scale, qindex, bits, steps, k, l, m, b;
  487. /* select decoding table */
  488. table = ff_mpa_l2_select_table(s->bit_rate / 1000, s->nb_channels,
  489. s->sample_rate, s->lsf);
  490. sblimit = ff_mpa_sblimit_table[table];
  491. alloc_table = ff_mpa_alloc_tables[table];
  492. if (s->mode == MPA_JSTEREO)
  493. bound = (s->mode_ext + 1) * 4;
  494. else
  495. bound = sblimit;
  496. av_dlog(s->avctx, "bound=%d sblimit=%d\n", bound, sblimit);
  497. /* sanity check */
  498. if (bound > sblimit)
  499. bound = sblimit;
  500. /* parse bit allocation */
  501. j = 0;
  502. for (i = 0; i < bound; i++) {
  503. bit_alloc_bits = alloc_table[j];
  504. for (ch = 0; ch < s->nb_channels; ch++)
  505. bit_alloc[ch][i] = get_bits(&s->gb, bit_alloc_bits);
  506. j += 1 << bit_alloc_bits;
  507. }
  508. for (i = bound; i < sblimit; i++) {
  509. bit_alloc_bits = alloc_table[j];
  510. v = get_bits(&s->gb, bit_alloc_bits);
  511. bit_alloc[0][i] = v;
  512. bit_alloc[1][i] = v;
  513. j += 1 << bit_alloc_bits;
  514. }
  515. /* scale codes */
  516. for (i = 0; i < sblimit; i++) {
  517. for (ch = 0; ch < s->nb_channels; ch++) {
  518. if (bit_alloc[ch][i])
  519. scale_code[ch][i] = get_bits(&s->gb, 2);
  520. }
  521. }
  522. /* scale factors */
  523. for (i = 0; i < sblimit; i++) {
  524. for (ch = 0; ch < s->nb_channels; ch++) {
  525. if (bit_alloc[ch][i]) {
  526. sf = scale_factors[ch][i];
  527. switch (scale_code[ch][i]) {
  528. default:
  529. case 0:
  530. sf[0] = get_bits(&s->gb, 6);
  531. sf[1] = get_bits(&s->gb, 6);
  532. sf[2] = get_bits(&s->gb, 6);
  533. break;
  534. case 2:
  535. sf[0] = get_bits(&s->gb, 6);
  536. sf[1] = sf[0];
  537. sf[2] = sf[0];
  538. break;
  539. case 1:
  540. sf[0] = get_bits(&s->gb, 6);
  541. sf[2] = get_bits(&s->gb, 6);
  542. sf[1] = sf[0];
  543. break;
  544. case 3:
  545. sf[0] = get_bits(&s->gb, 6);
  546. sf[2] = get_bits(&s->gb, 6);
  547. sf[1] = sf[2];
  548. break;
  549. }
  550. }
  551. }
  552. }
  553. /* samples */
  554. for (k = 0; k < 3; k++) {
  555. for (l = 0; l < 12; l += 3) {
  556. j = 0;
  557. for (i = 0; i < bound; i++) {
  558. bit_alloc_bits = alloc_table[j];
  559. for (ch = 0; ch < s->nb_channels; ch++) {
  560. b = bit_alloc[ch][i];
  561. if (b) {
  562. scale = scale_factors[ch][i][k];
  563. qindex = alloc_table[j+b];
  564. bits = ff_mpa_quant_bits[qindex];
  565. if (bits < 0) {
  566. int v2;
  567. /* 3 values at the same time */
  568. v = get_bits(&s->gb, -bits);
  569. v2 = division_tabs[qindex][v];
  570. steps = ff_mpa_quant_steps[qindex];
  571. s->sb_samples[ch][k * 12 + l + 0][i] =
  572. l2_unscale_group(steps, v2 & 15, scale);
  573. s->sb_samples[ch][k * 12 + l + 1][i] =
  574. l2_unscale_group(steps, (v2 >> 4) & 15, scale);
  575. s->sb_samples[ch][k * 12 + l + 2][i] =
  576. l2_unscale_group(steps, v2 >> 8 , scale);
  577. } else {
  578. for (m = 0; m < 3; m++) {
  579. v = get_bits(&s->gb, bits);
  580. v = l1_unscale(bits - 1, v, scale);
  581. s->sb_samples[ch][k * 12 + l + m][i] = v;
  582. }
  583. }
  584. } else {
  585. s->sb_samples[ch][k * 12 + l + 0][i] = 0;
  586. s->sb_samples[ch][k * 12 + l + 1][i] = 0;
  587. s->sb_samples[ch][k * 12 + l + 2][i] = 0;
  588. }
  589. }
  590. /* next subband in alloc table */
  591. j += 1 << bit_alloc_bits;
  592. }
  593. /* XXX: find a way to avoid this duplication of code */
  594. for (i = bound; i < sblimit; i++) {
  595. bit_alloc_bits = alloc_table[j];
  596. b = bit_alloc[0][i];
  597. if (b) {
  598. int mant, scale0, scale1;
  599. scale0 = scale_factors[0][i][k];
  600. scale1 = scale_factors[1][i][k];
  601. qindex = alloc_table[j+b];
  602. bits = ff_mpa_quant_bits[qindex];
  603. if (bits < 0) {
  604. /* 3 values at the same time */
  605. v = get_bits(&s->gb, -bits);
  606. steps = ff_mpa_quant_steps[qindex];
  607. mant = v % steps;
  608. v = v / steps;
  609. s->sb_samples[0][k * 12 + l + 0][i] =
  610. l2_unscale_group(steps, mant, scale0);
  611. s->sb_samples[1][k * 12 + l + 0][i] =
  612. l2_unscale_group(steps, mant, scale1);
  613. mant = v % steps;
  614. v = v / steps;
  615. s->sb_samples[0][k * 12 + l + 1][i] =
  616. l2_unscale_group(steps, mant, scale0);
  617. s->sb_samples[1][k * 12 + l + 1][i] =
  618. l2_unscale_group(steps, mant, scale1);
  619. s->sb_samples[0][k * 12 + l + 2][i] =
  620. l2_unscale_group(steps, v, scale0);
  621. s->sb_samples[1][k * 12 + l + 2][i] =
  622. l2_unscale_group(steps, v, scale1);
  623. } else {
  624. for (m = 0; m < 3; m++) {
  625. mant = get_bits(&s->gb, bits);
  626. s->sb_samples[0][k * 12 + l + m][i] =
  627. l1_unscale(bits - 1, mant, scale0);
  628. s->sb_samples[1][k * 12 + l + m][i] =
  629. l1_unscale(bits - 1, mant, scale1);
  630. }
  631. }
  632. } else {
  633. s->sb_samples[0][k * 12 + l + 0][i] = 0;
  634. s->sb_samples[0][k * 12 + l + 1][i] = 0;
  635. s->sb_samples[0][k * 12 + l + 2][i] = 0;
  636. s->sb_samples[1][k * 12 + l + 0][i] = 0;
  637. s->sb_samples[1][k * 12 + l + 1][i] = 0;
  638. s->sb_samples[1][k * 12 + l + 2][i] = 0;
  639. }
  640. /* next subband in alloc table */
  641. j += 1 << bit_alloc_bits;
  642. }
  643. /* fill remaining samples to zero */
  644. for (i = sblimit; i < SBLIMIT; i++) {
  645. for (ch = 0; ch < s->nb_channels; ch++) {
  646. s->sb_samples[ch][k * 12 + l + 0][i] = 0;
  647. s->sb_samples[ch][k * 12 + l + 1][i] = 0;
  648. s->sb_samples[ch][k * 12 + l + 2][i] = 0;
  649. }
  650. }
  651. }
  652. }
  653. return 3 * 12;
  654. }
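/* SPLIT extracts the least significant base-n digit of sf (dst = sf % n) and
   replaces sf with the quotient, using reciprocal multiplications instead of
   divisions for n = 3, 5 and 6. It is used below to unpack the LSF
   scalefac_compress field into individual slen values. */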
  655. #define SPLIT(dst,sf,n) \
  656. if (n == 3) { \
  657. int m = (sf * 171) >> 9; \
  658. dst = sf - 3 * m; \
  659. sf = m; \
  660. } else if (n == 4) { \
  661. dst = sf & 3; \
  662. sf >>= 2; \
  663. } else if (n == 5) { \
  664. int m = (sf * 205) >> 10; \
  665. dst = sf - 5 * m; \
  666. sf = m; \
  667. } else if (n == 6) { \
  668. int m = (sf * 171) >> 10; \
  669. dst = sf - 6 * m; \
  670. sf = m; \
  671. } else { \
  672. dst = 0; \
  673. }
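/* Example: for the sf < 180 intensity stereo case, lsf_sf_expand(slen, sf, 6, 6, 0)
   yields slen[3] = 0, slen[2] = sf % 6, slen[1] = (sf / 6) % 6 and
   slen[0] = sf / 36. */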
  674. static av_always_inline void lsf_sf_expand(int *slen, int sf, int n1, int n2,
  675. int n3)
  676. {
  677. SPLIT(slen[3], sf, n3)
  678. SPLIT(slen[2], sf, n2)
  679. SPLIT(slen[1], sf, n1)
  680. slen[0] = sf;
  681. }
  682. static void exponents_from_scale_factors(MPADecodeContext *s, GranuleDef *g,
  683. int16_t *exponents)
  684. {
  685. const uint8_t *bstab, *pretab;
  686. int len, i, j, k, l, v0, shift, gain, gains[3];
  687. int16_t *exp_ptr;
  688. exp_ptr = exponents;
  689. gain = g->global_gain - 210;
  690. shift = g->scalefac_scale + 1;
  691. bstab = band_size_long[s->sample_rate_index];
  692. pretab = mpa_pretab[g->preflag];
  693. for (i = 0; i < g->long_end; i++) {
  694. v0 = gain - ((g->scale_factors[i] + pretab[i]) << shift) + 400;
  695. len = bstab[i];
  696. for (j = len; j > 0; j--)
  697. *exp_ptr++ = v0;
  698. }
  699. if (g->short_start < 13) {
  700. bstab = band_size_short[s->sample_rate_index];
  701. gains[0] = gain - (g->subblock_gain[0] << 3);
  702. gains[1] = gain - (g->subblock_gain[1] << 3);
  703. gains[2] = gain - (g->subblock_gain[2] << 3);
  704. k = g->long_end;
  705. for (i = g->short_start; i < 13; i++) {
  706. len = bstab[i];
  707. for (l = 0; l < 3; l++) {
  708. v0 = gains[l] - (g->scale_factors[k++] << shift) + 400;
  709. for (j = len; j > 0; j--)
  710. *exp_ptr++ = v0;
  711. }
  712. }
  713. }
  714. }
  715. /* handle n = 0 too */
  716. static inline int get_bitsz(GetBitContext *s, int n)
  717. {
  718. return n ? get_bits(s, n) : 0;
  719. }
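/* Once the reader has consumed all bits of the reservoir buffer (s->gb over
   last_buf), switch back to the saved in_gb context that covers the current
   frame payload and adjust the end positions accordingly. */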
  720. static void switch_buffer(MPADecodeContext *s, int *pos, int *end_pos,
  721. int *end_pos2)
  722. {
  723. if (s->in_gb.buffer && *pos >= s->gb.size_in_bits) {
  724. s->gb = s->in_gb;
  725. s->in_gb.buffer = NULL;
  726. assert((get_bits_count(&s->gb) & 7) == 0);
  727. skip_bits_long(&s->gb, *pos - *end_pos);
  728. *end_pos2 =
  729. *end_pos = *end_pos2 + get_bits_count(&s->gb) - *pos;
  730. *pos = get_bits_count(&s->gb);
  731. }
  732. }
  733. /* The following is optimized code for
  734. INTFLOAT v = *src;
  735. if (get_bits1(&s->gb))
  736. v = -v;
  737. *dst = v;
  738. */
  739. #if CONFIG_FLOAT
  740. #define READ_FLIP_SIGN(dst,src) \
  741. v = AV_RN32A(src) ^ (get_bits1(&s->gb) << 31); \
  742. AV_WN32A(dst, v);
  743. #else
  744. #define READ_FLIP_SIGN(dst,src) \
  745. v = -get_bits1(&s->gb); \
  746. *(dst) = (*(src) ^ v) - v;
  747. #endif
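/* Decode the spectral data of one granule: first the big_values region as
   pairs of values (with linbits escapes for magnitudes >= 15), then the count1
   region as quadruples of -1/0/+1 values, until part2_3_length bits (end_pos2)
   have been consumed. */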
  748. static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
  749. int16_t *exponents, int end_pos2)
  750. {
  751. int s_index;
  752. int i;
  753. int last_pos, bits_left;
  754. VLC *vlc;
  755. int end_pos = FFMIN(end_pos2, s->gb.size_in_bits);
  756. /* low frequencies (called big values) */
  757. s_index = 0;
  758. for (i = 0; i < 3; i++) {
  759. int j, k, l, linbits;
  760. j = g->region_size[i];
  761. if (j == 0)
  762. continue;
  763. /* select vlc table */
  764. k = g->table_select[i];
  765. l = mpa_huff_data[k][0];
  766. linbits = mpa_huff_data[k][1];
  767. vlc = &huff_vlc[l];
  768. if (!l) {
  769. memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * 2 * j);
  770. s_index += 2 * j;
  771. continue;
  772. }
  773. /* read huffcode and compute each couple */
  774. for (; j > 0; j--) {
  775. int exponent, x, y;
  776. int v;
  777. int pos = get_bits_count(&s->gb);
  778. if (pos >= end_pos){
  779. switch_buffer(s, &pos, &end_pos, &end_pos2);
  780. if (pos >= end_pos)
  781. break;
  782. }
  783. y = get_vlc2(&s->gb, vlc->table, 7, 3);
  784. if (!y) {
  785. g->sb_hybrid[s_index ] =
  786. g->sb_hybrid[s_index+1] = 0;
  787. s_index += 2;
  788. continue;
  789. }
  790. exponent= exponents[s_index];
  791. av_dlog(s->avctx, "region=%d n=%d x=%d y=%d exp=%d\n",
  792. i, g->region_size[i] - j, x, y, exponent);
  793. if (y & 16) {
  794. x = y >> 5;
  795. y = y & 0x0f;
  796. if (x < 15) {
  797. READ_FLIP_SIGN(g->sb_hybrid + s_index, RENAME(expval_table)[exponent] + x)
  798. } else {
  799. x += get_bitsz(&s->gb, linbits);
  800. v = l3_unscale(x, exponent);
  801. if (get_bits1(&s->gb))
  802. v = -v;
  803. g->sb_hybrid[s_index] = v;
  804. }
  805. if (y < 15) {
  806. READ_FLIP_SIGN(g->sb_hybrid + s_index + 1, RENAME(expval_table)[exponent] + y)
  807. } else {
  808. y += get_bitsz(&s->gb, linbits);
  809. v = l3_unscale(y, exponent);
  810. if (get_bits1(&s->gb))
  811. v = -v;
  812. g->sb_hybrid[s_index+1] = v;
  813. }
  814. } else {
  815. x = y >> 5;
  816. y = y & 0x0f;
  817. x += y;
  818. if (x < 15) {
  819. READ_FLIP_SIGN(g->sb_hybrid + s_index + !!y, RENAME(expval_table)[exponent] + x)
  820. } else {
  821. x += get_bitsz(&s->gb, linbits);
  822. v = l3_unscale(x, exponent);
  823. if (get_bits1(&s->gb))
  824. v = -v;
  825. g->sb_hybrid[s_index+!!y] = v;
  826. }
  827. g->sb_hybrid[s_index + !y] = 0;
  828. }
  829. s_index += 2;
  830. }
  831. }
  832. /* high frequencies */
  833. vlc = &huff_quad_vlc[g->count1table_select];
  834. last_pos = 0;
  835. while (s_index <= 572) {
  836. int pos, code;
  837. pos = get_bits_count(&s->gb);
  838. if (pos >= end_pos) {
  839. if (pos > end_pos2 && last_pos) {
  840. /* some encoders generate an incorrect size for this
  841. part. We must go back into the data */
  842. s_index -= 4;
  843. skip_bits_long(&s->gb, last_pos - pos);
  844. av_log(s->avctx, AV_LOG_INFO, "overread, skip %d enddists: %d %d\n", last_pos - pos, end_pos-pos, end_pos2-pos);
  845. if(s->err_recognition & AV_EF_BITSTREAM)
  846. s_index=0;
  847. break;
  848. }
  849. switch_buffer(s, &pos, &end_pos, &end_pos2);
  850. if (pos >= end_pos)
  851. break;
  852. }
  853. last_pos = pos;
  854. code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1);
  855. av_dlog(s->avctx, "t=%d code=%d\n", g->count1table_select, code);
  856. g->sb_hybrid[s_index+0] =
  857. g->sb_hybrid[s_index+1] =
  858. g->sb_hybrid[s_index+2] =
  859. g->sb_hybrid[s_index+3] = 0;
  860. while (code) {
  861. static const int idxtab[16] = { 3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0 };
  862. int v;
  863. int pos = s_index + idxtab[code];
  864. code ^= 8 >> idxtab[code];
  865. READ_FLIP_SIGN(g->sb_hybrid + pos, RENAME(exp_table)+exponents[pos])
  866. }
  867. s_index += 4;
  868. }
  869. /* skip extension bits */
  870. bits_left = end_pos2 - get_bits_count(&s->gb);
  871. if (bits_left < 0 && (s->err_recognition & AV_EF_BUFFER)) {
  872. av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
  873. s_index=0;
  874. } else if (bits_left > 0 && (s->err_recognition & AV_EF_BUFFER)) {
  875. av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
  876. s_index = 0;
  877. }
  878. memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * (576 - s_index));
  879. skip_bits_long(&s->gb, bits_left);
  880. i = get_bits_count(&s->gb);
  881. switch_buffer(s, &i, &end_pos, &end_pos2);
  882. return 0;
  883. }
  884. /* Reorder short blocks from bitstream order to interleaved order. It
  885. would be faster to do this while parsing, but the code would be far
  886. more complicated. */
  887. static void reorder_block(MPADecodeContext *s, GranuleDef *g)
  888. {
  889. int i, j, len;
  890. INTFLOAT *ptr, *dst, *ptr1;
  891. INTFLOAT tmp[576];
  892. if (g->block_type != 2)
  893. return;
  894. if (g->switch_point) {
  895. if (s->sample_rate_index != 8)
  896. ptr = g->sb_hybrid + 36;
  897. else
  898. ptr = g->sb_hybrid + 72;
  899. } else {
  900. ptr = g->sb_hybrid;
  901. }
  902. for (i = g->short_start; i < 13; i++) {
  903. len = band_size_short[s->sample_rate_index][i];
  904. ptr1 = ptr;
  905. dst = tmp;
  906. for (j = len; j > 0; j--) {
  907. *dst++ = ptr[0*len];
  908. *dst++ = ptr[1*len];
  909. *dst++ = ptr[2*len];
  910. ptr++;
  911. }
  912. ptr += 2 * len;
  913. memcpy(ptr1, tmp, len * 3 * sizeof(*ptr1));
  914. }
  915. }
  916. #define ISQRT2 FIXR(0.70710678118654752440)
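/* Joint stereo processing: intensity stereo rescales the left channel into
   both channels using is_tab (is_table_lsf for LSF streams), and mid/side
   stereo reconstructs L/R as (M + S)/sqrt(2) and (M - S)/sqrt(2). */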
  917. static void compute_stereo(MPADecodeContext *s, GranuleDef *g0, GranuleDef *g1)
  918. {
  919. int i, j, k, l;
  920. int sf_max, sf, len, non_zero_found;
  921. INTFLOAT (*is_tab)[16], *tab0, *tab1, tmp0, tmp1, v1, v2;
  922. int non_zero_found_short[3];
  923. /* intensity stereo */
  924. if (s->mode_ext & MODE_EXT_I_STEREO) {
  925. if (!s->lsf) {
  926. is_tab = is_table;
  927. sf_max = 7;
  928. } else {
  929. is_tab = is_table_lsf[g1->scalefac_compress & 1];
  930. sf_max = 16;
  931. }
  932. tab0 = g0->sb_hybrid + 576;
  933. tab1 = g1->sb_hybrid + 576;
  934. non_zero_found_short[0] = 0;
  935. non_zero_found_short[1] = 0;
  936. non_zero_found_short[2] = 0;
  937. k = (13 - g1->short_start) * 3 + g1->long_end - 3;
  938. for (i = 12; i >= g1->short_start; i--) {
  939. /* for last band, use previous scale factor */
  940. if (i != 11)
  941. k -= 3;
  942. len = band_size_short[s->sample_rate_index][i];
  943. for (l = 2; l >= 0; l--) {
  944. tab0 -= len;
  945. tab1 -= len;
  946. if (!non_zero_found_short[l]) {
  947. /* test if non zero band. if so, stop doing i-stereo */
  948. for (j = 0; j < len; j++) {
  949. if (tab1[j] != 0) {
  950. non_zero_found_short[l] = 1;
  951. goto found1;
  952. }
  953. }
  954. sf = g1->scale_factors[k + l];
  955. if (sf >= sf_max)
  956. goto found1;
  957. v1 = is_tab[0][sf];
  958. v2 = is_tab[1][sf];
  959. for (j = 0; j < len; j++) {
  960. tmp0 = tab0[j];
  961. tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
  962. tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
  963. }
  964. } else {
  965. found1:
  966. if (s->mode_ext & MODE_EXT_MS_STEREO) {
  967. /* lower part of the spectrum: do MS stereo
  968. if enabled */
  969. for (j = 0; j < len; j++) {
  970. tmp0 = tab0[j];
  971. tmp1 = tab1[j];
  972. tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
  973. tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
  974. }
  975. }
  976. }
  977. }
  978. }
  979. non_zero_found = non_zero_found_short[0] |
  980. non_zero_found_short[1] |
  981. non_zero_found_short[2];
  982. for (i = g1->long_end - 1;i >= 0;i--) {
  983. len = band_size_long[s->sample_rate_index][i];
  984. tab0 -= len;
  985. tab1 -= len;
  986. /* test if non zero band. if so, stop doing i-stereo */
  987. if (!non_zero_found) {
  988. for (j = 0; j < len; j++) {
  989. if (tab1[j] != 0) {
  990. non_zero_found = 1;
  991. goto found2;
  992. }
  993. }
  994. /* for last band, use previous scale factor */
  995. k = (i == 21) ? 20 : i;
  996. sf = g1->scale_factors[k];
  997. if (sf >= sf_max)
  998. goto found2;
  999. v1 = is_tab[0][sf];
  1000. v2 = is_tab[1][sf];
  1001. for (j = 0; j < len; j++) {
  1002. tmp0 = tab0[j];
  1003. tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
  1004. tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
  1005. }
  1006. } else {
  1007. found2:
  1008. if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1009. /* lower part of the spectrum: do MS stereo
  1010. if enabled */
  1011. for (j = 0; j < len; j++) {
  1012. tmp0 = tab0[j];
  1013. tmp1 = tab1[j];
  1014. tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
  1015. tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
  1016. }
  1017. }
  1018. }
  1019. }
  1020. } else if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1021. /* ms stereo ONLY */
  1022. /* NOTE: the 1/sqrt(2) normalization factor is included in the
  1023. global gain */
  1024. #if CONFIG_FLOAT
  1025. s->fdsp.butterflies_float(g0->sb_hybrid, g1->sb_hybrid, 576);
  1026. #else
  1027. tab0 = g0->sb_hybrid;
  1028. tab1 = g1->sb_hybrid;
  1029. for (i = 0; i < 576; i++) {
  1030. tmp0 = tab0[i];
  1031. tmp1 = tab1[i];
  1032. tab0[i] = tmp0 + tmp1;
  1033. tab1[i] = tmp0 - tmp1;
  1034. }
  1035. #endif
  1036. }
  1037. }
  1038. #if CONFIG_FLOAT
  1039. #define AA(j) do { \
  1040. float tmp0 = ptr[-1-j]; \
  1041. float tmp1 = ptr[ j]; \
  1042. ptr[-1-j] = tmp0 * csa_table[j][0] - tmp1 * csa_table[j][1]; \
  1043. ptr[ j] = tmp0 * csa_table[j][1] + tmp1 * csa_table[j][0]; \
  1044. } while (0)
  1045. #else
  1046. #define AA(j) do { \
  1047. int tmp0 = ptr[-1-j]; \
  1048. int tmp1 = ptr[ j]; \
  1049. int tmp2 = MULH(tmp0 + tmp1, csa_table[j][0]); \
  1050. ptr[-1-j] = 4 * (tmp2 - MULH(tmp1, csa_table[j][2])); \
  1051. ptr[ j] = 4 * (tmp2 + MULH(tmp0, csa_table[j][3])); \
  1052. } while (0)
  1053. #endif
  1054. static void compute_antialias(MPADecodeContext *s, GranuleDef *g)
  1055. {
  1056. INTFLOAT *ptr;
  1057. int n, i;
  1058. /* we antialias only "long" bands */
  1059. if (g->block_type == 2) {
  1060. if (!g->switch_point)
  1061. return;
  1062. /* XXX: check this for 8000Hz case */
  1063. n = 1;
  1064. } else {
  1065. n = SBLIMIT - 1;
  1066. }
  1067. ptr = g->sb_hybrid + 18;
  1068. for (i = n; i > 0; i--) {
  1069. AA(0);
  1070. AA(1);
  1071. AA(2);
  1072. AA(3);
  1073. AA(4);
  1074. AA(5);
  1075. AA(6);
  1076. AA(7);
  1077. ptr += 18;
  1078. }
  1079. }
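/* Long blocks go through the 36-point IMDCT in mpadsp; short blocks use three
   12-point IMDCTs per subband. The windowed outputs are overlap-added with
   mdct_buf (the previous samples) into sb_samples. */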
  1080. static void compute_imdct(MPADecodeContext *s, GranuleDef *g,
  1081. INTFLOAT *sb_samples, INTFLOAT *mdct_buf)
  1082. {
  1083. INTFLOAT *win, *out_ptr, *ptr, *buf, *ptr1;
  1084. INTFLOAT out2[12];
  1085. int i, j, mdct_long_end, sblimit;
  1086. /* find last non zero block */
  1087. ptr = g->sb_hybrid + 576;
  1088. ptr1 = g->sb_hybrid + 2 * 18;
  1089. while (ptr >= ptr1) {
  1090. int32_t *p;
  1091. ptr -= 6;
  1092. p = (int32_t*)ptr;
  1093. if (p[0] | p[1] | p[2] | p[3] | p[4] | p[5])
  1094. break;
  1095. }
  1096. sblimit = ((ptr - g->sb_hybrid) / 18) + 1;
  1097. if (g->block_type == 2) {
  1098. /* XXX: check for 8000 Hz */
  1099. if (g->switch_point)
  1100. mdct_long_end = 2;
  1101. else
  1102. mdct_long_end = 0;
  1103. } else {
  1104. mdct_long_end = sblimit;
  1105. }
  1106. s->mpadsp.RENAME(imdct36_blocks)(sb_samples, mdct_buf, g->sb_hybrid,
  1107. mdct_long_end, g->switch_point,
  1108. g->block_type);
  1109. buf = mdct_buf + 4*18*(mdct_long_end >> 2) + (mdct_long_end & 3);
  1110. ptr = g->sb_hybrid + 18 * mdct_long_end;
  1111. for (j = mdct_long_end; j < sblimit; j++) {
  1112. /* select frequency inversion */
  1113. win = RENAME(ff_mdct_win)[2 + (4 & -(j & 1))];
  1114. out_ptr = sb_samples + j;
  1115. for (i = 0; i < 6; i++) {
  1116. *out_ptr = buf[4*i];
  1117. out_ptr += SBLIMIT;
  1118. }
  1119. imdct12(out2, ptr + 0);
  1120. for (i = 0; i < 6; i++) {
  1121. *out_ptr = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*1)];
  1122. buf[4*(i + 6*2)] = MULH3(out2[i + 6], win[i + 6], 1);
  1123. out_ptr += SBLIMIT;
  1124. }
  1125. imdct12(out2, ptr + 1);
  1126. for (i = 0; i < 6; i++) {
  1127. *out_ptr = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*2)];
  1128. buf[4*(i + 6*0)] = MULH3(out2[i + 6], win[i + 6], 1);
  1129. out_ptr += SBLIMIT;
  1130. }
  1131. imdct12(out2, ptr + 2);
  1132. for (i = 0; i < 6; i++) {
  1133. buf[4*(i + 6*0)] = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*0)];
  1134. buf[4*(i + 6*1)] = MULH3(out2[i + 6], win[i + 6], 1);
  1135. buf[4*(i + 6*2)] = 0;
  1136. }
  1137. ptr += 18;
  1138. buf += (j&3) != 3 ? 1 : (4*18-3);
  1139. }
  1140. /* zero bands */
  1141. for (j = sblimit; j < SBLIMIT; j++) {
  1142. /* overlap */
  1143. out_ptr = sb_samples + j;
  1144. for (i = 0; i < 18; i++) {
  1145. *out_ptr = buf[4*i];
  1146. buf[4*i] = 0;
  1147. out_ptr += SBLIMIT;
  1148. }
  1149. buf += (j&3) != 3 ? 1 : (4*18-3);
  1150. }
  1151. }
  1152. /* main layer3 decoding function */
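/* Side information is parsed first for every granule and channel; the scale
   factors and Huffman data are then read through the bit reservoir, which is
   assembled from last_buf plus the current frame payload (see the
   main_data_begin handling below). */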
  1153. static int mp_decode_layer3(MPADecodeContext *s)
  1154. {
  1155. int nb_granules, main_data_begin;
  1156. int gr, ch, blocksplit_flag, i, j, k, n, bits_pos;
  1157. GranuleDef *g;
  1158. int16_t exponents[576]; //FIXME try INTFLOAT
  1159. /* read side info */
  1160. if (s->lsf) {
  1161. main_data_begin = get_bits(&s->gb, 8);
  1162. skip_bits(&s->gb, s->nb_channels);
  1163. nb_granules = 1;
  1164. } else {
  1165. main_data_begin = get_bits(&s->gb, 9);
  1166. if (s->nb_channels == 2)
  1167. skip_bits(&s->gb, 3);
  1168. else
  1169. skip_bits(&s->gb, 5);
  1170. nb_granules = 2;
  1171. for (ch = 0; ch < s->nb_channels; ch++) {
  1172. s->granules[ch][0].scfsi = 0;/* all scale factors are transmitted */
  1173. s->granules[ch][1].scfsi = get_bits(&s->gb, 4);
  1174. }
  1175. }
  1176. for (gr = 0; gr < nb_granules; gr++) {
  1177. for (ch = 0; ch < s->nb_channels; ch++) {
  1178. av_dlog(s->avctx, "gr=%d ch=%d: side_info\n", gr, ch);
  1179. g = &s->granules[ch][gr];
  1180. g->part2_3_length = get_bits(&s->gb, 12);
  1181. g->big_values = get_bits(&s->gb, 9);
  1182. if (g->big_values > 288) {
  1183. av_log(s->avctx, AV_LOG_ERROR, "big_values too big\n");
  1184. return AVERROR_INVALIDDATA;
  1185. }
  1186. g->global_gain = get_bits(&s->gb, 8);
  1187. /* if MS stereo only is selected, we precompute the
  1188. 1/sqrt(2) renormalization factor */
  1189. if ((s->mode_ext & (MODE_EXT_MS_STEREO | MODE_EXT_I_STEREO)) ==
  1190. MODE_EXT_MS_STEREO)
  1191. g->global_gain -= 2;
  1192. if (s->lsf)
  1193. g->scalefac_compress = get_bits(&s->gb, 9);
  1194. else
  1195. g->scalefac_compress = get_bits(&s->gb, 4);
  1196. blocksplit_flag = get_bits1(&s->gb);
  1197. if (blocksplit_flag) {
  1198. g->block_type = get_bits(&s->gb, 2);
  1199. if (g->block_type == 0) {
  1200. av_log(s->avctx, AV_LOG_ERROR, "invalid block type\n");
  1201. return AVERROR_INVALIDDATA;
  1202. }
  1203. g->switch_point = get_bits1(&s->gb);
  1204. for (i = 0; i < 2; i++)
  1205. g->table_select[i] = get_bits(&s->gb, 5);
  1206. for (i = 0; i < 3; i++)
  1207. g->subblock_gain[i] = get_bits(&s->gb, 3);
  1208. init_short_region(s, g);
  1209. } else {
  1210. int region_address1, region_address2;
  1211. g->block_type = 0;
  1212. g->switch_point = 0;
  1213. for (i = 0; i < 3; i++)
  1214. g->table_select[i] = get_bits(&s->gb, 5);
  1215. /* compute huffman coded region sizes */
  1216. region_address1 = get_bits(&s->gb, 4);
  1217. region_address2 = get_bits(&s->gb, 3);
  1218. av_dlog(s->avctx, "region1=%d region2=%d\n",
  1219. region_address1, region_address2);
  1220. init_long_region(s, g, region_address1, region_address2);
  1221. }
  1222. region_offset2size(g);
  1223. compute_band_indexes(s, g);
  1224. g->preflag = 0;
  1225. if (!s->lsf)
  1226. g->preflag = get_bits1(&s->gb);
  1227. g->scalefac_scale = get_bits1(&s->gb);
  1228. g->count1table_select = get_bits1(&s->gb);
  1229. av_dlog(s->avctx, "block_type=%d switch_point=%d\n",
  1230. g->block_type, g->switch_point);
  1231. }
  1232. }
  1233. if (!s->adu_mode) {
  1234. int skip;
  1235. const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3);
  1236. int extrasize = av_clip(get_bits_left(&s->gb) >> 3, 0,
  1237. FFMAX(0, LAST_BUF_SIZE - s->last_buf_size));
  1238. assert((get_bits_count(&s->gb) & 7) == 0);
  1239. /* now we get bits from the main_data_begin offset */
  1240. av_dlog(s->avctx, "seekback:%d, lastbuf:%d\n",
  1241. main_data_begin, s->last_buf_size);
  1242. memcpy(s->last_buf + s->last_buf_size, ptr, extrasize);
  1243. s->in_gb = s->gb;
  1244. init_get_bits(&s->gb, s->last_buf, s->last_buf_size*8);
  1245. #if !UNCHECKED_BITSTREAM_READER
  1246. s->gb.size_in_bits_plus8 += extrasize * 8;
  1247. #endif
  1248. s->last_buf_size <<= 3;
  1249. for (gr = 0; gr < nb_granules && (s->last_buf_size >> 3) < main_data_begin; gr++) {
  1250. for (ch = 0; ch < s->nb_channels; ch++) {
  1251. g = &s->granules[ch][gr];
  1252. s->last_buf_size += g->part2_3_length;
  1253. memset(g->sb_hybrid, 0, sizeof(g->sb_hybrid));
  1254. compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
  1255. }
  1256. }
  1257. skip = s->last_buf_size - 8 * main_data_begin;
  1258. if (skip >= s->gb.size_in_bits && s->in_gb.buffer) {
  1259. skip_bits_long(&s->in_gb, skip - s->gb.size_in_bits);
  1260. s->gb = s->in_gb;
  1261. s->in_gb.buffer = NULL;
  1262. } else {
  1263. skip_bits_long(&s->gb, skip);
  1264. }
  1265. } else {
  1266. gr = 0;
  1267. }
  1268. for (; gr < nb_granules; gr++) {
  1269. for (ch = 0; ch < s->nb_channels; ch++) {
  1270. g = &s->granules[ch][gr];
  1271. bits_pos = get_bits_count(&s->gb);
  1272. if (!s->lsf) {
  1273. uint8_t *sc;
  1274. int slen, slen1, slen2;
  1275. /* MPEG1 scale factors */
  1276. slen1 = slen_table[0][g->scalefac_compress];
  1277. slen2 = slen_table[1][g->scalefac_compress];
  1278. av_dlog(s->avctx, "slen1=%d slen2=%d\n", slen1, slen2);
  1279. if (g->block_type == 2) {
  1280. n = g->switch_point ? 17 : 18;
  1281. j = 0;
  1282. if (slen1) {
  1283. for (i = 0; i < n; i++)
  1284. g->scale_factors[j++] = get_bits(&s->gb, slen1);
  1285. } else {
  1286. for (i = 0; i < n; i++)
  1287. g->scale_factors[j++] = 0;
  1288. }
  1289. if (slen2) {
  1290. for (i = 0; i < 18; i++)
  1291. g->scale_factors[j++] = get_bits(&s->gb, slen2);
  1292. for (i = 0; i < 3; i++)
  1293. g->scale_factors[j++] = 0;
  1294. } else {
  1295. for (i = 0; i < 21; i++)
  1296. g->scale_factors[j++] = 0;
  1297. }
  1298. } else {
  1299. sc = s->granules[ch][0].scale_factors;
  1300. j = 0;
  1301. for (k = 0; k < 4; k++) {
  1302. n = k == 0 ? 6 : 5;
  1303. if ((g->scfsi & (0x8 >> k)) == 0) {
  1304. slen = (k < 2) ? slen1 : slen2;
  1305. if (slen) {
  1306. for (i = 0; i < n; i++)
  1307. g->scale_factors[j++] = get_bits(&s->gb, slen);
  1308. } else {
  1309. for (i = 0; i < n; i++)
  1310. g->scale_factors[j++] = 0;
  1311. }
  1312. } else {
  1313. /* simply copy from last granule */
  1314. for (i = 0; i < n; i++) {
  1315. g->scale_factors[j] = sc[j];
  1316. j++;
  1317. }
  1318. }
  1319. }
  1320. g->scale_factors[j++] = 0;
  1321. }
  1322. } else {
  1323. int tindex, tindex2, slen[4], sl, sf;
  1324. /* LSF scale factors */
  1325. if (g->block_type == 2)
  1326. tindex = g->switch_point ? 2 : 1;
  1327. else
  1328. tindex = 0;
  1329. sf = g->scalefac_compress;
  1330. if ((s->mode_ext & MODE_EXT_I_STEREO) && ch == 1) {
  1331. /* intensity stereo case */
  1332. sf >>= 1;
  1333. if (sf < 180) {
  1334. lsf_sf_expand(slen, sf, 6, 6, 0);
  1335. tindex2 = 3;
  1336. } else if (sf < 244) {
  1337. lsf_sf_expand(slen, sf - 180, 4, 4, 0);
  1338. tindex2 = 4;
  1339. } else {
  1340. lsf_sf_expand(slen, sf - 244, 3, 0, 0);
  1341. tindex2 = 5;
  1342. }
  1343. } else {
  1344. /* normal case */
  1345. if (sf < 400) {
  1346. lsf_sf_expand(slen, sf, 5, 4, 4);
  1347. tindex2 = 0;
  1348. } else if (sf < 500) {
  1349. lsf_sf_expand(slen, sf - 400, 5, 4, 0);
  1350. tindex2 = 1;
  1351. } else {
  1352. lsf_sf_expand(slen, sf - 500, 3, 0, 0);
  1353. tindex2 = 2;
  1354. g->preflag = 1;
  1355. }
  1356. }
  1357. j = 0;
  1358. for (k = 0; k < 4; k++) {
  1359. n = lsf_nsf_table[tindex2][tindex][k];
  1360. sl = slen[k];
  1361. if (sl) {
  1362. for (i = 0; i < n; i++)
  1363. g->scale_factors[j++] = get_bits(&s->gb, sl);
  1364. } else {
  1365. for (i = 0; i < n; i++)
  1366. g->scale_factors[j++] = 0;
  1367. }
  1368. }
  1369. /* XXX: should compute exact size */
  1370. for (; j < 40; j++)
  1371. g->scale_factors[j] = 0;
  1372. }
  1373. exponents_from_scale_factors(s, g, exponents);
  1374. /* read Huffman coded residue */
  1375. huffman_decode(s, g, exponents, bits_pos + g->part2_3_length);
  1376. } /* ch */
  1377. if (s->mode == MPA_JSTEREO)
  1378. compute_stereo(s, &s->granules[0][gr], &s->granules[1][gr]);
  1379. for (ch = 0; ch < s->nb_channels; ch++) {
  1380. g = &s->granules[ch][gr];
  1381. reorder_block(s, g);
  1382. compute_antialias(s, g);
  1383. compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
  1384. }
  1385. } /* gr */
  1386. if (get_bits_count(&s->gb) < 0)
  1387. skip_bits_long(&s->gb, -get_bits_count(&s->gb));
  1388. return nb_granules * 18;
  1389. }
  1390. static int mp_decode_frame(MPADecodeContext *s, OUT_INT **samples,
  1391. const uint8_t *buf, int buf_size)
  1392. {
  1393. int i, nb_frames, ch, ret;
  1394. OUT_INT *samples_ptr;
  1395. init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE) * 8);
  1396. /* skip error protection field */
  1397. if (s->error_protection)
  1398. skip_bits(&s->gb, 16);
  1399. switch(s->layer) {
  1400. case 1:
  1401. s->avctx->frame_size = 384;
  1402. nb_frames = mp_decode_layer1(s);
  1403. break;
  1404. case 2:
  1405. s->avctx->frame_size = 1152;
  1406. nb_frames = mp_decode_layer2(s);
  1407. break;
  1408. case 3:
  1409. s->avctx->frame_size = s->lsf ? 576 : 1152;
  1410. default:
  1411. nb_frames = mp_decode_layer3(s);
  1412. if (nb_frames < 0)
  1413. return nb_frames;
  1414. s->last_buf_size=0;
  1415. if (s->in_gb.buffer) {
  1416. align_get_bits(&s->gb);
  1417. i = get_bits_left(&s->gb)>>3;
  1418. if (i >= 0 && i <= BACKSTEP_SIZE) {
  1419. memmove(s->last_buf, s->gb.buffer + (get_bits_count(&s->gb)>>3), i);
  1420. s->last_buf_size=i;
  1421. } else
  1422. av_log(s->avctx, AV_LOG_ERROR, "invalid old backstep %d\n", i);
  1423. s->gb = s->in_gb;
  1424. s->in_gb.buffer = NULL;
  1425. }
  1426. align_get_bits(&s->gb);
  1427. assert((get_bits_count(&s->gb) & 7) == 0);
  1428. i = get_bits_left(&s->gb) >> 3;
  1429. if (i < 0 || i > BACKSTEP_SIZE || nb_frames < 0) {
  1430. if (i < 0)
  1431. av_log(s->avctx, AV_LOG_ERROR, "invalid new backstep %d\n", i);
  1432. i = FFMIN(BACKSTEP_SIZE, buf_size - HEADER_SIZE);
  1433. }
  1434. assert(i <= buf_size - HEADER_SIZE && i >= 0);
  1435. memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i);
  1436. s->last_buf_size += i;
  1437. }
  1438. /* get output buffer */
  1439. if (!samples) {
  1440. av_assert0(s->frame != NULL);
  1441. s->frame->nb_samples = s->avctx->frame_size;
  1442. if ((ret = ff_get_buffer(s->avctx, s->frame, 0)) < 0) {
  1443. av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
  1444. return ret;
  1445. }
  1446. samples = (OUT_INT **)s->frame->extended_data;
  1447. }
  1448. /* apply the synthesis filter */
  1449. for (ch = 0; ch < s->nb_channels; ch++) {
  1450. int sample_stride;
  1451. if (s->avctx->sample_fmt == OUT_FMT_P) {
  1452. samples_ptr = samples[ch];
  1453. sample_stride = 1;
  1454. } else {
  1455. samples_ptr = samples[0] + ch;
  1456. sample_stride = s->nb_channels;
  1457. }
  1458. for (i = 0; i < nb_frames; i++) {
  1459. RENAME(ff_mpa_synth_filter)(&s->mpadsp, s->synth_buf[ch],
  1460. &(s->synth_buf_offset[ch]),
  1461. RENAME(ff_mpa_synth_window),
  1462. &s->dither_state, samples_ptr,
  1463. sample_stride, s->sb_samples[ch][i]);
  1464. samples_ptr += 32 * sample_stride;
  1465. }
  1466. }
  1467. return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels;
  1468. }
  1469. static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr,
  1470. AVPacket *avpkt)
  1471. {
  1472. const uint8_t *buf = avpkt->data;
  1473. int buf_size = avpkt->size;
  1474. MPADecodeContext *s = avctx->priv_data;
  1475. uint32_t header;
  1476. int ret;
  1477. if (buf_size < HEADER_SIZE)
  1478. return AVERROR_INVALIDDATA;
  1479. header = AV_RB32(buf);
  1480. if (ff_mpa_check_header(header) < 0) {
  1481. av_log(avctx, AV_LOG_ERROR, "Header missing\n");
  1482. return AVERROR_INVALIDDATA;
  1483. }
  1484. if (avpriv_mpegaudio_decode_header((MPADecodeHeader *)s, header) == 1) {
  1485. /* free format: prepare to compute frame size */
  1486. s->frame_size = -1;
  1487. return AVERROR_INVALIDDATA;
  1488. }
  1489. /* update codec info */
  1490. avctx->channels = s->nb_channels;
  1491. avctx->channel_layout = s->nb_channels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
  1492. if (!avctx->bit_rate)
  1493. avctx->bit_rate = s->bit_rate;
  1494. if (s->frame_size <= 0 || s->frame_size > buf_size) {
  1495. av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
  1496. return AVERROR_INVALIDDATA;
  1497. } else if (s->frame_size < buf_size) {
  1498. buf_size= s->frame_size;
  1499. }
  1500. s->frame = data;
  1501. ret = mp_decode_frame(s, NULL, buf, buf_size);
  1502. if (ret >= 0) {
  1503. s->frame->nb_samples = avctx->frame_size;
  1504. *got_frame_ptr = 1;
  1505. avctx->sample_rate = s->sample_rate;
  1506. //FIXME maybe move the other codec info stuff from above here too
  1507. } else {
  1508. av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n");
  1509. /* Only return an error if the bad frame makes up the whole packet or
  1510. * the error is related to buffer management.
  1511. * If there is more data in the packet, just consume the bad frame
  1512. * instead of returning an error, which would discard the whole
  1513. * packet. */
  1514. *got_frame_ptr = 0;
  1515. if (buf_size == avpkt->size || ret != AVERROR_INVALIDDATA)
  1516. return ret;
  1517. }
  1518. s->frame_size = 0;
  1519. return buf_size;
  1520. }
  1521. static void mp_flush(MPADecodeContext *ctx)
  1522. {
  1523. memset(ctx->synth_buf, 0, sizeof(ctx->synth_buf));
  1524. ctx->last_buf_size = 0;
  1525. }
  1526. static void flush(AVCodecContext *avctx)
  1527. {
  1528. mp_flush(avctx->priv_data);
  1529. }
  1530. #if CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER
  1531. static int decode_frame_adu(AVCodecContext *avctx, void *data,
  1532. int *got_frame_ptr, AVPacket *avpkt)
  1533. {
  1534. const uint8_t *buf = avpkt->data;
  1535. int buf_size = avpkt->size;
  1536. MPADecodeContext *s = avctx->priv_data;
  1537. uint32_t header;
  1538. int len, ret;
  1539. len = buf_size;
  1540. // Discard too short frames
  1541. if (buf_size < HEADER_SIZE) {
  1542. av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
  1543. return AVERROR_INVALIDDATA;
  1544. }
  1545. if (len > MPA_MAX_CODED_FRAME_SIZE)
  1546. len = MPA_MAX_CODED_FRAME_SIZE;
  1547. // Get header and restore sync word
  1548. header = AV_RB32(buf) | 0xffe00000;
  1549. if (ff_mpa_check_header(header) < 0) { // Bad header, discard frame
  1550. av_log(avctx, AV_LOG_ERROR, "Invalid frame header\n");
  1551. return AVERROR_INVALIDDATA;
  1552. }
  1553. avpriv_mpegaudio_decode_header((MPADecodeHeader *)s, header);
  1554. /* update codec info */
  1555. avctx->sample_rate = s->sample_rate;
  1556. avctx->channels = s->nb_channels;
  1557. avctx->channel_layout = s->nb_channels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
  1558. if (!avctx->bit_rate)
  1559. avctx->bit_rate = s->bit_rate;
  1560. s->frame_size = len;
  1561. s->frame = data;
  1562. ret = mp_decode_frame(s, NULL, buf, buf_size);
  1563. if (ret < 0) {
  1564. av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n");
  1565. return ret;
  1566. }
  1567. *got_frame_ptr = 1;
  1568. return buf_size;
  1569. }
  1570. #endif /* CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER */
  1571. #if CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER
  1572. /**
  1573. * Context for MP3On4 decoder
  1574. */
  1575. typedef struct MP3On4DecodeContext {
  1576. int frames; ///< number of mp3 frames per block (number of mp3 decoder instances)
  1577. int syncword; ///< syncword patch
  1578. const uint8_t *coff; ///< channel offsets in output buffer
  1579. MPADecodeContext *mp3decctx[5]; ///< MPADecodeContext for every decoder instance
  1580. } MP3On4DecodeContext;
  1581. #include "mpeg4audio.h"
  1582. /* Next 3 arrays are indexed by channel config number (passed via codecdata) */
  1583. /* number of mp3 decoder instances */
  1584. static const uint8_t mp3Frames[8] = { 0, 1, 1, 2, 3, 3, 4, 5 };
  1585. /* offsets into output buffer, assume output order is FL FR C LFE BL BR SL SR */
  1586. static const uint8_t chan_offset[8][5] = {
  1587. { 0 },
  1588. { 0 }, // C
  1589. { 0 }, // FLR
  1590. { 2, 0 }, // C FLR
  1591. { 2, 0, 3 }, // C FLR BS
  1592. { 2, 0, 3 }, // C FLR BLRS
  1593. { 2, 0, 4, 3 }, // C FLR BLRS LFE
  1594. { 2, 0, 6, 4, 3 }, // C FLR BLRS BLR LFE
  1595. };
  1596. /* mp3on4 channel layouts */
  1597. static const int16_t chan_layout[8] = {
  1598. 0,
  1599. AV_CH_LAYOUT_MONO,
  1600. AV_CH_LAYOUT_STEREO,
  1601. AV_CH_LAYOUT_SURROUND,
  1602. AV_CH_LAYOUT_4POINT0,
  1603. AV_CH_LAYOUT_5POINT0,
  1604. AV_CH_LAYOUT_5POINT1,
  1605. AV_CH_LAYOUT_7POINT1
  1606. };
  1607. static av_cold int decode_close_mp3on4(AVCodecContext * avctx)
  1608. {
  1609. MP3On4DecodeContext *s = avctx->priv_data;
  1610. int i;
  1611. for (i = 0; i < s->frames; i++)
  1612. av_free(s->mp3decctx[i]);
  1613. return 0;
  1614. }
  1615. static av_cold int decode_init_mp3on4(AVCodecContext * avctx)
  1616. {
  1617. MP3On4DecodeContext *s = avctx->priv_data;
  1618. MPEG4AudioConfig cfg;
  1619. int i;
  1620. if ((avctx->extradata_size < 2) || (avctx->extradata == NULL)) {
  1621. av_log(avctx, AV_LOG_ERROR, "Codec extradata missing or too short.\n");
  1622. return AVERROR_INVALIDDATA;
  1623. }
  1624. avpriv_mpeg4audio_get_config(&cfg, avctx->extradata,
  1625. avctx->extradata_size * 8, 1);
  1626. if (!cfg.chan_config || cfg.chan_config > 7) {
  1627. av_log(avctx, AV_LOG_ERROR, "Invalid channel config number.\n");
  1628. return AVERROR_INVALIDDATA;
  1629. }
  1630. s->frames = mp3Frames[cfg.chan_config];
  1631. s->coff = chan_offset[cfg.chan_config];
  1632. avctx->channels = ff_mpeg4audio_channels[cfg.chan_config];
  1633. avctx->channel_layout = chan_layout[cfg.chan_config];
  1634. if (cfg.sample_rate < 16000)
  1635. s->syncword = 0xffe00000;
  1636. else
  1637. s->syncword = 0xfff00000;
  1638. /* Initialize the first mp3 decoder in the standard way, so that all tables get built.
  1639. * We replace avctx->priv_data with the context of the first decoder so that
  1640. * decode_init() does not have to be changed.
  1641. * The other decoders are initialized here by copying data from the first context.
  1642. */
  1643. // Allocate zeroed memory for the first decoder context
  1644. s->mp3decctx[0] = av_mallocz(sizeof(MPADecodeContext));
  1645. if (!s->mp3decctx[0])
  1646. goto alloc_fail;
  1647. // Put the decoder context in place to make decode_init() happy
  1648. avctx->priv_data = s->mp3decctx[0];
  1649. decode_init(avctx);
  1650. // Restore mp3on4 context pointer
  1651. avctx->priv_data = s;
  1652. s->mp3decctx[0]->adu_mode = 1; // Set adu mode
  1653. /* Create a separate codec/context for each frame (the first is already set up).
  1654. * Each frame carries 1 or 2 channels; up to 5 frames are allowed.
  1655. */
  1656. for (i = 1; i < s->frames; i++) {
  1657. s->mp3decctx[i] = av_mallocz(sizeof(MPADecodeContext));
  1658. if (!s->mp3decctx[i])
  1659. goto alloc_fail;
  1660. s->mp3decctx[i]->adu_mode = 1;
  1661. s->mp3decctx[i]->avctx = avctx;
  1662. s->mp3decctx[i]->mpadsp = s->mp3decctx[0]->mpadsp;
  1663. }
  1664. return 0;
  1665. alloc_fail:
  1666. decode_close_mp3on4(avctx);
  1667. return AVERROR(ENOMEM);
  1668. }
  1669. static void flush_mp3on4(AVCodecContext *avctx)
  1670. {
  1671. int i;
  1672. MP3On4DecodeContext *s = avctx->priv_data;
  1673. for (i = 0; i < s->frames; i++)
  1674. mp_flush(s->mp3decctx[i]);
  1675. }
  1676. static int decode_frame_mp3on4(AVCodecContext *avctx, void *data,
  1677. int *got_frame_ptr, AVPacket *avpkt)
  1678. {
  1679. AVFrame *frame = data;
  1680. const uint8_t *buf = avpkt->data;
  1681. int buf_size = avpkt->size;
  1682. MP3On4DecodeContext *s = avctx->priv_data;
  1683. MPADecodeContext *m;
  1684. int fsize, len = buf_size, out_size = 0;
  1685. uint32_t header;
  1686. OUT_INT **out_samples;
  1687. OUT_INT *outptr[2];
  1688. int fr, ch, ret;
  1689. /* get output buffer */
  1690. frame->nb_samples = MPA_FRAME_SIZE;
  1691. if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
  1692. av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
  1693. return ret;
  1694. }
  1695. out_samples = (OUT_INT **)frame->extended_data;
  1696. // Discard too short frames
  1697. if (buf_size < HEADER_SIZE)
  1698. return AVERROR_INVALIDDATA;
  1699. avctx->bit_rate = 0;
  1700. ch = 0;
  1701. for (fr = 0; fr < s->frames; fr++) {
  1702. fsize = AV_RB16(buf) >> 4;
  1703. fsize = FFMIN3(fsize, len, MPA_MAX_CODED_FRAME_SIZE);
  1704. m = s->mp3decctx[fr];
  1705. assert(m != NULL);
  1706. if (fsize < HEADER_SIZE) {
  1707. av_log(avctx, AV_LOG_ERROR, "Frame size smaller than header size\n");
  1708. return AVERROR_INVALIDDATA;
  1709. }
  1710. header = (AV_RB32(buf) & 0x000fffff) | s->syncword; // patch header
  1711. if (ff_mpa_check_header(header) < 0) // Bad header, discard block
  1712. break;
  1713. avpriv_mpegaudio_decode_header((MPADecodeHeader *)m, header);
  1714. if (ch + m->nb_channels > avctx->channels ||
  1715. s->coff[fr] + m->nb_channels > avctx->channels) {
  1716. av_log(avctx, AV_LOG_ERROR, "frame channel count exceeds codec "
  1717. "channel count\n");
  1718. return AVERROR_INVALIDDATA;
  1719. }
  1720. ch += m->nb_channels;
  1721. outptr[0] = out_samples[s->coff[fr]];
  1722. if (m->nb_channels > 1)
  1723. outptr[1] = out_samples[s->coff[fr] + 1];
  1724. if ((ret = mp_decode_frame(m, outptr, buf, fsize)) < 0)
  1725. return ret;
  1726. out_size += ret;
  1727. buf += fsize;
  1728. len -= fsize;
  1729. avctx->bit_rate += m->bit_rate;
  1730. }
  1731. /* update codec info */
  1732. avctx->sample_rate = s->mp3decctx[0]->sample_rate;
  1733. frame->nb_samples = out_size / (avctx->channels * sizeof(OUT_INT));
  1734. *got_frame_ptr = 1;
  1735. return buf_size;
  1736. }
  1737. #endif /* CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER */