  1. /*
  2. * MPEG Audio decoder
  3. * Copyright (c) 2001, 2002 Fabrice Bellard
  4. *
  5. * This file is part of Libav.
  6. *
  7. * Libav is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * Libav is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with Libav; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * MPEG Audio decoder.
  24. */
  25. #include "libavutil/audioconvert.h"
  26. #include "avcodec.h"
  27. #include "get_bits.h"
  28. #include "dsputil.h"
  29. #include "mathops.h"
  30. /*
  31. * TODO:
  32. * - test lsf / mpeg25 extensively.
  33. */
  34. #include "mpegaudio.h"
  35. #include "mpegaudiodecheader.h"
  36. #if CONFIG_FLOAT
  37. # define SHR(a,b) ((a)*(1.0f/(1<<(b))))
  38. # define compute_antialias compute_antialias_float
  39. # define FIXR_OLD(a) ((int)((a) * FRAC_ONE + 0.5))
  40. # define FIXR(x) ((float)(x))
  41. # define FIXHR(x) ((float)(x))
  42. # define MULH3(x, y, s) ((s)*(y)*(x))
  43. # define MULLx(x, y, s) ((y)*(x))
  44. # define RENAME(a) a ## _float
  45. #else
  46. # define SHR(a,b) ((a)>>(b))
  47. # define compute_antialias compute_antialias_integer
  48. /* WARNING: only correct for positive numbers */
  49. # define FIXR_OLD(a) ((int)((a) * FRAC_ONE + 0.5))
  50. # define FIXR(a) ((int)((a) * FRAC_ONE + 0.5))
  51. # define FIXHR(a) ((int)((a) * (1LL<<32) + 0.5))
  52. # define MULH3(x, y, s) MULH((s)*(x), y)
  53. # define MULLx(x, y, s) MULL(x,y,s)
  54. # define RENAME(a) a
  55. #endif
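/* Explanatory note (added): the macros above abstract over the float and the
 * fixed-point builds.  In the fixed-point case, FIXR() stores a constant with
 * FRAC_BITS fractional bits (FRAC_ONE == 1 << FRAC_BITS), while FIXHR() stores
 * it scaled by 2^32 so that MULH(), which keeps only the high 32 bits of the
 * 64-bit product, effectively multiplies by the original constant.  In the
 * default C implementation MULLx(a, b, FRAC_BITS) is the matching
 * full-precision multiply, ((int64_t)a * b) >> FRAC_BITS, and SHR() is a plain
 * arithmetic right shift.  In the float build all of these collapse to float
 * arithmetic, so the same decoding code compiles either way. */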
  56. /****************/
  57. #define HEADER_SIZE 4
  58. #include "mpegaudiodata.h"
  59. #include "mpegaudiodectab.h"
  60. #if CONFIG_FLOAT
  61. # include "fft.h"
  62. #else
  63. # include "dct32.c"
  64. #endif
  65. static void compute_antialias(MPADecodeContext *s, GranuleDef *g);
  66. static void apply_window_mp3_c(MPA_INT *synth_buf, MPA_INT *window,
  67. int *dither_state, OUT_INT *samples, int incr);
  68. /* vlc structure for decoding layer 3 huffman tables */
  69. static VLC huff_vlc[16];
  70. static VLC_TYPE huff_vlc_tables[
  71. 0+128+128+128+130+128+154+166+
  72. 142+204+190+170+542+460+662+414
  73. ][2];
  74. static const int huff_vlc_tables_sizes[16] = {
  75. 0, 128, 128, 128, 130, 128, 154, 166,
  76. 142, 204, 190, 170, 542, 460, 662, 414
  77. };
  78. static VLC huff_quad_vlc[2];
  79. static VLC_TYPE huff_quad_vlc_tables[128+16][2];
  80. static const int huff_quad_vlc_tables_sizes[2] = {
  81. 128, 16
  82. };
  83. /* computed from band_size_long */
  84. static uint16_t band_index_long[9][23];
  85. #include "mpegaudio_tablegen.h"
  86. /* intensity stereo coef table */
  87. static INTFLOAT is_table[2][16];
  88. static INTFLOAT is_table_lsf[2][2][16];
  89. static int32_t csa_table[8][4];
  90. static float csa_table_float[8][4];
  91. static INTFLOAT mdct_win[8][36];
  92. static int16_t division_tab3[1<<6 ];
  93. static int16_t division_tab5[1<<8 ];
  94. static int16_t division_tab9[1<<11];
  95. static int16_t * const division_tabs[4] = {
  96. division_tab3, division_tab5, NULL, division_tab9
  97. };
  98. /* lower 2 bits: modulo 3, higher bits: shift */
  99. static uint16_t scale_factor_modshift[64];
  100. /* [i][j]: 2^(-j/3) * FRAC_ONE * 2^(i+2) / (2^(i+2) - 1) */
  101. static int32_t scale_factor_mult[15][3];
  102. /* mult table for layer 2 group quantization */
  103. #define SCALE_GEN(v) \
  104. { FIXR_OLD(1.0 * (v)), FIXR_OLD(0.7937005259 * (v)), FIXR_OLD(0.6299605249 * (v)) }
  105. static const int32_t scale_factor_mult2[3][3] = {
  106. SCALE_GEN(4.0 / 3.0), /* 3 steps */
  107. SCALE_GEN(4.0 / 5.0), /* 5 steps */
  108. SCALE_GEN(4.0 / 9.0), /* 9 steps */
  109. };
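/* Explanatory note (added): the three columns produced by SCALE_GEN() are the
 * value multiplied by 1, 2^(-1/3) (~0.7937005259) and 2^(-2/3) (~0.6299605249).
 * The 2^(-scale_factor/3) scaling is thus split into a power of two (applied
 * later as a shift) and one of these three fractional multipliers, selected by
 * scale_factor % 3; see l2_unscale_group() below. */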
  110. DECLARE_ALIGNED(16, MPA_INT, RENAME(ff_mpa_synth_window))[512+256];
  111. /**
  112. * Convert region offsets to region sizes and truncate
  113. * size to big_values.
  114. */
  115. static void ff_region_offset2size(GranuleDef *g){
  116. int i, k, j=0;
  117. g->region_size[2] = (576 / 2);
  118. for(i=0;i<3;i++) {
  119. k = FFMIN(g->region_size[i], g->big_values);
  120. g->region_size[i] = k - j;
  121. j = k;
  122. }
  123. }
  124. static void ff_init_short_region(MPADecodeContext *s, GranuleDef *g){
  125. if (g->block_type == 2)
  126. g->region_size[0] = (36 / 2);
  127. else {
  128. if (s->sample_rate_index <= 2)
  129. g->region_size[0] = (36 / 2);
  130. else if (s->sample_rate_index != 8)
  131. g->region_size[0] = (54 / 2);
  132. else
  133. g->region_size[0] = (108 / 2);
  134. }
  135. g->region_size[1] = (576 / 2);
  136. }
  137. static void ff_init_long_region(MPADecodeContext *s, GranuleDef *g, int ra1, int ra2){
  138. int l;
  139. g->region_size[0] =
  140. band_index_long[s->sample_rate_index][ra1 + 1] >> 1;
  141. /* should not overflow */
  142. l = FFMIN(ra1 + ra2 + 2, 22);
  143. g->region_size[1] =
  144. band_index_long[s->sample_rate_index][l] >> 1;
  145. }
  146. static void ff_compute_band_indexes(MPADecodeContext *s, GranuleDef *g){
  147. if (g->block_type == 2) {
  148. if (g->switch_point) {
  149. /* if switched mode, we handle the first 36 samples as
  150. long blocks. For 8000 Hz, we handle the first 48
  151. exponents as long blocks (XXX: check this!) */
  152. if (s->sample_rate_index <= 2)
  153. g->long_end = 8;
  154. else if (s->sample_rate_index != 8)
  155. g->long_end = 6;
  156. else
  157. g->long_end = 4; /* 8000 Hz */
  158. g->short_start = 2 + (s->sample_rate_index != 8);
  159. } else {
  160. g->long_end = 0;
  161. g->short_start = 0;
  162. }
  163. } else {
  164. g->short_start = 13;
  165. g->long_end = 22;
  166. }
  167. }
  168. /* layer 1 unscaling */
  169. /* n = number of bits of the mantissa minus 1 */
  170. static inline int l1_unscale(int n, int mant, int scale_factor)
  171. {
  172. int shift, mod;
  173. int64_t val;
  174. shift = scale_factor_modshift[scale_factor];
  175. mod = shift & 3;
  176. shift >>= 2;
  177. val = MUL64(mant + (-1 << n) + 1, scale_factor_mult[n-1][mod]);
  178. shift += n;
  179. /* NOTE: at this point, 1 <= shift <= 21 + 15 */
  180. return (int)((val + (1LL << (shift - 1))) >> shift);
  181. }
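/* Explanatory note (added): roughly speaking, l1_unscale() first recenters the
 * (n+1)-bit layer 1 code word around zero (mant + (-1 << n) + 1), then
 * multiplies it by a table entry that combines the layer 1 requantization gain
 * with the fractional part 2^(-(scale_factor % 3) / 3) of the scale factor;
 * the remaining power of two (scale_factor / 3, plus n to undo the mantissa
 * scaling) is applied as the final rounded right shift. */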
  182. static inline int l2_unscale_group(int steps, int mant, int scale_factor)
  183. {
  184. int shift, mod, val;
  185. shift = scale_factor_modshift[scale_factor];
  186. mod = shift & 3;
  187. shift >>= 2;
  188. val = (mant - (steps >> 1)) * scale_factor_mult2[steps >> 2][mod];
  189. /* NOTE: at this point, 0 <= shift <= 21 */
  190. if (shift > 0)
  191. val = (val + (1 << (shift - 1))) >> shift;
  192. return val;
  193. }
  194. /* compute value^(4/3) * 2^(exponent/4). The result is normalized to FRAC_BITS */
  195. static inline int l3_unscale(int value, int exponent)
  196. {
  197. unsigned int m;
  198. int e;
  199. e = table_4_3_exp [4*value + (exponent&3)];
  200. m = table_4_3_value[4*value + (exponent&3)];
  201. e -= (exponent >> 2);
  202. assert(e>=1);
  203. if (e > 31)
  204. return 0;
  205. m = (m + (1 << (e-1))) >> e;
  206. return m;
  207. }
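/* Explanatory note (added): table_4_3_value[] / table_4_3_exp[] (generated via
 * mpegaudio_tablegen.h, included above) store value^(4/3) * 2^((exponent & 3) / 4)
 * split into a mantissa and a binary exponent.  The remaining power of two,
 * exponent >> 2, is folded into the final rounded right shift, giving
 * value^(4/3) * 2^(exponent / 4) in FRAC_BITS fixed point. */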
  208. /* all integer n^(4/3) computation code */
  209. #define DEV_ORDER 13
  210. #define POW_FRAC_BITS 24
  211. #define POW_FRAC_ONE (1 << POW_FRAC_BITS)
  212. #define POW_FIX(a) ((int)((a) * POW_FRAC_ONE))
  213. #define POW_MULL(a,b) (((int64_t)(a) * (int64_t)(b)) >> POW_FRAC_BITS)
  214. static int dev_4_3_coefs[DEV_ORDER];
  215. static av_cold void int_pow_init(void)
  216. {
  217. int i, a;
  218. a = POW_FIX(1.0);
  219. for(i=0;i<DEV_ORDER;i++) {
  220. a = POW_MULL(a, POW_FIX(4.0 / 3.0) - i * POW_FIX(1.0)) / (i + 1);
  221. dev_4_3_coefs[i] = a;
  222. }
  223. }
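/* Explanatory note (added): the recurrence above computes the binomial series
 * coefficients of (1 + x)^(4/3): after iteration i, a equals
 * C(4/3, i + 1) = (4/3)(4/3 - 1)...(4/3 - i) / (i + 1)!, stored in
 * POW_FRAC_BITS fixed point in dev_4_3_coefs[i].  They serve the integer
 * n^(4/3) computation mentioned in the comment above. */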
  224. static av_cold int decode_init(AVCodecContext * avctx)
  225. {
  226. MPADecodeContext *s = avctx->priv_data;
  227. static int init=0;
  228. int i, j, k;
  229. s->avctx = avctx;
  230. s->apply_window_mp3 = apply_window_mp3_c;
  231. #if HAVE_MMX && CONFIG_FLOAT
  232. ff_mpegaudiodec_init_mmx(s);
  233. #endif
  234. #if CONFIG_FLOAT
  235. ff_dct_init(&s->dct, 5, DCT_II);
  236. #endif
  237. if (HAVE_ALTIVEC && CONFIG_FLOAT) ff_mpegaudiodec_init_altivec(s);
  238. avctx->sample_fmt= OUT_FMT;
  239. s->error_recognition= avctx->error_recognition;
  240. if (!init && !avctx->parse_only) {
  241. int offset;
  242. /* scale factors table for layer 1/2 */
  243. for(i=0;i<64;i++) {
  244. int shift, mod;
  245. /* 1.0 (i = 3) is normalized to 2 ^ FRAC_BITS */
  246. shift = (i / 3);
  247. mod = i % 3;
  248. scale_factor_modshift[i] = mod | (shift << 2);
  249. }
  250. /* scale factor multiply for layer 1 */
  251. for(i=0;i<15;i++) {
  252. int n, norm;
  253. n = i + 2;
  254. norm = ((INT64_C(1) << n) * FRAC_ONE) / ((1 << n) - 1);
  255. scale_factor_mult[i][0] = MULLx(norm, FIXR(1.0 * 2.0), FRAC_BITS);
  256. scale_factor_mult[i][1] = MULLx(norm, FIXR(0.7937005259 * 2.0), FRAC_BITS);
  257. scale_factor_mult[i][2] = MULLx(norm, FIXR(0.6299605249 * 2.0), FRAC_BITS);
  258. av_dlog(avctx, "%d: norm=%x s=%x %x %x\n",
  259. i, norm,
  260. scale_factor_mult[i][0],
  261. scale_factor_mult[i][1],
  262. scale_factor_mult[i][2]);
  263. }
  264. RENAME(ff_mpa_synth_init)(RENAME(ff_mpa_synth_window));
  265. /* huffman decode tables */
  266. offset = 0;
  267. for(i=1;i<16;i++) {
  268. const HuffTable *h = &mpa_huff_tables[i];
  269. int xsize, x, y;
  270. uint8_t tmp_bits [512];
  271. uint16_t tmp_codes[512];
  272. memset(tmp_bits , 0, sizeof(tmp_bits ));
  273. memset(tmp_codes, 0, sizeof(tmp_codes));
  274. xsize = h->xsize;
  275. j = 0;
  276. for(x=0;x<xsize;x++) {
  277. for(y=0;y<xsize;y++){
  278. tmp_bits [(x << 5) | y | ((x&&y)<<4)]= h->bits [j ];
  279. tmp_codes[(x << 5) | y | ((x&&y)<<4)]= h->codes[j++];
  280. }
  281. }
  282. /* XXX: fail test */
  283. huff_vlc[i].table = huff_vlc_tables+offset;
  284. huff_vlc[i].table_allocated = huff_vlc_tables_sizes[i];
  285. init_vlc(&huff_vlc[i], 7, 512,
  286. tmp_bits, 1, 1, tmp_codes, 2, 2,
  287. INIT_VLC_USE_NEW_STATIC);
  288. offset += huff_vlc_tables_sizes[i];
  289. }
  290. assert(offset == FF_ARRAY_ELEMS(huff_vlc_tables));
  291. offset = 0;
  292. for(i=0;i<2;i++) {
  293. huff_quad_vlc[i].table = huff_quad_vlc_tables+offset;
  294. huff_quad_vlc[i].table_allocated = huff_quad_vlc_tables_sizes[i];
  295. init_vlc(&huff_quad_vlc[i], i == 0 ? 7 : 4, 16,
  296. mpa_quad_bits[i], 1, 1, mpa_quad_codes[i], 1, 1,
  297. INIT_VLC_USE_NEW_STATIC);
  298. offset += huff_quad_vlc_tables_sizes[i];
  299. }
  300. assert(offset == FF_ARRAY_ELEMS(huff_quad_vlc_tables));
  301. for(i=0;i<9;i++) {
  302. k = 0;
  303. for(j=0;j<22;j++) {
  304. band_index_long[i][j] = k;
  305. k += band_size_long[i][j];
  306. }
  307. band_index_long[i][22] = k;
  308. }
  309. /* compute n ^ (4/3) and store it in mantissa/exp format */
  310. int_pow_init();
  311. mpegaudio_tableinit();
  312. for (i = 0; i < 4; i++)
  313. if (ff_mpa_quant_bits[i] < 0)
  314. for (j = 0; j < (1<<(-ff_mpa_quant_bits[i]+1)); j++) {
  315. int val1, val2, val3, steps;
  316. int val = j;
  317. steps = ff_mpa_quant_steps[i];
  318. val1 = val % steps;
  319. val /= steps;
  320. val2 = val % steps;
  321. val3 = val / steps;
  322. division_tabs[i][j] = val1 + (val2 << 4) + (val3 << 8);
  323. }
  324. for(i=0;i<7;i++) {
  325. float f;
  326. INTFLOAT v;
  327. if (i != 6) {
  328. f = tan((double)i * M_PI / 12.0);
  329. v = FIXR(f / (1.0 + f));
  330. } else {
  331. v = FIXR(1.0);
  332. }
  333. is_table[0][i] = v;
  334. is_table[1][6 - i] = v;
  335. }
  336. /* invalid values */
  337. for(i=7;i<16;i++)
  338. is_table[0][i] = is_table[1][i] = 0.0;
  339. for(i=0;i<16;i++) {
  340. double f;
  341. int e, k;
  342. for(j=0;j<2;j++) {
  343. e = -(j + 1) * ((i + 1) >> 1);
  344. f = pow(2.0, e / 4.0);
  345. k = i & 1;
  346. is_table_lsf[j][k ^ 1][i] = FIXR(f);
  347. is_table_lsf[j][k][i] = FIXR(1.0);
  348. av_dlog(avctx, "is_table_lsf %d %d: %x %x\n",
  349. i, j, is_table_lsf[j][0][i], is_table_lsf[j][1][i]);
  350. }
  351. }
  352. for(i=0;i<8;i++) {
  353. float ci, cs, ca;
  354. ci = ci_table[i];
  355. cs = 1.0 / sqrt(1.0 + ci * ci);
  356. ca = cs * ci;
  357. csa_table[i][0] = FIXHR(cs/4);
  358. csa_table[i][1] = FIXHR(ca/4);
  359. csa_table[i][2] = FIXHR(ca/4) + FIXHR(cs/4);
  360. csa_table[i][3] = FIXHR(ca/4) - FIXHR(cs/4);
  361. csa_table_float[i][0] = cs;
  362. csa_table_float[i][1] = ca;
  363. csa_table_float[i][2] = ca + cs;
  364. csa_table_float[i][3] = ca - cs;
  365. }
  366. /* compute mdct windows */
  367. for(i=0;i<36;i++) {
  368. for(j=0; j<4; j++){
  369. double d;
  370. if(j==2 && i%3 != 1)
  371. continue;
  372. d= sin(M_PI * (i + 0.5) / 36.0);
  373. if(j==1){
  374. if (i>=30) d= 0;
  375. else if(i>=24) d= sin(M_PI * (i - 18 + 0.5) / 12.0);
  376. else if(i>=18) d= 1;
  377. }else if(j==3){
  378. if (i< 6) d= 0;
  379. else if(i< 12) d= sin(M_PI * (i - 6 + 0.5) / 12.0);
  380. else if(i< 18) d= 1;
  381. }
  382. //merge last stage of imdct into the window coefficients
  383. d*= 0.5 / cos(M_PI*(2*i + 19)/72);
  384. if(j==2)
  385. mdct_win[j][i/3] = FIXHR((d / (1<<5)));
  386. else
  387. mdct_win[j][i ] = FIXHR((d / (1<<5)));
  388. }
  389. }
  390. /* NOTE: we do frequency inversion after the MDCT by changing
  391. the sign of the right window coefs */
  392. for(j=0;j<4;j++) {
  393. for(i=0;i<36;i+=2) {
  394. mdct_win[j + 4][i] = mdct_win[j][i];
  395. mdct_win[j + 4][i + 1] = -mdct_win[j][i + 1];
  396. }
  397. }
  398. init = 1;
  399. }
  400. if (avctx->codec_id == CODEC_ID_MP3ADU)
  401. s->adu_mode = 1;
  402. return 0;
  403. }
  404. #if CONFIG_FLOAT
  405. static inline float round_sample(float *sum)
  406. {
  407. float sum1=*sum;
  408. *sum = 0;
  409. return sum1;
  410. }
  411. /* signed 16x16 -> 32 multiply add accumulate */
  412. #define MACS(rt, ra, rb) rt+=(ra)*(rb)
  413. /* signed 16x16 -> 32 multiply */
  414. #define MULS(ra, rb) ((ra)*(rb))
  415. #define MLSS(rt, ra, rb) rt-=(ra)*(rb)
  416. #else
  417. static inline int round_sample(int64_t *sum)
  418. {
  419. int sum1;
  420. sum1 = (int)((*sum) >> OUT_SHIFT);
  421. *sum &= (1<<OUT_SHIFT)-1;
  422. return av_clip(sum1, OUT_MIN, OUT_MAX);
  423. }
  424. # define MULS(ra, rb) MUL64(ra, rb)
  425. # define MACS(rt, ra, rb) MAC64(rt, ra, rb)
  426. # define MLSS(rt, ra, rb) MLS64(rt, ra, rb)
  427. #endif
  428. #define SUM8(op, sum, w, p) \
  429. { \
  430. op(sum, (w)[0 * 64], (p)[0 * 64]); \
  431. op(sum, (w)[1 * 64], (p)[1 * 64]); \
  432. op(sum, (w)[2 * 64], (p)[2 * 64]); \
  433. op(sum, (w)[3 * 64], (p)[3 * 64]); \
  434. op(sum, (w)[4 * 64], (p)[4 * 64]); \
  435. op(sum, (w)[5 * 64], (p)[5 * 64]); \
  436. op(sum, (w)[6 * 64], (p)[6 * 64]); \
  437. op(sum, (w)[7 * 64], (p)[7 * 64]); \
  438. }
  439. #define SUM8P2(sum1, op1, sum2, op2, w1, w2, p) \
  440. { \
  441. INTFLOAT tmp;\
  442. tmp = p[0 * 64];\
  443. op1(sum1, (w1)[0 * 64], tmp);\
  444. op2(sum2, (w2)[0 * 64], tmp);\
  445. tmp = p[1 * 64];\
  446. op1(sum1, (w1)[1 * 64], tmp);\
  447. op2(sum2, (w2)[1 * 64], tmp);\
  448. tmp = p[2 * 64];\
  449. op1(sum1, (w1)[2 * 64], tmp);\
  450. op2(sum2, (w2)[2 * 64], tmp);\
  451. tmp = p[3 * 64];\
  452. op1(sum1, (w1)[3 * 64], tmp);\
  453. op2(sum2, (w2)[3 * 64], tmp);\
  454. tmp = p[4 * 64];\
  455. op1(sum1, (w1)[4 * 64], tmp);\
  456. op2(sum2, (w2)[4 * 64], tmp);\
  457. tmp = p[5 * 64];\
  458. op1(sum1, (w1)[5 * 64], tmp);\
  459. op2(sum2, (w2)[5 * 64], tmp);\
  460. tmp = p[6 * 64];\
  461. op1(sum1, (w1)[6 * 64], tmp);\
  462. op2(sum2, (w2)[6 * 64], tmp);\
  463. tmp = p[7 * 64];\
  464. op1(sum1, (w1)[7 * 64], tmp);\
  465. op2(sum2, (w2)[7 * 64], tmp);\
  466. }
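/* Explanatory note (added): SUM8() and SUM8P2() implement the dot products of
 * the 512-tap polyphase synthesis window.  Consecutive taps of one phase are
 * 64 entries apart in both the window and the synthesis buffer, hence the
 * "[k * 64]" indexing; SUM8P2() computes two such sums at once, sharing each
 * loaded sample between the two accumulators (see apply_window_mp3_c() below). */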
  467. void av_cold RENAME(ff_mpa_synth_init)(MPA_INT *window)
  468. {
  469. int i, j;
  470. /* max = 18760, max sum over all 16 coefs : 44736 */
  471. for(i=0;i<257;i++) {
  472. INTFLOAT v;
  473. v = ff_mpa_enwindow[i];
  474. #if CONFIG_FLOAT
  475. v *= 1.0 / (1LL<<(16 + FRAC_BITS));
  476. #endif
  477. window[i] = v;
  478. if ((i & 63) != 0)
  479. v = -v;
  480. if (i != 0)
  481. window[512 - i] = v;
  482. }
  483. // Needed for avoiding shuffles in ASM implementations
  484. for(i=0; i < 8; i++)
  485. for(j=0; j < 16; j++)
  486. window[512+16*i+j] = window[64*i+32-j];
  487. for(i=0; i < 8; i++)
  488. for(j=0; j < 16; j++)
  489. window[512+128+16*i+j] = window[64*i+48-j];
  490. }
  491. static void apply_window_mp3_c(MPA_INT *synth_buf, MPA_INT *window,
  492. int *dither_state, OUT_INT *samples, int incr)
  493. {
  494. register const MPA_INT *w, *w2, *p;
  495. int j;
  496. OUT_INT *samples2;
  497. #if CONFIG_FLOAT
  498. float sum, sum2;
  499. #else
  500. int64_t sum, sum2;
  501. #endif
  502. /* copy to avoid wrap */
  503. memcpy(synth_buf + 512, synth_buf, 32 * sizeof(*synth_buf));
  504. samples2 = samples + 31 * incr;
  505. w = window;
  506. w2 = window + 31;
  507. sum = *dither_state;
  508. p = synth_buf + 16;
  509. SUM8(MACS, sum, w, p);
  510. p = synth_buf + 48;
  511. SUM8(MLSS, sum, w + 32, p);
  512. *samples = round_sample(&sum);
  513. samples += incr;
  514. w++;
  515. /* we calculate two samples at the same time to avoid one memory
  516. access per two samples */
  517. for(j=1;j<16;j++) {
  518. sum2 = 0;
  519. p = synth_buf + 16 + j;
  520. SUM8P2(sum, MACS, sum2, MLSS, w, w2, p);
  521. p = synth_buf + 48 - j;
  522. SUM8P2(sum, MLSS, sum2, MLSS, w + 32, w2 + 32, p);
  523. *samples = round_sample(&sum);
  524. samples += incr;
  525. sum += sum2;
  526. *samples2 = round_sample(&sum);
  527. samples2 -= incr;
  528. w++;
  529. w2--;
  530. }
  531. p = synth_buf + 32;
  532. SUM8(MLSS, sum, w + 32, p);
  533. *samples = round_sample(&sum);
  534. *dither_state= sum;
  535. }
  536. /* 32 sub band synthesis filter. Input: 32 sub band samples, Output:
  537. 32 samples. */
  538. /* XXX: optimize by avoiding ring buffer usage */
  539. #if !CONFIG_FLOAT
  540. void ff_mpa_synth_filter(MPA_INT *synth_buf_ptr, int *synth_buf_offset,
  541. MPA_INT *window, int *dither_state,
  542. OUT_INT *samples, int incr,
  543. INTFLOAT sb_samples[SBLIMIT])
  544. {
  545. register MPA_INT *synth_buf;
  546. int offset;
  547. offset = *synth_buf_offset;
  548. synth_buf = synth_buf_ptr + offset;
  549. dct32(synth_buf, sb_samples);
  550. apply_window_mp3_c(synth_buf, window, dither_state, samples, incr);
  551. offset = (offset - 32) & 511;
  552. *synth_buf_offset = offset;
  553. }
  554. #endif
  555. #define C3 FIXHR(0.86602540378443864676/2)
  556. /* 0.5 / cos(pi*(2*i+1)/36) */
  557. static const INTFLOAT icos36[9] = {
  558. FIXR(0.50190991877167369479),
  559. FIXR(0.51763809020504152469), //0
  560. FIXR(0.55168895948124587824),
  561. FIXR(0.61038729438072803416),
  562. FIXR(0.70710678118654752439), //1
  563. FIXR(0.87172339781054900991),
  564. FIXR(1.18310079157624925896),
  565. FIXR(1.93185165257813657349), //2
  566. FIXR(5.73685662283492756461),
  567. };
  568. /* 0.5 / cos(pi*(2*i+1)/36) */
  569. static const INTFLOAT icos36h[9] = {
  570. FIXHR(0.50190991877167369479/2),
  571. FIXHR(0.51763809020504152469/2), //0
  572. FIXHR(0.55168895948124587824/2),
  573. FIXHR(0.61038729438072803416/2),
  574. FIXHR(0.70710678118654752439/2), //1
  575. FIXHR(0.87172339781054900991/2),
  576. FIXHR(1.18310079157624925896/4),
  577. FIXHR(1.93185165257813657349/4), //2
  578. // FIXHR(5.73685662283492756461),
  579. };
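/* Explanatory note (added): in the fixed-point build FIXHR() can only
 * represent constants with magnitude below 0.5 (the value is scaled by 2^32
 * into a 32-bit int), which is presumably why the two largest icos36h[]
 * entries are pre-divided by 4 instead of 2 and the last one is omitted; the
 * callers account for the different scaling through the shift argument of
 * MULH3() and the surrounding arithmetic. */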
  580. /* 12-point IMDCT. We compute it "by hand" by factorizing obvious
  581. cases. */
  582. static void imdct12(INTFLOAT *out, INTFLOAT *in)
  583. {
  584. INTFLOAT in0, in1, in2, in3, in4, in5, t1, t2;
  585. in0= in[0*3];
  586. in1= in[1*3] + in[0*3];
  587. in2= in[2*3] + in[1*3];
  588. in3= in[3*3] + in[2*3];
  589. in4= in[4*3] + in[3*3];
  590. in5= in[5*3] + in[4*3];
  591. in5 += in3;
  592. in3 += in1;
  593. in2= MULH3(in2, C3, 2);
  594. in3= MULH3(in3, C3, 4);
  595. t1 = in0 - in4;
  596. t2 = MULH3(in1 - in5, icos36h[4], 2);
  597. out[ 7]=
  598. out[10]= t1 + t2;
  599. out[ 1]=
  600. out[ 4]= t1 - t2;
  601. in0 += SHR(in4, 1);
  602. in4 = in0 + in2;
  603. in5 += 2*in1;
  604. in1 = MULH3(in5 + in3, icos36h[1], 1);
  605. out[ 8]=
  606. out[ 9]= in4 + in1;
  607. out[ 2]=
  608. out[ 3]= in4 - in1;
  609. in0 -= in2;
  610. in5 = MULH3(in5 - in3, icos36h[7], 2);
  611. out[ 0]=
  612. out[ 5]= in0 - in5;
  613. out[ 6]=
  614. out[11]= in0 + in5;
  615. }
  616. /* cos(pi*i/18) */
  617. #define C1 FIXHR(0.98480775301220805936/2)
  618. #define C2 FIXHR(0.93969262078590838405/2)
  619. #define C3 FIXHR(0.86602540378443864676/2)
  620. #define C4 FIXHR(0.76604444311897803520/2)
  621. #define C5 FIXHR(0.64278760968653932632/2)
  622. #define C6 FIXHR(0.5/2)
  623. #define C7 FIXHR(0.34202014332566873304/2)
  624. #define C8 FIXHR(0.17364817766693034885/2)
  625. /* using a Lee-like decomposition followed by a hand-coded 9-point DCT */
  626. static void imdct36(INTFLOAT *out, INTFLOAT *buf, INTFLOAT *in, INTFLOAT *win)
  627. {
  628. int i, j;
  629. INTFLOAT t0, t1, t2, t3, s0, s1, s2, s3;
  630. INTFLOAT tmp[18], *tmp1, *in1;
  631. for(i=17;i>=1;i--)
  632. in[i] += in[i-1];
  633. for(i=17;i>=3;i-=2)
  634. in[i] += in[i-2];
  635. for(j=0;j<2;j++) {
  636. tmp1 = tmp + j;
  637. in1 = in + j;
  638. t2 = in1[2*4] + in1[2*8] - in1[2*2];
  639. t3 = in1[2*0] + SHR(in1[2*6],1);
  640. t1 = in1[2*0] - in1[2*6];
  641. tmp1[ 6] = t1 - SHR(t2,1);
  642. tmp1[16] = t1 + t2;
  643. t0 = MULH3(in1[2*2] + in1[2*4] , C2, 2);
  644. t1 = MULH3(in1[2*4] - in1[2*8] , -2*C8, 1);
  645. t2 = MULH3(in1[2*2] + in1[2*8] , -C4, 2);
  646. tmp1[10] = t3 - t0 - t2;
  647. tmp1[ 2] = t3 + t0 + t1;
  648. tmp1[14] = t3 + t2 - t1;
  649. tmp1[ 4] = MULH3(in1[2*5] + in1[2*7] - in1[2*1], -C3, 2);
  650. t2 = MULH3(in1[2*1] + in1[2*5], C1, 2);
  651. t3 = MULH3(in1[2*5] - in1[2*7], -2*C7, 1);
  652. t0 = MULH3(in1[2*3], C3, 2);
  653. t1 = MULH3(in1[2*1] + in1[2*7], -C5, 2);
  654. tmp1[ 0] = t2 + t3 + t0;
  655. tmp1[12] = t2 + t1 - t0;
  656. tmp1[ 8] = t3 - t1 - t0;
  657. }
  658. i = 0;
  659. for(j=0;j<4;j++) {
  660. t0 = tmp[i];
  661. t1 = tmp[i + 2];
  662. s0 = t1 + t0;
  663. s2 = t1 - t0;
  664. t2 = tmp[i + 1];
  665. t3 = tmp[i + 3];
  666. s1 = MULH3(t3 + t2, icos36h[j], 2);
  667. s3 = MULLx(t3 - t2, icos36[8 - j], FRAC_BITS);
  668. t0 = s0 + s1;
  669. t1 = s0 - s1;
  670. out[(9 + j)*SBLIMIT] = MULH3(t1, win[9 + j], 1) + buf[9 + j];
  671. out[(8 - j)*SBLIMIT] = MULH3(t1, win[8 - j], 1) + buf[8 - j];
  672. buf[9 + j] = MULH3(t0, win[18 + 9 + j], 1);
  673. buf[8 - j] = MULH3(t0, win[18 + 8 - j], 1);
  674. t0 = s2 + s3;
  675. t1 = s2 - s3;
  676. out[(9 + 8 - j)*SBLIMIT] = MULH3(t1, win[9 + 8 - j], 1) + buf[9 + 8 - j];
  677. out[( j)*SBLIMIT] = MULH3(t1, win[ j], 1) + buf[ j];
  678. buf[9 + 8 - j] = MULH3(t0, win[18 + 9 + 8 - j], 1);
  679. buf[ + j] = MULH3(t0, win[18 + j], 1);
  680. i += 4;
  681. }
  682. s0 = tmp[16];
  683. s1 = MULH3(tmp[17], icos36h[4], 2);
  684. t0 = s0 + s1;
  685. t1 = s0 - s1;
  686. out[(9 + 4)*SBLIMIT] = MULH3(t1, win[9 + 4], 1) + buf[9 + 4];
  687. out[(8 - 4)*SBLIMIT] = MULH3(t1, win[8 - 4], 1) + buf[8 - 4];
  688. buf[9 + 4] = MULH3(t0, win[18 + 9 + 4], 1);
  689. buf[8 - 4] = MULH3(t0, win[18 + 8 - 4], 1);
  690. }
  691. /* return the number of decoded frames */
  692. static int mp_decode_layer1(MPADecodeContext *s)
  693. {
  694. int bound, i, v, n, ch, j, mant;
  695. uint8_t allocation[MPA_MAX_CHANNELS][SBLIMIT];
  696. uint8_t scale_factors[MPA_MAX_CHANNELS][SBLIMIT];
  697. if (s->mode == MPA_JSTEREO)
  698. bound = (s->mode_ext + 1) * 4;
  699. else
  700. bound = SBLIMIT;
  701. /* allocation bits */
  702. for(i=0;i<bound;i++) {
  703. for(ch=0;ch<s->nb_channels;ch++) {
  704. allocation[ch][i] = get_bits(&s->gb, 4);
  705. }
  706. }
  707. for(i=bound;i<SBLIMIT;i++) {
  708. allocation[0][i] = get_bits(&s->gb, 4);
  709. }
  710. /* scale factors */
  711. for(i=0;i<bound;i++) {
  712. for(ch=0;ch<s->nb_channels;ch++) {
  713. if (allocation[ch][i])
  714. scale_factors[ch][i] = get_bits(&s->gb, 6);
  715. }
  716. }
  717. for(i=bound;i<SBLIMIT;i++) {
  718. if (allocation[0][i]) {
  719. scale_factors[0][i] = get_bits(&s->gb, 6);
  720. scale_factors[1][i] = get_bits(&s->gb, 6);
  721. }
  722. }
  723. /* compute samples */
  724. for(j=0;j<12;j++) {
  725. for(i=0;i<bound;i++) {
  726. for(ch=0;ch<s->nb_channels;ch++) {
  727. n = allocation[ch][i];
  728. if (n) {
  729. mant = get_bits(&s->gb, n + 1);
  730. v = l1_unscale(n, mant, scale_factors[ch][i]);
  731. } else {
  732. v = 0;
  733. }
  734. s->sb_samples[ch][j][i] = v;
  735. }
  736. }
  737. for(i=bound;i<SBLIMIT;i++) {
  738. n = allocation[0][i];
  739. if (n) {
  740. mant = get_bits(&s->gb, n + 1);
  741. v = l1_unscale(n, mant, scale_factors[0][i]);
  742. s->sb_samples[0][j][i] = v;
  743. v = l1_unscale(n, mant, scale_factors[1][i]);
  744. s->sb_samples[1][j][i] = v;
  745. } else {
  746. s->sb_samples[0][j][i] = 0;
  747. s->sb_samples[1][j][i] = 0;
  748. }
  749. }
  750. }
  751. return 12;
  752. }
  753. static int mp_decode_layer2(MPADecodeContext *s)
  754. {
  755. int sblimit; /* number of used subbands */
  756. const unsigned char *alloc_table;
  757. int table, bit_alloc_bits, i, j, ch, bound, v;
  758. unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT];
  759. unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT];
  760. unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3], *sf;
  761. int scale, qindex, bits, steps, k, l, m, b;
  762. /* select decoding table */
  763. table = ff_mpa_l2_select_table(s->bit_rate / 1000, s->nb_channels,
  764. s->sample_rate, s->lsf);
  765. sblimit = ff_mpa_sblimit_table[table];
  766. alloc_table = ff_mpa_alloc_tables[table];
  767. if (s->mode == MPA_JSTEREO)
  768. bound = (s->mode_ext + 1) * 4;
  769. else
  770. bound = sblimit;
  771. av_dlog(s->avctx, "bound=%d sblimit=%d\n", bound, sblimit);
  772. /* sanity check */
  773. if( bound > sblimit ) bound = sblimit;
  774. /* parse bit allocation */
  775. j = 0;
  776. for(i=0;i<bound;i++) {
  777. bit_alloc_bits = alloc_table[j];
  778. for(ch=0;ch<s->nb_channels;ch++) {
  779. bit_alloc[ch][i] = get_bits(&s->gb, bit_alloc_bits);
  780. }
  781. j += 1 << bit_alloc_bits;
  782. }
  783. for(i=bound;i<sblimit;i++) {
  784. bit_alloc_bits = alloc_table[j];
  785. v = get_bits(&s->gb, bit_alloc_bits);
  786. bit_alloc[0][i] = v;
  787. bit_alloc[1][i] = v;
  788. j += 1 << bit_alloc_bits;
  789. }
  790. /* scale codes */
  791. for(i=0;i<sblimit;i++) {
  792. for(ch=0;ch<s->nb_channels;ch++) {
  793. if (bit_alloc[ch][i])
  794. scale_code[ch][i] = get_bits(&s->gb, 2);
  795. }
  796. }
  797. /* scale factors */
  798. for(i=0;i<sblimit;i++) {
  799. for(ch=0;ch<s->nb_channels;ch++) {
  800. if (bit_alloc[ch][i]) {
  801. sf = scale_factors[ch][i];
  802. switch(scale_code[ch][i]) {
  803. default:
  804. case 0:
  805. sf[0] = get_bits(&s->gb, 6);
  806. sf[1] = get_bits(&s->gb, 6);
  807. sf[2] = get_bits(&s->gb, 6);
  808. break;
  809. case 2:
  810. sf[0] = get_bits(&s->gb, 6);
  811. sf[1] = sf[0];
  812. sf[2] = sf[0];
  813. break;
  814. case 1:
  815. sf[0] = get_bits(&s->gb, 6);
  816. sf[2] = get_bits(&s->gb, 6);
  817. sf[1] = sf[0];
  818. break;
  819. case 3:
  820. sf[0] = get_bits(&s->gb, 6);
  821. sf[2] = get_bits(&s->gb, 6);
  822. sf[1] = sf[2];
  823. break;
  824. }
  825. }
  826. }
  827. }
  828. /* samples */
  829. for(k=0;k<3;k++) {
  830. for(l=0;l<12;l+=3) {
  831. j = 0;
  832. for(i=0;i<bound;i++) {
  833. bit_alloc_bits = alloc_table[j];
  834. for(ch=0;ch<s->nb_channels;ch++) {
  835. b = bit_alloc[ch][i];
  836. if (b) {
  837. scale = scale_factors[ch][i][k];
  838. qindex = alloc_table[j+b];
  839. bits = ff_mpa_quant_bits[qindex];
  840. if (bits < 0) {
  841. int v2;
  842. /* 3 values at the same time */
  843. v = get_bits(&s->gb, -bits);
  844. v2 = division_tabs[qindex][v];
  845. steps = ff_mpa_quant_steps[qindex];
  846. s->sb_samples[ch][k * 12 + l + 0][i] =
  847. l2_unscale_group(steps, v2 & 15, scale);
  848. s->sb_samples[ch][k * 12 + l + 1][i] =
  849. l2_unscale_group(steps, (v2 >> 4) & 15, scale);
  850. s->sb_samples[ch][k * 12 + l + 2][i] =
  851. l2_unscale_group(steps, v2 >> 8 , scale);
  852. } else {
  853. for(m=0;m<3;m++) {
  854. v = get_bits(&s->gb, bits);
  855. v = l1_unscale(bits - 1, v, scale);
  856. s->sb_samples[ch][k * 12 + l + m][i] = v;
  857. }
  858. }
  859. } else {
  860. s->sb_samples[ch][k * 12 + l + 0][i] = 0;
  861. s->sb_samples[ch][k * 12 + l + 1][i] = 0;
  862. s->sb_samples[ch][k * 12 + l + 2][i] = 0;
  863. }
  864. }
  865. /* next subband in alloc table */
  866. j += 1 << bit_alloc_bits;
  867. }
  868. /* XXX: find a way to avoid this duplication of code */
  869. for(i=bound;i<sblimit;i++) {
  870. bit_alloc_bits = alloc_table[j];
  871. b = bit_alloc[0][i];
  872. if (b) {
  873. int mant, scale0, scale1;
  874. scale0 = scale_factors[0][i][k];
  875. scale1 = scale_factors[1][i][k];
  876. qindex = alloc_table[j+b];
  877. bits = ff_mpa_quant_bits[qindex];
  878. if (bits < 0) {
  879. /* 3 values at the same time */
  880. v = get_bits(&s->gb, -bits);
  881. steps = ff_mpa_quant_steps[qindex];
  882. mant = v % steps;
  883. v = v / steps;
  884. s->sb_samples[0][k * 12 + l + 0][i] =
  885. l2_unscale_group(steps, mant, scale0);
  886. s->sb_samples[1][k * 12 + l + 0][i] =
  887. l2_unscale_group(steps, mant, scale1);
  888. mant = v % steps;
  889. v = v / steps;
  890. s->sb_samples[0][k * 12 + l + 1][i] =
  891. l2_unscale_group(steps, mant, scale0);
  892. s->sb_samples[1][k * 12 + l + 1][i] =
  893. l2_unscale_group(steps, mant, scale1);
  894. s->sb_samples[0][k * 12 + l + 2][i] =
  895. l2_unscale_group(steps, v, scale0);
  896. s->sb_samples[1][k * 12 + l + 2][i] =
  897. l2_unscale_group(steps, v, scale1);
  898. } else {
  899. for(m=0;m<3;m++) {
  900. mant = get_bits(&s->gb, bits);
  901. s->sb_samples[0][k * 12 + l + m][i] =
  902. l1_unscale(bits - 1, mant, scale0);
  903. s->sb_samples[1][k * 12 + l + m][i] =
  904. l1_unscale(bits - 1, mant, scale1);
  905. }
  906. }
  907. } else {
  908. s->sb_samples[0][k * 12 + l + 0][i] = 0;
  909. s->sb_samples[0][k * 12 + l + 1][i] = 0;
  910. s->sb_samples[0][k * 12 + l + 2][i] = 0;
  911. s->sb_samples[1][k * 12 + l + 0][i] = 0;
  912. s->sb_samples[1][k * 12 + l + 1][i] = 0;
  913. s->sb_samples[1][k * 12 + l + 2][i] = 0;
  914. }
  915. /* next subband in alloc table */
  916. j += 1 << bit_alloc_bits;
  917. }
  918. /* fill remaining samples to zero */
  919. for(i=sblimit;i<SBLIMIT;i++) {
  920. for(ch=0;ch<s->nb_channels;ch++) {
  921. s->sb_samples[ch][k * 12 + l + 0][i] = 0;
  922. s->sb_samples[ch][k * 12 + l + 1][i] = 0;
  923. s->sb_samples[ch][k * 12 + l + 2][i] = 0;
  924. }
  925. }
  926. }
  927. }
  928. return 3 * 12;
  929. }
  930. #define SPLIT(dst,sf,n)\
  931. if(n==3){\
  932. int m= (sf*171)>>9;\
  933. dst= sf - 3*m;\
  934. sf=m;\
  935. }else if(n==4){\
  936. dst= sf&3;\
  937. sf>>=2;\
  938. }else if(n==5){\
  939. int m= (sf*205)>>10;\
  940. dst= sf - 5*m;\
  941. sf=m;\
  942. }else if(n==6){\
  943. int m= (sf*171)>>10;\
  944. dst= sf - 6*m;\
  945. sf=m;\
  946. }else{\
  947. dst=0;\
  948. }
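/* Explanatory note (added): SPLIT() peels off the least significant base-n
 * digit of a composite scale factor value.  The divisions by 3, 5 and 6 are
 * done with multiply/shift approximations ((sf*171)>>9 ~ sf/3,
 * (sf*205)>>10 ~ sf/5, (sf*171)>>10 ~ sf/6), which are exact for the small
 * scalefac_compress values that occur here; dst then receives the remainder
 * sf - n*m, and sf is replaced by the quotient. */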
  949. static av_always_inline void lsf_sf_expand(int *slen,
  950. int sf, int n1, int n2, int n3)
  951. {
  952. SPLIT(slen[3], sf, n3)
  953. SPLIT(slen[2], sf, n2)
  954. SPLIT(slen[1], sf, n1)
  955. slen[0] = sf;
  956. }
  957. static void exponents_from_scale_factors(MPADecodeContext *s,
  958. GranuleDef *g,
  959. int16_t *exponents)
  960. {
  961. const uint8_t *bstab, *pretab;
  962. int len, i, j, k, l, v0, shift, gain, gains[3];
  963. int16_t *exp_ptr;
  964. exp_ptr = exponents;
  965. gain = g->global_gain - 210;
  966. shift = g->scalefac_scale + 1;
  967. bstab = band_size_long[s->sample_rate_index];
  968. pretab = mpa_pretab[g->preflag];
  969. for(i=0;i<g->long_end;i++) {
  970. v0 = gain - ((g->scale_factors[i] + pretab[i]) << shift) + 400;
  971. len = bstab[i];
  972. for(j=len;j>0;j--)
  973. *exp_ptr++ = v0;
  974. }
  975. if (g->short_start < 13) {
  976. bstab = band_size_short[s->sample_rate_index];
  977. gains[0] = gain - (g->subblock_gain[0] << 3);
  978. gains[1] = gain - (g->subblock_gain[1] << 3);
  979. gains[2] = gain - (g->subblock_gain[2] << 3);
  980. k = g->long_end;
  981. for(i=g->short_start;i<13;i++) {
  982. len = bstab[i];
  983. for(l=0;l<3;l++) {
  984. v0 = gains[l] - (g->scale_factors[k++] << shift) + 400;
  985. for(j=len;j>0;j--)
  986. *exp_ptr++ = v0;
  987. }
  988. }
  989. }
  990. }
  991. /* handle n = 0 too */
  992. static inline int get_bitsz(GetBitContext *s, int n)
  993. {
  994. if (n == 0)
  995. return 0;
  996. else
  997. return get_bits(s, n);
  998. }
  999. static void switch_buffer(MPADecodeContext *s, int *pos, int *end_pos, int *end_pos2){
  1000. if(s->in_gb.buffer && *pos >= s->gb.size_in_bits){
  1001. s->gb= s->in_gb;
  1002. s->in_gb.buffer=NULL;
  1003. assert((get_bits_count(&s->gb) & 7) == 0);
  1004. skip_bits_long(&s->gb, *pos - *end_pos);
  1005. *end_pos2=
  1006. *end_pos= *end_pos2 + get_bits_count(&s->gb) - *pos;
  1007. *pos= get_bits_count(&s->gb);
  1008. }
  1009. }
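/* Explanatory note (added): in layer 3 the granule data is normally read from
 * the bit reservoir copied into s->last_buf, with the current frame's
 * bitstream saved in s->in_gb (see mp_decode_layer3()).  switch_buffer() is
 * called when the reader runs past the end of that reservoir: it switches
 * s->gb back to the saved bitstream, skips to the corresponding position and
 * adjusts end_pos / end_pos2 so the part2_3_length accounting stays
 * consistent. */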
  1010. /* The following is optimized code for
  1011. INTFLOAT v = *src
  1012. if(get_bits1(&s->gb))
  1013. v = -v;
  1014. *dst = v;
  1015. */
  1016. #if CONFIG_FLOAT
  1017. #define READ_FLIP_SIGN(dst,src)\
  1018. v = AV_RN32A(src) ^ (get_bits1(&s->gb)<<31);\
  1019. AV_WN32A(dst, v);
  1020. #else
  1021. #define READ_FLIP_SIGN(dst,src)\
  1022. v= -get_bits1(&s->gb);\
  1023. *(dst) = (*(src) ^ v) - v;
  1024. #endif
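/* Explanatory note (added): both variants above implement a branch-free
 * conditional negation.  The float version XORs the sign bit read from the
 * bitstream into bit 31 of the IEEE-754 representation; the integer version
 * uses v = -get_bits1() (either 0 or ~0) with the two's-complement identity
 * (x ^ v) - v, which yields x when v == 0 and -x when v == ~0. */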
  1025. static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
  1026. int16_t *exponents, int end_pos2)
  1027. {
  1028. int s_index;
  1029. int i;
  1030. int last_pos, bits_left;
  1031. VLC *vlc;
  1032. int end_pos= FFMIN(end_pos2, s->gb.size_in_bits);
  1033. /* low frequencies (called big values) */
  1034. s_index = 0;
  1035. for(i=0;i<3;i++) {
  1036. int j, k, l, linbits;
  1037. j = g->region_size[i];
  1038. if (j == 0)
  1039. continue;
  1040. /* select vlc table */
  1041. k = g->table_select[i];
  1042. l = mpa_huff_data[k][0];
  1043. linbits = mpa_huff_data[k][1];
  1044. vlc = &huff_vlc[l];
  1045. if(!l){
  1046. memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*2*j);
  1047. s_index += 2*j;
  1048. continue;
  1049. }
  1050. /* read huffcode and compute each couple */
  1051. for(;j>0;j--) {
  1052. int exponent, x, y;
  1053. int v;
  1054. int pos= get_bits_count(&s->gb);
  1055. if (pos >= end_pos){
  1056. // av_log(NULL, AV_LOG_ERROR, "pos: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
  1057. switch_buffer(s, &pos, &end_pos, &end_pos2);
  1058. // av_log(NULL, AV_LOG_ERROR, "new pos: %d %d\n", pos, end_pos);
  1059. if(pos >= end_pos)
  1060. break;
  1061. }
  1062. y = get_vlc2(&s->gb, vlc->table, 7, 3);
  1063. if(!y){
  1064. g->sb_hybrid[s_index ] =
  1065. g->sb_hybrid[s_index+1] = 0;
  1066. s_index += 2;
  1067. continue;
  1068. }
  1069. exponent= exponents[s_index];
  1070. av_dlog(s->avctx, "region=%d n=%d x=%d y=%d exp=%d\n",
  1071. i, g->region_size[i] - j, x, y, exponent);
  1072. if(y&16){
  1073. x = y >> 5;
  1074. y = y & 0x0f;
  1075. if (x < 15){
  1076. READ_FLIP_SIGN(g->sb_hybrid+s_index, RENAME(expval_table)[ exponent ]+x)
  1077. }else{
  1078. x += get_bitsz(&s->gb, linbits);
  1079. v = l3_unscale(x, exponent);
  1080. if (get_bits1(&s->gb))
  1081. v = -v;
  1082. g->sb_hybrid[s_index] = v;
  1083. }
  1084. if (y < 15){
  1085. READ_FLIP_SIGN(g->sb_hybrid+s_index+1, RENAME(expval_table)[ exponent ]+y)
  1086. }else{
  1087. y += get_bitsz(&s->gb, linbits);
  1088. v = l3_unscale(y, exponent);
  1089. if (get_bits1(&s->gb))
  1090. v = -v;
  1091. g->sb_hybrid[s_index+1] = v;
  1092. }
  1093. }else{
  1094. x = y >> 5;
  1095. y = y & 0x0f;
  1096. x += y;
  1097. if (x < 15){
  1098. READ_FLIP_SIGN(g->sb_hybrid+s_index+!!y, RENAME(expval_table)[ exponent ]+x)
  1099. }else{
  1100. x += get_bitsz(&s->gb, linbits);
  1101. v = l3_unscale(x, exponent);
  1102. if (get_bits1(&s->gb))
  1103. v = -v;
  1104. g->sb_hybrid[s_index+!!y] = v;
  1105. }
  1106. g->sb_hybrid[s_index+ !y] = 0;
  1107. }
  1108. s_index+=2;
  1109. }
  1110. }
  1111. /* high frequencies */
  1112. vlc = &huff_quad_vlc[g->count1table_select];
  1113. last_pos=0;
  1114. while (s_index <= 572) {
  1115. int pos, code;
  1116. pos = get_bits_count(&s->gb);
  1117. if (pos >= end_pos) {
  1118. if (pos > end_pos2 && last_pos){
  1119. /* some encoders generate an incorrect size for this
  1120. part. We must go back into the data */
  1121. s_index -= 4;
  1122. skip_bits_long(&s->gb, last_pos - pos);
  1123. av_log(s->avctx, AV_LOG_INFO, "overread, skip %d enddists: %d %d\n", last_pos - pos, end_pos-pos, end_pos2-pos);
  1124. if(s->error_recognition >= FF_ER_COMPLIANT)
  1125. s_index=0;
  1126. break;
  1127. }
  1128. // av_log(NULL, AV_LOG_ERROR, "pos2: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
  1129. switch_buffer(s, &pos, &end_pos, &end_pos2);
  1130. // av_log(NULL, AV_LOG_ERROR, "new pos2: %d %d %d\n", pos, end_pos, s_index);
  1131. if(pos >= end_pos)
  1132. break;
  1133. }
  1134. last_pos= pos;
  1135. code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1);
  1136. av_dlog(s->avctx, "t=%d code=%d\n", g->count1table_select, code);
  1137. g->sb_hybrid[s_index+0]=
  1138. g->sb_hybrid[s_index+1]=
  1139. g->sb_hybrid[s_index+2]=
  1140. g->sb_hybrid[s_index+3]= 0;
  1141. while(code){
  1142. static const int idxtab[16]={3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0};
  1143. int v;
  1144. int pos= s_index+idxtab[code];
  1145. code ^= 8>>idxtab[code];
  1146. READ_FLIP_SIGN(g->sb_hybrid+pos, RENAME(exp_table)+exponents[pos])
  1147. }
  1148. s_index+=4;
  1149. }
  1150. /* skip extension bits */
  1151. bits_left = end_pos2 - get_bits_count(&s->gb);
  1152. //av_log(NULL, AV_LOG_ERROR, "left:%d buf:%p\n", bits_left, s->in_gb.buffer);
  1153. if (bits_left < 0 && s->error_recognition >= FF_ER_COMPLIANT) {
  1154. av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
  1155. s_index=0;
  1156. }else if(bits_left > 0 && s->error_recognition >= FF_ER_AGGRESSIVE){
  1157. av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
  1158. s_index=0;
  1159. }
  1160. memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*(576 - s_index));
  1161. skip_bits_long(&s->gb, bits_left);
  1162. i= get_bits_count(&s->gb);
  1163. switch_buffer(s, &i, &end_pos, &end_pos2);
  1164. return 0;
  1165. }
  1166. /* Reorder short blocks from bitstream order to interleaved order. It
  1167. would be faster to do it in parsing, but the code would be far more
  1168. complicated */
  1169. static void reorder_block(MPADecodeContext *s, GranuleDef *g)
  1170. {
  1171. int i, j, len;
  1172. INTFLOAT *ptr, *dst, *ptr1;
  1173. INTFLOAT tmp[576];
  1174. if (g->block_type != 2)
  1175. return;
  1176. if (g->switch_point) {
  1177. if (s->sample_rate_index != 8) {
  1178. ptr = g->sb_hybrid + 36;
  1179. } else {
  1180. ptr = g->sb_hybrid + 48;
  1181. }
  1182. } else {
  1183. ptr = g->sb_hybrid;
  1184. }
  1185. for(i=g->short_start;i<13;i++) {
  1186. len = band_size_short[s->sample_rate_index][i];
  1187. ptr1 = ptr;
  1188. dst = tmp;
  1189. for(j=len;j>0;j--) {
  1190. *dst++ = ptr[0*len];
  1191. *dst++ = ptr[1*len];
  1192. *dst++ = ptr[2*len];
  1193. ptr++;
  1194. }
  1195. ptr+=2*len;
  1196. memcpy(ptr1, tmp, len * 3 * sizeof(*ptr1));
  1197. }
  1198. }
  1199. #define ISQRT2 FIXR(0.70710678118654752440)
  1200. static void compute_stereo(MPADecodeContext *s,
  1201. GranuleDef *g0, GranuleDef *g1)
  1202. {
  1203. int i, j, k, l;
  1204. int sf_max, sf, len, non_zero_found;
  1205. INTFLOAT (*is_tab)[16], *tab0, *tab1, tmp0, tmp1, v1, v2;
  1206. int non_zero_found_short[3];
  1207. /* intensity stereo */
  1208. if (s->mode_ext & MODE_EXT_I_STEREO) {
  1209. if (!s->lsf) {
  1210. is_tab = is_table;
  1211. sf_max = 7;
  1212. } else {
  1213. is_tab = is_table_lsf[g1->scalefac_compress & 1];
  1214. sf_max = 16;
  1215. }
  1216. tab0 = g0->sb_hybrid + 576;
  1217. tab1 = g1->sb_hybrid + 576;
  1218. non_zero_found_short[0] = 0;
  1219. non_zero_found_short[1] = 0;
  1220. non_zero_found_short[2] = 0;
  1221. k = (13 - g1->short_start) * 3 + g1->long_end - 3;
  1222. for(i = 12;i >= g1->short_start;i--) {
  1223. /* for last band, use previous scale factor */
  1224. if (i != 11)
  1225. k -= 3;
  1226. len = band_size_short[s->sample_rate_index][i];
  1227. for(l=2;l>=0;l--) {
  1228. tab0 -= len;
  1229. tab1 -= len;
  1230. if (!non_zero_found_short[l]) {
  1231. /* test if non zero band. if so, stop doing i-stereo */
  1232. for(j=0;j<len;j++) {
  1233. if (tab1[j] != 0) {
  1234. non_zero_found_short[l] = 1;
  1235. goto found1;
  1236. }
  1237. }
  1238. sf = g1->scale_factors[k + l];
  1239. if (sf >= sf_max)
  1240. goto found1;
  1241. v1 = is_tab[0][sf];
  1242. v2 = is_tab[1][sf];
  1243. for(j=0;j<len;j++) {
  1244. tmp0 = tab0[j];
  1245. tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
  1246. tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
  1247. }
  1248. } else {
  1249. found1:
  1250. if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1251. /* lower part of the spectrum : do ms stereo
  1252. if enabled */
  1253. for(j=0;j<len;j++) {
  1254. tmp0 = tab0[j];
  1255. tmp1 = tab1[j];
  1256. tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
  1257. tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
  1258. }
  1259. }
  1260. }
  1261. }
  1262. }
  1263. non_zero_found = non_zero_found_short[0] |
  1264. non_zero_found_short[1] |
  1265. non_zero_found_short[2];
  1266. for(i = g1->long_end - 1;i >= 0;i--) {
  1267. len = band_size_long[s->sample_rate_index][i];
  1268. tab0 -= len;
  1269. tab1 -= len;
  1270. /* test if non zero band. if so, stop doing i-stereo */
  1271. if (!non_zero_found) {
  1272. for(j=0;j<len;j++) {
  1273. if (tab1[j] != 0) {
  1274. non_zero_found = 1;
  1275. goto found2;
  1276. }
  1277. }
  1278. /* for last band, use previous scale factor */
  1279. k = (i == 21) ? 20 : i;
  1280. sf = g1->scale_factors[k];
  1281. if (sf >= sf_max)
  1282. goto found2;
  1283. v1 = is_tab[0][sf];
  1284. v2 = is_tab[1][sf];
  1285. for(j=0;j<len;j++) {
  1286. tmp0 = tab0[j];
  1287. tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
  1288. tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
  1289. }
  1290. } else {
  1291. found2:
  1292. if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1293. /* lower part of the spectrum : do ms stereo
  1294. if enabled */
  1295. for(j=0;j<len;j++) {
  1296. tmp0 = tab0[j];
  1297. tmp1 = tab1[j];
  1298. tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
  1299. tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
  1300. }
  1301. }
  1302. }
  1303. }
  1304. } else if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1305. /* ms stereo ONLY */
  1306. /* NOTE: the 1/sqrt(2) normalization factor is included in the
  1307. global gain */
  1308. tab0 = g0->sb_hybrid;
  1309. tab1 = g1->sb_hybrid;
  1310. for(i=0;i<576;i++) {
  1311. tmp0 = tab0[i];
  1312. tmp1 = tab1[i];
  1313. tab0[i] = tmp0 + tmp1;
  1314. tab1[i] = tmp0 - tmp1;
  1315. }
  1316. }
  1317. }
  1318. #if !CONFIG_FLOAT
  1319. static void compute_antialias_integer(MPADecodeContext *s,
  1320. GranuleDef *g)
  1321. {
  1322. int32_t *ptr, *csa;
  1323. int n, i;
  1324. /* we antialias only "long" bands */
  1325. if (g->block_type == 2) {
  1326. if (!g->switch_point)
  1327. return;
  1328. /* XXX: check this for 8000Hz case */
  1329. n = 1;
  1330. } else {
  1331. n = SBLIMIT - 1;
  1332. }
  1333. ptr = g->sb_hybrid + 18;
  1334. for(i = n;i > 0;i--) {
  1335. int tmp0, tmp1, tmp2;
  1336. csa = &csa_table[0][0];
  1337. #define INT_AA(j) \
  1338. tmp0 = ptr[-1-j];\
  1339. tmp1 = ptr[ j];\
  1340. tmp2= MULH(tmp0 + tmp1, csa[0+4*j]);\
  1341. ptr[-1-j] = 4*(tmp2 - MULH(tmp1, csa[2+4*j]));\
  1342. ptr[ j] = 4*(tmp2 + MULH(tmp0, csa[3+4*j]));
  1343. INT_AA(0)
  1344. INT_AA(1)
  1345. INT_AA(2)
  1346. INT_AA(3)
  1347. INT_AA(4)
  1348. INT_AA(5)
  1349. INT_AA(6)
  1350. INT_AA(7)
  1351. ptr += 18;
  1352. }
  1353. }
  1354. #endif
  1355. static void compute_imdct(MPADecodeContext *s,
  1356. GranuleDef *g,
  1357. INTFLOAT *sb_samples,
  1358. INTFLOAT *mdct_buf)
  1359. {
  1360. INTFLOAT *win, *win1, *out_ptr, *ptr, *buf, *ptr1;
  1361. INTFLOAT out2[12];
  1362. int i, j, mdct_long_end, sblimit;
  1363. /* find last non zero block */
  1364. ptr = g->sb_hybrid + 576;
  1365. ptr1 = g->sb_hybrid + 2 * 18;
  1366. while (ptr >= ptr1) {
  1367. int32_t *p;
  1368. ptr -= 6;
  1369. p= (int32_t*)ptr;
  1370. if(p[0] | p[1] | p[2] | p[3] | p[4] | p[5])
  1371. break;
  1372. }
  1373. sblimit = ((ptr - g->sb_hybrid) / 18) + 1;
  1374. if (g->block_type == 2) {
  1375. /* XXX: check for 8000 Hz */
  1376. if (g->switch_point)
  1377. mdct_long_end = 2;
  1378. else
  1379. mdct_long_end = 0;
  1380. } else {
  1381. mdct_long_end = sblimit;
  1382. }
  1383. buf = mdct_buf;
  1384. ptr = g->sb_hybrid;
  1385. for(j=0;j<mdct_long_end;j++) {
  1386. /* apply window & overlap with previous buffer */
  1387. out_ptr = sb_samples + j;
  1388. /* select window */
  1389. if (g->switch_point && j < 2)
  1390. win1 = mdct_win[0];
  1391. else
  1392. win1 = mdct_win[g->block_type];
  1393. /* select frequency inversion */
  1394. win = win1 + ((4 * 36) & -(j & 1));
  1395. imdct36(out_ptr, buf, ptr, win);
  1396. out_ptr += 18*SBLIMIT;
  1397. ptr += 18;
  1398. buf += 18;
  1399. }
  1400. for(j=mdct_long_end;j<sblimit;j++) {
  1401. /* select frequency inversion */
  1402. win = mdct_win[2] + ((4 * 36) & -(j & 1));
  1403. out_ptr = sb_samples + j;
  1404. for(i=0; i<6; i++){
  1405. *out_ptr = buf[i];
  1406. out_ptr += SBLIMIT;
  1407. }
  1408. imdct12(out2, ptr + 0);
  1409. for(i=0;i<6;i++) {
  1410. *out_ptr = MULH3(out2[i ], win[i ], 1) + buf[i + 6*1];
  1411. buf[i + 6*2] = MULH3(out2[i + 6], win[i + 6], 1);
  1412. out_ptr += SBLIMIT;
  1413. }
  1414. imdct12(out2, ptr + 1);
  1415. for(i=0;i<6;i++) {
  1416. *out_ptr = MULH3(out2[i ], win[i ], 1) + buf[i + 6*2];
  1417. buf[i + 6*0] = MULH3(out2[i + 6], win[i + 6], 1);
  1418. out_ptr += SBLIMIT;
  1419. }
  1420. imdct12(out2, ptr + 2);
  1421. for(i=0;i<6;i++) {
  1422. buf[i + 6*0] = MULH3(out2[i ], win[i ], 1) + buf[i + 6*0];
  1423. buf[i + 6*1] = MULH3(out2[i + 6], win[i + 6], 1);
  1424. buf[i + 6*2] = 0;
  1425. }
  1426. ptr += 18;
  1427. buf += 18;
  1428. }
  1429. /* zero bands */
  1430. for(j=sblimit;j<SBLIMIT;j++) {
  1431. /* overlap */
  1432. out_ptr = sb_samples + j;
  1433. for(i=0;i<18;i++) {
  1434. *out_ptr = buf[i];
  1435. buf[i] = 0;
  1436. out_ptr += SBLIMIT;
  1437. }
  1438. buf += 18;
  1439. }
  1440. }
  1441. /* main layer3 decoding function */
  1442. static int mp_decode_layer3(MPADecodeContext *s)
  1443. {
  1444. int nb_granules, main_data_begin, private_bits;
  1445. int gr, ch, blocksplit_flag, i, j, k, n, bits_pos;
  1446. GranuleDef *g;
  1447. int16_t exponents[576]; //FIXME try INTFLOAT
  1448. /* read side info */
  1449. if (s->lsf) {
  1450. main_data_begin = get_bits(&s->gb, 8);
  1451. private_bits = get_bits(&s->gb, s->nb_channels);
  1452. nb_granules = 1;
  1453. } else {
  1454. main_data_begin = get_bits(&s->gb, 9);
  1455. if (s->nb_channels == 2)
  1456. private_bits = get_bits(&s->gb, 3);
  1457. else
  1458. private_bits = get_bits(&s->gb, 5);
  1459. nb_granules = 2;
  1460. for(ch=0;ch<s->nb_channels;ch++) {
  1461. s->granules[ch][0].scfsi = 0;/* all scale factors are transmitted */
  1462. s->granules[ch][1].scfsi = get_bits(&s->gb, 4);
  1463. }
  1464. }
  1465. for(gr=0;gr<nb_granules;gr++) {
  1466. for(ch=0;ch<s->nb_channels;ch++) {
  1467. av_dlog(s->avctx, "gr=%d ch=%d: side_info\n", gr, ch);
  1468. g = &s->granules[ch][gr];
  1469. g->part2_3_length = get_bits(&s->gb, 12);
  1470. g->big_values = get_bits(&s->gb, 9);
  1471. if(g->big_values > 288){
  1472. av_log(s->avctx, AV_LOG_ERROR, "big_values too big\n");
  1473. return -1;
  1474. }
  1475. g->global_gain = get_bits(&s->gb, 8);
  1476. /* if MS stereo only is selected, we precompute the
  1477. 1/sqrt(2) renormalization factor */
  1478. if ((s->mode_ext & (MODE_EXT_MS_STEREO | MODE_EXT_I_STEREO)) ==
  1479. MODE_EXT_MS_STEREO)
  1480. g->global_gain -= 2;
  1481. if (s->lsf)
  1482. g->scalefac_compress = get_bits(&s->gb, 9);
  1483. else
  1484. g->scalefac_compress = get_bits(&s->gb, 4);
  1485. blocksplit_flag = get_bits1(&s->gb);
  1486. if (blocksplit_flag) {
  1487. g->block_type = get_bits(&s->gb, 2);
  1488. if (g->block_type == 0){
  1489. av_log(s->avctx, AV_LOG_ERROR, "invalid block type\n");
  1490. return -1;
  1491. }
  1492. g->switch_point = get_bits1(&s->gb);
  1493. for(i=0;i<2;i++)
  1494. g->table_select[i] = get_bits(&s->gb, 5);
  1495. for(i=0;i<3;i++)
  1496. g->subblock_gain[i] = get_bits(&s->gb, 3);
  1497. ff_init_short_region(s, g);
  1498. } else {
  1499. int region_address1, region_address2;
  1500. g->block_type = 0;
  1501. g->switch_point = 0;
  1502. for(i=0;i<3;i++)
  1503. g->table_select[i] = get_bits(&s->gb, 5);
  1504. /* compute huffman coded region sizes */
  1505. region_address1 = get_bits(&s->gb, 4);
  1506. region_address2 = get_bits(&s->gb, 3);
  1507. av_dlog(s->avctx, "region1=%d region2=%d\n",
  1508. region_address1, region_address2);
  1509. ff_init_long_region(s, g, region_address1, region_address2);
  1510. }
  1511. ff_region_offset2size(g);
  1512. ff_compute_band_indexes(s, g);
  1513. g->preflag = 0;
  1514. if (!s->lsf)
  1515. g->preflag = get_bits1(&s->gb);
  1516. g->scalefac_scale = get_bits1(&s->gb);
  1517. g->count1table_select = get_bits1(&s->gb);
  1518. av_dlog(s->avctx, "block_type=%d switch_point=%d\n",
  1519. g->block_type, g->switch_point);
  1520. }
  1521. }
  1522. if (!s->adu_mode) {
  1523. const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3);
  1524. assert((get_bits_count(&s->gb) & 7) == 0);
  1525. /* now we get bits from the main_data_begin offset */
  1526. av_dlog(s->avctx, "seekback: %d\n", main_data_begin);
  1527. //av_log(NULL, AV_LOG_ERROR, "backstep:%d, lastbuf:%d\n", main_data_begin, s->last_buf_size);
  1528. memcpy(s->last_buf + s->last_buf_size, ptr, EXTRABYTES);
  1529. s->in_gb= s->gb;
  1530. init_get_bits(&s->gb, s->last_buf, s->last_buf_size*8);
  1531. skip_bits_long(&s->gb, 8*(s->last_buf_size - main_data_begin));
  1532. }
  1533. for(gr=0;gr<nb_granules;gr++) {
  1534. for(ch=0;ch<s->nb_channels;ch++) {
  1535. g = &s->granules[ch][gr];
  1536. if(get_bits_count(&s->gb)<0){
  1537. av_log(s->avctx, AV_LOG_DEBUG, "mdb:%d, lastbuf:%d skipping granule %d\n",
  1538. main_data_begin, s->last_buf_size, gr);
  1539. skip_bits_long(&s->gb, g->part2_3_length);
  1540. memset(g->sb_hybrid, 0, sizeof(g->sb_hybrid));
  1541. if(get_bits_count(&s->gb) >= s->gb.size_in_bits && s->in_gb.buffer){
  1542. skip_bits_long(&s->in_gb, get_bits_count(&s->gb) - s->gb.size_in_bits);
  1543. s->gb= s->in_gb;
  1544. s->in_gb.buffer=NULL;
  1545. }
  1546. continue;
  1547. }
  1548. bits_pos = get_bits_count(&s->gb);
  1549. if (!s->lsf) {
  1550. uint8_t *sc;
  1551. int slen, slen1, slen2;
  1552. /* MPEG1 scale factors */
  1553. slen1 = slen_table[0][g->scalefac_compress];
  1554. slen2 = slen_table[1][g->scalefac_compress];
  1555. av_dlog(s->avctx, "slen1=%d slen2=%d\n", slen1, slen2);
  1556. if (g->block_type == 2) {
  1557. n = g->switch_point ? 17 : 18;
  1558. j = 0;
  1559. if(slen1){
  1560. for(i=0;i<n;i++)
  1561. g->scale_factors[j++] = get_bits(&s->gb, slen1);
  1562. }else{
  1563. for(i=0;i<n;i++)
  1564. g->scale_factors[j++] = 0;
  1565. }
  1566. if(slen2){
  1567. for(i=0;i<18;i++)
  1568. g->scale_factors[j++] = get_bits(&s->gb, slen2);
  1569. for(i=0;i<3;i++)
  1570. g->scale_factors[j++] = 0;
  1571. }else{
  1572. for(i=0;i<21;i++)
  1573. g->scale_factors[j++] = 0;
  1574. }
  1575. } else {
  1576. sc = s->granules[ch][0].scale_factors;
  1577. j = 0;
  1578. for(k=0;k<4;k++) {
  1579. n = (k == 0 ? 6 : 5);
  1580. if ((g->scfsi & (0x8 >> k)) == 0) {
  1581. slen = (k < 2) ? slen1 : slen2;
  1582. if(slen){
  1583. for(i=0;i<n;i++)
  1584. g->scale_factors[j++] = get_bits(&s->gb, slen);
  1585. }else{
  1586. for(i=0;i<n;i++)
  1587. g->scale_factors[j++] = 0;
  1588. }
  1589. } else {
  1590. /* simply copy from last granule */
  1591. for(i=0;i<n;i++) {
  1592. g->scale_factors[j] = sc[j];
  1593. j++;
  1594. }
  1595. }
  1596. }
  1597. g->scale_factors[j++] = 0;
  1598. }
  1599. } else {
  1600. int tindex, tindex2, slen[4], sl, sf;
  1601. /* LSF scale factors */
  1602. if (g->block_type == 2) {
  1603. tindex = g->switch_point ? 2 : 1;
  1604. } else {
  1605. tindex = 0;
  1606. }
  1607. sf = g->scalefac_compress;
  1608. if ((s->mode_ext & MODE_EXT_I_STEREO) && ch == 1) {
  1609. /* intensity stereo case */
  1610. sf >>= 1;
  1611. if (sf < 180) {
  1612. lsf_sf_expand(slen, sf, 6, 6, 0);
  1613. tindex2 = 3;
  1614. } else if (sf < 244) {
  1615. lsf_sf_expand(slen, sf - 180, 4, 4, 0);
  1616. tindex2 = 4;
  1617. } else {
  1618. lsf_sf_expand(slen, sf - 244, 3, 0, 0);
  1619. tindex2 = 5;
  1620. }
  1621. } else {
  1622. /* normal case */
  1623. if (sf < 400) {
  1624. lsf_sf_expand(slen, sf, 5, 4, 4);
  1625. tindex2 = 0;
  1626. } else if (sf < 500) {
  1627. lsf_sf_expand(slen, sf - 400, 5, 4, 0);
  1628. tindex2 = 1;
  1629. } else {
  1630. lsf_sf_expand(slen, sf - 500, 3, 0, 0);
  1631. tindex2 = 2;
  1632. g->preflag = 1;
  1633. }
  1634. }
  1635. j = 0;
  1636. for(k=0;k<4;k++) {
  1637. n = lsf_nsf_table[tindex2][tindex][k];
  1638. sl = slen[k];
  1639. if(sl){
  1640. for(i=0;i<n;i++)
  1641. g->scale_factors[j++] = get_bits(&s->gb, sl);
  1642. }else{
  1643. for(i=0;i<n;i++)
  1644. g->scale_factors[j++] = 0;
  1645. }
  1646. }
  1647. /* XXX: should compute exact size */
  1648. for(;j<40;j++)
  1649. g->scale_factors[j] = 0;
  1650. }
  1651. exponents_from_scale_factors(s, g, exponents);
  1652. /* read Huffman coded residue */
  1653. huffman_decode(s, g, exponents, bits_pos + g->part2_3_length);
  1654. } /* ch */
  1655. if (s->nb_channels == 2)
  1656. compute_stereo(s, &s->granules[0][gr], &s->granules[1][gr]);
  1657. for(ch=0;ch<s->nb_channels;ch++) {
  1658. g = &s->granules[ch][gr];
  1659. reorder_block(s, g);
  1660. compute_antialias(s, g);
  1661. compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
  1662. }
  1663. } /* gr */
  1664. if(get_bits_count(&s->gb)<0)
  1665. skip_bits_long(&s->gb, -get_bits_count(&s->gb));
  1666. return nb_granules * 18;
  1667. }
  1668. static int mp_decode_frame(MPADecodeContext *s,
  1669. OUT_INT *samples, const uint8_t *buf, int buf_size)
  1670. {
  1671. int i, nb_frames, ch;
  1672. OUT_INT *samples_ptr;
  1673. init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE)*8);
  1674. /* skip error protection field */
  1675. if (s->error_protection)
  1676. skip_bits(&s->gb, 16);
  1677. av_dlog(s->avctx, "frame %d:\n", s->frame_count);
  1678. switch(s->layer) {
  1679. case 1:
  1680. s->avctx->frame_size = 384;
  1681. nb_frames = mp_decode_layer1(s);
  1682. break;
  1683. case 2:
  1684. s->avctx->frame_size = 1152;
  1685. nb_frames = mp_decode_layer2(s);
  1686. break;
  1687. case 3:
  1688. s->avctx->frame_size = s->lsf ? 576 : 1152;
  1689. default:
  1690. nb_frames = mp_decode_layer3(s);
  1691. s->last_buf_size=0;
  1692. if(s->in_gb.buffer){
  1693. align_get_bits(&s->gb);
  1694. i= get_bits_left(&s->gb)>>3;
  1695. if(i >= 0 && i <= BACKSTEP_SIZE){
  1696. memmove(s->last_buf, s->gb.buffer + (get_bits_count(&s->gb)>>3), i);
  1697. s->last_buf_size=i;
  1698. }else
  1699. av_log(s->avctx, AV_LOG_ERROR, "invalid old backstep %d\n", i);
  1700. s->gb= s->in_gb;
  1701. s->in_gb.buffer= NULL;
  1702. }
  1703. align_get_bits(&s->gb);
  1704. assert((get_bits_count(&s->gb) & 7) == 0);
  1705. i= get_bits_left(&s->gb)>>3;
  1706. if(i<0 || i > BACKSTEP_SIZE || nb_frames<0){
  1707. if(i<0)
  1708. av_log(s->avctx, AV_LOG_ERROR, "invalid new backstep %d\n", i);
  1709. i= FFMIN(BACKSTEP_SIZE, buf_size - HEADER_SIZE);
  1710. }
  1711. assert(i <= buf_size - HEADER_SIZE && i>= 0);
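/* Bit reservoir handling (descriptive comment): up to BACKSTEP_SIZE bytes of main
 * data left over at the end of this frame are copied into last_buf, so that the
 * next frame's main_data_begin back-pointer can still reach them. */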
  1712. memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i);
  1713. s->last_buf_size += i;
  1714. break;
  1715. }
  1716. /* apply the synthesis filter */
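/* Each of the nb_frames rows of 32 subband samples per channel is run through the
 * polyphase synthesis filterbank, producing 32 PCM samples per channel per row;
 * channels are written interleaved, hence the output stride of s->nb_channels. */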
  1717. for(ch=0;ch<s->nb_channels;ch++) {
  1718. samples_ptr = samples + ch;
  1719. for(i=0;i<nb_frames;i++) {
  1720. RENAME(ff_mpa_synth_filter)(
  1721. #if CONFIG_FLOAT
  1722. s,
  1723. #endif
  1724. s->synth_buf[ch], &(s->synth_buf_offset[ch]),
  1725. RENAME(ff_mpa_synth_window), &s->dither_state,
  1726. samples_ptr, s->nb_channels,
  1727. s->sb_samples[ch][i]);
  1728. samples_ptr += 32 * s->nb_channels;
  1729. }
  1730. }
  1731. return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels;
  1732. }
  1733. static int decode_frame(AVCodecContext * avctx,
  1734. void *data, int *data_size,
  1735. AVPacket *avpkt)
  1736. {
  1737. const uint8_t *buf = avpkt->data;
  1738. int buf_size = avpkt->size;
  1739. MPADecodeContext *s = avctx->priv_data;
  1740. uint32_t header;
  1741. int out_size;
  1742. OUT_INT *out_samples = data;
  1743. if(buf_size < HEADER_SIZE)
  1744. return -1;
  1745. header = AV_RB32(buf);
  1746. if(ff_mpa_check_header(header) < 0){
  1747. av_log(avctx, AV_LOG_ERROR, "Header missing\n");
  1748. return -1;
  1749. }
  1750. if (ff_mpegaudio_decode_header((MPADecodeHeader *)s, header) == 1) {
  1751. /* free format: prepare to compute frame size */
  1752. s->frame_size = -1;
  1753. return -1;
  1754. }
  1755. /* update codec info */
  1756. avctx->channels = s->nb_channels;
  1757. avctx->channel_layout = s->nb_channels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
  1758. if (!avctx->bit_rate)
  1759. avctx->bit_rate = s->bit_rate;
  1760. avctx->sub_id = s->layer;
  1761. if(*data_size < 1152*avctx->channels*sizeof(OUT_INT))
  1762. return -1;
  1763. *data_size = 0;
  1764. if(s->frame_size<=0 || s->frame_size > buf_size){
  1765. av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
  1766. return -1;
  1767. }else if(s->frame_size < buf_size){
  1768. av_log(avctx, AV_LOG_ERROR, "incorrect frame size\n");
  1769. buf_size= s->frame_size;
  1770. }
  1771. out_size = mp_decode_frame(s, out_samples, buf, buf_size);
  1772. if(out_size>=0){
  1773. *data_size = out_size;
  1774. avctx->sample_rate = s->sample_rate;
  1775. //FIXME maybe move the other codec info stuff from above here too
  1776. }else
  1777. av_log(avctx, AV_LOG_DEBUG, "Error while decoding MPEG audio frame.\n"); //FIXME return -1 / but also return the number of bytes consumed
  1778. s->frame_size = 0;
  1779. return buf_size;
  1780. }
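/* Usage sketch (not part of this file; assumes the old audio decoding API of this
 * libavcodec generation): the caller hands in one parsed frame per packet and must
 * announce the output buffer size in bytes through the in/out size argument, e.g.
 *
 *     uint8_t outbuf[AVCODEC_MAX_AUDIO_FRAME_SIZE];
 *     int out_size = sizeof(outbuf);
 *     int used = avcodec_decode_audio3(avctx, (int16_t *)outbuf, &out_size, &pkt);
 *     // used: bytes consumed from pkt; out_size: bytes of PCM written (0 if none)
 */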
  1781. static void flush(AVCodecContext *avctx){
  1782. MPADecodeContext *s = avctx->priv_data;
  1783. memset(s->synth_buf, 0, sizeof(s->synth_buf));
  1784. s->last_buf_size= 0;
  1785. }
  1786. #if CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER
  1787. static int decode_frame_adu(AVCodecContext * avctx,
  1788. void *data, int *data_size,
  1789. AVPacket *avpkt)
  1790. {
  1791. const uint8_t *buf = avpkt->data;
  1792. int buf_size = avpkt->size;
  1793. MPADecodeContext *s = avctx->priv_data;
  1794. uint32_t header;
  1795. int len, out_size;
  1796. OUT_INT *out_samples = data;
  1797. len = buf_size;
  1798. // Discard too short frames
  1799. if (buf_size < HEADER_SIZE) {
  1800. *data_size = 0;
  1801. return buf_size;
  1802. }
  1803. if (len > MPA_MAX_CODED_FRAME_SIZE)
  1804. len = MPA_MAX_CODED_FRAME_SIZE;
  1805. // Get header and restore sync word
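// ADU streams carry frames without a reliable sync word; OR-ing in 0xffe00000
// restores the 11 sync bits so the regular header check and parse can be reused.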
  1806. header = AV_RB32(buf) | 0xffe00000;
  1807. if (ff_mpa_check_header(header) < 0) { // Bad header, discard frame
  1808. *data_size = 0;
  1809. return buf_size;
  1810. }
  1811. ff_mpegaudio_decode_header((MPADecodeHeader *)s, header);
  1812. /* update codec info */
  1813. avctx->sample_rate = s->sample_rate;
  1814. avctx->channels = s->nb_channels;
  1815. if (!avctx->bit_rate)
  1816. avctx->bit_rate = s->bit_rate;
  1817. avctx->sub_id = s->layer;
  1818. s->frame_size = len;
  1819. if (avctx->parse_only) {
  1820. out_size = buf_size;
  1821. } else {
  1822. out_size = mp_decode_frame(s, out_samples, buf, buf_size);
  1823. }
  1824. *data_size = out_size;
  1825. return buf_size;
  1826. }
  1827. #endif /* CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER */
  1828. #if CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER
  1829. /**
  1830. * Context for MP3On4 decoder
  1831. */
  1832. typedef struct MP3On4DecodeContext {
  1833. int frames; ///< number of mp3 frames per block (number of mp3 decoder instances)
  1834. int syncword; ///< syncword patch
  1835. const uint8_t *coff; ///< channels offsets in output buffer
  1836. MPADecodeContext *mp3decctx[5]; ///< MPADecodeContext for every decoder instance
  1837. } MP3On4DecodeContext;
  1838. #include "mpeg4audio.h"
1839. /* The next two arrays are indexed by channel config number (passed via codecdata) */
  1840. static const uint8_t mp3Frames[8] = {0,1,1,2,3,3,4,5}; /* number of mp3 decoder instances */
1841. /* offsets into the output buffer, assuming output order FL FR BL BR C LFE */
  1842. static const uint8_t chan_offset[8][5] = {
  1843. {0},
  1844. {0}, // C
  1845. {0}, // FLR
  1846. {2,0}, // C FLR
  1847. {2,0,3}, // C FLR BS
  1848. {4,0,2}, // C FLR BLRS
  1849. {4,0,2,5}, // C FLR BLRS LFE
  1850. {4,0,2,6,5}, // C FLR BLRS BLR LFE
  1851. };
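/* Worked example (descriptive comment): for channel config 5 (3/2, i.e. C, FL/FR,
 * BL/BR) there are mp3Frames[5] = 3 mp3 frames per block: {C}, {FL,FR}, {BL,BR}.
 * With the output order FL FR BL BR C LFE assumed above, their first samples land
 * at offsets 4, 0 and 2 respectively, which is the chan_offset[5] = {4,0,2} row. */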
  1852. static int decode_init_mp3on4(AVCodecContext * avctx)
  1853. {
  1854. MP3On4DecodeContext *s = avctx->priv_data;
  1855. MPEG4AudioConfig cfg;
  1856. int i;
  1857. if ((avctx->extradata_size < 2) || (avctx->extradata == NULL)) {
  1858. av_log(avctx, AV_LOG_ERROR, "Codec extradata missing or too short.\n");
  1859. return -1;
  1860. }
  1861. ff_mpeg4audio_get_config(&cfg, avctx->extradata, avctx->extradata_size);
  1862. if (!cfg.chan_config || cfg.chan_config > 7) {
  1863. av_log(avctx, AV_LOG_ERROR, "Invalid channel config number.\n");
  1864. return -1;
  1865. }
  1866. s->frames = mp3Frames[cfg.chan_config];
  1867. s->coff = chan_offset[cfg.chan_config];
  1868. avctx->channels = ff_mpeg4audio_channels[cfg.chan_config];
  1869. if (cfg.sample_rate < 16000)
  1870. s->syncword = 0xffe00000;
  1871. else
  1872. s->syncword = 0xfff00000;
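/* Sample rates below 16 kHz imply MPEG-2.5, whose header has only 11 sync bits
 * (0xffe00000); otherwise the full 12-bit MPEG-1/2 sync word (0xfff00000) is used
 * when re-creating headers for the embedded frames. */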
1873. /* Initialize the first mp3 decoder in the standard way, so that all tables get built.
1874. * We replace avctx->priv_data with the context of the first decoder so that
1875. * decode_init() does not have to be changed.
1876. * The other decoders are initialized here by copying data from the first context.
1877. */
  1878. // Allocate zeroed memory for the first decoder context
  1879. s->mp3decctx[0] = av_mallocz(sizeof(MPADecodeContext));
1880. // Put the decoder context in place to make decode_init() happy
  1881. avctx->priv_data = s->mp3decctx[0];
  1882. decode_init(avctx);
  1883. // Restore mp3on4 context pointer
  1884. avctx->priv_data = s;
  1885. s->mp3decctx[0]->adu_mode = 1; // Set adu mode
  1886. /* Create a separate codec/context for each frame (first is already ok).
  1887. * Each frame is 1 or 2 channels - up to 5 frames allowed
  1888. */
  1889. for (i = 1; i < s->frames; i++) {
  1890. s->mp3decctx[i] = av_mallocz(sizeof(MPADecodeContext));
  1891. s->mp3decctx[i]->adu_mode = 1;
  1892. s->mp3decctx[i]->avctx = avctx;
  1893. }
  1894. return 0;
  1895. }
  1896. static av_cold int decode_close_mp3on4(AVCodecContext * avctx)
  1897. {
  1898. MP3On4DecodeContext *s = avctx->priv_data;
  1899. int i;
  1900. for (i = 0; i < s->frames; i++)
  1901. av_free(s->mp3decctx[i]);
  1902. return 0;
  1903. }
  1904. static int decode_frame_mp3on4(AVCodecContext * avctx,
  1905. void *data, int *data_size,
  1906. AVPacket *avpkt)
  1907. {
  1908. const uint8_t *buf = avpkt->data;
  1909. int buf_size = avpkt->size;
  1910. MP3On4DecodeContext *s = avctx->priv_data;
  1911. MPADecodeContext *m;
  1912. int fsize, len = buf_size, out_size = 0;
  1913. uint32_t header;
  1914. OUT_INT *out_samples = data;
  1915. OUT_INT decoded_buf[MPA_FRAME_SIZE * MPA_MAX_CHANNELS];
  1916. OUT_INT *outptr, *bp;
  1917. int fr, j, n;
  1918. if(*data_size < MPA_FRAME_SIZE * MPA_MAX_CHANNELS * s->frames * sizeof(OUT_INT))
  1919. return -1;
  1920. *data_size = 0;
  1921. // Discard too short frames
  1922. if (buf_size < HEADER_SIZE)
  1923. return -1;
1924. // If there is only one decoder, no interleaving is needed
  1925. outptr = s->frames == 1 ? out_samples : decoded_buf;
  1926. avctx->bit_rate = 0;
  1927. for (fr = 0; fr < s->frames; fr++) {
  1928. fsize = AV_RB16(buf) >> 4;
  1929. fsize = FFMIN3(fsize, len, MPA_MAX_CODED_FRAME_SIZE);
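/* In the MP3onMP4 payload, the 12 bits that would normally hold the sync word carry
 * the coded size of this sub-frame instead, hence AV_RB16(buf) >> 4 above; the real
 * sync word is patched back in via s->syncword below before the header check. */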
  1930. m = s->mp3decctx[fr];
  1931. assert (m != NULL);
  1932. header = (AV_RB32(buf) & 0x000fffff) | s->syncword; // patch header
  1933. if (ff_mpa_check_header(header) < 0) // Bad header, discard block
  1934. break;
  1935. ff_mpegaudio_decode_header((MPADecodeHeader *)m, header);
  1936. out_size += mp_decode_frame(m, outptr, buf, fsize);
  1937. buf += fsize;
  1938. len -= fsize;
  1939. if(s->frames > 1) {
  1940. n = m->avctx->frame_size*m->nb_channels;
  1941. /* interleave output data */
  1942. bp = out_samples + s->coff[fr];
  1943. if(m->nb_channels == 1) {
  1944. for(j = 0; j < n; j++) {
  1945. *bp = decoded_buf[j];
  1946. bp += avctx->channels;
  1947. }
  1948. } else {
  1949. for(j = 0; j < n; j++) {
  1950. bp[0] = decoded_buf[j++];
  1951. bp[1] = decoded_buf[j];
  1952. bp += avctx->channels;
  1953. }
  1954. }
  1955. }
  1956. avctx->bit_rate += m->bit_rate;
  1957. }
  1958. /* update codec info */
  1959. avctx->sample_rate = s->mp3decctx[0]->sample_rate;
  1960. *data_size = out_size;
  1961. return buf_size;
  1962. }
  1963. #endif /* CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER */
  1964. #if !CONFIG_FLOAT
  1965. #if CONFIG_MP1_DECODER
  1966. AVCodec ff_mp1_decoder =
  1967. {
  1968. "mp1",
  1969. AVMEDIA_TYPE_AUDIO,
  1970. CODEC_ID_MP1,
  1971. sizeof(MPADecodeContext),
  1972. decode_init,
  1973. NULL,
  1974. NULL,
  1975. decode_frame,
  1976. CODEC_CAP_PARSE_ONLY,
  1977. .flush= flush,
  1978. .long_name= NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"),
  1979. };
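/* Note on the positional initializers above and in the codec definitions below
 * (descriptive, assuming this libavcodec version's AVCodec layout): the fields are
 * .name, .type, .id, .priv_data_size, .init, .encode, .close, .decode, followed by
 * .capabilities; .flush and .long_name are set with designated initializers. */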
  1980. #endif
  1981. #if CONFIG_MP2_DECODER
  1982. AVCodec ff_mp2_decoder =
  1983. {
  1984. "mp2",
  1985. AVMEDIA_TYPE_AUDIO,
  1986. CODEC_ID_MP2,
  1987. sizeof(MPADecodeContext),
  1988. decode_init,
  1989. NULL,
  1990. NULL,
  1991. decode_frame,
  1992. CODEC_CAP_PARSE_ONLY,
  1993. .flush= flush,
  1994. .long_name= NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
  1995. };
  1996. #endif
  1997. #if CONFIG_MP3_DECODER
  1998. AVCodec ff_mp3_decoder =
  1999. {
  2000. "mp3",
  2001. AVMEDIA_TYPE_AUDIO,
  2002. CODEC_ID_MP3,
  2003. sizeof(MPADecodeContext),
  2004. decode_init,
  2005. NULL,
  2006. NULL,
  2007. decode_frame,
  2008. CODEC_CAP_PARSE_ONLY,
  2009. .flush= flush,
  2010. .long_name= NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
  2011. };
  2012. #endif
  2013. #if CONFIG_MP3ADU_DECODER
  2014. AVCodec ff_mp3adu_decoder =
  2015. {
  2016. "mp3adu",
  2017. AVMEDIA_TYPE_AUDIO,
  2018. CODEC_ID_MP3ADU,
  2019. sizeof(MPADecodeContext),
  2020. decode_init,
  2021. NULL,
  2022. NULL,
  2023. decode_frame_adu,
  2024. CODEC_CAP_PARSE_ONLY,
  2025. .flush= flush,
  2026. .long_name= NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
  2027. };
  2028. #endif
  2029. #if CONFIG_MP3ON4_DECODER
  2030. AVCodec ff_mp3on4_decoder =
  2031. {
  2032. "mp3on4",
  2033. AVMEDIA_TYPE_AUDIO,
  2034. CODEC_ID_MP3ON4,
  2035. sizeof(MP3On4DecodeContext),
  2036. decode_init_mp3on4,
  2037. NULL,
  2038. decode_close_mp3on4,
  2039. decode_frame_mp3on4,
  2040. .flush= flush,
  2041. .long_name= NULL_IF_CONFIG_SMALL("MP3onMP4"),
  2042. };
  2043. #endif
  2044. #endif