  1. /*
  2. * MPEG Audio decoder
  3. * Copyright (c) 2001, 2002 Fabrice Bellard
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * MPEG Audio decoder.
  24. */
  25. #include "avcodec.h"
  26. #include "get_bits.h"
  27. #include "dsputil.h"
  28. /*
  29. * TODO:
  30. * - in low precision mode, use more 16 bit multiplies in synth filter
  31. * - test lsf / mpeg25 extensively.
  32. */
  33. #include "mpegaudio.h"
  34. #include "mpegaudiodecheader.h"
  35. #include "mathops.h"
  36. #if CONFIG_FLOAT
  37. # define SHR(a,b) ((a)*(1.0f/(1<<(b))))
  38. # define compute_antialias compute_antialias_float
  39. # define FIXR_OLD(a) ((int)((a) * FRAC_ONE + 0.5))
  40. # define FIXR(x) ((float)(x))
  41. # define FIXHR(x) ((float)(x))
  42. # define MULH3(x, y, s) ((s)*(y)*(x))
  43. # define MULLx(x, y, s) ((y)*(x))
  44. # define RENAME(a) a ## _float
  45. #else
  46. # define SHR(a,b) ((a)>>(b))
  47. # define compute_antialias compute_antialias_integer
  48. /* WARNING: only correct for positive numbers */
  49. # define FIXR_OLD(a) ((int)((a) * FRAC_ONE + 0.5))
  50. # define FIXR(a) ((int)((a) * FRAC_ONE + 0.5))
  51. # define FIXHR(a) ((int)((a) * (1LL<<32) + 0.5))
  52. # define MULH3(x, y, s) MULH((s)*(x), y)
  53. # define MULLx(x, y, s) MULL(x,y,s)
  54. # define RENAME(a) a
  55. #endif
  56. /****************/
  57. #define HEADER_SIZE 4
  58. #include "mpegaudiodata.h"
  59. #include "mpegaudiodectab.h"
  60. static void compute_antialias_integer(MPADecodeContext *s, GranuleDef *g);
  61. static void compute_antialias_float(MPADecodeContext *s, GranuleDef *g);
  62. static void apply_window_mp3_c(MPA_INT *synth_buf, MPA_INT *window,
  63. int *dither_state, OUT_INT *samples, int incr);
  64. /* vlc structure for decoding layer 3 huffman tables */
  65. static VLC huff_vlc[16];
  66. static VLC_TYPE huff_vlc_tables[
  67. 0+128+128+128+130+128+154+166+
  68. 142+204+190+170+542+460+662+414
  69. ][2];
  70. static const int huff_vlc_tables_sizes[16] = {
  71. 0, 128, 128, 128, 130, 128, 154, 166,
  72. 142, 204, 190, 170, 542, 460, 662, 414
  73. };
  74. static VLC huff_quad_vlc[2];
  75. static VLC_TYPE huff_quad_vlc_tables[128+16][2];
  76. static const int huff_quad_vlc_tables_sizes[2] = {
  77. 128, 16
  78. };
  79. /* computed from band_size_long */
  80. static uint16_t band_index_long[9][23];
  81. #include "mpegaudio_tablegen.h"
  82. /* intensity stereo coef table */
  83. static INTFLOAT is_table[2][16];
  84. static INTFLOAT is_table_lsf[2][2][16];
  85. static int32_t csa_table[8][4];
  86. static float csa_table_float[8][4];
  87. static INTFLOAT mdct_win[8][36];
  88. static int16_t division_tab3[1<<6 ];
  89. static int16_t division_tab5[1<<8 ];
  90. static int16_t division_tab9[1<<11];
  91. static int16_t * const division_tabs[4] = {
  92. division_tab3, division_tab5, NULL, division_tab9
  93. };
  94. /* lower 2 bits: modulo 3, higher bits: shift */
  95. static uint16_t scale_factor_modshift[64];
  96. /* [i][j]: 2^(-j/3) * FRAC_ONE * 2^(i+2) / (2^(i+2) - 1) */
  97. static int32_t scale_factor_mult[15][3];
  98. /* mult table for layer 2 group quantization */
  99. #define SCALE_GEN(v) \
  100. { FIXR_OLD(1.0 * (v)), FIXR_OLD(0.7937005259 * (v)), FIXR_OLD(0.6299605249 * (v)) }
  101. static const int32_t scale_factor_mult2[3][3] = {
  102. SCALE_GEN(4.0 / 3.0), /* 3 steps */
  103. SCALE_GEN(4.0 / 5.0), /* 5 steps */
  104. SCALE_GEN(4.0 / 9.0), /* 9 steps */
  105. };
  106. DECLARE_ALIGNED(16, MPA_INT, RENAME(ff_mpa_synth_window))[512+256];
  107. /**
  108. * Convert region offsets to region sizes and truncate
  109. * size to big_values.
  110. */
  111. static void ff_region_offset2size(GranuleDef *g){
  112. int i, k, j=0;
  113. g->region_size[2] = (576 / 2);
  114. for(i=0;i<3;i++) {
  115. k = FFMIN(g->region_size[i], g->big_values);
  116. g->region_size[i] = k - j;
  117. j = k;
  118. }
  119. }
  120. static void ff_init_short_region(MPADecodeContext *s, GranuleDef *g){
  121. if (g->block_type == 2)
  122. g->region_size[0] = (36 / 2);
  123. else {
  124. if (s->sample_rate_index <= 2)
  125. g->region_size[0] = (36 / 2);
  126. else if (s->sample_rate_index != 8)
  127. g->region_size[0] = (54 / 2);
  128. else
  129. g->region_size[0] = (108 / 2);
  130. }
  131. g->region_size[1] = (576 / 2);
  132. }
  133. static void ff_init_long_region(MPADecodeContext *s, GranuleDef *g, int ra1, int ra2){
  134. int l;
  135. g->region_size[0] =
  136. band_index_long[s->sample_rate_index][ra1 + 1] >> 1;
  137. /* should not overflow */
  138. l = FFMIN(ra1 + ra2 + 2, 22);
  139. g->region_size[1] =
  140. band_index_long[s->sample_rate_index][l] >> 1;
  141. }
  142. static void ff_compute_band_indexes(MPADecodeContext *s, GranuleDef *g){
  143. if (g->block_type == 2) {
  144. if (g->switch_point) {
  145. /* in switched mode, we handle the first 36 samples as
  146. long blocks. For 8000 Hz, we handle the first 48
  147. exponents as long blocks (XXX: check this!) */
  148. if (s->sample_rate_index <= 2)
  149. g->long_end = 8;
  150. else if (s->sample_rate_index != 8)
  151. g->long_end = 6;
  152. else
  153. g->long_end = 4; /* 8000 Hz */
  154. g->short_start = 2 + (s->sample_rate_index != 8);
  155. } else {
  156. g->long_end = 0;
  157. g->short_start = 0;
  158. }
  159. } else {
  160. g->short_start = 13;
  161. g->long_end = 22;
  162. }
  163. }
  164. /* layer 1 unscaling */
  165. /* n = number of bits of the mantissa minus 1 */
  166. static inline int l1_unscale(int n, int mant, int scale_factor)
  167. {
  168. int shift, mod;
  169. int64_t val;
  170. shift = scale_factor_modshift[scale_factor];
  171. mod = shift & 3;
  172. shift >>= 2;
  173. val = MUL64(mant + (-1 << n) + 1, scale_factor_mult[n-1][mod]);
  174. shift += n;
  175. /* NOTE: at this point, 1 <= shift <= 21 + 15 */
  176. return (int)((val + (1LL << (shift - 1))) >> shift);
  177. }
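  /* layer 2 group unscaling: dequantize one grouped sample (3/5/9 steps) using the scale_factor_mult2 table */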
  178. static inline int l2_unscale_group(int steps, int mant, int scale_factor)
  179. {
  180. int shift, mod, val;
  181. shift = scale_factor_modshift[scale_factor];
  182. mod = shift & 3;
  183. shift >>= 2;
  184. val = (mant - (steps >> 1)) * scale_factor_mult2[steps >> 2][mod];
  185. /* NOTE: at this point, 0 <= shift <= 21 */
  186. if (shift > 0)
  187. val = (val + (1 << (shift - 1))) >> shift;
  188. return val;
  189. }
  190. /* compute value^(4/3) * 2^(exponent/4). The result is normalized to FRAC_BITS */
  191. static inline int l3_unscale(int value, int exponent)
  192. {
  193. unsigned int m;
  194. int e;
  195. e = table_4_3_exp [4*value + (exponent&3)];
  196. m = table_4_3_value[4*value + (exponent&3)];
  197. e -= (exponent >> 2);
  198. assert(e>=1);
  199. if (e > 31)
  200. return 0;
  201. m = (m + (1 << (e-1))) >> e;
  202. return m;
  203. }
  204. /* all integer n^(4/3) computation code */
  205. #define DEV_ORDER 13
  206. #define POW_FRAC_BITS 24
  207. #define POW_FRAC_ONE (1 << POW_FRAC_BITS)
  208. #define POW_FIX(a) ((int)((a) * POW_FRAC_ONE))
  209. #define POW_MULL(a,b) (((int64_t)(a) * (int64_t)(b)) >> POW_FRAC_BITS)
  210. static int dev_4_3_coefs[DEV_ORDER];
  211. #if 0 /* unused */
  212. static int pow_mult3[3] = {
  213. POW_FIX(1.0),
  214. POW_FIX(1.25992104989487316476),
  215. POW_FIX(1.58740105196819947474),
  216. };
  217. #endif
  218. static av_cold void int_pow_init(void)
  219. {
  220. int i, a;
  221. a = POW_FIX(1.0);
  222. for(i=0;i<DEV_ORDER;i++) {
  223. a = POW_MULL(a, POW_FIX(4.0 / 3.0) - i * POW_FIX(1.0)) / (i + 1);
  224. dev_4_3_coefs[i] = a;
  225. }
  226. }
  227. #if 0 /* unused, remove? */
  228. /* return the mantissa and the binary exponent */
  229. static int int_pow(int i, int *exp_ptr)
  230. {
  231. int e, er, eq, j;
  232. int a, a1;
  233. /* renormalize */
  234. a = i;
  235. e = POW_FRAC_BITS;
  236. while (a < (1 << (POW_FRAC_BITS - 1))) {
  237. a = a << 1;
  238. e--;
  239. }
  240. a -= (1 << POW_FRAC_BITS);
  241. a1 = 0;
  242. for(j = DEV_ORDER - 1; j >= 0; j--)
  243. a1 = POW_MULL(a, dev_4_3_coefs[j] + a1);
  244. a = (1 << POW_FRAC_BITS) + a1;
  245. /* exponent compute (exact) */
  246. e = e * 4;
  247. er = e % 3;
  248. eq = e / 3;
  249. a = POW_MULL(a, pow_mult3[er]);
  250. while (a >= 2 * POW_FRAC_ONE) {
  251. a = a >> 1;
  252. eq++;
  253. }
  254. /* convert to float */
  255. while (a < POW_FRAC_ONE) {
  256. a = a << 1;
  257. eq--;
  258. }
  259. /* now POW_FRAC_ONE <= a < 2 * POW_FRAC_ONE */
  260. #if POW_FRAC_BITS > FRAC_BITS
  261. a = (a + (1 << (POW_FRAC_BITS - FRAC_BITS - 1))) >> (POW_FRAC_BITS - FRAC_BITS);
  262. /* correct overflow */
  263. if (a >= 2 * (1 << FRAC_BITS)) {
  264. a = a >> 1;
  265. eq++;
  266. }
  267. #endif
  268. *exp_ptr = eq;
  269. return a;
  270. }
  271. #endif
  272. static av_cold int decode_init(AVCodecContext * avctx)
  273. {
  274. MPADecodeContext *s = avctx->priv_data;
  275. static int init=0;
  276. int i, j, k;
  277. s->avctx = avctx;
  278. s->apply_window_mp3 = apply_window_mp3_c;
  279. #if HAVE_MMX
  280. ff_mpegaudiodec_init_mmx(s);
  281. #endif
  282. avctx->sample_fmt= OUT_FMT;
  283. s->error_recognition= avctx->error_recognition;
  284. if (!init && !avctx->parse_only) {
  285. int offset;
  286. /* scale factors table for layer 1/2 */
  287. for(i=0;i<64;i++) {
  288. int shift, mod;
  289. /* 1.0 (i = 3) is normalized to 2 ^ FRAC_BITS */
  290. shift = (i / 3);
  291. mod = i % 3;
  292. scale_factor_modshift[i] = mod | (shift << 2);
  293. }
  294. /* scale factor multiply for layer 1 */
  295. for(i=0;i<15;i++) {
  296. int n, norm;
  297. n = i + 2;
  298. norm = ((INT64_C(1) << n) * FRAC_ONE) / ((1 << n) - 1);
  299. scale_factor_mult[i][0] = MULLx(norm, FIXR(1.0 * 2.0), FRAC_BITS);
  300. scale_factor_mult[i][1] = MULLx(norm, FIXR(0.7937005259 * 2.0), FRAC_BITS);
  301. scale_factor_mult[i][2] = MULLx(norm, FIXR(0.6299605249 * 2.0), FRAC_BITS);
  302. dprintf(avctx, "%d: norm=%x s=%x %x %x\n",
  303. i, norm,
  304. scale_factor_mult[i][0],
  305. scale_factor_mult[i][1],
  306. scale_factor_mult[i][2]);
  307. }
  308. RENAME(ff_mpa_synth_init)(RENAME(ff_mpa_synth_window));
  309. /* huffman decode tables */
  310. offset = 0;
  311. for(i=1;i<16;i++) {
  312. const HuffTable *h = &mpa_huff_tables[i];
  313. int xsize, x, y;
  314. uint8_t tmp_bits [512];
  315. uint16_t tmp_codes[512];
  316. memset(tmp_bits , 0, sizeof(tmp_bits ));
  317. memset(tmp_codes, 0, sizeof(tmp_codes));
  318. xsize = h->xsize;
  319. j = 0;
  320. for(x=0;x<xsize;x++) {
  321. for(y=0;y<xsize;y++){
  322. tmp_bits [(x << 5) | y | ((x&&y)<<4)]= h->bits [j ];
  323. tmp_codes[(x << 5) | y | ((x&&y)<<4)]= h->codes[j++];
  324. }
  325. }
  326. /* XXX: fail test */
  327. huff_vlc[i].table = huff_vlc_tables+offset;
  328. huff_vlc[i].table_allocated = huff_vlc_tables_sizes[i];
  329. init_vlc(&huff_vlc[i], 7, 512,
  330. tmp_bits, 1, 1, tmp_codes, 2, 2,
  331. INIT_VLC_USE_NEW_STATIC);
  332. offset += huff_vlc_tables_sizes[i];
  333. }
  334. assert(offset == FF_ARRAY_ELEMS(huff_vlc_tables));
  335. offset = 0;
  336. for(i=0;i<2;i++) {
  337. huff_quad_vlc[i].table = huff_quad_vlc_tables+offset;
  338. huff_quad_vlc[i].table_allocated = huff_quad_vlc_tables_sizes[i];
  339. init_vlc(&huff_quad_vlc[i], i == 0 ? 7 : 4, 16,
  340. mpa_quad_bits[i], 1, 1, mpa_quad_codes[i], 1, 1,
  341. INIT_VLC_USE_NEW_STATIC);
  342. offset += huff_quad_vlc_tables_sizes[i];
  343. }
  344. assert(offset == FF_ARRAY_ELEMS(huff_quad_vlc_tables));
  345. for(i=0;i<9;i++) {
  346. k = 0;
  347. for(j=0;j<22;j++) {
  348. band_index_long[i][j] = k;
  349. k += band_size_long[i][j];
  350. }
  351. band_index_long[i][22] = k;
  352. }
  353. /* compute n ^ (4/3) and store it in mantissa/exp format */
  354. int_pow_init();
  355. mpegaudio_tableinit();
  356. for (i = 0; i < 4; i++)
  357. if (ff_mpa_quant_bits[i] < 0)
  358. for (j = 0; j < (1<<(-ff_mpa_quant_bits[i]+1)); j++) {
  359. int val1, val2, val3, steps;
  360. int val = j;
  361. steps = ff_mpa_quant_steps[i];
  362. val1 = val % steps;
  363. val /= steps;
  364. val2 = val % steps;
  365. val3 = val / steps;
  366. division_tabs[i][j] = val1 + (val2 << 4) + (val3 << 8);
  367. }
  368. for(i=0;i<7;i++) {
  369. float f;
  370. INTFLOAT v;
  371. if (i != 6) {
  372. f = tan((double)i * M_PI / 12.0);
  373. v = FIXR(f / (1.0 + f));
  374. } else {
  375. v = FIXR(1.0);
  376. }
  377. is_table[0][i] = v;
  378. is_table[1][6 - i] = v;
  379. }
  380. /* invalid values */
  381. for(i=7;i<16;i++)
  382. is_table[0][i] = is_table[1][i] = 0.0;
  383. for(i=0;i<16;i++) {
  384. double f;
  385. int e, k;
  386. for(j=0;j<2;j++) {
  387. e = -(j + 1) * ((i + 1) >> 1);
  388. f = pow(2.0, e / 4.0);
  389. k = i & 1;
  390. is_table_lsf[j][k ^ 1][i] = FIXR(f);
  391. is_table_lsf[j][k][i] = FIXR(1.0);
  392. dprintf(avctx, "is_table_lsf %d %d: %x %x\n",
  393. i, j, is_table_lsf[j][0][i], is_table_lsf[j][1][i]);
  394. }
  395. }
  396. for(i=0;i<8;i++) {
  397. float ci, cs, ca;
  398. ci = ci_table[i];
  399. cs = 1.0 / sqrt(1.0 + ci * ci);
  400. ca = cs * ci;
  401. csa_table[i][0] = FIXHR(cs/4);
  402. csa_table[i][1] = FIXHR(ca/4);
  403. csa_table[i][2] = FIXHR(ca/4) + FIXHR(cs/4);
  404. csa_table[i][3] = FIXHR(ca/4) - FIXHR(cs/4);
  405. csa_table_float[i][0] = cs;
  406. csa_table_float[i][1] = ca;
  407. csa_table_float[i][2] = ca + cs;
  408. csa_table_float[i][3] = ca - cs;
  409. }
  410. /* compute mdct windows */
  411. for(i=0;i<36;i++) {
  412. for(j=0; j<4; j++){
  413. double d;
  414. if(j==2 && i%3 != 1)
  415. continue;
  416. d= sin(M_PI * (i + 0.5) / 36.0);
  417. if(j==1){
  418. if (i>=30) d= 0;
  419. else if(i>=24) d= sin(M_PI * (i - 18 + 0.5) / 12.0);
  420. else if(i>=18) d= 1;
  421. }else if(j==3){
  422. if (i< 6) d= 0;
  423. else if(i< 12) d= sin(M_PI * (i - 6 + 0.5) / 12.0);
  424. else if(i< 18) d= 1;
  425. }
  426. //merge last stage of imdct into the window coefficients
  427. d*= 0.5 / cos(M_PI*(2*i + 19)/72);
  428. if(j==2)
  429. mdct_win[j][i/3] = FIXHR((d / (1<<5)));
  430. else
  431. mdct_win[j][i ] = FIXHR((d / (1<<5)));
  432. }
  433. }
  434. /* NOTE: we do frequency inversion after the MDCT by changing
  435. the sign of the right window coefs */
  436. for(j=0;j<4;j++) {
  437. for(i=0;i<36;i+=2) {
  438. mdct_win[j + 4][i] = mdct_win[j][i];
  439. mdct_win[j + 4][i + 1] = -mdct_win[j][i + 1];
  440. }
  441. }
  442. init = 1;
  443. }
  444. if (avctx->codec_id == CODEC_ID_MP3ADU)
  445. s->adu_mode = 1;
  446. return 0;
  447. }
  448. /* tab[i][j] = 1.0 / (2.0 * cos(pi*(2*k+1) / 2^(6 - j))) */
  449. /* cos(i*pi/64) */
  450. #define COS0_0 FIXHR(0.50060299823519630134/2)
  451. #define COS0_1 FIXHR(0.50547095989754365998/2)
  452. #define COS0_2 FIXHR(0.51544730992262454697/2)
  453. #define COS0_3 FIXHR(0.53104259108978417447/2)
  454. #define COS0_4 FIXHR(0.55310389603444452782/2)
  455. #define COS0_5 FIXHR(0.58293496820613387367/2)
  456. #define COS0_6 FIXHR(0.62250412303566481615/2)
  457. #define COS0_7 FIXHR(0.67480834145500574602/2)
  458. #define COS0_8 FIXHR(0.74453627100229844977/2)
  459. #define COS0_9 FIXHR(0.83934964541552703873/2)
  460. #define COS0_10 FIXHR(0.97256823786196069369/2)
  461. #define COS0_11 FIXHR(1.16943993343288495515/4)
  462. #define COS0_12 FIXHR(1.48416461631416627724/4)
  463. #define COS0_13 FIXHR(2.05778100995341155085/8)
  464. #define COS0_14 FIXHR(3.40760841846871878570/8)
  465. #define COS0_15 FIXHR(10.19000812354805681150/32)
  466. #define COS1_0 FIXHR(0.50241928618815570551/2)
  467. #define COS1_1 FIXHR(0.52249861493968888062/2)
  468. #define COS1_2 FIXHR(0.56694403481635770368/2)
  469. #define COS1_3 FIXHR(0.64682178335999012954/2)
  470. #define COS1_4 FIXHR(0.78815462345125022473/2)
  471. #define COS1_5 FIXHR(1.06067768599034747134/4)
  472. #define COS1_6 FIXHR(1.72244709823833392782/4)
  473. #define COS1_7 FIXHR(5.10114861868916385802/16)
  474. #define COS2_0 FIXHR(0.50979557910415916894/2)
  475. #define COS2_1 FIXHR(0.60134488693504528054/2)
  476. #define COS2_2 FIXHR(0.89997622313641570463/2)
  477. #define COS2_3 FIXHR(2.56291544774150617881/8)
  478. #define COS3_0 FIXHR(0.54119610014619698439/2)
  479. #define COS3_1 FIXHR(1.30656296487637652785/4)
  480. #define COS4_0 FIXHR(0.70710678118654752439/2)
  481. /* butterfly operator */
  482. #define BF(a, b, c, s)\
  483. {\
  484. tmp0 = val##a + val##b;\
  485. tmp1 = val##a - val##b;\
  486. val##a = tmp0;\
  487. val##b = MULH3(tmp1, c, 1<<(s));\
  488. }
  489. #define BF0(a, b, c, s)\
  490. {\
  491. tmp0 = tab[a] + tab[b];\
  492. tmp1 = tab[a] - tab[b];\
  493. val##a = tmp0;\
  494. val##b = MULH3(tmp1, c, 1<<(s));\
  495. }
  496. #define BF1(a, b, c, d)\
  497. {\
  498. BF(a, b, COS4_0, 1);\
  499. BF(c, d,-COS4_0, 1);\
  500. val##c += val##d;\
  501. }
  502. #define BF2(a, b, c, d)\
  503. {\
  504. BF(a, b, COS4_0, 1);\
  505. BF(c, d,-COS4_0, 1);\
  506. val##c += val##d;\
  507. val##a += val##c;\
  508. val##c += val##b;\
  509. val##b += val##d;\
  510. }
  511. #define ADD(a, b) val##a += val##b
  512. /* DCT32 without 1/sqrt(2) coef zero scaling. */
  513. static void dct32(INTFLOAT *out, const INTFLOAT *tab)
  514. {
  515. INTFLOAT tmp0, tmp1;
  516. INTFLOAT val0 , val1 , val2 , val3 , val4 , val5 , val6 , val7 ,
  517. val8 , val9 , val10, val11, val12, val13, val14, val15,
  518. val16, val17, val18, val19, val20, val21, val22, val23,
  519. val24, val25, val26, val27, val28, val29, val30, val31;
  520. /* pass 1 */
  521. BF0( 0, 31, COS0_0 , 1);
  522. BF0(15, 16, COS0_15, 5);
  523. /* pass 2 */
  524. BF( 0, 15, COS1_0 , 1);
  525. BF(16, 31,-COS1_0 , 1);
  526. /* pass 1 */
  527. BF0( 7, 24, COS0_7 , 1);
  528. BF0( 8, 23, COS0_8 , 1);
  529. /* pass 2 */
  530. BF( 7, 8, COS1_7 , 4);
  531. BF(23, 24,-COS1_7 , 4);
  532. /* pass 3 */
  533. BF( 0, 7, COS2_0 , 1);
  534. BF( 8, 15,-COS2_0 , 1);
  535. BF(16, 23, COS2_0 , 1);
  536. BF(24, 31,-COS2_0 , 1);
  537. /* pass 1 */
  538. BF0( 3, 28, COS0_3 , 1);
  539. BF0(12, 19, COS0_12, 2);
  540. /* pass 2 */
  541. BF( 3, 12, COS1_3 , 1);
  542. BF(19, 28,-COS1_3 , 1);
  543. /* pass 1 */
  544. BF0( 4, 27, COS0_4 , 1);
  545. BF0(11, 20, COS0_11, 2);
  546. /* pass 2 */
  547. BF( 4, 11, COS1_4 , 1);
  548. BF(20, 27,-COS1_4 , 1);
  549. /* pass 3 */
  550. BF( 3, 4, COS2_3 , 3);
  551. BF(11, 12,-COS2_3 , 3);
  552. BF(19, 20, COS2_3 , 3);
  553. BF(27, 28,-COS2_3 , 3);
  554. /* pass 4 */
  555. BF( 0, 3, COS3_0 , 1);
  556. BF( 4, 7,-COS3_0 , 1);
  557. BF( 8, 11, COS3_0 , 1);
  558. BF(12, 15,-COS3_0 , 1);
  559. BF(16, 19, COS3_0 , 1);
  560. BF(20, 23,-COS3_0 , 1);
  561. BF(24, 27, COS3_0 , 1);
  562. BF(28, 31,-COS3_0 , 1);
  563. /* pass 1 */
  564. BF0( 1, 30, COS0_1 , 1);
  565. BF0(14, 17, COS0_14, 3);
  566. /* pass 2 */
  567. BF( 1, 14, COS1_1 , 1);
  568. BF(17, 30,-COS1_1 , 1);
  569. /* pass 1 */
  570. BF0( 6, 25, COS0_6 , 1);
  571. BF0( 9, 22, COS0_9 , 1);
  572. /* pass 2 */
  573. BF( 6, 9, COS1_6 , 2);
  574. BF(22, 25,-COS1_6 , 2);
  575. /* pass 3 */
  576. BF( 1, 6, COS2_1 , 1);
  577. BF( 9, 14,-COS2_1 , 1);
  578. BF(17, 22, COS2_1 , 1);
  579. BF(25, 30,-COS2_1 , 1);
  580. /* pass 1 */
  581. BF0( 2, 29, COS0_2 , 1);
  582. BF0(13, 18, COS0_13, 3);
  583. /* pass 2 */
  584. BF( 2, 13, COS1_2 , 1);
  585. BF(18, 29,-COS1_2 , 1);
  586. /* pass 1 */
  587. BF0( 5, 26, COS0_5 , 1);
  588. BF0(10, 21, COS0_10, 1);
  589. /* pass 2 */
  590. BF( 5, 10, COS1_5 , 2);
  591. BF(21, 26,-COS1_5 , 2);
  592. /* pass 3 */
  593. BF( 2, 5, COS2_2 , 1);
  594. BF(10, 13,-COS2_2 , 1);
  595. BF(18, 21, COS2_2 , 1);
  596. BF(26, 29,-COS2_2 , 1);
  597. /* pass 4 */
  598. BF( 1, 2, COS3_1 , 2);
  599. BF( 5, 6,-COS3_1 , 2);
  600. BF( 9, 10, COS3_1 , 2);
  601. BF(13, 14,-COS3_1 , 2);
  602. BF(17, 18, COS3_1 , 2);
  603. BF(21, 22,-COS3_1 , 2);
  604. BF(25, 26, COS3_1 , 2);
  605. BF(29, 30,-COS3_1 , 2);
  606. /* pass 5 */
  607. BF1( 0, 1, 2, 3);
  608. BF2( 4, 5, 6, 7);
  609. BF1( 8, 9, 10, 11);
  610. BF2(12, 13, 14, 15);
  611. BF1(16, 17, 18, 19);
  612. BF2(20, 21, 22, 23);
  613. BF1(24, 25, 26, 27);
  614. BF2(28, 29, 30, 31);
  615. /* pass 6 */
  616. ADD( 8, 12);
  617. ADD(12, 10);
  618. ADD(10, 14);
  619. ADD(14, 9);
  620. ADD( 9, 13);
  621. ADD(13, 11);
  622. ADD(11, 15);
  623. out[ 0] = val0;
  624. out[16] = val1;
  625. out[ 8] = val2;
  626. out[24] = val3;
  627. out[ 4] = val4;
  628. out[20] = val5;
  629. out[12] = val6;
  630. out[28] = val7;
  631. out[ 2] = val8;
  632. out[18] = val9;
  633. out[10] = val10;
  634. out[26] = val11;
  635. out[ 6] = val12;
  636. out[22] = val13;
  637. out[14] = val14;
  638. out[30] = val15;
  639. ADD(24, 28);
  640. ADD(28, 26);
  641. ADD(26, 30);
  642. ADD(30, 25);
  643. ADD(25, 29);
  644. ADD(29, 27);
  645. ADD(27, 31);
  646. out[ 1] = val16 + val24;
  647. out[17] = val17 + val25;
  648. out[ 9] = val18 + val26;
  649. out[25] = val19 + val27;
  650. out[ 5] = val20 + val28;
  651. out[21] = val21 + val29;
  652. out[13] = val22 + val30;
  653. out[29] = val23 + val31;
  654. out[ 3] = val24 + val20;
  655. out[19] = val25 + val21;
  656. out[11] = val26 + val22;
  657. out[27] = val27 + val23;
  658. out[ 7] = val28 + val18;
  659. out[23] = val29 + val19;
  660. out[15] = val30 + val17;
  661. out[31] = val31;
  662. }
  663. #if CONFIG_FLOAT
  664. static inline float round_sample(float *sum)
  665. {
  666. float sum1=*sum;
  667. *sum = 0;
  668. return sum1;
  669. }
  670. /* signed 16x16 -> 32 multiply add accumulate */
  671. #define MACS(rt, ra, rb) rt+=(ra)*(rb)
  672. /* signed 16x16 -> 32 multiply */
  673. #define MULS(ra, rb) ((ra)*(rb))
  674. #define MLSS(rt, ra, rb) rt-=(ra)*(rb)
  675. #elif FRAC_BITS <= 15
  676. static inline int round_sample(int *sum)
  677. {
  678. int sum1;
  679. sum1 = (*sum) >> OUT_SHIFT;
  680. *sum &= (1<<OUT_SHIFT)-1;
  681. return av_clip(sum1, OUT_MIN, OUT_MAX);
  682. }
  683. /* signed 16x16 -> 32 multiply add accumulate */
  684. #define MACS(rt, ra, rb) MAC16(rt, ra, rb)
  685. /* signed 16x16 -> 32 multiply */
  686. #define MULS(ra, rb) MUL16(ra, rb)
  687. #define MLSS(rt, ra, rb) MLS16(rt, ra, rb)
  688. #else
  689. static inline int round_sample(int64_t *sum)
  690. {
  691. int sum1;
  692. sum1 = (int)((*sum) >> OUT_SHIFT);
  693. *sum &= (1<<OUT_SHIFT)-1;
  694. return av_clip(sum1, OUT_MIN, OUT_MAX);
  695. }
  696. # define MULS(ra, rb) MUL64(ra, rb)
  697. # define MACS(rt, ra, rb) MAC64(rt, ra, rb)
  698. # define MLSS(rt, ra, rb) MLS64(rt, ra, rb)
  699. #endif
  700. #define SUM8(op, sum, w, p) \
  701. { \
  702. op(sum, (w)[0 * 64], (p)[0 * 64]); \
  703. op(sum, (w)[1 * 64], (p)[1 * 64]); \
  704. op(sum, (w)[2 * 64], (p)[2 * 64]); \
  705. op(sum, (w)[3 * 64], (p)[3 * 64]); \
  706. op(sum, (w)[4 * 64], (p)[4 * 64]); \
  707. op(sum, (w)[5 * 64], (p)[5 * 64]); \
  708. op(sum, (w)[6 * 64], (p)[6 * 64]); \
  709. op(sum, (w)[7 * 64], (p)[7 * 64]); \
  710. }
  711. #define SUM8P2(sum1, op1, sum2, op2, w1, w2, p) \
  712. { \
  713. INTFLOAT tmp;\
  714. tmp = p[0 * 64];\
  715. op1(sum1, (w1)[0 * 64], tmp);\
  716. op2(sum2, (w2)[0 * 64], tmp);\
  717. tmp = p[1 * 64];\
  718. op1(sum1, (w1)[1 * 64], tmp);\
  719. op2(sum2, (w2)[1 * 64], tmp);\
  720. tmp = p[2 * 64];\
  721. op1(sum1, (w1)[2 * 64], tmp);\
  722. op2(sum2, (w2)[2 * 64], tmp);\
  723. tmp = p[3 * 64];\
  724. op1(sum1, (w1)[3 * 64], tmp);\
  725. op2(sum2, (w2)[3 * 64], tmp);\
  726. tmp = p[4 * 64];\
  727. op1(sum1, (w1)[4 * 64], tmp);\
  728. op2(sum2, (w2)[4 * 64], tmp);\
  729. tmp = p[5 * 64];\
  730. op1(sum1, (w1)[5 * 64], tmp);\
  731. op2(sum2, (w2)[5 * 64], tmp);\
  732. tmp = p[6 * 64];\
  733. op1(sum1, (w1)[6 * 64], tmp);\
  734. op2(sum2, (w2)[6 * 64], tmp);\
  735. tmp = p[7 * 64];\
  736. op1(sum1, (w1)[7 * 64], tmp);\
  737. op2(sum2, (w2)[7 * 64], tmp);\
  738. }
  739. void av_cold RENAME(ff_mpa_synth_init)(MPA_INT *window)
  740. {
  741. int i, j;
  742. /* max = 18760, max sum over all 16 coefs : 44736 */
  743. for(i=0;i<257;i++) {
  744. INTFLOAT v;
  745. v = ff_mpa_enwindow[i];
  746. #if CONFIG_FLOAT
  747. v *= 1.0 / (1LL<<(16 + FRAC_BITS));
  748. #elif WFRAC_BITS < 16
  749. v = (v + (1 << (16 - WFRAC_BITS - 1))) >> (16 - WFRAC_BITS);
  750. #endif
  751. window[i] = v;
  752. if ((i & 63) != 0)
  753. v = -v;
  754. if (i != 0)
  755. window[512 - i] = v;
  756. }
  757. // Needed for avoiding shuffles in ASM implementations
  758. for(i=0; i < 8; i++)
  759. for(j=0; j < 16; j++)
  760. window[512+16*i+j] = window[64*i+32-j];
  761. for(i=0; i < 8; i++)
  762. for(j=0; j < 16; j++)
  763. window[512+128+16*i+j] = window[64*i+48-j];
  764. }
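  /* apply the 512-tap synthesis window to synth_buf and write 32 output samples spaced by incr; in the fixed-point build the rounding remainder is carried across calls in dither_state */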
  765. static void apply_window_mp3_c(MPA_INT *synth_buf, MPA_INT *window,
  766. int *dither_state, OUT_INT *samples, int incr)
  767. {
  768. register const MPA_INT *w, *w2, *p;
  769. int j;
  770. OUT_INT *samples2;
  771. #if CONFIG_FLOAT
  772. float sum, sum2;
  773. #elif FRAC_BITS <= 15
  774. int sum, sum2;
  775. #else
  776. int64_t sum, sum2;
  777. #endif
  778. /* copy to avoid wrap */
  779. memcpy(synth_buf + 512, synth_buf, 32 * sizeof(*synth_buf));
  780. samples2 = samples + 31 * incr;
  781. w = window;
  782. w2 = window + 31;
  783. sum = *dither_state;
  784. p = synth_buf + 16;
  785. SUM8(MACS, sum, w, p);
  786. p = synth_buf + 48;
  787. SUM8(MLSS, sum, w + 32, p);
  788. *samples = round_sample(&sum);
  789. samples += incr;
  790. w++;
  791. /* we calculate two samples at the same time to avoid one memory
  792. access per two samples */
  793. for(j=1;j<16;j++) {
  794. sum2 = 0;
  795. p = synth_buf + 16 + j;
  796. SUM8P2(sum, MACS, sum2, MLSS, w, w2, p);
  797. p = synth_buf + 48 - j;
  798. SUM8P2(sum, MLSS, sum2, MLSS, w + 32, w2 + 32, p);
  799. *samples = round_sample(&sum);
  800. samples += incr;
  801. sum += sum2;
  802. *samples2 = round_sample(&sum);
  803. samples2 -= incr;
  804. w++;
  805. w2--;
  806. }
  807. p = synth_buf + 32;
  808. SUM8(MLSS, sum, w + 32, p);
  809. *samples = round_sample(&sum);
  810. *dither_state= sum;
  811. }
  812. /* 32 sub band synthesis filter. Input: 32 sub band samples, Output:
  813. 32 samples. */
  814. /* XXX: optimize by avoiding ring buffer usage */
  815. #if !CONFIG_FLOAT
  816. void ff_mpa_synth_filter(MPA_INT *synth_buf_ptr, int *synth_buf_offset,
  817. MPA_INT *window, int *dither_state,
  818. OUT_INT *samples, int incr,
  819. INTFLOAT sb_samples[SBLIMIT])
  820. {
  821. register MPA_INT *synth_buf;
  822. int offset;
  823. #if FRAC_BITS <= 15
  824. int32_t tmp[32];
  825. int j;
  826. #endif
  827. offset = *synth_buf_offset;
  828. synth_buf = synth_buf_ptr + offset;
  829. #if FRAC_BITS <= 15
  830. dct32(tmp, sb_samples);
  831. for(j=0;j<32;j++) {
  832. /* NOTE: can cause a loss in precision with very high amplitude
  833. sound */
  834. synth_buf[j] = av_clip_int16(tmp[j]);
  835. }
  836. #else
  837. dct32(synth_buf, sb_samples);
  838. #endif
  839. apply_window_mp3_c(synth_buf, window, dither_state, samples, incr);
  840. offset = (offset - 32) & 511;
  841. *synth_buf_offset = offset;
  842. }
  843. #endif
  844. #define C3 FIXHR(0.86602540378443864676/2)
  845. /* 0.5 / cos(pi*(2*i+1)/36) */
  846. static const INTFLOAT icos36[9] = {
  847. FIXR(0.50190991877167369479),
  848. FIXR(0.51763809020504152469), //0
  849. FIXR(0.55168895948124587824),
  850. FIXR(0.61038729438072803416),
  851. FIXR(0.70710678118654752439), //1
  852. FIXR(0.87172339781054900991),
  853. FIXR(1.18310079157624925896),
  854. FIXR(1.93185165257813657349), //2
  855. FIXR(5.73685662283492756461),
  856. };
  857. /* 0.5 / cos(pi*(2*i+1)/36) */
  858. static const INTFLOAT icos36h[9] = {
  859. FIXHR(0.50190991877167369479/2),
  860. FIXHR(0.51763809020504152469/2), //0
  861. FIXHR(0.55168895948124587824/2),
  862. FIXHR(0.61038729438072803416/2),
  863. FIXHR(0.70710678118654752439/2), //1
  864. FIXHR(0.87172339781054900991/2),
  865. FIXHR(1.18310079157624925896/4),
  866. FIXHR(1.93185165257813657349/4), //2
  867. // FIXHR(5.73685662283492756461),
  868. };
  869. /* 12-point IMDCT. We compute it "by hand" by factorizing obvious
  870. cases. */
  871. static void imdct12(INTFLOAT *out, INTFLOAT *in)
  872. {
  873. INTFLOAT in0, in1, in2, in3, in4, in5, t1, t2;
  874. in0= in[0*3];
  875. in1= in[1*3] + in[0*3];
  876. in2= in[2*3] + in[1*3];
  877. in3= in[3*3] + in[2*3];
  878. in4= in[4*3] + in[3*3];
  879. in5= in[5*3] + in[4*3];
  880. in5 += in3;
  881. in3 += in1;
  882. in2= MULH3(in2, C3, 2);
  883. in3= MULH3(in3, C3, 4);
  884. t1 = in0 - in4;
  885. t2 = MULH3(in1 - in5, icos36h[4], 2);
  886. out[ 7]=
  887. out[10]= t1 + t2;
  888. out[ 1]=
  889. out[ 4]= t1 - t2;
  890. in0 += SHR(in4, 1);
  891. in4 = in0 + in2;
  892. in5 += 2*in1;
  893. in1 = MULH3(in5 + in3, icos36h[1], 1);
  894. out[ 8]=
  895. out[ 9]= in4 + in1;
  896. out[ 2]=
  897. out[ 3]= in4 - in1;
  898. in0 -= in2;
  899. in5 = MULH3(in5 - in3, icos36h[7], 2);
  900. out[ 0]=
  901. out[ 5]= in0 - in5;
  902. out[ 6]=
  903. out[11]= in0 + in5;
  904. }
  905. /* cos(pi*i/18) */
  906. #define C1 FIXHR(0.98480775301220805936/2)
  907. #define C2 FIXHR(0.93969262078590838405/2)
  908. #define C3 FIXHR(0.86602540378443864676/2)
  909. #define C4 FIXHR(0.76604444311897803520/2)
  910. #define C5 FIXHR(0.64278760968653932632/2)
  911. #define C6 FIXHR(0.5/2)
  912. #define C7 FIXHR(0.34202014332566873304/2)
  913. #define C8 FIXHR(0.17364817766693034885/2)
  914. /* using a Lee-like decomposition followed by a hand-coded 9-point DCT */
  915. static void imdct36(INTFLOAT *out, INTFLOAT *buf, INTFLOAT *in, INTFLOAT *win)
  916. {
  917. int i, j;
  918. INTFLOAT t0, t1, t2, t3, s0, s1, s2, s3;
  919. INTFLOAT tmp[18], *tmp1, *in1;
  920. for(i=17;i>=1;i--)
  921. in[i] += in[i-1];
  922. for(i=17;i>=3;i-=2)
  923. in[i] += in[i-2];
  924. for(j=0;j<2;j++) {
  925. tmp1 = tmp + j;
  926. in1 = in + j;
  927. t2 = in1[2*4] + in1[2*8] - in1[2*2];
  928. t3 = in1[2*0] + SHR(in1[2*6],1);
  929. t1 = in1[2*0] - in1[2*6];
  930. tmp1[ 6] = t1 - SHR(t2,1);
  931. tmp1[16] = t1 + t2;
  932. t0 = MULH3(in1[2*2] + in1[2*4] , C2, 2);
  933. t1 = MULH3(in1[2*4] - in1[2*8] , -2*C8, 1);
  934. t2 = MULH3(in1[2*2] + in1[2*8] , -C4, 2);
  935. tmp1[10] = t3 - t0 - t2;
  936. tmp1[ 2] = t3 + t0 + t1;
  937. tmp1[14] = t3 + t2 - t1;
  938. tmp1[ 4] = MULH3(in1[2*5] + in1[2*7] - in1[2*1], -C3, 2);
  939. t2 = MULH3(in1[2*1] + in1[2*5], C1, 2);
  940. t3 = MULH3(in1[2*5] - in1[2*7], -2*C7, 1);
  941. t0 = MULH3(in1[2*3], C3, 2);
  942. t1 = MULH3(in1[2*1] + in1[2*7], -C5, 2);
  943. tmp1[ 0] = t2 + t3 + t0;
  944. tmp1[12] = t2 + t1 - t0;
  945. tmp1[ 8] = t3 - t1 - t0;
  946. }
  947. i = 0;
  948. for(j=0;j<4;j++) {
  949. t0 = tmp[i];
  950. t1 = tmp[i + 2];
  951. s0 = t1 + t0;
  952. s2 = t1 - t0;
  953. t2 = tmp[i + 1];
  954. t3 = tmp[i + 3];
  955. s1 = MULH3(t3 + t2, icos36h[j], 2);
  956. s3 = MULLx(t3 - t2, icos36[8 - j], FRAC_BITS);
  957. t0 = s0 + s1;
  958. t1 = s0 - s1;
  959. out[(9 + j)*SBLIMIT] = MULH3(t1, win[9 + j], 1) + buf[9 + j];
  960. out[(8 - j)*SBLIMIT] = MULH3(t1, win[8 - j], 1) + buf[8 - j];
  961. buf[9 + j] = MULH3(t0, win[18 + 9 + j], 1);
  962. buf[8 - j] = MULH3(t0, win[18 + 8 - j], 1);
  963. t0 = s2 + s3;
  964. t1 = s2 - s3;
  965. out[(9 + 8 - j)*SBLIMIT] = MULH3(t1, win[9 + 8 - j], 1) + buf[9 + 8 - j];
  966. out[( j)*SBLIMIT] = MULH3(t1, win[ j], 1) + buf[ j];
  967. buf[9 + 8 - j] = MULH3(t0, win[18 + 9 + 8 - j], 1);
  968. buf[ + j] = MULH3(t0, win[18 + j], 1);
  969. i += 4;
  970. }
  971. s0 = tmp[16];
  972. s1 = MULH3(tmp[17], icos36h[4], 2);
  973. t0 = s0 + s1;
  974. t1 = s0 - s1;
  975. out[(9 + 4)*SBLIMIT] = MULH3(t1, win[9 + 4], 1) + buf[9 + 4];
  976. out[(8 - 4)*SBLIMIT] = MULH3(t1, win[8 - 4], 1) + buf[8 - 4];
  977. buf[9 + 4] = MULH3(t0, win[18 + 9 + 4], 1);
  978. buf[8 - 4] = MULH3(t0, win[18 + 8 - 4], 1);
  979. }
  980. /* return the number of decoded frames */
  981. static int mp_decode_layer1(MPADecodeContext *s)
  982. {
  983. int bound, i, v, n, ch, j, mant;
  984. uint8_t allocation[MPA_MAX_CHANNELS][SBLIMIT];
  985. uint8_t scale_factors[MPA_MAX_CHANNELS][SBLIMIT];
  986. if (s->mode == MPA_JSTEREO)
  987. bound = (s->mode_ext + 1) * 4;
  988. else
  989. bound = SBLIMIT;
  990. /* allocation bits */
  991. for(i=0;i<bound;i++) {
  992. for(ch=0;ch<s->nb_channels;ch++) {
  993. allocation[ch][i] = get_bits(&s->gb, 4);
  994. }
  995. }
  996. for(i=bound;i<SBLIMIT;i++) {
  997. allocation[0][i] = get_bits(&s->gb, 4);
  998. }
  999. /* scale factors */
  1000. for(i=0;i<bound;i++) {
  1001. for(ch=0;ch<s->nb_channels;ch++) {
  1002. if (allocation[ch][i])
  1003. scale_factors[ch][i] = get_bits(&s->gb, 6);
  1004. }
  1005. }
  1006. for(i=bound;i<SBLIMIT;i++) {
  1007. if (allocation[0][i]) {
  1008. scale_factors[0][i] = get_bits(&s->gb, 6);
  1009. scale_factors[1][i] = get_bits(&s->gb, 6);
  1010. }
  1011. }
  1012. /* compute samples */
  1013. for(j=0;j<12;j++) {
  1014. for(i=0;i<bound;i++) {
  1015. for(ch=0;ch<s->nb_channels;ch++) {
  1016. n = allocation[ch][i];
  1017. if (n) {
  1018. mant = get_bits(&s->gb, n + 1);
  1019. v = l1_unscale(n, mant, scale_factors[ch][i]);
  1020. } else {
  1021. v = 0;
  1022. }
  1023. s->sb_samples[ch][j][i] = v;
  1024. }
  1025. }
  1026. for(i=bound;i<SBLIMIT;i++) {
  1027. n = allocation[0][i];
  1028. if (n) {
  1029. mant = get_bits(&s->gb, n + 1);
  1030. v = l1_unscale(n, mant, scale_factors[0][i]);
  1031. s->sb_samples[0][j][i] = v;
  1032. v = l1_unscale(n, mant, scale_factors[1][i]);
  1033. s->sb_samples[1][j][i] = v;
  1034. } else {
  1035. s->sb_samples[0][j][i] = 0;
  1036. s->sb_samples[1][j][i] = 0;
  1037. }
  1038. }
  1039. }
  1040. return 12;
  1041. }
  1042. static int mp_decode_layer2(MPADecodeContext *s)
  1043. {
  1044. int sblimit; /* number of used subbands */
  1045. const unsigned char *alloc_table;
  1046. int table, bit_alloc_bits, i, j, ch, bound, v;
  1047. unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT];
  1048. unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT];
  1049. unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3], *sf;
  1050. int scale, qindex, bits, steps, k, l, m, b;
  1051. /* select decoding table */
  1052. table = ff_mpa_l2_select_table(s->bit_rate / 1000, s->nb_channels,
  1053. s->sample_rate, s->lsf);
  1054. sblimit = ff_mpa_sblimit_table[table];
  1055. alloc_table = ff_mpa_alloc_tables[table];
  1056. if (s->mode == MPA_JSTEREO)
  1057. bound = (s->mode_ext + 1) * 4;
  1058. else
  1059. bound = sblimit;
  1060. dprintf(s->avctx, "bound=%d sblimit=%d\n", bound, sblimit);
  1061. /* sanity check */
  1062. if( bound > sblimit ) bound = sblimit;
  1063. /* parse bit allocation */
  1064. j = 0;
  1065. for(i=0;i<bound;i++) {
  1066. bit_alloc_bits = alloc_table[j];
  1067. for(ch=0;ch<s->nb_channels;ch++) {
  1068. bit_alloc[ch][i] = get_bits(&s->gb, bit_alloc_bits);
  1069. }
  1070. j += 1 << bit_alloc_bits;
  1071. }
  1072. for(i=bound;i<sblimit;i++) {
  1073. bit_alloc_bits = alloc_table[j];
  1074. v = get_bits(&s->gb, bit_alloc_bits);
  1075. bit_alloc[0][i] = v;
  1076. bit_alloc[1][i] = v;
  1077. j += 1 << bit_alloc_bits;
  1078. }
  1079. /* scale codes */
  1080. for(i=0;i<sblimit;i++) {
  1081. for(ch=0;ch<s->nb_channels;ch++) {
  1082. if (bit_alloc[ch][i])
  1083. scale_code[ch][i] = get_bits(&s->gb, 2);
  1084. }
  1085. }
  1086. /* scale factors */
  1087. for(i=0;i<sblimit;i++) {
  1088. for(ch=0;ch<s->nb_channels;ch++) {
  1089. if (bit_alloc[ch][i]) {
  1090. sf = scale_factors[ch][i];
  1091. switch(scale_code[ch][i]) {
  1092. default:
  1093. case 0:
  1094. sf[0] = get_bits(&s->gb, 6);
  1095. sf[1] = get_bits(&s->gb, 6);
  1096. sf[2] = get_bits(&s->gb, 6);
  1097. break;
  1098. case 2:
  1099. sf[0] = get_bits(&s->gb, 6);
  1100. sf[1] = sf[0];
  1101. sf[2] = sf[0];
  1102. break;
  1103. case 1:
  1104. sf[0] = get_bits(&s->gb, 6);
  1105. sf[2] = get_bits(&s->gb, 6);
  1106. sf[1] = sf[0];
  1107. break;
  1108. case 3:
  1109. sf[0] = get_bits(&s->gb, 6);
  1110. sf[2] = get_bits(&s->gb, 6);
  1111. sf[1] = sf[2];
  1112. break;
  1113. }
  1114. }
  1115. }
  1116. }
  1117. /* samples */
  1118. for(k=0;k<3;k++) {
  1119. for(l=0;l<12;l+=3) {
  1120. j = 0;
  1121. for(i=0;i<bound;i++) {
  1122. bit_alloc_bits = alloc_table[j];
  1123. for(ch=0;ch<s->nb_channels;ch++) {
  1124. b = bit_alloc[ch][i];
  1125. if (b) {
  1126. scale = scale_factors[ch][i][k];
  1127. qindex = alloc_table[j+b];
  1128. bits = ff_mpa_quant_bits[qindex];
  1129. if (bits < 0) {
  1130. int v2;
  1131. /* 3 values at the same time */
  1132. v = get_bits(&s->gb, -bits);
  1133. v2 = division_tabs[qindex][v];
  1134. steps = ff_mpa_quant_steps[qindex];
  1135. s->sb_samples[ch][k * 12 + l + 0][i] =
  1136. l2_unscale_group(steps, v2 & 15, scale);
  1137. s->sb_samples[ch][k * 12 + l + 1][i] =
  1138. l2_unscale_group(steps, (v2 >> 4) & 15, scale);
  1139. s->sb_samples[ch][k * 12 + l + 2][i] =
  1140. l2_unscale_group(steps, v2 >> 8 , scale);
  1141. } else {
  1142. for(m=0;m<3;m++) {
  1143. v = get_bits(&s->gb, bits);
  1144. v = l1_unscale(bits - 1, v, scale);
  1145. s->sb_samples[ch][k * 12 + l + m][i] = v;
  1146. }
  1147. }
  1148. } else {
  1149. s->sb_samples[ch][k * 12 + l + 0][i] = 0;
  1150. s->sb_samples[ch][k * 12 + l + 1][i] = 0;
  1151. s->sb_samples[ch][k * 12 + l + 2][i] = 0;
  1152. }
  1153. }
  1154. /* next subband in alloc table */
  1155. j += 1 << bit_alloc_bits;
  1156. }
  1157. /* XXX: find a way to avoid this duplication of code */
  1158. for(i=bound;i<sblimit;i++) {
  1159. bit_alloc_bits = alloc_table[j];
  1160. b = bit_alloc[0][i];
  1161. if (b) {
  1162. int mant, scale0, scale1;
  1163. scale0 = scale_factors[0][i][k];
  1164. scale1 = scale_factors[1][i][k];
  1165. qindex = alloc_table[j+b];
  1166. bits = ff_mpa_quant_bits[qindex];
  1167. if (bits < 0) {
  1168. /* 3 values at the same time */
  1169. v = get_bits(&s->gb, -bits);
  1170. steps = ff_mpa_quant_steps[qindex];
  1171. mant = v % steps;
  1172. v = v / steps;
  1173. s->sb_samples[0][k * 12 + l + 0][i] =
  1174. l2_unscale_group(steps, mant, scale0);
  1175. s->sb_samples[1][k * 12 + l + 0][i] =
  1176. l2_unscale_group(steps, mant, scale1);
  1177. mant = v % steps;
  1178. v = v / steps;
  1179. s->sb_samples[0][k * 12 + l + 1][i] =
  1180. l2_unscale_group(steps, mant, scale0);
  1181. s->sb_samples[1][k * 12 + l + 1][i] =
  1182. l2_unscale_group(steps, mant, scale1);
  1183. s->sb_samples[0][k * 12 + l + 2][i] =
  1184. l2_unscale_group(steps, v, scale0);
  1185. s->sb_samples[1][k * 12 + l + 2][i] =
  1186. l2_unscale_group(steps, v, scale1);
  1187. } else {
  1188. for(m=0;m<3;m++) {
  1189. mant = get_bits(&s->gb, bits);
  1190. s->sb_samples[0][k * 12 + l + m][i] =
  1191. l1_unscale(bits - 1, mant, scale0);
  1192. s->sb_samples[1][k * 12 + l + m][i] =
  1193. l1_unscale(bits - 1, mant, scale1);
  1194. }
  1195. }
  1196. } else {
  1197. s->sb_samples[0][k * 12 + l + 0][i] = 0;
  1198. s->sb_samples[0][k * 12 + l + 1][i] = 0;
  1199. s->sb_samples[0][k * 12 + l + 2][i] = 0;
  1200. s->sb_samples[1][k * 12 + l + 0][i] = 0;
  1201. s->sb_samples[1][k * 12 + l + 1][i] = 0;
  1202. s->sb_samples[1][k * 12 + l + 2][i] = 0;
  1203. }
  1204. /* next subband in alloc table */
  1205. j += 1 << bit_alloc_bits;
  1206. }
  1207. /* fill remaining samples to zero */
  1208. for(i=sblimit;i<SBLIMIT;i++) {
  1209. for(ch=0;ch<s->nb_channels;ch++) {
  1210. s->sb_samples[ch][k * 12 + l + 0][i] = 0;
  1211. s->sb_samples[ch][k * 12 + l + 1][i] = 0;
  1212. s->sb_samples[ch][k * 12 + l + 2][i] = 0;
  1213. }
  1214. }
  1215. }
  1216. }
  1217. return 3 * 12;
  1218. }
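  /* extract the least significant base-n digit of sf into dst and divide sf by n (n = 3..6), using multiplications instead of integer division */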
  1219. #define SPLIT(dst,sf,n)\
  1220. if(n==3){\
  1221. int m= (sf*171)>>9;\
  1222. dst= sf - 3*m;\
  1223. sf=m;\
  1224. }else if(n==4){\
  1225. dst= sf&3;\
  1226. sf>>=2;\
  1227. }else if(n==5){\
  1228. int m= (sf*205)>>10;\
  1229. dst= sf - 5*m;\
  1230. sf=m;\
  1231. }else if(n==6){\
  1232. int m= (sf*171)>>10;\
  1233. dst= sf - 6*m;\
  1234. sf=m;\
  1235. }else{\
  1236. dst=0;\
  1237. }
  1238. static av_always_inline void lsf_sf_expand(int *slen,
  1239. int sf, int n1, int n2, int n3)
  1240. {
  1241. SPLIT(slen[3], sf, n3)
  1242. SPLIT(slen[2], sf, n2)
  1243. SPLIT(slen[1], sf, n1)
  1244. slen[0] = sf;
  1245. }
  1246. static void exponents_from_scale_factors(MPADecodeContext *s,
  1247. GranuleDef *g,
  1248. int16_t *exponents)
  1249. {
  1250. const uint8_t *bstab, *pretab;
  1251. int len, i, j, k, l, v0, shift, gain, gains[3];
  1252. int16_t *exp_ptr;
  1253. exp_ptr = exponents;
  1254. gain = g->global_gain - 210;
  1255. shift = g->scalefac_scale + 1;
  1256. bstab = band_size_long[s->sample_rate_index];
  1257. pretab = mpa_pretab[g->preflag];
  1258. for(i=0;i<g->long_end;i++) {
  1259. v0 = gain - ((g->scale_factors[i] + pretab[i]) << shift) + 400;
  1260. len = bstab[i];
  1261. for(j=len;j>0;j--)
  1262. *exp_ptr++ = v0;
  1263. }
  1264. if (g->short_start < 13) {
  1265. bstab = band_size_short[s->sample_rate_index];
  1266. gains[0] = gain - (g->subblock_gain[0] << 3);
  1267. gains[1] = gain - (g->subblock_gain[1] << 3);
  1268. gains[2] = gain - (g->subblock_gain[2] << 3);
  1269. k = g->long_end;
  1270. for(i=g->short_start;i<13;i++) {
  1271. len = bstab[i];
  1272. for(l=0;l<3;l++) {
  1273. v0 = gains[l] - (g->scale_factors[k++] << shift) + 400;
  1274. for(j=len;j>0;j--)
  1275. *exp_ptr++ = v0;
  1276. }
  1277. }
  1278. }
  1279. }
  1280. /* handle n = 0 too */
  1281. static inline int get_bitsz(GetBitContext *s, int n)
  1282. {
  1283. if (n == 0)
  1284. return 0;
  1285. else
  1286. return get_bits(s, n);
  1287. }
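  /* once the bit reservoir data (previous frames) is exhausted, continue reading from the current frame's bitstream saved in in_gb */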
  1288. static void switch_buffer(MPADecodeContext *s, int *pos, int *end_pos, int *end_pos2){
  1289. if(s->in_gb.buffer && *pos >= s->gb.size_in_bits){
  1290. s->gb= s->in_gb;
  1291. s->in_gb.buffer=NULL;
  1292. assert((get_bits_count(&s->gb) & 7) == 0);
  1293. skip_bits_long(&s->gb, *pos - *end_pos);
  1294. *end_pos2=
  1295. *end_pos= *end_pos2 + get_bits_count(&s->gb) - *pos;
  1296. *pos= get_bits_count(&s->gb);
  1297. }
  1298. }
  1299. /* The following is optimized code for
  1300. INTFLOAT v = *src
  1301. if(get_bits1(&s->gb))
  1302. v = -v;
  1303. *dst = v;
  1304. */
  1305. #if CONFIG_FLOAT
  1306. #define READ_FLIP_SIGN(dst,src)\
  1307. v = AV_RN32A(src) ^ (get_bits1(&s->gb)<<31);\
  1308. AV_WN32A(dst, v);
  1309. #else
  1310. #define READ_FLIP_SIGN(dst,src)\
  1311. v= -get_bits1(&s->gb);\
  1312. *(dst) = (*(src) ^ v) - v;
  1313. #endif
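  /* decode the Huffman coded spectral values of one granule (big_values pairs, then count1 quadruples) into g->sb_hybrid */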
  1314. static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
  1315. int16_t *exponents, int end_pos2)
  1316. {
  1317. int s_index;
  1318. int i;
  1319. int last_pos, bits_left;
  1320. VLC *vlc;
  1321. int end_pos= FFMIN(end_pos2, s->gb.size_in_bits);
  1322. /* low frequencies (called big values) */
  1323. s_index = 0;
  1324. for(i=0;i<3;i++) {
  1325. int j, k, l, linbits;
  1326. j = g->region_size[i];
  1327. if (j == 0)
  1328. continue;
  1329. /* select vlc table */
  1330. k = g->table_select[i];
  1331. l = mpa_huff_data[k][0];
  1332. linbits = mpa_huff_data[k][1];
  1333. vlc = &huff_vlc[l];
  1334. if(!l){
  1335. memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*2*j);
  1336. s_index += 2*j;
  1337. continue;
  1338. }
  1339. /* read huffcode and compute each couple */
  1340. for(;j>0;j--) {
  1341. int exponent, x, y;
  1342. int v;
  1343. int pos= get_bits_count(&s->gb);
  1344. if (pos >= end_pos){
  1345. // av_log(NULL, AV_LOG_ERROR, "pos: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
  1346. switch_buffer(s, &pos, &end_pos, &end_pos2);
  1347. // av_log(NULL, AV_LOG_ERROR, "new pos: %d %d\n", pos, end_pos);
  1348. if(pos >= end_pos)
  1349. break;
  1350. }
  1351. y = get_vlc2(&s->gb, vlc->table, 7, 3);
  1352. if(!y){
  1353. g->sb_hybrid[s_index ] =
  1354. g->sb_hybrid[s_index+1] = 0;
  1355. s_index += 2;
  1356. continue;
  1357. }
  1358. exponent= exponents[s_index];
  1359. dprintf(s->avctx, "region=%d n=%d x=%d y=%d exp=%d\n",
  1360. i, g->region_size[i] - j, x, y, exponent);
  1361. if(y&16){
  1362. x = y >> 5;
  1363. y = y & 0x0f;
  1364. if (x < 15){
  1365. READ_FLIP_SIGN(g->sb_hybrid+s_index, RENAME(expval_table)[ exponent ]+x)
  1366. }else{
  1367. x += get_bitsz(&s->gb, linbits);
  1368. v = l3_unscale(x, exponent);
  1369. if (get_bits1(&s->gb))
  1370. v = -v;
  1371. g->sb_hybrid[s_index] = v;
  1372. }
  1373. if (y < 15){
  1374. READ_FLIP_SIGN(g->sb_hybrid+s_index+1, RENAME(expval_table)[ exponent ]+y)
  1375. }else{
  1376. y += get_bitsz(&s->gb, linbits);
  1377. v = l3_unscale(y, exponent);
  1378. if (get_bits1(&s->gb))
  1379. v = -v;
  1380. g->sb_hybrid[s_index+1] = v;
  1381. }
  1382. }else{
  1383. x = y >> 5;
  1384. y = y & 0x0f;
  1385. x += y;
  1386. if (x < 15){
  1387. READ_FLIP_SIGN(g->sb_hybrid+s_index+!!y, RENAME(expval_table)[ exponent ]+x)
  1388. }else{
  1389. x += get_bitsz(&s->gb, linbits);
  1390. v = l3_unscale(x, exponent);
  1391. if (get_bits1(&s->gb))
  1392. v = -v;
  1393. g->sb_hybrid[s_index+!!y] = v;
  1394. }
  1395. g->sb_hybrid[s_index+ !y] = 0;
  1396. }
  1397. s_index+=2;
  1398. }
  1399. }
  1400. /* high frequencies */
  1401. vlc = &huff_quad_vlc[g->count1table_select];
  1402. last_pos=0;
  1403. while (s_index <= 572) {
  1404. int pos, code;
  1405. pos = get_bits_count(&s->gb);
  1406. if (pos >= end_pos) {
  1407. if (pos > end_pos2 && last_pos){
  1408. /* some encoders generate an incorrect size for this
  1409. part. We must go back into the data */
  1410. s_index -= 4;
  1411. skip_bits_long(&s->gb, last_pos - pos);
  1412. av_log(s->avctx, AV_LOG_INFO, "overread, skip %d enddists: %d %d\n", last_pos - pos, end_pos-pos, end_pos2-pos);
  1413. if(s->error_recognition >= FF_ER_COMPLIANT)
  1414. s_index=0;
  1415. break;
  1416. }
  1417. // av_log(NULL, AV_LOG_ERROR, "pos2: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
  1418. switch_buffer(s, &pos, &end_pos, &end_pos2);
  1419. // av_log(NULL, AV_LOG_ERROR, "new pos2: %d %d %d\n", pos, end_pos, s_index);
  1420. if(pos >= end_pos)
  1421. break;
  1422. }
  1423. last_pos= pos;
  1424. code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1);
  1425. dprintf(s->avctx, "t=%d code=%d\n", g->count1table_select, code);
  1426. g->sb_hybrid[s_index+0]=
  1427. g->sb_hybrid[s_index+1]=
  1428. g->sb_hybrid[s_index+2]=
  1429. g->sb_hybrid[s_index+3]= 0;
  1430. while(code){
  1431. static const int idxtab[16]={3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0};
  1432. int v;
  1433. int pos= s_index+idxtab[code];
  1434. code ^= 8>>idxtab[code];
  1435. READ_FLIP_SIGN(g->sb_hybrid+pos, RENAME(exp_table)+exponents[pos])
  1436. }
  1437. s_index+=4;
  1438. }
  1439. /* skip extension bits */
  1440. bits_left = end_pos2 - get_bits_count(&s->gb);
  1441. //av_log(NULL, AV_LOG_ERROR, "left:%d buf:%p\n", bits_left, s->in_gb.buffer);
  1442. if (bits_left < 0 && s->error_recognition >= FF_ER_COMPLIANT) {
  1443. av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
  1444. s_index=0;
  1445. }else if(bits_left > 0 && s->error_recognition >= FF_ER_AGGRESSIVE){
  1446. av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
  1447. s_index=0;
  1448. }
  1449. memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*(576 - s_index));
  1450. skip_bits_long(&s->gb, bits_left);
  1451. i= get_bits_count(&s->gb);
  1452. switch_buffer(s, &i, &end_pos, &end_pos2);
  1453. return 0;
  1454. }
  1455. /* Reorder short blocks from bitstream order to interleaved order. It
  1456. would be faster to do it in parsing, but the code would be far more
  1457. complicated */
  1458. static void reorder_block(MPADecodeContext *s, GranuleDef *g)
  1459. {
  1460. int i, j, len;
  1461. INTFLOAT *ptr, *dst, *ptr1;
  1462. INTFLOAT tmp[576];
  1463. if (g->block_type != 2)
  1464. return;
  1465. if (g->switch_point) {
  1466. if (s->sample_rate_index != 8) {
  1467. ptr = g->sb_hybrid + 36;
  1468. } else {
  1469. ptr = g->sb_hybrid + 48;
  1470. }
  1471. } else {
  1472. ptr = g->sb_hybrid;
  1473. }
  1474. for(i=g->short_start;i<13;i++) {
  1475. len = band_size_short[s->sample_rate_index][i];
  1476. ptr1 = ptr;
  1477. dst = tmp;
  1478. for(j=len;j>0;j--) {
  1479. *dst++ = ptr[0*len];
  1480. *dst++ = ptr[1*len];
  1481. *dst++ = ptr[2*len];
  1482. ptr++;
  1483. }
  1484. ptr+=2*len;
  1485. memcpy(ptr1, tmp, len * 3 * sizeof(*ptr1));
  1486. }
  1487. }
  1488. #define ISQRT2 FIXR(0.70710678118654752440)
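  /* joint stereo processing for one granule: intensity stereo on the upper bands and/or mid/side decoding (g0 = channel 0, g1 = channel 1) */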
  1489. static void compute_stereo(MPADecodeContext *s,
  1490. GranuleDef *g0, GranuleDef *g1)
  1491. {
  1492. int i, j, k, l;
  1493. int sf_max, sf, len, non_zero_found;
  1494. INTFLOAT (*is_tab)[16], *tab0, *tab1, tmp0, tmp1, v1, v2;
  1495. int non_zero_found_short[3];
  1496. /* intensity stereo */
  1497. if (s->mode_ext & MODE_EXT_I_STEREO) {
  1498. if (!s->lsf) {
  1499. is_tab = is_table;
  1500. sf_max = 7;
  1501. } else {
  1502. is_tab = is_table_lsf[g1->scalefac_compress & 1];
  1503. sf_max = 16;
  1504. }
  1505. tab0 = g0->sb_hybrid + 576;
  1506. tab1 = g1->sb_hybrid + 576;
  1507. non_zero_found_short[0] = 0;
  1508. non_zero_found_short[1] = 0;
  1509. non_zero_found_short[2] = 0;
  1510. k = (13 - g1->short_start) * 3 + g1->long_end - 3;
  1511. for(i = 12;i >= g1->short_start;i--) {
  1512. /* for last band, use previous scale factor */
  1513. if (i != 11)
  1514. k -= 3;
  1515. len = band_size_short[s->sample_rate_index][i];
  1516. for(l=2;l>=0;l--) {
  1517. tab0 -= len;
  1518. tab1 -= len;
  1519. if (!non_zero_found_short[l]) {
  1520. /* test if non zero band. if so, stop doing i-stereo */
  1521. for(j=0;j<len;j++) {
  1522. if (tab1[j] != 0) {
  1523. non_zero_found_short[l] = 1;
  1524. goto found1;
  1525. }
  1526. }
  1527. sf = g1->scale_factors[k + l];
  1528. if (sf >= sf_max)
  1529. goto found1;
  1530. v1 = is_tab[0][sf];
  1531. v2 = is_tab[1][sf];
  1532. for(j=0;j<len;j++) {
  1533. tmp0 = tab0[j];
  1534. tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
  1535. tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
  1536. }
  1537. } else {
  1538. found1:
  1539. if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1540. /* lower part of the spectrum : do ms stereo
  1541. if enabled */
  1542. for(j=0;j<len;j++) {
  1543. tmp0 = tab0[j];
  1544. tmp1 = tab1[j];
  1545. tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
  1546. tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
  1547. }
  1548. }
  1549. }
  1550. }
  1551. }
  1552. non_zero_found = non_zero_found_short[0] |
  1553. non_zero_found_short[1] |
  1554. non_zero_found_short[2];
  1555. for(i = g1->long_end - 1;i >= 0;i--) {
  1556. len = band_size_long[s->sample_rate_index][i];
  1557. tab0 -= len;
  1558. tab1 -= len;
  1559. /* test if non zero band. if so, stop doing i-stereo */
  1560. if (!non_zero_found) {
  1561. for(j=0;j<len;j++) {
  1562. if (tab1[j] != 0) {
  1563. non_zero_found = 1;
  1564. goto found2;
  1565. }
  1566. }
  1567. /* for last band, use previous scale factor */
  1568. k = (i == 21) ? 20 : i;
  1569. sf = g1->scale_factors[k];
  1570. if (sf >= sf_max)
  1571. goto found2;
  1572. v1 = is_tab[0][sf];
  1573. v2 = is_tab[1][sf];
  1574. for(j=0;j<len;j++) {
  1575. tmp0 = tab0[j];
  1576. tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
  1577. tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
  1578. }
  1579. } else {
  1580. found2:
  1581. if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1582. /* lower part of the spectrum : do ms stereo
  1583. if enabled */
  1584. for(j=0;j<len;j++) {
  1585. tmp0 = tab0[j];
  1586. tmp1 = tab1[j];
  1587. tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
  1588. tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
  1589. }
  1590. }
  1591. }
  1592. }
  1593. } else if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1594. /* ms stereo ONLY */
  1595. /* NOTE: the 1/sqrt(2) normalization factor is included in the
  1596. global gain */
  1597. tab0 = g0->sb_hybrid;
  1598. tab1 = g1->sb_hybrid;
  1599. for(i=0;i<576;i++) {
  1600. tmp0 = tab0[i];
  1601. tmp1 = tab1[i];
  1602. tab0[i] = tmp0 + tmp1;
  1603. tab1[i] = tmp0 - tmp1;
  1604. }
  1605. }
  1606. }
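  /* fixed-point alias reduction: butterfly across each subband boundary using the csa_table coefficients */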
  1607. static void compute_antialias_integer(MPADecodeContext *s,
  1608. GranuleDef *g)
  1609. {
  1610. int32_t *ptr, *csa;
  1611. int n, i;
  1612. /* we antialias only "long" bands */
  1613. if (g->block_type == 2) {
  1614. if (!g->switch_point)
  1615. return;
  1616. /* XXX: check this for 8000Hz case */
  1617. n = 1;
  1618. } else {
  1619. n = SBLIMIT - 1;
  1620. }
  1621. ptr = g->sb_hybrid + 18;
  1622. for(i = n;i > 0;i--) {
  1623. int tmp0, tmp1, tmp2;
  1624. csa = &csa_table[0][0];
  1625. #define INT_AA(j) \
  1626. tmp0 = ptr[-1-j];\
  1627. tmp1 = ptr[ j];\
  1628. tmp2= MULH(tmp0 + tmp1, csa[0+4*j]);\
  1629. ptr[-1-j] = 4*(tmp2 - MULH(tmp1, csa[2+4*j]));\
  1630. ptr[ j] = 4*(tmp2 + MULH(tmp0, csa[3+4*j]));
  1631. INT_AA(0)
  1632. INT_AA(1)
  1633. INT_AA(2)
  1634. INT_AA(3)
  1635. INT_AA(4)
  1636. INT_AA(5)
  1637. INT_AA(6)
  1638. INT_AA(7)
  1639. ptr += 18;
  1640. }
  1641. }
  1642. static void compute_antialias_float(MPADecodeContext *s,
  1643. GranuleDef *g)
  1644. {
  1645. float *ptr;
  1646. int n, i;
  1647. /* we antialias only "long" bands */
  1648. if (g->block_type == 2) {
  1649. if (!g->switch_point)
  1650. return;
  1651. /* XXX: check this for 8000Hz case */
  1652. n = 1;
  1653. } else {
  1654. n = SBLIMIT - 1;
  1655. }
  1656. ptr = g->sb_hybrid + 18;
  1657. for(i = n;i > 0;i--) {
  1658. float tmp0, tmp1;
  1659. float *csa = &csa_table_float[0][0];
  1660. #define FLOAT_AA(j)\
  1661. tmp0= ptr[-1-j];\
  1662. tmp1= ptr[ j];\
  1663. ptr[-1-j] = tmp0 * csa[0+4*j] - tmp1 * csa[1+4*j];\
  1664. ptr[ j] = tmp0 * csa[1+4*j] + tmp1 * csa[0+4*j];
  1665. FLOAT_AA(0)
  1666. FLOAT_AA(1)
  1667. FLOAT_AA(2)
  1668. FLOAT_AA(3)
  1669. FLOAT_AA(4)
  1670. FLOAT_AA(5)
  1671. FLOAT_AA(6)
  1672. FLOAT_AA(7)
  1673. ptr += 18;
  1674. }
  1675. }
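  /* hybrid filterbank synthesis: per-subband IMDCT (36-point for long blocks, 3x12-point for short blocks), windowing and overlap-add with mdct_buf into sb_samples */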
  1676. static void compute_imdct(MPADecodeContext *s,
  1677. GranuleDef *g,
  1678. INTFLOAT *sb_samples,
  1679. INTFLOAT *mdct_buf)
  1680. {
  1681. INTFLOAT *win, *win1, *out_ptr, *ptr, *buf, *ptr1;
  1682. INTFLOAT out2[12];
  1683. int i, j, mdct_long_end, sblimit;
  1684. /* find last non zero block */
  1685. ptr = g->sb_hybrid + 576;
  1686. ptr1 = g->sb_hybrid + 2 * 18;
  1687. while (ptr >= ptr1) {
  1688. int32_t *p;
  1689. ptr -= 6;
  1690. p= (int32_t*)ptr;
  1691. if(p[0] | p[1] | p[2] | p[3] | p[4] | p[5])
  1692. break;
  1693. }
  1694. sblimit = ((ptr - g->sb_hybrid) / 18) + 1;
  1695. if (g->block_type == 2) {
  1696. /* XXX: check for 8000 Hz */
  1697. if (g->switch_point)
  1698. mdct_long_end = 2;
  1699. else
  1700. mdct_long_end = 0;
  1701. } else {
  1702. mdct_long_end = sblimit;
  1703. }
  1704. buf = mdct_buf;
  1705. ptr = g->sb_hybrid;
  1706. for(j=0;j<mdct_long_end;j++) {
  1707. /* apply window & overlap with previous buffer */
  1708. out_ptr = sb_samples + j;
  1709. /* select window */
  1710. if (g->switch_point && j < 2)
  1711. win1 = mdct_win[0];
  1712. else
  1713. win1 = mdct_win[g->block_type];
  1714. /* select frequency inversion */
  1715. win = win1 + ((4 * 36) & -(j & 1));
  1716. imdct36(out_ptr, buf, ptr, win);
  1717. out_ptr += 18*SBLIMIT;
  1718. ptr += 18;
  1719. buf += 18;
  1720. }
  1721. for(j=mdct_long_end;j<sblimit;j++) {
  1722. /* select frequency inversion */
  1723. win = mdct_win[2] + ((4 * 36) & -(j & 1));
  1724. out_ptr = sb_samples + j;
  1725. for(i=0; i<6; i++){
  1726. *out_ptr = buf[i];
  1727. out_ptr += SBLIMIT;
  1728. }
  1729. imdct12(out2, ptr + 0);
  1730. for(i=0;i<6;i++) {
  1731. *out_ptr = MULH3(out2[i ], win[i ], 1) + buf[i + 6*1];
  1732. buf[i + 6*2] = MULH3(out2[i + 6], win[i + 6], 1);
  1733. out_ptr += SBLIMIT;
  1734. }
  1735. imdct12(out2, ptr + 1);
  1736. for(i=0;i<6;i++) {
  1737. *out_ptr = MULH3(out2[i ], win[i ], 1) + buf[i + 6*2];
  1738. buf[i + 6*0] = MULH3(out2[i + 6], win[i + 6], 1);
  1739. out_ptr += SBLIMIT;
  1740. }
  1741. imdct12(out2, ptr + 2);
  1742. for(i=0;i<6;i++) {
  1743. buf[i + 6*0] = MULH3(out2[i ], win[i ], 1) + buf[i + 6*0];
  1744. buf[i + 6*1] = MULH3(out2[i + 6], win[i + 6], 1);
  1745. buf[i + 6*2] = 0;
  1746. }
  1747. ptr += 18;
  1748. buf += 18;
  1749. }
  1750. /* zero bands */
  1751. for(j=sblimit;j<SBLIMIT;j++) {
  1752. /* overlap */
  1753. out_ptr = sb_samples + j;
  1754. for(i=0;i<18;i++) {
  1755. *out_ptr = buf[i];
  1756. buf[i] = 0;
  1757. out_ptr += SBLIMIT;
  1758. }
  1759. buf += 18;
  1760. }
  1761. }
  1762. /* main layer3 decoding function */
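/* Flow: read the side information for 2 granules (MPEG-1) or 1 granule
 * (LSF / MPEG-2), locate the granule's main data through the bit reservoir
 * (main_data_begin), then for every granule and channel decode scale factors
 * and Huffman-coded spectral data, and finally run the joint-stereo
 * processing, short-block reordering, alias reduction and hybrid IMDCT.
 * Returns the number of 32-sample rows written to sb_samples (18 per
 * granule), or -1 on error. */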
  1763. static int mp_decode_layer3(MPADecodeContext *s)
  1764. {
  1765. int nb_granules, main_data_begin, private_bits;
  1766. int gr, ch, blocksplit_flag, i, j, k, n, bits_pos;
  1767. GranuleDef *g;
  1768. int16_t exponents[576]; //FIXME try INTFLOAT
  1769. /* read side info */
  1770. if (s->lsf) {
  1771. main_data_begin = get_bits(&s->gb, 8);
  1772. private_bits = get_bits(&s->gb, s->nb_channels);
  1773. nb_granules = 1;
  1774. } else {
  1775. main_data_begin = get_bits(&s->gb, 9);
  1776. if (s->nb_channels == 2)
  1777. private_bits = get_bits(&s->gb, 3);
  1778. else
  1779. private_bits = get_bits(&s->gb, 5);
  1780. nb_granules = 2;
  1781. for(ch=0;ch<s->nb_channels;ch++) {
  1782. s->granules[ch][0].scfsi = 0;/* all scale factors are transmitted */
  1783. s->granules[ch][1].scfsi = get_bits(&s->gb, 4);
  1784. }
  1785. }
  1786. for(gr=0;gr<nb_granules;gr++) {
  1787. for(ch=0;ch<s->nb_channels;ch++) {
  1788. dprintf(s->avctx, "gr=%d ch=%d: side_info\n", gr, ch);
  1789. g = &s->granules[ch][gr];
  1790. g->part2_3_length = get_bits(&s->gb, 12);
  1791. g->big_values = get_bits(&s->gb, 9);
  1792. if(g->big_values > 288){
  1793. av_log(s->avctx, AV_LOG_ERROR, "big_values too big\n");
  1794. return -1;
  1795. }
  1796. g->global_gain = get_bits(&s->gb, 8);
1797. /* if MS stereo (and not intensity stereo) is selected, precompute
1798. the 1/sqrt(2) renormalization factor */
  1799. if ((s->mode_ext & (MODE_EXT_MS_STEREO | MODE_EXT_I_STEREO)) ==
  1800. MODE_EXT_MS_STEREO)
  1801. g->global_gain -= 2;
  1802. if (s->lsf)
  1803. g->scalefac_compress = get_bits(&s->gb, 9);
  1804. else
  1805. g->scalefac_compress = get_bits(&s->gb, 4);
  1806. blocksplit_flag = get_bits1(&s->gb);
  1807. if (blocksplit_flag) {
  1808. g->block_type = get_bits(&s->gb, 2);
  1809. if (g->block_type == 0){
  1810. av_log(s->avctx, AV_LOG_ERROR, "invalid block type\n");
  1811. return -1;
  1812. }
  1813. g->switch_point = get_bits1(&s->gb);
  1814. for(i=0;i<2;i++)
  1815. g->table_select[i] = get_bits(&s->gb, 5);
  1816. for(i=0;i<3;i++)
  1817. g->subblock_gain[i] = get_bits(&s->gb, 3);
  1818. ff_init_short_region(s, g);
  1819. } else {
  1820. int region_address1, region_address2;
  1821. g->block_type = 0;
  1822. g->switch_point = 0;
  1823. for(i=0;i<3;i++)
  1824. g->table_select[i] = get_bits(&s->gb, 5);
  1825. /* compute huffman coded region sizes */
  1826. region_address1 = get_bits(&s->gb, 4);
  1827. region_address2 = get_bits(&s->gb, 3);
  1828. dprintf(s->avctx, "region1=%d region2=%d\n",
  1829. region_address1, region_address2);
  1830. ff_init_long_region(s, g, region_address1, region_address2);
  1831. }
  1832. ff_region_offset2size(g);
  1833. ff_compute_band_indexes(s, g);
  1834. g->preflag = 0;
  1835. if (!s->lsf)
  1836. g->preflag = get_bits1(&s->gb);
  1837. g->scalefac_scale = get_bits1(&s->gb);
  1838. g->count1table_select = get_bits1(&s->gb);
  1839. dprintf(s->avctx, "block_type=%d switch_point=%d\n",
  1840. g->block_type, g->switch_point);
  1841. }
  1842. }
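/* Bit reservoir handling: main_data_begin is a byte offset pointing
 * backwards into the main data of previous frames, which the decoder keeps
 * in last_buf.  The current reader is saved in in_gb, a few bytes
 * (EXTRABYTES) of the current frame are appended to last_buf, and gb is
 * re-pointed at last_buf so reading starts main_data_begin bytes before its
 * end.  If the backstep reaches data that was never buffered,
 * get_bits_count() goes negative and the affected granules are skipped in
 * the loop below.  ADU streams carry no reservoir, hence the adu_mode
 * check. */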
  1843. if (!s->adu_mode) {
  1844. const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3);
  1845. assert((get_bits_count(&s->gb) & 7) == 0);
  1846. /* now we get bits from the main_data_begin offset */
  1847. dprintf(s->avctx, "seekback: %d\n", main_data_begin);
  1848. //av_log(NULL, AV_LOG_ERROR, "backstep:%d, lastbuf:%d\n", main_data_begin, s->last_buf_size);
  1849. memcpy(s->last_buf + s->last_buf_size, ptr, EXTRABYTES);
  1850. s->in_gb= s->gb;
  1851. init_get_bits(&s->gb, s->last_buf, s->last_buf_size*8);
  1852. skip_bits_long(&s->gb, 8*(s->last_buf_size - main_data_begin));
  1853. }
  1854. for(gr=0;gr<nb_granules;gr++) {
  1855. for(ch=0;ch<s->nb_channels;ch++) {
  1856. g = &s->granules[ch][gr];
  1857. if(get_bits_count(&s->gb)<0){
  1858. av_log(s->avctx, AV_LOG_DEBUG, "mdb:%d, lastbuf:%d skipping granule %d\n",
  1859. main_data_begin, s->last_buf_size, gr);
  1860. skip_bits_long(&s->gb, g->part2_3_length);
  1861. memset(g->sb_hybrid, 0, sizeof(g->sb_hybrid));
  1862. if(get_bits_count(&s->gb) >= s->gb.size_in_bits && s->in_gb.buffer){
  1863. skip_bits_long(&s->in_gb, get_bits_count(&s->gb) - s->gb.size_in_bits);
  1864. s->gb= s->in_gb;
  1865. s->in_gb.buffer=NULL;
  1866. }
  1867. continue;
  1868. }
  1869. bits_pos = get_bits_count(&s->gb);
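/* Scale factor decoding.  MPEG-1: the bit widths come from slen_table
 * indexed by scalefac_compress, and the scfsi bits let granule 1 reuse
 * (copy) bands of scale factors from granule 0.  LSF (MPEG-2/2.5):
 * scalefac_compress is expanded by lsf_sf_expand into up to four slen
 * values, with separate ranges for the intensity-stereo channel. */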
  1870. if (!s->lsf) {
  1871. uint8_t *sc;
  1872. int slen, slen1, slen2;
  1873. /* MPEG1 scale factors */
  1874. slen1 = slen_table[0][g->scalefac_compress];
  1875. slen2 = slen_table[1][g->scalefac_compress];
  1876. dprintf(s->avctx, "slen1=%d slen2=%d\n", slen1, slen2);
  1877. if (g->block_type == 2) {
  1878. n = g->switch_point ? 17 : 18;
  1879. j = 0;
  1880. if(slen1){
  1881. for(i=0;i<n;i++)
  1882. g->scale_factors[j++] = get_bits(&s->gb, slen1);
  1883. }else{
  1884. for(i=0;i<n;i++)
  1885. g->scale_factors[j++] = 0;
  1886. }
  1887. if(slen2){
  1888. for(i=0;i<18;i++)
  1889. g->scale_factors[j++] = get_bits(&s->gb, slen2);
  1890. for(i=0;i<3;i++)
  1891. g->scale_factors[j++] = 0;
  1892. }else{
  1893. for(i=0;i<21;i++)
  1894. g->scale_factors[j++] = 0;
  1895. }
  1896. } else {
  1897. sc = s->granules[ch][0].scale_factors;
  1898. j = 0;
  1899. for(k=0;k<4;k++) {
  1900. n = (k == 0 ? 6 : 5);
  1901. if ((g->scfsi & (0x8 >> k)) == 0) {
  1902. slen = (k < 2) ? slen1 : slen2;
  1903. if(slen){
  1904. for(i=0;i<n;i++)
  1905. g->scale_factors[j++] = get_bits(&s->gb, slen);
  1906. }else{
  1907. for(i=0;i<n;i++)
  1908. g->scale_factors[j++] = 0;
  1909. }
  1910. } else {
1911. /* simply copy from the previous granule */
  1912. for(i=0;i<n;i++) {
  1913. g->scale_factors[j] = sc[j];
  1914. j++;
  1915. }
  1916. }
  1917. }
  1918. g->scale_factors[j++] = 0;
  1919. }
  1920. } else {
  1921. int tindex, tindex2, slen[4], sl, sf;
  1922. /* LSF scale factors */
  1923. if (g->block_type == 2) {
  1924. tindex = g->switch_point ? 2 : 1;
  1925. } else {
  1926. tindex = 0;
  1927. }
  1928. sf = g->scalefac_compress;
  1929. if ((s->mode_ext & MODE_EXT_I_STEREO) && ch == 1) {
  1930. /* intensity stereo case */
  1931. sf >>= 1;
  1932. if (sf < 180) {
  1933. lsf_sf_expand(slen, sf, 6, 6, 0);
  1934. tindex2 = 3;
  1935. } else if (sf < 244) {
  1936. lsf_sf_expand(slen, sf - 180, 4, 4, 0);
  1937. tindex2 = 4;
  1938. } else {
  1939. lsf_sf_expand(slen, sf - 244, 3, 0, 0);
  1940. tindex2 = 5;
  1941. }
  1942. } else {
  1943. /* normal case */
  1944. if (sf < 400) {
  1945. lsf_sf_expand(slen, sf, 5, 4, 4);
  1946. tindex2 = 0;
  1947. } else if (sf < 500) {
  1948. lsf_sf_expand(slen, sf - 400, 5, 4, 0);
  1949. tindex2 = 1;
  1950. } else {
  1951. lsf_sf_expand(slen, sf - 500, 3, 0, 0);
  1952. tindex2 = 2;
  1953. g->preflag = 1;
  1954. }
  1955. }
  1956. j = 0;
  1957. for(k=0;k<4;k++) {
  1958. n = lsf_nsf_table[tindex2][tindex][k];
  1959. sl = slen[k];
  1960. if(sl){
  1961. for(i=0;i<n;i++)
  1962. g->scale_factors[j++] = get_bits(&s->gb, sl);
  1963. }else{
  1964. for(i=0;i<n;i++)
  1965. g->scale_factors[j++] = 0;
  1966. }
  1967. }
  1968. /* XXX: should compute exact size */
  1969. for(;j<40;j++)
  1970. g->scale_factors[j] = 0;
  1971. }
  1972. exponents_from_scale_factors(s, g, exponents);
  1973. /* read Huffman coded residue */
  1974. huffman_decode(s, g, exponents, bits_pos + g->part2_3_length);
  1975. } /* ch */
  1976. if (s->nb_channels == 2)
  1977. compute_stereo(s, &s->granules[0][gr], &s->granules[1][gr]);
  1978. for(ch=0;ch<s->nb_channels;ch++) {
  1979. g = &s->granules[ch][gr];
  1980. reorder_block(s, g);
  1981. compute_antialias(s, g);
  1982. compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
  1983. }
  1984. } /* gr */
  1985. if(get_bits_count(&s->gb)<0)
  1986. skip_bits_long(&s->gb, -get_bits_count(&s->gb));
  1987. return nb_granules * 18;
  1988. }
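/* Decode one whole frame: the per-layer decoder fills
 * sb_samples[ch][0..nb_frames-1][0..31] with sub-band samples (12 rows for
 * layer 1, 36 for layer 2, 36 or 18 for layer 3 depending on lsf), layer 3
 * additionally refills last_buf for the bit reservoir, and the polyphase
 * synthesis filterbank then turns each row of 32 sub-band samples into 32
 * interleaved PCM samples per channel.  The return value is the size in
 * bytes of the PCM written to 'samples'. */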
  1989. static int mp_decode_frame(MPADecodeContext *s,
  1990. OUT_INT *samples, const uint8_t *buf, int buf_size)
  1991. {
  1992. int i, nb_frames, ch;
  1993. OUT_INT *samples_ptr;
  1994. init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE)*8);
  1995. /* skip error protection field */
  1996. if (s->error_protection)
  1997. skip_bits(&s->gb, 16);
  1998. dprintf(s->avctx, "frame %d:\n", s->frame_count);
  1999. switch(s->layer) {
  2000. case 1:
  2001. s->avctx->frame_size = 384;
  2002. nb_frames = mp_decode_layer1(s);
  2003. break;
  2004. case 2:
  2005. s->avctx->frame_size = 1152;
  2006. nb_frames = mp_decode_layer2(s);
  2007. break;
  2008. case 3:
2009. s->avctx->frame_size = s->lsf ? 576 : 1152; /* fall through */
  2010. default:
  2011. nb_frames = mp_decode_layer3(s);
  2012. s->last_buf_size=0;
  2013. if(s->in_gb.buffer){
  2014. align_get_bits(&s->gb);
  2015. i= get_bits_left(&s->gb)>>3;
  2016. if(i >= 0 && i <= BACKSTEP_SIZE){
  2017. memmove(s->last_buf, s->gb.buffer + (get_bits_count(&s->gb)>>3), i);
  2018. s->last_buf_size=i;
  2019. }else
  2020. av_log(s->avctx, AV_LOG_ERROR, "invalid old backstep %d\n", i);
  2021. s->gb= s->in_gb;
  2022. s->in_gb.buffer= NULL;
  2023. }
  2024. align_get_bits(&s->gb);
  2025. assert((get_bits_count(&s->gb) & 7) == 0);
  2026. i= get_bits_left(&s->gb)>>3;
  2027. if(i<0 || i > BACKSTEP_SIZE || nb_frames<0){
  2028. if(i<0)
  2029. av_log(s->avctx, AV_LOG_ERROR, "invalid new backstep %d\n", i);
  2030. i= FFMIN(BACKSTEP_SIZE, buf_size - HEADER_SIZE);
  2031. }
  2032. assert(i <= buf_size - HEADER_SIZE && i>= 0);
  2033. memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i);
  2034. s->last_buf_size += i;
  2035. break;
  2036. }
  2037. /* apply the synthesis filter */
  2038. for(ch=0;ch<s->nb_channels;ch++) {
  2039. samples_ptr = samples + ch;
  2040. for(i=0;i<nb_frames;i++) {
  2041. RENAME(ff_mpa_synth_filter)(
  2042. #if CONFIG_FLOAT
  2043. s,
  2044. #endif
  2045. s->synth_buf[ch], &(s->synth_buf_offset[ch]),
  2046. RENAME(ff_mpa_synth_window), &s->dither_state,
  2047. samples_ptr, s->nb_channels,
  2048. s->sb_samples[ch][i]);
  2049. samples_ptr += 32 * s->nb_channels;
  2050. }
  2051. }
  2052. return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels;
  2053. }
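/* Generic entry point for the MP1/MP2/MP3 decoders.  The caller must provide
 * room for at least 1152 * channels output samples; on success *data_size is
 * set to the number of PCM bytes produced and the whole (possibly truncated
 * to frame_size) packet is reported as consumed.  Decode errors inside a
 * syntactically valid frame still consume the packet, with *data_size left
 * at 0. */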
  2054. static int decode_frame(AVCodecContext * avctx,
  2055. void *data, int *data_size,
  2056. AVPacket *avpkt)
  2057. {
  2058. const uint8_t *buf = avpkt->data;
  2059. int buf_size = avpkt->size;
  2060. MPADecodeContext *s = avctx->priv_data;
  2061. uint32_t header;
  2062. int out_size;
  2063. OUT_INT *out_samples = data;
  2064. if(buf_size < HEADER_SIZE)
  2065. return -1;
  2066. header = AV_RB32(buf);
  2067. if(ff_mpa_check_header(header) < 0){
  2068. av_log(avctx, AV_LOG_ERROR, "Header missing\n");
  2069. return -1;
  2070. }
  2071. if (ff_mpegaudio_decode_header((MPADecodeHeader *)s, header) == 1) {
  2072. /* free format: prepare to compute frame size */
  2073. s->frame_size = -1;
  2074. return -1;
  2075. }
  2076. /* update codec info */
  2077. avctx->channels = s->nb_channels;
  2078. avctx->bit_rate = s->bit_rate;
  2079. avctx->sub_id = s->layer;
  2080. if(*data_size < 1152*avctx->channels*sizeof(OUT_INT))
  2081. return -1;
  2082. *data_size = 0;
  2083. if(s->frame_size<=0 || s->frame_size > buf_size){
  2084. av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
  2085. return -1;
  2086. }else if(s->frame_size < buf_size){
  2087. av_log(avctx, AV_LOG_ERROR, "incorrect frame size\n");
  2088. buf_size= s->frame_size;
  2089. }
  2090. out_size = mp_decode_frame(s, out_samples, buf, buf_size);
  2091. if(out_size>=0){
  2092. *data_size = out_size;
  2093. avctx->sample_rate = s->sample_rate;
  2094. //FIXME maybe move the other codec info stuff from above here too
  2095. }else
  2096. av_log(avctx, AV_LOG_DEBUG, "Error while decoding MPEG audio frame.\n"); //FIXME return -1 / but also return the number of bytes consumed
  2097. s->frame_size = 0;
  2098. return buf_size;
  2099. }
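/* Called on seek/flush: clear the synthesis filter history and drop any
 * buffered bit-reservoir data so stale samples cannot leak into the next
 * decoded frame. */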
  2100. static void flush(AVCodecContext *avctx){
  2101. MPADecodeContext *s = avctx->priv_data;
  2102. memset(s->synth_buf, 0, sizeof(s->synth_buf));
  2103. s->last_buf_size= 0;
  2104. }
  2105. #if CONFIG_MP3ADU_DECODER
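/* ADU (Application Data Unit) variant: every unit is self-contained, i.e.
 * the main data follows the side info directly and no bit reservoir is used
 * (adu_mode, presumably set at init time for this codec, makes
 * mp_decode_layer3 skip the back-pointer handling).  ADU headers need their
 * sync word restored, so it is forced back in with 0xffe00000 before the
 * header check. */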
  2106. static int decode_frame_adu(AVCodecContext * avctx,
  2107. void *data, int *data_size,
  2108. AVPacket *avpkt)
  2109. {
  2110. const uint8_t *buf = avpkt->data;
  2111. int buf_size = avpkt->size;
  2112. MPADecodeContext *s = avctx->priv_data;
  2113. uint32_t header;
  2114. int len, out_size;
  2115. OUT_INT *out_samples = data;
  2116. len = buf_size;
2117. // Discard frames that are too short
  2118. if (buf_size < HEADER_SIZE) {
  2119. *data_size = 0;
  2120. return buf_size;
  2121. }
  2122. if (len > MPA_MAX_CODED_FRAME_SIZE)
  2123. len = MPA_MAX_CODED_FRAME_SIZE;
  2124. // Get header and restore sync word
  2125. header = AV_RB32(buf) | 0xffe00000;
  2126. if (ff_mpa_check_header(header) < 0) { // Bad header, discard frame
  2127. *data_size = 0;
  2128. return buf_size;
  2129. }
  2130. ff_mpegaudio_decode_header((MPADecodeHeader *)s, header);
  2131. /* update codec info */
  2132. avctx->sample_rate = s->sample_rate;
  2133. avctx->channels = s->nb_channels;
  2134. avctx->bit_rate = s->bit_rate;
  2135. avctx->sub_id = s->layer;
  2136. s->frame_size = len;
  2137. if (avctx->parse_only) {
  2138. out_size = buf_size;
  2139. } else {
  2140. out_size = mp_decode_frame(s, out_samples, buf, buf_size);
  2141. }
  2142. *data_size = out_size;
  2143. return buf_size;
  2144. }
  2145. #endif /* CONFIG_MP3ADU_DECODER */
  2146. #if CONFIG_MP3ON4_DECODER
  2147. /**
  2148. * Context for MP3On4 decoder
  2149. */
  2150. typedef struct MP3On4DecodeContext {
  2151. int frames; ///< number of mp3 frames per block (number of mp3 decoder instances)
  2152. int syncword; ///< syncword patch
  2153. const uint8_t *coff; ///< channels offsets in output buffer
  2154. MPADecodeContext *mp3decctx[5]; ///< MPADecodeContext for every decoder instance
  2155. } MP3On4DecodeContext;
  2156. #include "mpeg4audio.h"
2157. /* The next two arrays are indexed by channel config number (passed via codecdata) */
  2158. static const uint8_t mp3Frames[8] = {0,1,1,2,3,3,4,5}; /* number of mp3 decoder instances */
  2159. /* offsets into output buffer, assume output order is FL FR BL BR C LFE */
  2160. static const uint8_t chan_offset[8][5] = {
  2161. {0},
  2162. {0}, // C
  2163. {0}, // FLR
  2164. {2,0}, // C FLR
  2165. {2,0,3}, // C FLR BS
  2166. {4,0,2}, // C FLR BLRS
  2167. {4,0,2,5}, // C FLR BLRS LFE
  2168. {4,0,2,6,5}, // C FLR BLRS BLR LFE
  2169. };
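/* Worked example: channel config 6 (5.1) uses mp3Frames[6] = 4 mp3
 * sub-frames carrying C, FL+FR, BL+BR and LFE.  With the output order
 * FL FR BL BR C LFE, chan_offset[6] = {4,0,2,5} places C at slot 4, FL/FR at
 * slots 0/1, BL/BR at slots 2/3 and LFE at slot 5. */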
  2170. static int decode_init_mp3on4(AVCodecContext * avctx)
  2171. {
  2172. MP3On4DecodeContext *s = avctx->priv_data;
  2173. MPEG4AudioConfig cfg;
  2174. int i;
  2175. if ((avctx->extradata_size < 2) || (avctx->extradata == NULL)) {
  2176. av_log(avctx, AV_LOG_ERROR, "Codec extradata missing or too short.\n");
  2177. return -1;
  2178. }
  2179. ff_mpeg4audio_get_config(&cfg, avctx->extradata, avctx->extradata_size);
  2180. if (!cfg.chan_config || cfg.chan_config > 7) {
  2181. av_log(avctx, AV_LOG_ERROR, "Invalid channel config number.\n");
  2182. return -1;
  2183. }
  2184. s->frames = mp3Frames[cfg.chan_config];
  2185. s->coff = chan_offset[cfg.chan_config];
  2186. avctx->channels = ff_mpeg4audio_channels[cfg.chan_config];
  2187. if (cfg.sample_rate < 16000)
  2188. s->syncword = 0xffe00000;
  2189. else
  2190. s->syncword = 0xfff00000;
2191. /* Init the first mp3 decoder in the standard way, so that all tables get built.
2192. * We replace avctx->priv_data with the context of the first decoder so that
2193. * decode_init() does not have to be changed.
2194. * The other decoders are initialized here by copying data from the first context.
2195. */
  2196. // Allocate zeroed memory for the first decoder context
  2197. s->mp3decctx[0] = av_mallocz(sizeof(MPADecodeContext));
2198. // Put the decoder context in place to make decode_init() happy
  2199. avctx->priv_data = s->mp3decctx[0];
  2200. decode_init(avctx);
  2201. // Restore mp3on4 context pointer
  2202. avctx->priv_data = s;
  2203. s->mp3decctx[0]->adu_mode = 1; // Set adu mode
2204. /* Create a separate codec/context for each frame (the first one is already set up).
2205. * Each frame carries 1 or 2 channels; up to 5 frames are allowed.
  2206. */
  2207. for (i = 1; i < s->frames; i++) {
  2208. s->mp3decctx[i] = av_mallocz(sizeof(MPADecodeContext));
  2209. s->mp3decctx[i]->adu_mode = 1;
  2210. s->mp3decctx[i]->avctx = avctx;
  2211. }
  2212. return 0;
  2213. }
  2214. static av_cold int decode_close_mp3on4(AVCodecContext * avctx)
  2215. {
  2216. MP3On4DecodeContext *s = avctx->priv_data;
  2217. int i;
  2218. for (i = 0; i < s->frames; i++)
  2219. if (s->mp3decctx[i])
  2220. av_free(s->mp3decctx[i]);
  2221. return 0;
  2222. }
  2223. static int decode_frame_mp3on4(AVCodecContext * avctx,
  2224. void *data, int *data_size,
  2225. AVPacket *avpkt)
  2226. {
  2227. const uint8_t *buf = avpkt->data;
  2228. int buf_size = avpkt->size;
  2229. MP3On4DecodeContext *s = avctx->priv_data;
  2230. MPADecodeContext *m;
  2231. int fsize, len = buf_size, out_size = 0;
  2232. uint32_t header;
  2233. OUT_INT *out_samples = data;
  2234. OUT_INT decoded_buf[MPA_FRAME_SIZE * MPA_MAX_CHANNELS];
  2235. OUT_INT *outptr, *bp;
  2236. int fr, j, n;
  2237. if(*data_size < MPA_FRAME_SIZE * MPA_MAX_CHANNELS * s->frames * sizeof(OUT_INT))
  2238. return -1;
  2239. *data_size = 0;
2240. // Discard frames that are too short
  2241. if (buf_size < HEADER_SIZE)
  2242. return -1;
2243. // If there is only one decoder, interleaving is not needed
  2244. outptr = s->frames == 1 ? out_samples : decoded_buf;
  2245. avctx->bit_rate = 0;
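/* Each mp3on4 block consists of s->frames sub-frames laid out back to back.
 * Every sub-frame starts with a 32-bit MPEG audio header whose top 12 bits
 * (normally the sync word) instead carry the sub-frame size, which is why
 * fsize is read as AV_RB16() >> 4 and the sync word is patched back in from
 * s->syncword before ff_mpa_check_header().  When there is more than one
 * sub-frame, mono sub-frames are decoded into decoded_buf and spread into
 * every avctx->channels-th output slot starting at coff[fr]; stereo
 * sub-frames fill pairs of slots. */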
  2246. for (fr = 0; fr < s->frames; fr++) {
  2247. fsize = AV_RB16(buf) >> 4;
  2248. fsize = FFMIN3(fsize, len, MPA_MAX_CODED_FRAME_SIZE);
  2249. m = s->mp3decctx[fr];
  2250. assert (m != NULL);
  2251. header = (AV_RB32(buf) & 0x000fffff) | s->syncword; // patch header
  2252. if (ff_mpa_check_header(header) < 0) // Bad header, discard block
  2253. break;
  2254. ff_mpegaudio_decode_header((MPADecodeHeader *)m, header);
  2255. out_size += mp_decode_frame(m, outptr, buf, fsize);
  2256. buf += fsize;
  2257. len -= fsize;
  2258. if(s->frames > 1) {
  2259. n = m->avctx->frame_size*m->nb_channels;
  2260. /* interleave output data */
  2261. bp = out_samples + s->coff[fr];
  2262. if(m->nb_channels == 1) {
  2263. for(j = 0; j < n; j++) {
  2264. *bp = decoded_buf[j];
  2265. bp += avctx->channels;
  2266. }
  2267. } else {
  2268. for(j = 0; j < n; j++) {
  2269. bp[0] = decoded_buf[j++];
  2270. bp[1] = decoded_buf[j];
  2271. bp += avctx->channels;
  2272. }
  2273. }
  2274. }
  2275. avctx->bit_rate += m->bit_rate;
  2276. }
  2277. /* update codec info */
  2278. avctx->sample_rate = s->mp3decctx[0]->sample_rate;
  2279. *data_size = out_size;
  2280. return buf_size;
  2281. }
  2282. #endif /* CONFIG_MP3ON4_DECODER */
  2283. #if !CONFIG_FLOAT
  2284. #if CONFIG_MP1_DECODER
  2285. AVCodec mp1_decoder =
  2286. {
  2287. "mp1",
  2288. AVMEDIA_TYPE_AUDIO,
  2289. CODEC_ID_MP1,
  2290. sizeof(MPADecodeContext),
  2291. decode_init,
  2292. NULL,
  2293. NULL,
  2294. decode_frame,
  2295. CODEC_CAP_PARSE_ONLY,
  2296. .flush= flush,
  2297. .long_name= NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"),
  2298. };
  2299. #endif
  2300. #if CONFIG_MP2_DECODER
  2301. AVCodec mp2_decoder =
  2302. {
  2303. "mp2",
  2304. AVMEDIA_TYPE_AUDIO,
  2305. CODEC_ID_MP2,
  2306. sizeof(MPADecodeContext),
  2307. decode_init,
  2308. NULL,
  2309. NULL,
  2310. decode_frame,
  2311. CODEC_CAP_PARSE_ONLY,
  2312. .flush= flush,
  2313. .long_name= NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
  2314. };
  2315. #endif
  2316. #if CONFIG_MP3_DECODER
  2317. AVCodec mp3_decoder =
  2318. {
  2319. "mp3",
  2320. AVMEDIA_TYPE_AUDIO,
  2321. CODEC_ID_MP3,
  2322. sizeof(MPADecodeContext),
  2323. decode_init,
  2324. NULL,
  2325. NULL,
  2326. decode_frame,
  2327. CODEC_CAP_PARSE_ONLY,
  2328. .flush= flush,
  2329. .long_name= NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
  2330. };
  2331. #endif
  2332. #if CONFIG_MP3ADU_DECODER
  2333. AVCodec mp3adu_decoder =
  2334. {
  2335. "mp3adu",
  2336. AVMEDIA_TYPE_AUDIO,
  2337. CODEC_ID_MP3ADU,
  2338. sizeof(MPADecodeContext),
  2339. decode_init,
  2340. NULL,
  2341. NULL,
  2342. decode_frame_adu,
  2343. CODEC_CAP_PARSE_ONLY,
  2344. .flush= flush,
  2345. .long_name= NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
  2346. };
  2347. #endif
  2348. #if CONFIG_MP3ON4_DECODER
  2349. AVCodec mp3on4_decoder =
  2350. {
  2351. "mp3on4",
  2352. AVMEDIA_TYPE_AUDIO,
  2353. CODEC_ID_MP3ON4,
  2354. sizeof(MP3On4DecodeContext),
  2355. decode_init_mp3on4,
  2356. NULL,
  2357. decode_close_mp3on4,
  2358. decode_frame_mp3on4,
  2359. .flush= flush,
  2360. .long_name= NULL_IF_CONFIG_SMALL("MP3onMP4"),
  2361. };
  2362. #endif
  2363. #endif