

  1. /*
  2. * MPEG Audio decoder
  3. * Copyright (c) 2001, 2002 Fabrice Bellard.
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file mpegaudiodec.c
  23. * MPEG Audio decoder.
  24. */
  25. #include "avcodec.h"
  26. #include "bitstream.h"
  27. #include "dsputil.h"
  28. /*
  29. * TODO:
  30. * - in low precision mode, use more 16 bit multiplies in synth filter
  31. * - test lsf / mpeg25 extensively.
  32. */
33. /* define USE_HIGHPRECISION to get a bit-exact (but slower) MPEG
34. audio decoder */
  35. #ifdef CONFIG_MPEGAUDIO_HP
  36. # define USE_HIGHPRECISION
  37. #endif
  38. #include "mpegaudio.h"
  39. #include "mpegaudiodecheader.h"
  40. #include "mathops.h"
41. /* WARNING: only correct for positive numbers */
  42. #define FIXR(a) ((int)((a) * FRAC_ONE + 0.5))
  43. #define FRAC_RND(a) (((a) + (FRAC_ONE/2)) >> FRAC_BITS)
  44. #define FIXHR(a) ((int)((a) * (1LL<<32) + 0.5))
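/* NOTE: FIXR() converts a constant to FRAC_BITS fixed point with rounding,
   FRAC_RND() rounds back from FRAC_BITS fixed point to an integer, and
   FIXHR() scales a constant by 2^32 for use with MULH(), which keeps the
   high 32 bits of the 64-bit product, so MULH(x, FIXHR(c)) ~= x * c.
   For example, MULH(v, FIXHR(0.25)) is roughly v / 4. */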
  45. /****************/
  46. #define HEADER_SIZE 4
  47. /* layer 3 "granule" */
  48. typedef struct GranuleDef {
  49. uint8_t scfsi;
  50. int part2_3_length;
  51. int big_values;
  52. int global_gain;
  53. int scalefac_compress;
  54. uint8_t block_type;
  55. uint8_t switch_point;
  56. int table_select[3];
  57. int subblock_gain[3];
  58. uint8_t scalefac_scale;
  59. uint8_t count1table_select;
  60. int region_size[3]; /* number of huffman codes in each region */
  61. int preflag;
  62. int short_start, long_end; /* long/short band indexes */
  63. uint8_t scale_factors[40];
  64. int32_t sb_hybrid[SBLIMIT * 18]; /* 576 samples */
  65. } GranuleDef;
  66. #include "mpegaudiodata.h"
  67. #include "mpegaudiodectab.h"
  68. static void compute_antialias_integer(MPADecodeContext *s, GranuleDef *g);
  69. static void compute_antialias_float(MPADecodeContext *s, GranuleDef *g);
  70. /* vlc structure for decoding layer 3 huffman tables */
  71. static VLC huff_vlc[16];
  72. static VLC_TYPE huff_vlc_tables[
  73. 0+128+128+128+130+128+154+166+
  74. 142+204+190+170+542+460+662+414
  75. ][2];
  76. static const int huff_vlc_tables_sizes[16] = {
  77. 0, 128, 128, 128, 130, 128, 154, 166,
  78. 142, 204, 190, 170, 542, 460, 662, 414
  79. };
  80. static VLC huff_quad_vlc[2];
  81. static VLC_TYPE huff_quad_vlc_tables[128+16][2];
  82. static const int huff_quad_vlc_tables_sizes[2] = {
  83. 128, 16
  84. };
  85. /* computed from band_size_long */
  86. static uint16_t band_index_long[9][23];
  87. /* XXX: free when all decoders are closed */
  88. #define TABLE_4_3_SIZE (8191 + 16)*4
  89. static int8_t table_4_3_exp[TABLE_4_3_SIZE];
  90. static uint32_t table_4_3_value[TABLE_4_3_SIZE];
  91. static uint32_t exp_table[512];
  92. static uint32_t expval_table[512][16];
  93. /* intensity stereo coef table */
  94. static int32_t is_table[2][16];
  95. static int32_t is_table_lsf[2][2][16];
  96. static int32_t csa_table[8][4];
  97. static float csa_table_float[8][4];
  98. static int32_t mdct_win[8][36];
  99. /* lower 2 bits: modulo 3, higher bits: shift */
  100. static uint16_t scale_factor_modshift[64];
  101. /* [i][j]: 2^(-j/3) * FRAC_ONE * 2^(i+2) / (2^(i+2) - 1) */
  102. static int32_t scale_factor_mult[15][3];
  103. /* mult table for layer 2 group quantization */
  104. #define SCALE_GEN(v) \
  105. { FIXR(1.0 * (v)), FIXR(0.7937005259 * (v)), FIXR(0.6299605249 * (v)) }
  106. static const int32_t scale_factor_mult2[3][3] = {
  107. SCALE_GEN(4.0 / 3.0), /* 3 steps */
  108. SCALE_GEN(4.0 / 5.0), /* 5 steps */
  109. SCALE_GEN(4.0 / 9.0), /* 9 steps */
  110. };
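/* NOTE: 0.7937005259 = 2^(-1/3) and 0.6299605249 = 2^(-2/3), so each row
   holds v, v * 2^(-1/3) and v * 2^(-2/3), matching the three fractional
   scale factor steps (the "mod" part of scale_factor_modshift[] above). */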
  111. static DECLARE_ALIGNED_16(MPA_INT, window[512]);
  112. /**
  113. * Convert region offsets to region sizes and truncate
  114. * size to big_values.
  115. */
  116. void ff_region_offset2size(GranuleDef *g){
  117. int i, k, j=0;
  118. g->region_size[2] = (576 / 2);
  119. for(i=0;i<3;i++) {
  120. k = FFMIN(g->region_size[i], g->big_values);
  121. g->region_size[i] = k - j;
  122. j = k;
  123. }
  124. }
  125. void ff_init_short_region(MPADecodeContext *s, GranuleDef *g){
  126. if (g->block_type == 2)
  127. g->region_size[0] = (36 / 2);
  128. else {
  129. if (s->sample_rate_index <= 2)
  130. g->region_size[0] = (36 / 2);
  131. else if (s->sample_rate_index != 8)
  132. g->region_size[0] = (54 / 2);
  133. else
  134. g->region_size[0] = (108 / 2);
  135. }
  136. g->region_size[1] = (576 / 2);
  137. }
  138. void ff_init_long_region(MPADecodeContext *s, GranuleDef *g, int ra1, int ra2){
  139. int l;
  140. g->region_size[0] =
  141. band_index_long[s->sample_rate_index][ra1 + 1] >> 1;
  142. /* should not overflow */
  143. l = FFMIN(ra1 + ra2 + 2, 22);
  144. g->region_size[1] =
  145. band_index_long[s->sample_rate_index][l] >> 1;
  146. }
  147. void ff_compute_band_indexes(MPADecodeContext *s, GranuleDef *g){
  148. if (g->block_type == 2) {
  149. if (g->switch_point) {
150. /* in switched mode, we handle the first 36 samples as
151. long blocks. For 8000 Hz, we handle the first 48
152. exponents as long blocks (XXX: check this!) */
  153. if (s->sample_rate_index <= 2)
  154. g->long_end = 8;
  155. else if (s->sample_rate_index != 8)
  156. g->long_end = 6;
  157. else
  158. g->long_end = 4; /* 8000 Hz */
  159. g->short_start = 2 + (s->sample_rate_index != 8);
  160. } else {
  161. g->long_end = 0;
  162. g->short_start = 0;
  163. }
  164. } else {
  165. g->short_start = 13;
  166. g->long_end = 22;
  167. }
  168. }
  169. /* layer 1 unscaling */
  170. /* n = number of bits of the mantissa minus 1 */
  171. static inline int l1_unscale(int n, int mant, int scale_factor)
  172. {
  173. int shift, mod;
  174. int64_t val;
  175. shift = scale_factor_modshift[scale_factor];
  176. mod = shift & 3;
  177. shift >>= 2;
  178. val = MUL64(mant + (-1 << n) + 1, scale_factor_mult[n-1][mod]);
  179. shift += n;
180. /* NOTE: at this point, 1 <= shift <= 21 + 15 */
  181. return (int)((val + (1LL << (shift - 1))) >> shift);
  182. }
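/* NOTE on l1_unscale(): mant is the raw (n+1)-bit mantissa and
   (-1 << n) + 1 recenters it around zero; scale_factor_mult[n-1][mod]
   (see the table comment above) carries the 2^(n+1)/(2^(n+1)-1)
   normalization and the fractional part 2^(-mod/3) of the scale factor,
   while the final rounded right shift applies its integer part. */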
  183. static inline int l2_unscale_group(int steps, int mant, int scale_factor)
  184. {
  185. int shift, mod, val;
  186. shift = scale_factor_modshift[scale_factor];
  187. mod = shift & 3;
  188. shift >>= 2;
  189. val = (mant - (steps >> 1)) * scale_factor_mult2[steps >> 2][mod];
  190. /* NOTE: at this point, 0 <= shift <= 21 */
  191. if (shift > 0)
  192. val = (val + (1 << (shift - 1))) >> shift;
  193. return val;
  194. }
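/* NOTE on l2_unscale_group(): mant - (steps >> 1) recenters the
   0..steps-1 group code around zero, and steps >> 2 (0 for 3 steps,
   1 for 5, 2 for 9) selects the matching row of scale_factor_mult2[]. */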
195. /* compute value^(4/3) * 2^(exponent/4). The result is normalized to FRAC_BITS */
  196. static inline int l3_unscale(int value, int exponent)
  197. {
  198. unsigned int m;
  199. int e;
  200. e = table_4_3_exp [4*value + (exponent&3)];
  201. m = table_4_3_value[4*value + (exponent&3)];
  202. e -= (exponent >> 2);
  203. assert(e>=1);
  204. if (e > 31)
  205. return 0;
  206. m = (m + (1 << (e-1))) >> e;
  207. return m;
  208. }
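/* NOTE on l3_unscale(): table_4_3_value/table_4_3_exp (filled in
   decode_init()) store value^(4/3) * 2^((exponent&3)/4) in mantissa /
   exponent form; the remaining exponent >> 2 part is applied as a
   rounded right shift, giving a result normalized to FRAC_BITS. */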
  209. /* all integer n^(4/3) computation code */
  210. #define DEV_ORDER 13
  211. #define POW_FRAC_BITS 24
  212. #define POW_FRAC_ONE (1 << POW_FRAC_BITS)
  213. #define POW_FIX(a) ((int)((a) * POW_FRAC_ONE))
  214. #define POW_MULL(a,b) (((int64_t)(a) * (int64_t)(b)) >> POW_FRAC_BITS)
  215. static int dev_4_3_coefs[DEV_ORDER];
  216. #if 0 /* unused */
  217. static int pow_mult3[3] = {
  218. POW_FIX(1.0),
  219. POW_FIX(1.25992104989487316476),
  220. POW_FIX(1.58740105196819947474),
  221. };
  222. #endif
  223. static void int_pow_init(void)
  224. {
  225. int i, a;
  226. a = POW_FIX(1.0);
  227. for(i=0;i<DEV_ORDER;i++) {
  228. a = POW_MULL(a, POW_FIX(4.0 / 3.0) - i * POW_FIX(1.0)) / (i + 1);
  229. dev_4_3_coefs[i] = a;
  230. }
  231. }
  232. #if 0 /* unused, remove? */
  233. /* return the mantissa and the binary exponent */
  234. static int int_pow(int i, int *exp_ptr)
  235. {
  236. int e, er, eq, j;
  237. int a, a1;
  238. /* renormalize */
  239. a = i;
  240. e = POW_FRAC_BITS;
  241. while (a < (1 << (POW_FRAC_BITS - 1))) {
  242. a = a << 1;
  243. e--;
  244. }
  245. a -= (1 << POW_FRAC_BITS);
  246. a1 = 0;
  247. for(j = DEV_ORDER - 1; j >= 0; j--)
  248. a1 = POW_MULL(a, dev_4_3_coefs[j] + a1);
  249. a = (1 << POW_FRAC_BITS) + a1;
  250. /* exponent compute (exact) */
  251. e = e * 4;
  252. er = e % 3;
  253. eq = e / 3;
  254. a = POW_MULL(a, pow_mult3[er]);
  255. while (a >= 2 * POW_FRAC_ONE) {
  256. a = a >> 1;
  257. eq++;
  258. }
  259. /* convert to float */
  260. while (a < POW_FRAC_ONE) {
  261. a = a << 1;
  262. eq--;
  263. }
  264. /* now POW_FRAC_ONE <= a < 2 * POW_FRAC_ONE */
  265. #if POW_FRAC_BITS > FRAC_BITS
  266. a = (a + (1 << (POW_FRAC_BITS - FRAC_BITS - 1))) >> (POW_FRAC_BITS - FRAC_BITS);
  267. /* correct overflow */
  268. if (a >= 2 * (1 << FRAC_BITS)) {
  269. a = a >> 1;
  270. eq++;
  271. }
  272. #endif
  273. *exp_ptr = eq;
  274. return a;
  275. }
  276. #endif
  277. static int decode_init(AVCodecContext * avctx)
  278. {
  279. MPADecodeContext *s = avctx->priv_data;
  280. static int init=0;
  281. int i, j, k;
  282. s->avctx = avctx;
  283. #if defined(USE_HIGHPRECISION) && defined(CONFIG_AUDIO_NONSHORT)
  284. avctx->sample_fmt= SAMPLE_FMT_S32;
  285. #else
  286. avctx->sample_fmt= SAMPLE_FMT_S16;
  287. #endif
  288. s->error_recognition= avctx->error_recognition;
  289. if(avctx->antialias_algo != FF_AA_FLOAT)
  290. s->compute_antialias= compute_antialias_integer;
  291. else
  292. s->compute_antialias= compute_antialias_float;
  293. if (!init && !avctx->parse_only) {
  294. int offset;
  295. /* scale factors table for layer 1/2 */
  296. for(i=0;i<64;i++) {
  297. int shift, mod;
  298. /* 1.0 (i = 3) is normalized to 2 ^ FRAC_BITS */
  299. shift = (i / 3);
  300. mod = i % 3;
  301. scale_factor_modshift[i] = mod | (shift << 2);
  302. }
  303. /* scale factor multiply for layer 1 */
  304. for(i=0;i<15;i++) {
  305. int n, norm;
  306. n = i + 2;
  307. norm = ((INT64_C(1) << n) * FRAC_ONE) / ((1 << n) - 1);
  308. scale_factor_mult[i][0] = MULL(FIXR(1.0 * 2.0), norm, FRAC_BITS);
  309. scale_factor_mult[i][1] = MULL(FIXR(0.7937005259 * 2.0), norm, FRAC_BITS);
  310. scale_factor_mult[i][2] = MULL(FIXR(0.6299605249 * 2.0), norm, FRAC_BITS);
  311. dprintf(avctx, "%d: norm=%x s=%x %x %x\n",
  312. i, norm,
  313. scale_factor_mult[i][0],
  314. scale_factor_mult[i][1],
  315. scale_factor_mult[i][2]);
  316. }
  317. ff_mpa_synth_init(window);
  318. /* huffman decode tables */
  319. offset = 0;
  320. for(i=1;i<16;i++) {
  321. const HuffTable *h = &mpa_huff_tables[i];
  322. int xsize, x, y;
  323. unsigned int n;
  324. uint8_t tmp_bits [512];
  325. uint16_t tmp_codes[512];
  326. memset(tmp_bits , 0, sizeof(tmp_bits ));
  327. memset(tmp_codes, 0, sizeof(tmp_codes));
  328. xsize = h->xsize;
  329. n = xsize * xsize;
  330. j = 0;
  331. for(x=0;x<xsize;x++) {
  332. for(y=0;y<xsize;y++){
  333. tmp_bits [(x << 5) | y | ((x&&y)<<4)]= h->bits [j ];
  334. tmp_codes[(x << 5) | y | ((x&&y)<<4)]= h->codes[j++];
  335. }
  336. }
  337. /* XXX: fail test */
  338. huff_vlc[i].table = huff_vlc_tables+offset;
  339. huff_vlc[i].table_allocated = huff_vlc_tables_sizes[i];
  340. init_vlc(&huff_vlc[i], 7, 512,
  341. tmp_bits, 1, 1, tmp_codes, 2, 2,
  342. INIT_VLC_USE_NEW_STATIC);
  343. offset += huff_vlc_tables_sizes[i];
  344. }
  345. assert(offset == FF_ARRAY_ELEMS(huff_vlc_tables));
  346. offset = 0;
  347. for(i=0;i<2;i++) {
  348. huff_quad_vlc[i].table = huff_quad_vlc_tables+offset;
  349. huff_quad_vlc[i].table_allocated = huff_quad_vlc_tables_sizes[i];
  350. init_vlc(&huff_quad_vlc[i], i == 0 ? 7 : 4, 16,
  351. mpa_quad_bits[i], 1, 1, mpa_quad_codes[i], 1, 1,
  352. INIT_VLC_USE_NEW_STATIC);
  353. offset += huff_quad_vlc_tables_sizes[i];
  354. }
  355. assert(offset == FF_ARRAY_ELEMS(huff_quad_vlc_tables));
  356. for(i=0;i<9;i++) {
  357. k = 0;
  358. for(j=0;j<22;j++) {
  359. band_index_long[i][j] = k;
  360. k += band_size_long[i][j];
  361. }
  362. band_index_long[i][22] = k;
  363. }
  364. /* compute n ^ (4/3) and store it in mantissa/exp format */
  365. int_pow_init();
  366. for(i=1;i<TABLE_4_3_SIZE;i++) {
  367. double f, fm;
  368. int e, m;
  369. f = pow((double)(i/4), 4.0 / 3.0) * pow(2, (i&3)*0.25);
  370. fm = frexp(f, &e);
  371. m = (uint32_t)(fm*(1LL<<31) + 0.5);
  372. e+= FRAC_BITS - 31 + 5 - 100;
  373. /* normalized to FRAC_BITS */
  374. table_4_3_value[i] = m;
  375. table_4_3_exp[i] = -e;
  376. }
  377. for(i=0; i<512*16; i++){
  378. int exponent= (i>>4);
  379. double f= pow(i&15, 4.0 / 3.0) * pow(2, (exponent-400)*0.25 + FRAC_BITS + 5);
  380. expval_table[exponent][i&15]= llrint(f);
  381. if((i&15)==1)
  382. exp_table[exponent]= llrint(f);
  383. }
  384. for(i=0;i<7;i++) {
  385. float f;
  386. int v;
  387. if (i != 6) {
  388. f = tan((double)i * M_PI / 12.0);
  389. v = FIXR(f / (1.0 + f));
  390. } else {
  391. v = FIXR(1.0);
  392. }
  393. is_table[0][i] = v;
  394. is_table[1][6 - i] = v;
  395. }
  396. /* invalid values */
  397. for(i=7;i<16;i++)
  398. is_table[0][i] = is_table[1][i] = 0.0;
  399. for(i=0;i<16;i++) {
  400. double f;
  401. int e, k;
  402. for(j=0;j<2;j++) {
  403. e = -(j + 1) * ((i + 1) >> 1);
  404. f = pow(2.0, e / 4.0);
  405. k = i & 1;
  406. is_table_lsf[j][k ^ 1][i] = FIXR(f);
  407. is_table_lsf[j][k][i] = FIXR(1.0);
  408. dprintf(avctx, "is_table_lsf %d %d: %x %x\n",
  409. i, j, is_table_lsf[j][0][i], is_table_lsf[j][1][i]);
  410. }
  411. }
  412. for(i=0;i<8;i++) {
  413. float ci, cs, ca;
  414. ci = ci_table[i];
  415. cs = 1.0 / sqrt(1.0 + ci * ci);
  416. ca = cs * ci;
  417. csa_table[i][0] = FIXHR(cs/4);
  418. csa_table[i][1] = FIXHR(ca/4);
  419. csa_table[i][2] = FIXHR(ca/4) + FIXHR(cs/4);
  420. csa_table[i][3] = FIXHR(ca/4) - FIXHR(cs/4);
  421. csa_table_float[i][0] = cs;
  422. csa_table_float[i][1] = ca;
  423. csa_table_float[i][2] = ca + cs;
  424. csa_table_float[i][3] = ca - cs;
  425. }
  426. /* compute mdct windows */
  427. for(i=0;i<36;i++) {
  428. for(j=0; j<4; j++){
  429. double d;
  430. if(j==2 && i%3 != 1)
  431. continue;
  432. d= sin(M_PI * (i + 0.5) / 36.0);
  433. if(j==1){
  434. if (i>=30) d= 0;
  435. else if(i>=24) d= sin(M_PI * (i - 18 + 0.5) / 12.0);
  436. else if(i>=18) d= 1;
  437. }else if(j==3){
  438. if (i< 6) d= 0;
  439. else if(i< 12) d= sin(M_PI * (i - 6 + 0.5) / 12.0);
  440. else if(i< 18) d= 1;
  441. }
  442. //merge last stage of imdct into the window coefficients
  443. d*= 0.5 / cos(M_PI*(2*i + 19)/72);
  444. if(j==2)
  445. mdct_win[j][i/3] = FIXHR((d / (1<<5)));
  446. else
  447. mdct_win[j][i ] = FIXHR((d / (1<<5)));
  448. }
  449. }
450. /* NOTE: we do frequency inversion after the MDCT by changing
451. the sign of the right window coefficients */
  452. for(j=0;j<4;j++) {
  453. for(i=0;i<36;i+=2) {
  454. mdct_win[j + 4][i] = mdct_win[j][i];
  455. mdct_win[j + 4][i + 1] = -mdct_win[j][i + 1];
  456. }
  457. }
  458. init = 1;
  459. }
  460. if (avctx->codec_id == CODEC_ID_MP3ADU)
  461. s->adu_mode = 1;
  462. return 0;
  463. }
464. /* COSn_i = 1.0 / (2.0 * cos(pi*(2*i+1) / 2^(6 - n))) */
465. /* e.g. COS0_i = 0.5 / cos((2*i+1)*pi/64) */
  466. #define COS0_0 FIXHR(0.50060299823519630134/2)
  467. #define COS0_1 FIXHR(0.50547095989754365998/2)
  468. #define COS0_2 FIXHR(0.51544730992262454697/2)
  469. #define COS0_3 FIXHR(0.53104259108978417447/2)
  470. #define COS0_4 FIXHR(0.55310389603444452782/2)
  471. #define COS0_5 FIXHR(0.58293496820613387367/2)
  472. #define COS0_6 FIXHR(0.62250412303566481615/2)
  473. #define COS0_7 FIXHR(0.67480834145500574602/2)
  474. #define COS0_8 FIXHR(0.74453627100229844977/2)
  475. #define COS0_9 FIXHR(0.83934964541552703873/2)
  476. #define COS0_10 FIXHR(0.97256823786196069369/2)
  477. #define COS0_11 FIXHR(1.16943993343288495515/4)
  478. #define COS0_12 FIXHR(1.48416461631416627724/4)
  479. #define COS0_13 FIXHR(2.05778100995341155085/8)
  480. #define COS0_14 FIXHR(3.40760841846871878570/8)
  481. #define COS0_15 FIXHR(10.19000812354805681150/32)
  482. #define COS1_0 FIXHR(0.50241928618815570551/2)
  483. #define COS1_1 FIXHR(0.52249861493968888062/2)
  484. #define COS1_2 FIXHR(0.56694403481635770368/2)
  485. #define COS1_3 FIXHR(0.64682178335999012954/2)
  486. #define COS1_4 FIXHR(0.78815462345125022473/2)
  487. #define COS1_5 FIXHR(1.06067768599034747134/4)
  488. #define COS1_6 FIXHR(1.72244709823833392782/4)
  489. #define COS1_7 FIXHR(5.10114861868916385802/16)
  490. #define COS2_0 FIXHR(0.50979557910415916894/2)
  491. #define COS2_1 FIXHR(0.60134488693504528054/2)
  492. #define COS2_2 FIXHR(0.89997622313641570463/2)
  493. #define COS2_3 FIXHR(2.56291544774150617881/8)
  494. #define COS3_0 FIXHR(0.54119610014619698439/2)
  495. #define COS3_1 FIXHR(1.30656296487637652785/4)
  496. #define COS4_0 FIXHR(0.70710678118654752439/2)
  497. /* butterfly operator */
  498. #define BF(a, b, c, s)\
  499. {\
  500. tmp0 = tab[a] + tab[b];\
  501. tmp1 = tab[a] - tab[b];\
  502. tab[a] = tmp0;\
  503. tab[b] = MULH(tmp1<<(s), c);\
  504. }
  505. #define BF1(a, b, c, d)\
  506. {\
  507. BF(a, b, COS4_0, 1);\
  508. BF(c, d,-COS4_0, 1);\
  509. tab[c] += tab[d];\
  510. }
  511. #define BF2(a, b, c, d)\
  512. {\
  513. BF(a, b, COS4_0, 1);\
  514. BF(c, d,-COS4_0, 1);\
  515. tab[c] += tab[d];\
  516. tab[a] += tab[c];\
  517. tab[c] += tab[b];\
  518. tab[b] += tab[d];\
  519. }
  520. #define ADD(a, b) tab[a] += tab[b]
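/* NOTE: BF(a, b, c, s) replaces tab[a] with tab[a] + tab[b] and tab[b]
   with (tab[a] - tab[b]) * c, where c is a FIXHR() constant that was
   pre-divided by 2^s; the <<(s) before MULH() compensates for that
   pre-scaling.  BF1()/BF2() are the final butterflies of the last
   rotation pass, with part of the output recombination folded in. */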
521. /* DCT32 without the 1/sqrt(2) scaling of coefficient 0. */
  522. static void dct32(int32_t *out, int32_t *tab)
  523. {
  524. int tmp0, tmp1;
  525. /* pass 1 */
  526. BF( 0, 31, COS0_0 , 1);
  527. BF(15, 16, COS0_15, 5);
  528. /* pass 2 */
  529. BF( 0, 15, COS1_0 , 1);
  530. BF(16, 31,-COS1_0 , 1);
  531. /* pass 1 */
  532. BF( 7, 24, COS0_7 , 1);
  533. BF( 8, 23, COS0_8 , 1);
  534. /* pass 2 */
  535. BF( 7, 8, COS1_7 , 4);
  536. BF(23, 24,-COS1_7 , 4);
  537. /* pass 3 */
  538. BF( 0, 7, COS2_0 , 1);
  539. BF( 8, 15,-COS2_0 , 1);
  540. BF(16, 23, COS2_0 , 1);
  541. BF(24, 31,-COS2_0 , 1);
  542. /* pass 1 */
  543. BF( 3, 28, COS0_3 , 1);
  544. BF(12, 19, COS0_12, 2);
  545. /* pass 2 */
  546. BF( 3, 12, COS1_3 , 1);
  547. BF(19, 28,-COS1_3 , 1);
  548. /* pass 1 */
  549. BF( 4, 27, COS0_4 , 1);
  550. BF(11, 20, COS0_11, 2);
  551. /* pass 2 */
  552. BF( 4, 11, COS1_4 , 1);
  553. BF(20, 27,-COS1_4 , 1);
  554. /* pass 3 */
  555. BF( 3, 4, COS2_3 , 3);
  556. BF(11, 12,-COS2_3 , 3);
  557. BF(19, 20, COS2_3 , 3);
  558. BF(27, 28,-COS2_3 , 3);
  559. /* pass 4 */
  560. BF( 0, 3, COS3_0 , 1);
  561. BF( 4, 7,-COS3_0 , 1);
  562. BF( 8, 11, COS3_0 , 1);
  563. BF(12, 15,-COS3_0 , 1);
  564. BF(16, 19, COS3_0 , 1);
  565. BF(20, 23,-COS3_0 , 1);
  566. BF(24, 27, COS3_0 , 1);
  567. BF(28, 31,-COS3_0 , 1);
  568. /* pass 1 */
  569. BF( 1, 30, COS0_1 , 1);
  570. BF(14, 17, COS0_14, 3);
  571. /* pass 2 */
  572. BF( 1, 14, COS1_1 , 1);
  573. BF(17, 30,-COS1_1 , 1);
  574. /* pass 1 */
  575. BF( 6, 25, COS0_6 , 1);
  576. BF( 9, 22, COS0_9 , 1);
  577. /* pass 2 */
  578. BF( 6, 9, COS1_6 , 2);
  579. BF(22, 25,-COS1_6 , 2);
  580. /* pass 3 */
  581. BF( 1, 6, COS2_1 , 1);
  582. BF( 9, 14,-COS2_1 , 1);
  583. BF(17, 22, COS2_1 , 1);
  584. BF(25, 30,-COS2_1 , 1);
  585. /* pass 1 */
  586. BF( 2, 29, COS0_2 , 1);
  587. BF(13, 18, COS0_13, 3);
  588. /* pass 2 */
  589. BF( 2, 13, COS1_2 , 1);
  590. BF(18, 29,-COS1_2 , 1);
  591. /* pass 1 */
  592. BF( 5, 26, COS0_5 , 1);
  593. BF(10, 21, COS0_10, 1);
  594. /* pass 2 */
  595. BF( 5, 10, COS1_5 , 2);
  596. BF(21, 26,-COS1_5 , 2);
  597. /* pass 3 */
  598. BF( 2, 5, COS2_2 , 1);
  599. BF(10, 13,-COS2_2 , 1);
  600. BF(18, 21, COS2_2 , 1);
  601. BF(26, 29,-COS2_2 , 1);
  602. /* pass 4 */
  603. BF( 1, 2, COS3_1 , 2);
  604. BF( 5, 6,-COS3_1 , 2);
  605. BF( 9, 10, COS3_1 , 2);
  606. BF(13, 14,-COS3_1 , 2);
  607. BF(17, 18, COS3_1 , 2);
  608. BF(21, 22,-COS3_1 , 2);
  609. BF(25, 26, COS3_1 , 2);
  610. BF(29, 30,-COS3_1 , 2);
  611. /* pass 5 */
  612. BF1( 0, 1, 2, 3);
  613. BF2( 4, 5, 6, 7);
  614. BF1( 8, 9, 10, 11);
  615. BF2(12, 13, 14, 15);
  616. BF1(16, 17, 18, 19);
  617. BF2(20, 21, 22, 23);
  618. BF1(24, 25, 26, 27);
  619. BF2(28, 29, 30, 31);
  620. /* pass 6 */
  621. ADD( 8, 12);
  622. ADD(12, 10);
  623. ADD(10, 14);
  624. ADD(14, 9);
  625. ADD( 9, 13);
  626. ADD(13, 11);
  627. ADD(11, 15);
  628. out[ 0] = tab[0];
  629. out[16] = tab[1];
  630. out[ 8] = tab[2];
  631. out[24] = tab[3];
  632. out[ 4] = tab[4];
  633. out[20] = tab[5];
  634. out[12] = tab[6];
  635. out[28] = tab[7];
  636. out[ 2] = tab[8];
  637. out[18] = tab[9];
  638. out[10] = tab[10];
  639. out[26] = tab[11];
  640. out[ 6] = tab[12];
  641. out[22] = tab[13];
  642. out[14] = tab[14];
  643. out[30] = tab[15];
  644. ADD(24, 28);
  645. ADD(28, 26);
  646. ADD(26, 30);
  647. ADD(30, 25);
  648. ADD(25, 29);
  649. ADD(29, 27);
  650. ADD(27, 31);
  651. out[ 1] = tab[16] + tab[24];
  652. out[17] = tab[17] + tab[25];
  653. out[ 9] = tab[18] + tab[26];
  654. out[25] = tab[19] + tab[27];
  655. out[ 5] = tab[20] + tab[28];
  656. out[21] = tab[21] + tab[29];
  657. out[13] = tab[22] + tab[30];
  658. out[29] = tab[23] + tab[31];
  659. out[ 3] = tab[24] + tab[20];
  660. out[19] = tab[25] + tab[21];
  661. out[11] = tab[26] + tab[22];
  662. out[27] = tab[27] + tab[23];
  663. out[ 7] = tab[28] + tab[18];
  664. out[23] = tab[29] + tab[19];
  665. out[15] = tab[30] + tab[17];
  666. out[31] = tab[31];
  667. }
  668. #if FRAC_BITS <= 15
  669. static inline int round_sample(int *sum)
  670. {
  671. int sum1;
  672. sum1 = (*sum) >> OUT_SHIFT;
  673. *sum &= (1<<OUT_SHIFT)-1;
  674. if (sum1 < OUT_MIN)
  675. sum1 = OUT_MIN;
  676. else if (sum1 > OUT_MAX)
  677. sum1 = OUT_MAX;
  678. return sum1;
  679. }
  680. /* signed 16x16 -> 32 multiply add accumulate */
  681. #define MACS(rt, ra, rb) MAC16(rt, ra, rb)
  682. /* signed 16x16 -> 32 multiply */
  683. #define MULS(ra, rb) MUL16(ra, rb)
  684. #define MLSS(rt, ra, rb) MLS16(rt, ra, rb)
  685. #else
  686. static inline int round_sample(int64_t *sum)
  687. {
  688. int sum1;
  689. sum1 = (int)((*sum) >> OUT_SHIFT);
  690. *sum &= (1<<OUT_SHIFT)-1;
  691. if (sum1 < OUT_MIN)
  692. sum1 = OUT_MIN;
  693. else if (sum1 > OUT_MAX)
  694. sum1 = OUT_MAX;
  695. return sum1;
  696. }
  697. # define MULS(ra, rb) MUL64(ra, rb)
  698. # define MACS(rt, ra, rb) MAC64(rt, ra, rb)
  699. # define MLSS(rt, ra, rb) MLS64(rt, ra, rb)
  700. #endif
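/* NOTE: round_sample() converts the windowed accumulator to one output
   sample: it shifts down by OUT_SHIFT, clips to OUT_MIN..OUT_MAX and
   leaves the discarded fractional bits in *sum, so the rounding error is
   carried over into the next sample (this is what dither_state stores
   between calls to the synthesis filter). */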
  701. #define SUM8(op, sum, w, p) \
  702. { \
  703. op(sum, (w)[0 * 64], p[0 * 64]); \
  704. op(sum, (w)[1 * 64], p[1 * 64]); \
  705. op(sum, (w)[2 * 64], p[2 * 64]); \
  706. op(sum, (w)[3 * 64], p[3 * 64]); \
  707. op(sum, (w)[4 * 64], p[4 * 64]); \
  708. op(sum, (w)[5 * 64], p[5 * 64]); \
  709. op(sum, (w)[6 * 64], p[6 * 64]); \
  710. op(sum, (w)[7 * 64], p[7 * 64]); \
  711. }
  712. #define SUM8P2(sum1, op1, sum2, op2, w1, w2, p) \
  713. { \
  714. int tmp;\
  715. tmp = p[0 * 64];\
  716. op1(sum1, (w1)[0 * 64], tmp);\
  717. op2(sum2, (w2)[0 * 64], tmp);\
  718. tmp = p[1 * 64];\
  719. op1(sum1, (w1)[1 * 64], tmp);\
  720. op2(sum2, (w2)[1 * 64], tmp);\
  721. tmp = p[2 * 64];\
  722. op1(sum1, (w1)[2 * 64], tmp);\
  723. op2(sum2, (w2)[2 * 64], tmp);\
  724. tmp = p[3 * 64];\
  725. op1(sum1, (w1)[3 * 64], tmp);\
  726. op2(sum2, (w2)[3 * 64], tmp);\
  727. tmp = p[4 * 64];\
  728. op1(sum1, (w1)[4 * 64], tmp);\
  729. op2(sum2, (w2)[4 * 64], tmp);\
  730. tmp = p[5 * 64];\
  731. op1(sum1, (w1)[5 * 64], tmp);\
  732. op2(sum2, (w2)[5 * 64], tmp);\
  733. tmp = p[6 * 64];\
  734. op1(sum1, (w1)[6 * 64], tmp);\
  735. op2(sum2, (w2)[6 * 64], tmp);\
  736. tmp = p[7 * 64];\
  737. op1(sum1, (w1)[7 * 64], tmp);\
  738. op2(sum2, (w2)[7 * 64], tmp);\
  739. }
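/* NOTE: SUM8() runs 8 multiply-accumulate steps over the window and the
   synthesis ring buffer with a stride of 64 samples; SUM8P2() does the
   same for two output samples at once, reusing each loaded sample p[k*64]
   for both accumulators. */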
  740. void ff_mpa_synth_init(MPA_INT *window)
  741. {
  742. int i;
  743. /* max = 18760, max sum over all 16 coefs : 44736 */
  744. for(i=0;i<257;i++) {
  745. int v;
  746. v = ff_mpa_enwindow[i];
  747. #if WFRAC_BITS < 16
  748. v = (v + (1 << (16 - WFRAC_BITS - 1))) >> (16 - WFRAC_BITS);
  749. #endif
  750. window[i] = v;
  751. if ((i & 63) != 0)
  752. v = -v;
  753. if (i != 0)
  754. window[512 - i] = v;
  755. }
  756. }
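/* NOTE: only coefficients 0..256 of the synthesis window are stored in
   ff_mpa_enwindow; the other half is reconstructed here as
   window[512 - i] = +/- v, with the sign flipped except at multiples of
   64, which yields the anti-symmetric 512-tap synthesis window. */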
  757. /* 32 sub band synthesis filter. Input: 32 sub band samples, Output:
  758. 32 samples. */
  759. /* XXX: optimize by avoiding ring buffer usage */
  760. void ff_mpa_synth_filter(MPA_INT *synth_buf_ptr, int *synth_buf_offset,
  761. MPA_INT *window, int *dither_state,
  762. OUT_INT *samples, int incr,
  763. int32_t sb_samples[SBLIMIT])
  764. {
  765. int32_t tmp[32];
  766. register MPA_INT *synth_buf;
  767. register const MPA_INT *w, *w2, *p;
  768. int j, offset, v;
  769. OUT_INT *samples2;
  770. #if FRAC_BITS <= 15
  771. int sum, sum2;
  772. #else
  773. int64_t sum, sum2;
  774. #endif
  775. dct32(tmp, sb_samples);
  776. offset = *synth_buf_offset;
  777. synth_buf = synth_buf_ptr + offset;
  778. for(j=0;j<32;j++) {
  779. v = tmp[j];
  780. #if FRAC_BITS <= 15
781. /* NOTE: this can cause a loss of precision with very high amplitude
782. sound */
  783. v = av_clip_int16(v);
  784. #endif
  785. synth_buf[j] = v;
  786. }
  787. /* copy to avoid wrap */
  788. memcpy(synth_buf + 512, synth_buf, 32 * sizeof(MPA_INT));
  789. samples2 = samples + 31 * incr;
  790. w = window;
  791. w2 = window + 31;
  792. sum = *dither_state;
  793. p = synth_buf + 16;
  794. SUM8(MACS, sum, w, p);
  795. p = synth_buf + 48;
  796. SUM8(MLSS, sum, w + 32, p);
  797. *samples = round_sample(&sum);
  798. samples += incr;
  799. w++;
800. /* we calculate two samples at the same time to save one memory
801. access per pair of samples */
  802. for(j=1;j<16;j++) {
  803. sum2 = 0;
  804. p = synth_buf + 16 + j;
  805. SUM8P2(sum, MACS, sum2, MLSS, w, w2, p);
  806. p = synth_buf + 48 - j;
  807. SUM8P2(sum, MLSS, sum2, MLSS, w + 32, w2 + 32, p);
  808. *samples = round_sample(&sum);
  809. samples += incr;
  810. sum += sum2;
  811. *samples2 = round_sample(&sum);
  812. samples2 -= incr;
  813. w++;
  814. w2--;
  815. }
  816. p = synth_buf + 32;
  817. SUM8(MLSS, sum, w + 32, p);
  818. *samples = round_sample(&sum);
  819. *dither_state= sum;
  820. offset = (offset - 32) & 511;
  821. *synth_buf_offset = offset;
  822. }
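/* Illustrative sketch only (kept in #if 0): how a caller typically drives
   ff_mpa_synth_filter(), one 32-sample sub band block per channel, with
   interleaved output.  The synth_buf / synth_buf_offset / dither_state /
   sb_samples field names are assumptions about the decoder context and
   are shown here purely for clarity. */
#if 0
static void synth_all_blocks(MPADecodeContext *s, OUT_INT *samples,
                             int nb_blocks)
{
    int ch, blk;
    OUT_INT *samples_ptr;
    for(ch = 0; ch < s->nb_channels; ch++) {
        samples_ptr = samples + ch;
        for(blk = 0; blk < nb_blocks; blk++) {
            /* each call consumes 32 sub band samples and produces 32
               interleaved output samples for this channel */
            ff_mpa_synth_filter(s->synth_buf[ch], &s->synth_buf_offset[ch],
                                window, &s->dither_state,
                                samples_ptr, s->nb_channels,
                                s->sb_samples[ch][blk]);
            samples_ptr += 32 * s->nb_channels;
        }
    }
}
#endif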
  823. #define C3 FIXHR(0.86602540378443864676/2)
  824. /* 0.5 / cos(pi*(2*i+1)/36) */
  825. static const int icos36[9] = {
  826. FIXR(0.50190991877167369479),
  827. FIXR(0.51763809020504152469), //0
  828. FIXR(0.55168895948124587824),
  829. FIXR(0.61038729438072803416),
  830. FIXR(0.70710678118654752439), //1
  831. FIXR(0.87172339781054900991),
  832. FIXR(1.18310079157624925896),
  833. FIXR(1.93185165257813657349), //2
  834. FIXR(5.73685662283492756461),
  835. };
  836. /* 0.5 / cos(pi*(2*i+1)/36) */
  837. static const int icos36h[9] = {
  838. FIXHR(0.50190991877167369479/2),
  839. FIXHR(0.51763809020504152469/2), //0
  840. FIXHR(0.55168895948124587824/2),
  841. FIXHR(0.61038729438072803416/2),
  842. FIXHR(0.70710678118654752439/2), //1
  843. FIXHR(0.87172339781054900991/2),
  844. FIXHR(1.18310079157624925896/4),
  845. FIXHR(1.93185165257813657349/4), //2
  846. // FIXHR(5.73685662283492756461),
  847. };
848. /* 12-point IMDCT. We compute it "by hand" by factorizing the obvious
849. cases. */
  850. static void imdct12(int *out, int *in)
  851. {
  852. int in0, in1, in2, in3, in4, in5, t1, t2;
  853. in0= in[0*3];
  854. in1= in[1*3] + in[0*3];
  855. in2= in[2*3] + in[1*3];
  856. in3= in[3*3] + in[2*3];
  857. in4= in[4*3] + in[3*3];
  858. in5= in[5*3] + in[4*3];
  859. in5 += in3;
  860. in3 += in1;
  861. in2= MULH(2*in2, C3);
  862. in3= MULH(4*in3, C3);
  863. t1 = in0 - in4;
  864. t2 = MULH(2*(in1 - in5), icos36h[4]);
  865. out[ 7]=
  866. out[10]= t1 + t2;
  867. out[ 1]=
  868. out[ 4]= t1 - t2;
  869. in0 += in4>>1;
  870. in4 = in0 + in2;
  871. in5 += 2*in1;
  872. in1 = MULH(in5 + in3, icos36h[1]);
  873. out[ 8]=
  874. out[ 9]= in4 + in1;
  875. out[ 2]=
  876. out[ 3]= in4 - in1;
  877. in0 -= in2;
  878. in5 = MULH(2*(in5 - in3), icos36h[7]);
  879. out[ 0]=
  880. out[ 5]= in0 - in5;
  881. out[ 6]=
  882. out[11]= in0 + in5;
  883. }
  884. /* cos(pi*i/18) */
  885. #define C1 FIXHR(0.98480775301220805936/2)
  886. #define C2 FIXHR(0.93969262078590838405/2)
  887. #define C3 FIXHR(0.86602540378443864676/2)
  888. #define C4 FIXHR(0.76604444311897803520/2)
  889. #define C5 FIXHR(0.64278760968653932632/2)
  890. #define C6 FIXHR(0.5/2)
  891. #define C7 FIXHR(0.34202014332566873304/2)
  892. #define C8 FIXHR(0.17364817766693034885/2)
893. /* using a Lee-like decomposition followed by a hand-coded 9-point DCT */
  894. static void imdct36(int *out, int *buf, int *in, int *win)
  895. {
  896. int i, j, t0, t1, t2, t3, s0, s1, s2, s3;
  897. int tmp[18], *tmp1, *in1;
  898. for(i=17;i>=1;i--)
  899. in[i] += in[i-1];
  900. for(i=17;i>=3;i-=2)
  901. in[i] += in[i-2];
  902. for(j=0;j<2;j++) {
  903. tmp1 = tmp + j;
  904. in1 = in + j;
  905. #if 0
  906. //more accurate but slower
  907. int64_t t0, t1, t2, t3;
  908. t2 = in1[2*4] + in1[2*8] - in1[2*2];
  909. t3 = (in1[2*0] + (int64_t)(in1[2*6]>>1))<<32;
  910. t1 = in1[2*0] - in1[2*6];
  911. tmp1[ 6] = t1 - (t2>>1);
  912. tmp1[16] = t1 + t2;
  913. t0 = MUL64(2*(in1[2*2] + in1[2*4]), C2);
  914. t1 = MUL64( in1[2*4] - in1[2*8] , -2*C8);
  915. t2 = MUL64(2*(in1[2*2] + in1[2*8]), -C4);
  916. tmp1[10] = (t3 - t0 - t2) >> 32;
  917. tmp1[ 2] = (t3 + t0 + t1) >> 32;
  918. tmp1[14] = (t3 + t2 - t1) >> 32;
  919. tmp1[ 4] = MULH(2*(in1[2*5] + in1[2*7] - in1[2*1]), -C3);
  920. t2 = MUL64(2*(in1[2*1] + in1[2*5]), C1);
  921. t3 = MUL64( in1[2*5] - in1[2*7] , -2*C7);
  922. t0 = MUL64(2*in1[2*3], C3);
  923. t1 = MUL64(2*(in1[2*1] + in1[2*7]), -C5);
  924. tmp1[ 0] = (t2 + t3 + t0) >> 32;
  925. tmp1[12] = (t2 + t1 - t0) >> 32;
  926. tmp1[ 8] = (t3 - t1 - t0) >> 32;
  927. #else
  928. t2 = in1[2*4] + in1[2*8] - in1[2*2];
  929. t3 = in1[2*0] + (in1[2*6]>>1);
  930. t1 = in1[2*0] - in1[2*6];
  931. tmp1[ 6] = t1 - (t2>>1);
  932. tmp1[16] = t1 + t2;
  933. t0 = MULH(2*(in1[2*2] + in1[2*4]), C2);
  934. t1 = MULH( in1[2*4] - in1[2*8] , -2*C8);
  935. t2 = MULH(2*(in1[2*2] + in1[2*8]), -C4);
  936. tmp1[10] = t3 - t0 - t2;
  937. tmp1[ 2] = t3 + t0 + t1;
  938. tmp1[14] = t3 + t2 - t1;
  939. tmp1[ 4] = MULH(2*(in1[2*5] + in1[2*7] - in1[2*1]), -C3);
  940. t2 = MULH(2*(in1[2*1] + in1[2*5]), C1);
  941. t3 = MULH( in1[2*5] - in1[2*7] , -2*C7);
  942. t0 = MULH(2*in1[2*3], C3);
  943. t1 = MULH(2*(in1[2*1] + in1[2*7]), -C5);
  944. tmp1[ 0] = t2 + t3 + t0;
  945. tmp1[12] = t2 + t1 - t0;
  946. tmp1[ 8] = t3 - t1 - t0;
  947. #endif
  948. }
  949. i = 0;
  950. for(j=0;j<4;j++) {
  951. t0 = tmp[i];
  952. t1 = tmp[i + 2];
  953. s0 = t1 + t0;
  954. s2 = t1 - t0;
  955. t2 = tmp[i + 1];
  956. t3 = tmp[i + 3];
  957. s1 = MULH(2*(t3 + t2), icos36h[j]);
  958. s3 = MULL(t3 - t2, icos36[8 - j], FRAC_BITS);
  959. t0 = s0 + s1;
  960. t1 = s0 - s1;
  961. out[(9 + j)*SBLIMIT] = MULH(t1, win[9 + j]) + buf[9 + j];
  962. out[(8 - j)*SBLIMIT] = MULH(t1, win[8 - j]) + buf[8 - j];
  963. buf[9 + j] = MULH(t0, win[18 + 9 + j]);
  964. buf[8 - j] = MULH(t0, win[18 + 8 - j]);
  965. t0 = s2 + s3;
  966. t1 = s2 - s3;
  967. out[(9 + 8 - j)*SBLIMIT] = MULH(t1, win[9 + 8 - j]) + buf[9 + 8 - j];
  968. out[( j)*SBLIMIT] = MULH(t1, win[ j]) + buf[ j];
  969. buf[9 + 8 - j] = MULH(t0, win[18 + 9 + 8 - j]);
  970. buf[ + j] = MULH(t0, win[18 + j]);
  971. i += 4;
  972. }
  973. s0 = tmp[16];
  974. s1 = MULH(2*tmp[17], icos36h[4]);
  975. t0 = s0 + s1;
  976. t1 = s0 - s1;
  977. out[(9 + 4)*SBLIMIT] = MULH(t1, win[9 + 4]) + buf[9 + 4];
  978. out[(8 - 4)*SBLIMIT] = MULH(t1, win[8 - 4]) + buf[8 - 4];
  979. buf[9 + 4] = MULH(t0, win[18 + 9 + 4]);
  980. buf[8 - 4] = MULH(t0, win[18 + 8 - 4]);
  981. }
  982. /* return the number of decoded frames */
  983. static int mp_decode_layer1(MPADecodeContext *s)
  984. {
  985. int bound, i, v, n, ch, j, mant;
  986. uint8_t allocation[MPA_MAX_CHANNELS][SBLIMIT];
  987. uint8_t scale_factors[MPA_MAX_CHANNELS][SBLIMIT];
  988. if (s->mode == MPA_JSTEREO)
  989. bound = (s->mode_ext + 1) * 4;
  990. else
  991. bound = SBLIMIT;
  992. /* allocation bits */
  993. for(i=0;i<bound;i++) {
  994. for(ch=0;ch<s->nb_channels;ch++) {
  995. allocation[ch][i] = get_bits(&s->gb, 4);
  996. }
  997. }
  998. for(i=bound;i<SBLIMIT;i++) {
  999. allocation[0][i] = get_bits(&s->gb, 4);
  1000. }
  1001. /* scale factors */
  1002. for(i=0;i<bound;i++) {
  1003. for(ch=0;ch<s->nb_channels;ch++) {
  1004. if (allocation[ch][i])
  1005. scale_factors[ch][i] = get_bits(&s->gb, 6);
  1006. }
  1007. }
  1008. for(i=bound;i<SBLIMIT;i++) {
  1009. if (allocation[0][i]) {
  1010. scale_factors[0][i] = get_bits(&s->gb, 6);
  1011. scale_factors[1][i] = get_bits(&s->gb, 6);
  1012. }
  1013. }
  1014. /* compute samples */
  1015. for(j=0;j<12;j++) {
  1016. for(i=0;i<bound;i++) {
  1017. for(ch=0;ch<s->nb_channels;ch++) {
  1018. n = allocation[ch][i];
  1019. if (n) {
  1020. mant = get_bits(&s->gb, n + 1);
  1021. v = l1_unscale(n, mant, scale_factors[ch][i]);
  1022. } else {
  1023. v = 0;
  1024. }
  1025. s->sb_samples[ch][j][i] = v;
  1026. }
  1027. }
  1028. for(i=bound;i<SBLIMIT;i++) {
  1029. n = allocation[0][i];
  1030. if (n) {
  1031. mant = get_bits(&s->gb, n + 1);
  1032. v = l1_unscale(n, mant, scale_factors[0][i]);
  1033. s->sb_samples[0][j][i] = v;
  1034. v = l1_unscale(n, mant, scale_factors[1][i]);
  1035. s->sb_samples[1][j][i] = v;
  1036. } else {
  1037. s->sb_samples[0][j][i] = 0;
  1038. s->sb_samples[1][j][i] = 0;
  1039. }
  1040. }
  1041. }
  1042. return 12;
  1043. }
  1044. static int mp_decode_layer2(MPADecodeContext *s)
  1045. {
  1046. int sblimit; /* number of used subbands */
  1047. const unsigned char *alloc_table;
  1048. int table, bit_alloc_bits, i, j, ch, bound, v;
  1049. unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT];
  1050. unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT];
  1051. unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3], *sf;
  1052. int scale, qindex, bits, steps, k, l, m, b;
  1053. /* select decoding table */
  1054. table = ff_mpa_l2_select_table(s->bit_rate / 1000, s->nb_channels,
  1055. s->sample_rate, s->lsf);
  1056. sblimit = ff_mpa_sblimit_table[table];
  1057. alloc_table = ff_mpa_alloc_tables[table];
  1058. if (s->mode == MPA_JSTEREO)
  1059. bound = (s->mode_ext + 1) * 4;
  1060. else
  1061. bound = sblimit;
  1062. dprintf(s->avctx, "bound=%d sblimit=%d\n", bound, sblimit);
  1063. /* sanity check */
  1064. if( bound > sblimit ) bound = sblimit;
  1065. /* parse bit allocation */
  1066. j = 0;
  1067. for(i=0;i<bound;i++) {
  1068. bit_alloc_bits = alloc_table[j];
  1069. for(ch=0;ch<s->nb_channels;ch++) {
  1070. bit_alloc[ch][i] = get_bits(&s->gb, bit_alloc_bits);
  1071. }
  1072. j += 1 << bit_alloc_bits;
  1073. }
  1074. for(i=bound;i<sblimit;i++) {
  1075. bit_alloc_bits = alloc_table[j];
  1076. v = get_bits(&s->gb, bit_alloc_bits);
  1077. bit_alloc[0][i] = v;
  1078. bit_alloc[1][i] = v;
  1079. j += 1 << bit_alloc_bits;
  1080. }
  1081. /* scale codes */
  1082. for(i=0;i<sblimit;i++) {
  1083. for(ch=0;ch<s->nb_channels;ch++) {
  1084. if (bit_alloc[ch][i])
  1085. scale_code[ch][i] = get_bits(&s->gb, 2);
  1086. }
  1087. }
  1088. /* scale factors */
  1089. for(i=0;i<sblimit;i++) {
  1090. for(ch=0;ch<s->nb_channels;ch++) {
  1091. if (bit_alloc[ch][i]) {
  1092. sf = scale_factors[ch][i];
  1093. switch(scale_code[ch][i]) {
  1094. default:
  1095. case 0:
  1096. sf[0] = get_bits(&s->gb, 6);
  1097. sf[1] = get_bits(&s->gb, 6);
  1098. sf[2] = get_bits(&s->gb, 6);
  1099. break;
  1100. case 2:
  1101. sf[0] = get_bits(&s->gb, 6);
  1102. sf[1] = sf[0];
  1103. sf[2] = sf[0];
  1104. break;
  1105. case 1:
  1106. sf[0] = get_bits(&s->gb, 6);
  1107. sf[2] = get_bits(&s->gb, 6);
  1108. sf[1] = sf[0];
  1109. break;
  1110. case 3:
  1111. sf[0] = get_bits(&s->gb, 6);
  1112. sf[2] = get_bits(&s->gb, 6);
  1113. sf[1] = sf[2];
  1114. break;
  1115. }
  1116. }
  1117. }
  1118. }
  1119. /* samples */
  1120. for(k=0;k<3;k++) {
  1121. for(l=0;l<12;l+=3) {
  1122. j = 0;
  1123. for(i=0;i<bound;i++) {
  1124. bit_alloc_bits = alloc_table[j];
  1125. for(ch=0;ch<s->nb_channels;ch++) {
  1126. b = bit_alloc[ch][i];
  1127. if (b) {
  1128. scale = scale_factors[ch][i][k];
  1129. qindex = alloc_table[j+b];
  1130. bits = ff_mpa_quant_bits[qindex];
  1131. if (bits < 0) {
  1132. /* 3 values at the same time */
  1133. v = get_bits(&s->gb, -bits);
  1134. steps = ff_mpa_quant_steps[qindex];
  1135. s->sb_samples[ch][k * 12 + l + 0][i] =
  1136. l2_unscale_group(steps, v % steps, scale);
  1137. v = v / steps;
  1138. s->sb_samples[ch][k * 12 + l + 1][i] =
  1139. l2_unscale_group(steps, v % steps, scale);
  1140. v = v / steps;
  1141. s->sb_samples[ch][k * 12 + l + 2][i] =
  1142. l2_unscale_group(steps, v, scale);
  1143. } else {
  1144. for(m=0;m<3;m++) {
  1145. v = get_bits(&s->gb, bits);
  1146. v = l1_unscale(bits - 1, v, scale);
  1147. s->sb_samples[ch][k * 12 + l + m][i] = v;
  1148. }
  1149. }
  1150. } else {
  1151. s->sb_samples[ch][k * 12 + l + 0][i] = 0;
  1152. s->sb_samples[ch][k * 12 + l + 1][i] = 0;
  1153. s->sb_samples[ch][k * 12 + l + 2][i] = 0;
  1154. }
  1155. }
  1156. /* next subband in alloc table */
  1157. j += 1 << bit_alloc_bits;
  1158. }
  1159. /* XXX: find a way to avoid this duplication of code */
  1160. for(i=bound;i<sblimit;i++) {
  1161. bit_alloc_bits = alloc_table[j];
  1162. b = bit_alloc[0][i];
  1163. if (b) {
  1164. int mant, scale0, scale1;
  1165. scale0 = scale_factors[0][i][k];
  1166. scale1 = scale_factors[1][i][k];
  1167. qindex = alloc_table[j+b];
  1168. bits = ff_mpa_quant_bits[qindex];
  1169. if (bits < 0) {
  1170. /* 3 values at the same time */
  1171. v = get_bits(&s->gb, -bits);
  1172. steps = ff_mpa_quant_steps[qindex];
  1173. mant = v % steps;
  1174. v = v / steps;
  1175. s->sb_samples[0][k * 12 + l + 0][i] =
  1176. l2_unscale_group(steps, mant, scale0);
  1177. s->sb_samples[1][k * 12 + l + 0][i] =
  1178. l2_unscale_group(steps, mant, scale1);
  1179. mant = v % steps;
  1180. v = v / steps;
  1181. s->sb_samples[0][k * 12 + l + 1][i] =
  1182. l2_unscale_group(steps, mant, scale0);
  1183. s->sb_samples[1][k * 12 + l + 1][i] =
  1184. l2_unscale_group(steps, mant, scale1);
  1185. s->sb_samples[0][k * 12 + l + 2][i] =
  1186. l2_unscale_group(steps, v, scale0);
  1187. s->sb_samples[1][k * 12 + l + 2][i] =
  1188. l2_unscale_group(steps, v, scale1);
  1189. } else {
  1190. for(m=0;m<3;m++) {
  1191. mant = get_bits(&s->gb, bits);
  1192. s->sb_samples[0][k * 12 + l + m][i] =
  1193. l1_unscale(bits - 1, mant, scale0);
  1194. s->sb_samples[1][k * 12 + l + m][i] =
  1195. l1_unscale(bits - 1, mant, scale1);
  1196. }
  1197. }
  1198. } else {
  1199. s->sb_samples[0][k * 12 + l + 0][i] = 0;
  1200. s->sb_samples[0][k * 12 + l + 1][i] = 0;
  1201. s->sb_samples[0][k * 12 + l + 2][i] = 0;
  1202. s->sb_samples[1][k * 12 + l + 0][i] = 0;
  1203. s->sb_samples[1][k * 12 + l + 1][i] = 0;
  1204. s->sb_samples[1][k * 12 + l + 2][i] = 0;
  1205. }
  1206. /* next subband in alloc table */
  1207. j += 1 << bit_alloc_bits;
  1208. }
  1209. /* fill remaining samples to zero */
  1210. for(i=sblimit;i<SBLIMIT;i++) {
  1211. for(ch=0;ch<s->nb_channels;ch++) {
  1212. s->sb_samples[ch][k * 12 + l + 0][i] = 0;
  1213. s->sb_samples[ch][k * 12 + l + 1][i] = 0;
  1214. s->sb_samples[ch][k * 12 + l + 2][i] = 0;
  1215. }
  1216. }
  1217. }
  1218. }
  1219. return 3 * 12;
  1220. }
  1221. static inline void lsf_sf_expand(int *slen,
  1222. int sf, int n1, int n2, int n3)
  1223. {
  1224. if (n3) {
  1225. slen[3] = sf % n3;
  1226. sf /= n3;
  1227. } else {
  1228. slen[3] = 0;
  1229. }
  1230. if (n2) {
  1231. slen[2] = sf % n2;
  1232. sf /= n2;
  1233. } else {
  1234. slen[2] = 0;
  1235. }
  1236. slen[1] = sf % n1;
  1237. sf /= n1;
  1238. slen[0] = sf;
  1239. }
  1240. static void exponents_from_scale_factors(MPADecodeContext *s,
  1241. GranuleDef *g,
  1242. int16_t *exponents)
  1243. {
  1244. const uint8_t *bstab, *pretab;
  1245. int len, i, j, k, l, v0, shift, gain, gains[3];
  1246. int16_t *exp_ptr;
  1247. exp_ptr = exponents;
  1248. gain = g->global_gain - 210;
  1249. shift = g->scalefac_scale + 1;
  1250. bstab = band_size_long[s->sample_rate_index];
  1251. pretab = mpa_pretab[g->preflag];
  1252. for(i=0;i<g->long_end;i++) {
  1253. v0 = gain - ((g->scale_factors[i] + pretab[i]) << shift) + 400;
  1254. len = bstab[i];
  1255. for(j=len;j>0;j--)
  1256. *exp_ptr++ = v0;
  1257. }
  1258. if (g->short_start < 13) {
  1259. bstab = band_size_short[s->sample_rate_index];
  1260. gains[0] = gain - (g->subblock_gain[0] << 3);
  1261. gains[1] = gain - (g->subblock_gain[1] << 3);
  1262. gains[2] = gain - (g->subblock_gain[2] << 3);
  1263. k = g->long_end;
  1264. for(i=g->short_start;i<13;i++) {
  1265. len = bstab[i];
  1266. for(l=0;l<3;l++) {
  1267. v0 = gains[l] - (g->scale_factors[k++] << shift) + 400;
  1268. for(j=len;j>0;j--)
  1269. *exp_ptr++ = v0;
  1270. }
  1271. }
  1272. }
  1273. }
  1274. /* handle n = 0 too */
  1275. static inline int get_bitsz(GetBitContext *s, int n)
  1276. {
  1277. if (n == 0)
  1278. return 0;
  1279. else
  1280. return get_bits(s, n);
  1281. }
  1282. static void switch_buffer(MPADecodeContext *s, int *pos, int *end_pos, int *end_pos2){
  1283. if(s->in_gb.buffer && *pos >= s->gb.size_in_bits){
  1284. s->gb= s->in_gb;
  1285. s->in_gb.buffer=NULL;
  1286. assert((get_bits_count(&s->gb) & 7) == 0);
  1287. skip_bits_long(&s->gb, *pos - *end_pos);
  1288. *end_pos2=
  1289. *end_pos= *end_pos2 + get_bits_count(&s->gb) - *pos;
  1290. *pos= get_bits_count(&s->gb);
  1291. }
  1292. }
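/* NOTE on switch_buffer(): while decoding from the bit reservoir, s->gb
   reads the backstepped data and s->in_gb holds the current frame's
   bitstream; once the read position passes the end of the reservoir data,
   this switches over to s->in_gb and adjusts pos/end_pos so the caller's
   bookkeeping stays consistent. */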
  1293. static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
  1294. int16_t *exponents, int end_pos2)
  1295. {
  1296. int s_index;
  1297. int i;
  1298. int last_pos, bits_left;
  1299. VLC *vlc;
  1300. int end_pos= FFMIN(end_pos2, s->gb.size_in_bits);
  1301. /* low frequencies (called big values) */
  1302. s_index = 0;
  1303. for(i=0;i<3;i++) {
  1304. int j, k, l, linbits;
  1305. j = g->region_size[i];
  1306. if (j == 0)
  1307. continue;
  1308. /* select vlc table */
  1309. k = g->table_select[i];
  1310. l = mpa_huff_data[k][0];
  1311. linbits = mpa_huff_data[k][1];
  1312. vlc = &huff_vlc[l];
  1313. if(!l){
  1314. memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*2*j);
  1315. s_index += 2*j;
  1316. continue;
  1317. }
1318. /* read the huffman code and compute each pair of values */
  1319. for(;j>0;j--) {
  1320. int exponent, x, y, v;
  1321. int pos= get_bits_count(&s->gb);
  1322. if (pos >= end_pos){
  1323. // av_log(NULL, AV_LOG_ERROR, "pos: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
  1324. switch_buffer(s, &pos, &end_pos, &end_pos2);
  1325. // av_log(NULL, AV_LOG_ERROR, "new pos: %d %d\n", pos, end_pos);
  1326. if(pos >= end_pos)
  1327. break;
  1328. }
  1329. y = get_vlc2(&s->gb, vlc->table, 7, 3);
  1330. if(!y){
  1331. g->sb_hybrid[s_index ] =
  1332. g->sb_hybrid[s_index+1] = 0;
  1333. s_index += 2;
  1334. continue;
  1335. }
  1336. exponent= exponents[s_index];
  1337. dprintf(s->avctx, "region=%d n=%d x=%d y=%d exp=%d\n",
  1338. i, g->region_size[i] - j, x, y, exponent);
  1339. if(y&16){
  1340. x = y >> 5;
  1341. y = y & 0x0f;
  1342. if (x < 15){
  1343. v = expval_table[ exponent ][ x ];
  1344. // v = expval_table[ (exponent&3) ][ x ] >> FFMIN(0 - (exponent>>2), 31);
  1345. }else{
  1346. x += get_bitsz(&s->gb, linbits);
  1347. v = l3_unscale(x, exponent);
  1348. }
  1349. if (get_bits1(&s->gb))
  1350. v = -v;
  1351. g->sb_hybrid[s_index] = v;
  1352. if (y < 15){
  1353. v = expval_table[ exponent ][ y ];
  1354. }else{
  1355. y += get_bitsz(&s->gb, linbits);
  1356. v = l3_unscale(y, exponent);
  1357. }
  1358. if (get_bits1(&s->gb))
  1359. v = -v;
  1360. g->sb_hybrid[s_index+1] = v;
  1361. }else{
  1362. x = y >> 5;
  1363. y = y & 0x0f;
  1364. x += y;
  1365. if (x < 15){
  1366. v = expval_table[ exponent ][ x ];
  1367. }else{
  1368. x += get_bitsz(&s->gb, linbits);
  1369. v = l3_unscale(x, exponent);
  1370. }
  1371. if (get_bits1(&s->gb))
  1372. v = -v;
  1373. g->sb_hybrid[s_index+!!y] = v;
  1374. g->sb_hybrid[s_index+ !y] = 0;
  1375. }
  1376. s_index+=2;
  1377. }
  1378. }
  1379. /* high frequencies */
  1380. vlc = &huff_quad_vlc[g->count1table_select];
  1381. last_pos=0;
  1382. while (s_index <= 572) {
  1383. int pos, code;
  1384. pos = get_bits_count(&s->gb);
  1385. if (pos >= end_pos) {
  1386. if (pos > end_pos2 && last_pos){
  1387. /* some encoders generate an incorrect size for this
  1388. part. We must go back into the data */
  1389. s_index -= 4;
  1390. skip_bits_long(&s->gb, last_pos - pos);
  1391. av_log(s->avctx, AV_LOG_INFO, "overread, skip %d enddists: %d %d\n", last_pos - pos, end_pos-pos, end_pos2-pos);
  1392. if(s->error_recognition >= FF_ER_COMPLIANT)
  1393. s_index=0;
  1394. break;
  1395. }
  1396. // av_log(NULL, AV_LOG_ERROR, "pos2: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
  1397. switch_buffer(s, &pos, &end_pos, &end_pos2);
  1398. // av_log(NULL, AV_LOG_ERROR, "new pos2: %d %d %d\n", pos, end_pos, s_index);
  1399. if(pos >= end_pos)
  1400. break;
  1401. }
  1402. last_pos= pos;
  1403. code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1);
  1404. dprintf(s->avctx, "t=%d code=%d\n", g->count1table_select, code);
  1405. g->sb_hybrid[s_index+0]=
  1406. g->sb_hybrid[s_index+1]=
  1407. g->sb_hybrid[s_index+2]=
  1408. g->sb_hybrid[s_index+3]= 0;
  1409. while(code){
  1410. static const int idxtab[16]={3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0};
  1411. int v;
  1412. int pos= s_index+idxtab[code];
  1413. code ^= 8>>idxtab[code];
  1414. v = exp_table[ exponents[pos] ];
  1415. // v = exp_table[ (exponents[pos]&3) ] >> FFMIN(0 - (exponents[pos]>>2), 31);
  1416. if(get_bits1(&s->gb))
  1417. v = -v;
  1418. g->sb_hybrid[pos] = v;
  1419. }
  1420. s_index+=4;
  1421. }
  1422. /* skip extension bits */
  1423. bits_left = end_pos2 - get_bits_count(&s->gb);
  1424. //av_log(NULL, AV_LOG_ERROR, "left:%d buf:%p\n", bits_left, s->in_gb.buffer);
  1425. if (bits_left < 0 && s->error_recognition >= FF_ER_COMPLIANT) {
  1426. av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
  1427. s_index=0;
  1428. }else if(bits_left > 0 && s->error_recognition >= FF_ER_AGGRESSIVE){
  1429. av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
  1430. s_index=0;
  1431. }
  1432. memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*(576 - s_index));
  1433. skip_bits_long(&s->gb, bits_left);
  1434. i= get_bits_count(&s->gb);
  1435. switch_buffer(s, &i, &end_pos, &end_pos2);
  1436. return 0;
  1437. }
1438. /* Reorder short blocks from bitstream order to interleaved order. It
1439. would be faster to do it during parsing, but the code would be far more
1440. complicated. */
  1441. static void reorder_block(MPADecodeContext *s, GranuleDef *g)
  1442. {
  1443. int i, j, len;
  1444. int32_t *ptr, *dst, *ptr1;
  1445. int32_t tmp[576];
  1446. if (g->block_type != 2)
  1447. return;
  1448. if (g->switch_point) {
  1449. if (s->sample_rate_index != 8) {
  1450. ptr = g->sb_hybrid + 36;
  1451. } else {
  1452. ptr = g->sb_hybrid + 48;
  1453. }
  1454. } else {
  1455. ptr = g->sb_hybrid;
  1456. }
  1457. for(i=g->short_start;i<13;i++) {
  1458. len = band_size_short[s->sample_rate_index][i];
  1459. ptr1 = ptr;
  1460. dst = tmp;
  1461. for(j=len;j>0;j--) {
  1462. *dst++ = ptr[0*len];
  1463. *dst++ = ptr[1*len];
  1464. *dst++ = ptr[2*len];
  1465. ptr++;
  1466. }
  1467. ptr+=2*len;
  1468. memcpy(ptr1, tmp, len * 3 * sizeof(*ptr1));
  1469. }
  1470. }
  1471. #define ISQRT2 FIXR(0.70710678118654752440)
  1472. static void compute_stereo(MPADecodeContext *s,
  1473. GranuleDef *g0, GranuleDef *g1)
  1474. {
  1475. int i, j, k, l;
  1476. int32_t v1, v2;
  1477. int sf_max, tmp0, tmp1, sf, len, non_zero_found;
  1478. int32_t (*is_tab)[16];
  1479. int32_t *tab0, *tab1;
  1480. int non_zero_found_short[3];
  1481. /* intensity stereo */
  1482. if (s->mode_ext & MODE_EXT_I_STEREO) {
  1483. if (!s->lsf) {
  1484. is_tab = is_table;
  1485. sf_max = 7;
  1486. } else {
  1487. is_tab = is_table_lsf[g1->scalefac_compress & 1];
  1488. sf_max = 16;
  1489. }
  1490. tab0 = g0->sb_hybrid + 576;
  1491. tab1 = g1->sb_hybrid + 576;
  1492. non_zero_found_short[0] = 0;
  1493. non_zero_found_short[1] = 0;
  1494. non_zero_found_short[2] = 0;
  1495. k = (13 - g1->short_start) * 3 + g1->long_end - 3;
  1496. for(i = 12;i >= g1->short_start;i--) {
  1497. /* for last band, use previous scale factor */
  1498. if (i != 11)
  1499. k -= 3;
  1500. len = band_size_short[s->sample_rate_index][i];
  1501. for(l=2;l>=0;l--) {
  1502. tab0 -= len;
  1503. tab1 -= len;
  1504. if (!non_zero_found_short[l]) {
  1505. /* test if non zero band. if so, stop doing i-stereo */
  1506. for(j=0;j<len;j++) {
  1507. if (tab1[j] != 0) {
  1508. non_zero_found_short[l] = 1;
  1509. goto found1;
  1510. }
  1511. }
  1512. sf = g1->scale_factors[k + l];
  1513. if (sf >= sf_max)
  1514. goto found1;
  1515. v1 = is_tab[0][sf];
  1516. v2 = is_tab[1][sf];
  1517. for(j=0;j<len;j++) {
  1518. tmp0 = tab0[j];
  1519. tab0[j] = MULL(tmp0, v1, FRAC_BITS);
  1520. tab1[j] = MULL(tmp0, v2, FRAC_BITS);
  1521. }
  1522. } else {
  1523. found1:
  1524. if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1525. /* lower part of the spectrum : do ms stereo
  1526. if enabled */
  1527. for(j=0;j<len;j++) {
  1528. tmp0 = tab0[j];
  1529. tmp1 = tab1[j];
  1530. tab0[j] = MULL(tmp0 + tmp1, ISQRT2, FRAC_BITS);
  1531. tab1[j] = MULL(tmp0 - tmp1, ISQRT2, FRAC_BITS);
  1532. }
  1533. }
  1534. }
  1535. }
  1536. }
  1537. non_zero_found = non_zero_found_short[0] |
  1538. non_zero_found_short[1] |
  1539. non_zero_found_short[2];
  1540. for(i = g1->long_end - 1;i >= 0;i--) {
  1541. len = band_size_long[s->sample_rate_index][i];
  1542. tab0 -= len;
  1543. tab1 -= len;
  1544. /* test if non zero band. if so, stop doing i-stereo */
  1545. if (!non_zero_found) {
  1546. for(j=0;j<len;j++) {
  1547. if (tab1[j] != 0) {
  1548. non_zero_found = 1;
  1549. goto found2;
  1550. }
  1551. }
  1552. /* for last band, use previous scale factor */
  1553. k = (i == 21) ? 20 : i;
  1554. sf = g1->scale_factors[k];
  1555. if (sf >= sf_max)
  1556. goto found2;
  1557. v1 = is_tab[0][sf];
  1558. v2 = is_tab[1][sf];
  1559. for(j=0;j<len;j++) {
  1560. tmp0 = tab0[j];
  1561. tab0[j] = MULL(tmp0, v1, FRAC_BITS);
  1562. tab1[j] = MULL(tmp0, v2, FRAC_BITS);
  1563. }
  1564. } else {
  1565. found2:
  1566. if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1567. /* lower part of the spectrum : do ms stereo
  1568. if enabled */
  1569. for(j=0;j<len;j++) {
  1570. tmp0 = tab0[j];
  1571. tmp1 = tab1[j];
  1572. tab0[j] = MULL(tmp0 + tmp1, ISQRT2, FRAC_BITS);
  1573. tab1[j] = MULL(tmp0 - tmp1, ISQRT2, FRAC_BITS);
  1574. }
  1575. }
  1576. }
  1577. }
  1578. } else if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1579. /* ms stereo ONLY */
  1580. /* NOTE: the 1/sqrt(2) normalization factor is included in the
  1581. global gain */
  1582. tab0 = g0->sb_hybrid;
  1583. tab1 = g1->sb_hybrid;
  1584. for(i=0;i<576;i++) {
  1585. tmp0 = tab0[i];
  1586. tmp1 = tab1[i];
  1587. tab0[i] = tmp0 + tmp1;
  1588. tab1[i] = tmp0 - tmp1;
  1589. }
  1590. }
  1591. }
  1592. static void compute_antialias_integer(MPADecodeContext *s,
  1593. GranuleDef *g)
  1594. {
  1595. int32_t *ptr, *csa;
  1596. int n, i;
  1597. /* we antialias only "long" bands */
  1598. if (g->block_type == 2) {
  1599. if (!g->switch_point)
  1600. return;
  1601. /* XXX: check this for 8000Hz case */
  1602. n = 1;
  1603. } else {
  1604. n = SBLIMIT - 1;
  1605. }
  1606. ptr = g->sb_hybrid + 18;
  1607. for(i = n;i > 0;i--) {
  1608. int tmp0, tmp1, tmp2;
  1609. csa = &csa_table[0][0];
  1610. #define INT_AA(j) \
  1611. tmp0 = ptr[-1-j];\
  1612. tmp1 = ptr[ j];\
  1613. tmp2= MULH(tmp0 + tmp1, csa[0+4*j]);\
  1614. ptr[-1-j] = 4*(tmp2 - MULH(tmp1, csa[2+4*j]));\
  1615. ptr[ j] = 4*(tmp2 + MULH(tmp0, csa[3+4*j]));
  1616. INT_AA(0)
  1617. INT_AA(1)
  1618. INT_AA(2)
  1619. INT_AA(3)
  1620. INT_AA(4)
  1621. INT_AA(5)
  1622. INT_AA(6)
  1623. INT_AA(7)
  1624. ptr += 18;
  1625. }
  1626. }
  1627. static void compute_antialias_float(MPADecodeContext *s,
  1628. GranuleDef *g)
  1629. {
  1630. int32_t *ptr;
  1631. int n, i;
  1632. /* we antialias only "long" bands */
  1633. if (g->block_type == 2) {
  1634. if (!g->switch_point)
  1635. return;
  1636. /* XXX: check this for 8000Hz case */
  1637. n = 1;
  1638. } else {
  1639. n = SBLIMIT - 1;
  1640. }
  1641. ptr = g->sb_hybrid + 18;
  1642. for(i = n;i > 0;i--) {
  1643. float tmp0, tmp1;
  1644. float *csa = &csa_table_float[0][0];
  1645. #define FLOAT_AA(j)\
  1646. tmp0= ptr[-1-j];\
  1647. tmp1= ptr[ j];\
  1648. ptr[-1-j] = lrintf(tmp0 * csa[0+4*j] - tmp1 * csa[1+4*j]);\
  1649. ptr[ j] = lrintf(tmp0 * csa[1+4*j] + tmp1 * csa[0+4*j]);
  1650. FLOAT_AA(0)
  1651. FLOAT_AA(1)
  1652. FLOAT_AA(2)
  1653. FLOAT_AA(3)
  1654. FLOAT_AA(4)
  1655. FLOAT_AA(5)
  1656. FLOAT_AA(6)
  1657. FLOAT_AA(7)
  1658. ptr += 18;
  1659. }
  1660. }
  1661. static void compute_imdct(MPADecodeContext *s,
  1662. GranuleDef *g,
  1663. int32_t *sb_samples,
  1664. int32_t *mdct_buf)
  1665. {
  1666. int32_t *ptr, *win, *win1, *buf, *out_ptr, *ptr1;
  1667. int32_t out2[12];
  1668. int i, j, mdct_long_end, v, sblimit;
  1669. /* find last non zero block */
  1670. ptr = g->sb_hybrid + 576;
  1671. ptr1 = g->sb_hybrid + 2 * 18;
  1672. while (ptr >= ptr1) {
  1673. ptr -= 6;
  1674. v = ptr[0] | ptr[1] | ptr[2] | ptr[3] | ptr[4] | ptr[5];
  1675. if (v != 0)
  1676. break;
  1677. }
  1678. sblimit = ((ptr - g->sb_hybrid) / 18) + 1;
  1679. if (g->block_type == 2) {
  1680. /* XXX: check for 8000 Hz */
  1681. if (g->switch_point)
  1682. mdct_long_end = 2;
  1683. else
  1684. mdct_long_end = 0;
  1685. } else {
  1686. mdct_long_end = sblimit;
  1687. }
  1688. buf = mdct_buf;
  1689. ptr = g->sb_hybrid;
  1690. for(j=0;j<mdct_long_end;j++) {
  1691. /* apply window & overlap with previous buffer */
  1692. out_ptr = sb_samples + j;
  1693. /* select window */
  1694. if (g->switch_point && j < 2)
  1695. win1 = mdct_win[0];
  1696. else
  1697. win1 = mdct_win[g->block_type];
  1698. /* select frequency inversion */
  1699. win = win1 + ((4 * 36) & -(j & 1));
  1700. imdct36(out_ptr, buf, ptr, win);
  1701. out_ptr += 18*SBLIMIT;
  1702. ptr += 18;
  1703. buf += 18;
  1704. }
  1705. for(j=mdct_long_end;j<sblimit;j++) {
  1706. /* select frequency inversion */
  1707. win = mdct_win[2] + ((4 * 36) & -(j & 1));
  1708. out_ptr = sb_samples + j;
  1709. for(i=0; i<6; i++){
  1710. *out_ptr = buf[i];
  1711. out_ptr += SBLIMIT;
  1712. }
  1713. imdct12(out2, ptr + 0);
  1714. for(i=0;i<6;i++) {
  1715. *out_ptr = MULH(out2[i], win[i]) + buf[i + 6*1];
  1716. buf[i + 6*2] = MULH(out2[i + 6], win[i + 6]);
  1717. out_ptr += SBLIMIT;
  1718. }
  1719. imdct12(out2, ptr + 1);
  1720. for(i=0;i<6;i++) {
  1721. *out_ptr = MULH(out2[i], win[i]) + buf[i + 6*2];
  1722. buf[i + 6*0] = MULH(out2[i + 6], win[i + 6]);
  1723. out_ptr += SBLIMIT;
  1724. }
  1725. imdct12(out2, ptr + 2);
  1726. for(i=0;i<6;i++) {
  1727. buf[i + 6*0] = MULH(out2[i], win[i]) + buf[i + 6*0];
  1728. buf[i + 6*1] = MULH(out2[i + 6], win[i + 6]);
  1729. buf[i + 6*2] = 0;
  1730. }
  1731. ptr += 18;
  1732. buf += 18;
  1733. }
  1734. /* zero bands */
  1735. for(j=sblimit;j<SBLIMIT;j++) {
  1736. /* overlap */
  1737. out_ptr = sb_samples + j;
  1738. for(i=0;i<18;i++) {
  1739. *out_ptr = buf[i];
  1740. buf[i] = 0;
  1741. out_ptr += SBLIMIT;
  1742. }
  1743. buf += 18;
  1744. }
  1745. }
  1746. /* main layer3 decoding function */
  1747. static int mp_decode_layer3(MPADecodeContext *s)
  1748. {
  1749. int nb_granules, main_data_begin, private_bits;
  1750. int gr, ch, blocksplit_flag, i, j, k, n, bits_pos;
  1751. GranuleDef granules[2][2], *g;
  1752. int16_t exponents[576];
  1753. /* read side info */
  1754. if (s->lsf) {
  1755. main_data_begin = get_bits(&s->gb, 8);
  1756. private_bits = get_bits(&s->gb, s->nb_channels);
  1757. nb_granules = 1;
  1758. } else {
  1759. main_data_begin = get_bits(&s->gb, 9);
  1760. if (s->nb_channels == 2)
  1761. private_bits = get_bits(&s->gb, 3);
  1762. else
  1763. private_bits = get_bits(&s->gb, 5);
  1764. nb_granules = 2;
  1765. for(ch=0;ch<s->nb_channels;ch++) {
  1766. granules[ch][0].scfsi = 0; /* all scale factors are transmitted */
  1767. granules[ch][1].scfsi = get_bits(&s->gb, 4);
  1768. }
  1769. }
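/* Side info layout: MPEG-1 frames carry a 9-bit main_data_begin, 5 (mono)
   or 3 (stereo) private bits, 4 scfsi bits per channel and two granules;
   LSF (MPEG-2/2.5) frames carry an 8-bit main_data_begin, one private bit
   per channel and a single granule. */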
  1770. for(gr=0;gr<nb_granules;gr++) {
  1771. for(ch=0;ch<s->nb_channels;ch++) {
  1772. dprintf(s->avctx, "gr=%d ch=%d: side_info\n", gr, ch);
  1773. g = &granules[ch][gr];
  1774. g->part2_3_length = get_bits(&s->gb, 12);
  1775. g->big_values = get_bits(&s->gb, 9);
  1776. if(g->big_values > 288){
  1777. av_log(s->avctx, AV_LOG_ERROR, "big_values too big\n");
  1778. return -1;
  1779. }
  1780. g->global_gain = get_bits(&s->gb, 8);
1781. /* if MS stereo (and not intensity stereo) is selected, we precompute the
1782. 1/sqrt(2) renormalization factor */
  1783. if ((s->mode_ext & (MODE_EXT_MS_STEREO | MODE_EXT_I_STEREO)) ==
  1784. MODE_EXT_MS_STEREO)
  1785. g->global_gain -= 2;
  1786. if (s->lsf)
  1787. g->scalefac_compress = get_bits(&s->gb, 9);
  1788. else
  1789. g->scalefac_compress = get_bits(&s->gb, 4);
  1790. blocksplit_flag = get_bits1(&s->gb);
  1791. if (blocksplit_flag) {
  1792. g->block_type = get_bits(&s->gb, 2);
  1793. if (g->block_type == 0){
  1794. av_log(s->avctx, AV_LOG_ERROR, "invalid block type\n");
  1795. return -1;
  1796. }
  1797. g->switch_point = get_bits1(&s->gb);
  1798. for(i=0;i<2;i++)
  1799. g->table_select[i] = get_bits(&s->gb, 5);
  1800. for(i=0;i<3;i++)
  1801. g->subblock_gain[i] = get_bits(&s->gb, 3);
  1802. ff_init_short_region(s, g);
  1803. } else {
  1804. int region_address1, region_address2;
  1805. g->block_type = 0;
  1806. g->switch_point = 0;
  1807. for(i=0;i<3;i++)
  1808. g->table_select[i] = get_bits(&s->gb, 5);
  1809. /* compute huffman coded region sizes */
  1810. region_address1 = get_bits(&s->gb, 4);
  1811. region_address2 = get_bits(&s->gb, 3);
  1812. dprintf(s->avctx, "region1=%d region2=%d\n",
  1813. region_address1, region_address2);
  1814. ff_init_long_region(s, g, region_address1, region_address2);
  1815. }
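/* For long blocks the big_values spectrum is split into three regions,
   each decoded with its own Huffman table (table_select); region_address1
   and region_address2 give the boundaries of the first two regions in
   scale factor band units. */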
  1816. ff_region_offset2size(g);
  1817. ff_compute_band_indexes(s, g);
  1818. g->preflag = 0;
  1819. if (!s->lsf)
  1820. g->preflag = get_bits1(&s->gb);
  1821. g->scalefac_scale = get_bits1(&s->gb);
  1822. g->count1table_select = get_bits1(&s->gb);
  1823. dprintf(s->avctx, "block_type=%d switch_point=%d\n",
  1824. g->block_type, g->switch_point);
  1825. }
  1826. }
  1827. if (!s->adu_mode) {
  1828. const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3);
  1829. assert((get_bits_count(&s->gb) & 7) == 0);
  1830. /* now we get bits from the main_data_begin offset */
  1831. dprintf(s->avctx, "seekback: %d\n", main_data_begin);
  1832. //av_log(NULL, AV_LOG_ERROR, "backstep:%d, lastbuf:%d\n", main_data_begin, s->last_buf_size);
  1833. memcpy(s->last_buf + s->last_buf_size, ptr, EXTRABYTES);
  1834. s->in_gb= s->gb;
  1835. init_get_bits(&s->gb, s->last_buf, s->last_buf_size*8);
  1836. skip_bits_long(&s->gb, 8*(s->last_buf_size - main_data_begin));
  1837. }
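/* Bit reservoir: main_data_begin is a backward offset, in bytes, into the
   audio data of previous frames. The reader state for the current frame is
   saved in in_gb, gb is re-pointed at the reservoir (last_buf) starting
   main_data_begin bytes before its end, and a few bytes of the current
   frame are appended after the reservoir so reading can cross the
   boundary. */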
  1838. for(gr=0;gr<nb_granules;gr++) {
  1839. for(ch=0;ch<s->nb_channels;ch++) {
  1840. g = &granules[ch][gr];
  1841. if(get_bits_count(&s->gb)<0){
  1842. av_log(s->avctx, AV_LOG_ERROR, "mdb:%d, lastbuf:%d skipping granule %d\n",
  1843. main_data_begin, s->last_buf_size, gr);
  1844. skip_bits_long(&s->gb, g->part2_3_length);
  1845. memset(g->sb_hybrid, 0, sizeof(g->sb_hybrid));
  1846. if(get_bits_count(&s->gb) >= s->gb.size_in_bits && s->in_gb.buffer){
  1847. skip_bits_long(&s->in_gb, get_bits_count(&s->gb) - s->gb.size_in_bits);
  1848. s->gb= s->in_gb;
  1849. s->in_gb.buffer=NULL;
  1850. }
  1851. continue;
  1852. }
  1853. bits_pos = get_bits_count(&s->gb);
  1854. if (!s->lsf) {
  1855. uint8_t *sc;
  1856. int slen, slen1, slen2;
  1857. /* MPEG1 scale factors */
  1858. slen1 = slen_table[0][g->scalefac_compress];
  1859. slen2 = slen_table[1][g->scalefac_compress];
  1860. dprintf(s->avctx, "slen1=%d slen2=%d\n", slen1, slen2);
  1861. if (g->block_type == 2) {
  1862. n = g->switch_point ? 17 : 18;
  1863. j = 0;
  1864. if(slen1){
  1865. for(i=0;i<n;i++)
  1866. g->scale_factors[j++] = get_bits(&s->gb, slen1);
  1867. }else{
  1868. for(i=0;i<n;i++)
  1869. g->scale_factors[j++] = 0;
  1870. }
  1871. if(slen2){
  1872. for(i=0;i<18;i++)
  1873. g->scale_factors[j++] = get_bits(&s->gb, slen2);
  1874. for(i=0;i<3;i++)
  1875. g->scale_factors[j++] = 0;
  1876. }else{
  1877. for(i=0;i<21;i++)
  1878. g->scale_factors[j++] = 0;
  1879. }
  1880. } else {
  1881. sc = granules[ch][0].scale_factors;
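/* Each of the 4 scfsi bits covers one group of scale factor bands
   (6, 5, 5 and 5 long bands); when a bit is set, the second granule
   reuses the first granule's scale factors for that group instead of
   reading new ones. */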
  1882. j = 0;
  1883. for(k=0;k<4;k++) {
  1884. n = (k == 0 ? 6 : 5);
  1885. if ((g->scfsi & (0x8 >> k)) == 0) {
  1886. slen = (k < 2) ? slen1 : slen2;
  1887. if(slen){
  1888. for(i=0;i<n;i++)
  1889. g->scale_factors[j++] = get_bits(&s->gb, slen);
  1890. }else{
  1891. for(i=0;i<n;i++)
  1892. g->scale_factors[j++] = 0;
  1893. }
  1894. } else {
  1895. /* simply copy from last granule */
  1896. for(i=0;i<n;i++) {
  1897. g->scale_factors[j] = sc[j];
  1898. j++;
  1899. }
  1900. }
  1901. }
  1902. g->scale_factors[j++] = 0;
  1903. }
  1904. } else {
  1905. int tindex, tindex2, slen[4], sl, sf;
  1906. /* LSF scale factors */
  1907. if (g->block_type == 2) {
  1908. tindex = g->switch_point ? 2 : 1;
  1909. } else {
  1910. tindex = 0;
  1911. }
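/* LSF scale factors: scalefac_compress is expanded by lsf_sf_expand into
   up to four bit lengths (slen[]), and lsf_nsf_table gives how many scale
   factors of each length to read; the intensity-stereo channel uses a
   separate set of tables. */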
  1912. sf = g->scalefac_compress;
  1913. if ((s->mode_ext & MODE_EXT_I_STEREO) && ch == 1) {
  1914. /* intensity stereo case */
  1915. sf >>= 1;
  1916. if (sf < 180) {
  1917. lsf_sf_expand(slen, sf, 6, 6, 0);
  1918. tindex2 = 3;
  1919. } else if (sf < 244) {
  1920. lsf_sf_expand(slen, sf - 180, 4, 4, 0);
  1921. tindex2 = 4;
  1922. } else {
  1923. lsf_sf_expand(slen, sf - 244, 3, 0, 0);
  1924. tindex2 = 5;
  1925. }
  1926. } else {
  1927. /* normal case */
  1928. if (sf < 400) {
  1929. lsf_sf_expand(slen, sf, 5, 4, 4);
  1930. tindex2 = 0;
  1931. } else if (sf < 500) {
  1932. lsf_sf_expand(slen, sf - 400, 5, 4, 0);
  1933. tindex2 = 1;
  1934. } else {
  1935. lsf_sf_expand(slen, sf - 500, 3, 0, 0);
  1936. tindex2 = 2;
  1937. g->preflag = 1;
  1938. }
  1939. }
  1940. j = 0;
  1941. for(k=0;k<4;k++) {
  1942. n = lsf_nsf_table[tindex2][tindex][k];
  1943. sl = slen[k];
  1944. if(sl){
  1945. for(i=0;i<n;i++)
  1946. g->scale_factors[j++] = get_bits(&s->gb, sl);
  1947. }else{
  1948. for(i=0;i<n;i++)
  1949. g->scale_factors[j++] = 0;
  1950. }
  1951. }
  1952. /* XXX: should compute exact size */
  1953. for(;j<40;j++)
  1954. g->scale_factors[j] = 0;
  1955. }
  1956. exponents_from_scale_factors(s, g, exponents);
  1957. /* read Huffman coded residue */
  1958. huffman_decode(s, g, exponents, bits_pos + g->part2_3_length);
  1959. } /* ch */
  1960. if (s->nb_channels == 2)
  1961. compute_stereo(s, &granules[0][gr], &granules[1][gr]);
  1962. for(ch=0;ch<s->nb_channels;ch++) {
  1963. g = &granules[ch][gr];
  1964. reorder_block(s, g);
  1965. s->compute_antialias(s, g);
  1966. compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
  1967. }
  1968. } /* gr */
  1969. if(get_bits_count(&s->gb)<0)
  1970. skip_bits_long(&s->gb, -get_bits_count(&s->gb));
  1971. return nb_granules * 18;
  1972. }
  1973. static int mp_decode_frame(MPADecodeContext *s,
  1974. OUT_INT *samples, const uint8_t *buf, int buf_size)
  1975. {
  1976. int i, nb_frames, ch;
  1977. OUT_INT *samples_ptr;
  1978. init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE)*8);
  1979. /* skip error protection field */
  1980. if (s->error_protection)
  1981. skip_bits(&s->gb, 16);
  1982. dprintf(s->avctx, "frame %d:\n", s->frame_count);
  1983. switch(s->layer) {
  1984. case 1:
  1985. s->avctx->frame_size = 384;
  1986. nb_frames = mp_decode_layer1(s);
  1987. break;
  1988. case 2:
  1989. s->avctx->frame_size = 1152;
  1990. nb_frames = mp_decode_layer2(s);
  1991. break;
  1992. case 3:
  1993. s->avctx->frame_size = s->lsf ? 576 : 1152;
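/* fall through: layer 3 is decoded by the default path below */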
  1994. default:
  1995. nb_frames = mp_decode_layer3(s);
  1996. s->last_buf_size=0;
  1997. if(s->in_gb.buffer){
  1998. align_get_bits(&s->gb);
  1999. i= (s->gb.size_in_bits - get_bits_count(&s->gb))>>3;
  2000. if(i >= 0 && i <= BACKSTEP_SIZE){
  2001. memmove(s->last_buf, s->gb.buffer + (get_bits_count(&s->gb)>>3), i);
  2002. s->last_buf_size=i;
  2003. }else
  2004. av_log(s->avctx, AV_LOG_ERROR, "invalid old backstep %d\n", i);
  2005. s->gb= s->in_gb;
  2006. s->in_gb.buffer= NULL;
  2007. }
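/* append the unread bytes of this frame (at most BACKSTEP_SIZE) to
   last_buf, so that the next frame's main_data_begin can reach back
   into them */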
  2008. align_get_bits(&s->gb);
  2009. assert((get_bits_count(&s->gb) & 7) == 0);
  2010. i= (s->gb.size_in_bits - get_bits_count(&s->gb))>>3;
  2011. if(i<0 || i > BACKSTEP_SIZE || nb_frames<0){
  2012. if(i<0)
  2013. av_log(s->avctx, AV_LOG_ERROR, "invalid new backstep %d\n", i);
  2014. i= FFMIN(BACKSTEP_SIZE, buf_size - HEADER_SIZE);
  2015. }
  2016. assert(i <= buf_size - HEADER_SIZE && i>= 0);
  2017. memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i);
  2018. s->last_buf_size += i;
  2019. break;
  2020. }
  2021. /* apply the synthesis filter */
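/* each call to ff_mpa_synth_filter turns one row of 32 subband samples
   into 32 interleaved PCM samples per channel, so the frame yields
   nb_frames * 32 samples per channel */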
  2022. for(ch=0;ch<s->nb_channels;ch++) {
  2023. samples_ptr = samples + ch;
  2024. for(i=0;i<nb_frames;i++) {
  2025. ff_mpa_synth_filter(s->synth_buf[ch], &(s->synth_buf_offset[ch]),
  2026. window, &s->dither_state,
  2027. samples_ptr, s->nb_channels,
  2028. s->sb_samples[ch][i]);
  2029. samples_ptr += 32 * s->nb_channels;
  2030. }
  2031. }
  2032. return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels;
  2033. }
  2034. static int decode_frame(AVCodecContext * avctx,
  2035. void *data, int *data_size,
  2036. const uint8_t * buf, int buf_size)
  2037. {
  2038. MPADecodeContext *s = avctx->priv_data;
  2039. uint32_t header;
  2040. int out_size;
  2041. OUT_INT *out_samples = data;
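/* resynchronization: if the next 32 bits are not a valid MPEG audio
   header, skip one byte and retry until sync is regained */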
  2042. retry:
  2043. if(buf_size < HEADER_SIZE)
  2044. return -1;
  2045. header = AV_RB32(buf);
  2046. if(ff_mpa_check_header(header) < 0){
  2047. buf++;
  2048. // buf_size--;
2049. av_log(avctx, AV_LOG_ERROR, "Header missing, skipping one byte.\n");
  2050. goto retry;
  2051. }
  2052. if (ff_mpegaudio_decode_header(s, header) == 1) {
  2053. /* free format: prepare to compute frame size */
  2054. s->frame_size = -1;
  2055. return -1;
  2056. }
  2057. /* update codec info */
  2058. avctx->channels = s->nb_channels;
  2059. avctx->bit_rate = s->bit_rate;
  2060. avctx->sub_id = s->layer;
  2061. if(s->frame_size<=0 || s->frame_size > buf_size){
  2062. av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
  2063. return -1;
  2064. }else if(s->frame_size < buf_size){
  2065. av_log(avctx, AV_LOG_ERROR, "incorrect frame size\n");
  2066. buf_size= s->frame_size;
  2067. }
  2068. out_size = mp_decode_frame(s, out_samples, buf, buf_size);
  2069. if(out_size>=0){
  2070. *data_size = out_size;
  2071. avctx->sample_rate = s->sample_rate;
  2072. //FIXME maybe move the other codec info stuff from above here too
  2073. }else
  2074. av_log(avctx, AV_LOG_DEBUG, "Error while decoding MPEG audio frame.\n"); //FIXME return -1 / but also return the number of bytes consumed
  2075. s->frame_size = 0;
  2076. return buf_size;
  2077. }
  2078. static void flush(AVCodecContext *avctx){
  2079. MPADecodeContext *s = avctx->priv_data;
  2080. memset(s->synth_buf, 0, sizeof(s->synth_buf));
  2081. s->last_buf_size= 0;
  2082. }
  2083. #ifdef CONFIG_MP3ADU_DECODER
  2084. static int decode_frame_adu(AVCodecContext * avctx,
  2085. void *data, int *data_size,
  2086. const uint8_t * buf, int buf_size)
  2087. {
  2088. MPADecodeContext *s = avctx->priv_data;
  2089. uint32_t header;
  2090. int len, out_size;
  2091. OUT_INT *out_samples = data;
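/* ADU streams: every packet is a self-contained Application Data Unit,
   so there is no bit reservoir to seek back into; with adu_mode set,
   mp_decode_layer3 skips the main_data_begin backstep. */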
  2092. len = buf_size;
  2093. // Discard too short frames
  2094. if (buf_size < HEADER_SIZE) {
  2095. *data_size = 0;
  2096. return buf_size;
  2097. }
  2098. if (len > MPA_MAX_CODED_FRAME_SIZE)
  2099. len = MPA_MAX_CODED_FRAME_SIZE;
  2100. // Get header and restore sync word
  2101. header = AV_RB32(buf) | 0xffe00000;
  2102. if (ff_mpa_check_header(header) < 0) { // Bad header, discard frame
  2103. *data_size = 0;
  2104. return buf_size;
  2105. }
  2106. ff_mpegaudio_decode_header(s, header);
  2107. /* update codec info */
  2108. avctx->sample_rate = s->sample_rate;
  2109. avctx->channels = s->nb_channels;
  2110. avctx->bit_rate = s->bit_rate;
  2111. avctx->sub_id = s->layer;
  2112. s->frame_size = len;
  2113. if (avctx->parse_only) {
  2114. out_size = buf_size;
  2115. } else {
  2116. out_size = mp_decode_frame(s, out_samples, buf, buf_size);
  2117. }
  2118. *data_size = out_size;
  2119. return buf_size;
  2120. }
  2121. #endif /* CONFIG_MP3ADU_DECODER */
  2122. #ifdef CONFIG_MP3ON4_DECODER
  2123. /**
  2124. * Context for MP3On4 decoder
  2125. */
  2126. typedef struct MP3On4DecodeContext {
  2127. int frames; ///< number of mp3 frames per block (number of mp3 decoder instances)
  2128. int syncword; ///< syncword patch
  2129. const uint8_t *coff; ///< channels offsets in output buffer
  2130. MPADecodeContext *mp3decctx[5]; ///< MPADecodeContext for every decoder instance
  2131. } MP3On4DecodeContext;
  2132. #include "mpeg4audio.h"
2133. /* The next two arrays are indexed by the channel config number (passed via codec extradata) */
  2134. static const uint8_t mp3Frames[8] = {0,1,1,2,3,3,4,5}; /* number of mp3 decoder instances */
  2135. /* offsets into output buffer, assume output order is FL FR BL BR C LFE */
  2136. static const uint8_t chan_offset[8][5] = {
  2137. {0},
  2138. {0}, // C
  2139. {0}, // FLR
  2140. {2,0}, // C FLR
  2141. {2,0,3}, // C FLR BS
  2142. {4,0,2}, // C FLR BLRS
  2143. {4,0,2,5}, // C FLR BLRS LFE
  2144. {4,0,2,6,5}, // C FLR BLRS BLR LFE
  2145. };
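/* An MP3onMP4 packet carries one mp3 frame per decoder instance (mono or
   stereo each); the frames are decoded independently and their output is
   interleaved into the multichannel buffer at the offsets given by
   chan_offset. */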
  2146. static int decode_init_mp3on4(AVCodecContext * avctx)
  2147. {
  2148. MP3On4DecodeContext *s = avctx->priv_data;
  2149. MPEG4AudioConfig cfg;
  2150. int i;
  2151. if ((avctx->extradata_size < 2) || (avctx->extradata == NULL)) {
  2152. av_log(avctx, AV_LOG_ERROR, "Codec extradata missing or too short.\n");
  2153. return -1;
  2154. }
  2155. ff_mpeg4audio_get_config(&cfg, avctx->extradata, avctx->extradata_size);
  2156. if (!cfg.chan_config || cfg.chan_config > 7) {
  2157. av_log(avctx, AV_LOG_ERROR, "Invalid channel config number.\n");
  2158. return -1;
  2159. }
  2160. s->frames = mp3Frames[cfg.chan_config];
  2161. s->coff = chan_offset[cfg.chan_config];
  2162. avctx->channels = ff_mpeg4audio_channels[cfg.chan_config];
  2163. if (cfg.sample_rate < 16000)
  2164. s->syncword = 0xffe00000;
  2165. else
  2166. s->syncword = 0xfff00000;
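/* sample rates below 16 kHz only occur in MPEG-2.5, whose sync word is
   11 bits (0xffe) instead of the 12 bits (0xfff) of MPEG-1/2; this mask
   is OR-ed back onto each sub-frame header in decode_frame_mp3on4() */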
2167. /* Init the first mp3 decoder in the standard way, so that all tables get built.
  2168. * We replace avctx->priv_data with the context of the first decoder so that
  2169. * decode_init() does not have to be changed.
2170. * The other decoders are initialized here by copying data from the first context.
  2171. */
  2172. // Allocate zeroed memory for the first decoder context
  2173. s->mp3decctx[0] = av_mallocz(sizeof(MPADecodeContext));
  2174. // Put decoder context in place to make init_decode() happy
  2175. avctx->priv_data = s->mp3decctx[0];
  2176. decode_init(avctx);
  2177. // Restore mp3on4 context pointer
  2178. avctx->priv_data = s;
  2179. s->mp3decctx[0]->adu_mode = 1; // Set adu mode
2180. /* Create a separate codec/context for each frame (the first one is already set up).
2181. * Each frame has 1 or 2 channels; up to 5 frames are allowed.
  2182. */
  2183. for (i = 1; i < s->frames; i++) {
  2184. s->mp3decctx[i] = av_mallocz(sizeof(MPADecodeContext));
  2185. s->mp3decctx[i]->compute_antialias = s->mp3decctx[0]->compute_antialias;
  2186. s->mp3decctx[i]->adu_mode = 1;
  2187. s->mp3decctx[i]->avctx = avctx;
  2188. }
  2189. return 0;
  2190. }
  2191. static int decode_close_mp3on4(AVCodecContext * avctx)
  2192. {
  2193. MP3On4DecodeContext *s = avctx->priv_data;
  2194. int i;
  2195. for (i = 0; i < s->frames; i++)
  2196. if (s->mp3decctx[i])
  2197. av_free(s->mp3decctx[i]);
  2198. return 0;
  2199. }
  2200. static int decode_frame_mp3on4(AVCodecContext * avctx,
  2201. void *data, int *data_size,
  2202. const uint8_t * buf, int buf_size)
  2203. {
  2204. MP3On4DecodeContext *s = avctx->priv_data;
  2205. MPADecodeContext *m;
  2206. int fsize, len = buf_size, out_size = 0;
  2207. uint32_t header;
  2208. OUT_INT *out_samples = data;
  2209. OUT_INT decoded_buf[MPA_FRAME_SIZE * MPA_MAX_CHANNELS];
  2210. OUT_INT *outptr, *bp;
  2211. int fr, j, n;
  2212. *data_size = 0;
  2213. // Discard too short frames
  2214. if (buf_size < HEADER_SIZE)
  2215. return -1;
2216. // If there is only one decoder, interleaving is not needed
  2217. outptr = s->frames == 1 ? out_samples : decoded_buf;
  2218. avctx->bit_rate = 0;
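/* each sub-frame reuses the 12 sync bits of its header to store the
   sub-frame length; read it with AV_RB16(buf) >> 4, then restore the
   proper sync word before decoding */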
  2219. for (fr = 0; fr < s->frames; fr++) {
  2220. fsize = AV_RB16(buf) >> 4;
  2221. fsize = FFMIN3(fsize, len, MPA_MAX_CODED_FRAME_SIZE);
  2222. m = s->mp3decctx[fr];
  2223. assert (m != NULL);
  2224. header = (AV_RB32(buf) & 0x000fffff) | s->syncword; // patch header
  2225. if (ff_mpa_check_header(header) < 0) // Bad header, discard block
  2226. break;
  2227. ff_mpegaudio_decode_header(m, header);
  2228. out_size += mp_decode_frame(m, outptr, buf, fsize);
  2229. buf += fsize;
  2230. len -= fsize;
  2231. if(s->frames > 1) {
  2232. n = m->avctx->frame_size*m->nb_channels;
  2233. /* interleave output data */
  2234. bp = out_samples + s->coff[fr];
  2235. if(m->nb_channels == 1) {
  2236. for(j = 0; j < n; j++) {
  2237. *bp = decoded_buf[j];
  2238. bp += avctx->channels;
  2239. }
  2240. } else {
  2241. for(j = 0; j < n; j++) {
  2242. bp[0] = decoded_buf[j++];
  2243. bp[1] = decoded_buf[j];
  2244. bp += avctx->channels;
  2245. }
  2246. }
  2247. }
  2248. avctx->bit_rate += m->bit_rate;
  2249. }
  2250. /* update codec info */
  2251. avctx->sample_rate = s->mp3decctx[0]->sample_rate;
  2252. *data_size = out_size;
  2253. return buf_size;
  2254. }
  2255. #endif /* CONFIG_MP3ON4_DECODER */
  2256. #ifdef CONFIG_MP2_DECODER
  2257. AVCodec mp2_decoder =
  2258. {
  2259. "mp2",
  2260. CODEC_TYPE_AUDIO,
  2261. CODEC_ID_MP2,
  2262. sizeof(MPADecodeContext),
  2263. decode_init,
  2264. NULL,
  2265. NULL,
  2266. decode_frame,
  2267. CODEC_CAP_PARSE_ONLY,
  2268. .flush= flush,
  2269. .long_name= NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
  2270. };
  2271. #endif
  2272. #ifdef CONFIG_MP3_DECODER
  2273. AVCodec mp3_decoder =
  2274. {
  2275. "mp3",
  2276. CODEC_TYPE_AUDIO,
  2277. CODEC_ID_MP3,
  2278. sizeof(MPADecodeContext),
  2279. decode_init,
  2280. NULL,
  2281. NULL,
  2282. decode_frame,
  2283. CODEC_CAP_PARSE_ONLY,
  2284. .flush= flush,
  2285. .long_name= NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
  2286. };
  2287. #endif
  2288. #ifdef CONFIG_MP3ADU_DECODER
  2289. AVCodec mp3adu_decoder =
  2290. {
  2291. "mp3adu",
  2292. CODEC_TYPE_AUDIO,
  2293. CODEC_ID_MP3ADU,
  2294. sizeof(MPADecodeContext),
  2295. decode_init,
  2296. NULL,
  2297. NULL,
  2298. decode_frame_adu,
  2299. CODEC_CAP_PARSE_ONLY,
  2300. .flush= flush,
  2301. .long_name= NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
  2302. };
  2303. #endif
  2304. #ifdef CONFIG_MP3ON4_DECODER
  2305. AVCodec mp3on4_decoder =
  2306. {
  2307. "mp3on4",
  2308. CODEC_TYPE_AUDIO,
  2309. CODEC_ID_MP3ON4,
  2310. sizeof(MP3On4DecodeContext),
  2311. decode_init_mp3on4,
  2312. NULL,
  2313. decode_close_mp3on4,
  2314. decode_frame_mp3on4,
  2315. .flush= flush,
  2316. .long_name= NULL_IF_CONFIG_SMALL("MP3onMP4"),
  2317. };
  2318. #endif