  1. /*
  2. * MPEG Audio decoder
  3. * Copyright (c) 2001, 2002 Fabrice Bellard.
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file mpegaudiodec.c
  23. * MPEG Audio decoder.
  24. */
  25. //#define DEBUG
  26. #include "avcodec.h"
  27. #include "bitstream.h"
  28. #include "dsputil.h"
  29. /*
  30. * TODO:
  31. * - in low precision mode, use more 16 bit multiplies in synth filter
  32. * - test lsf / mpeg25 extensively.
  33. */
  34. /* define USE_HIGHPRECISION to have a bit-exact (but slower) MPEG
  35. audio decoder */
  36. #ifdef CONFIG_MPEGAUDIO_HP
  37. # define USE_HIGHPRECISION
  38. #endif
  39. #include "mpegaudio.h"
  40. #include "mpegaudiodecheader.h"
  41. #include "mathops.h"
  42. /* WARNING: only correct for positive numbers */
  43. #define FIXR(a) ((int)((a) * FRAC_ONE + 0.5))
  44. #define FRAC_RND(a) (((a) + (FRAC_ONE/2)) >> FRAC_BITS)
  45. #define FIXHR(a) ((int)((a) * (1LL<<32) + 0.5))
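/* Fixed-point conventions (a sketch, relying on FRAC_BITS and MULH from the
   headers above): FIXR() converts a positive constant to FRAC_BITS fractional
   bits (FRAC_ONE == 1 << FRAC_BITS) and FRAC_RND() rounds such a value back to
   an integer, while FIXHR() scales a constant to a full 32-bit fraction so that
   MULH(x, FIXHR(c)) keeps the high 32 bits of the 64-bit product, i.e. roughly
   x * c. For example FIXHR(0.25) is 0x40000000; constants below are therefore
   written as value/2, value/4, ... so they stay inside the signed 32-bit range. */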
  46. /****************/
  47. #define HEADER_SIZE 4
  48. /* layer 3 "granule" */
  49. typedef struct GranuleDef {
  50. uint8_t scfsi;
  51. int part2_3_length;
  52. int big_values;
  53. int global_gain;
  54. int scalefac_compress;
  55. uint8_t block_type;
  56. uint8_t switch_point;
  57. int table_select[3];
  58. int subblock_gain[3];
  59. uint8_t scalefac_scale;
  60. uint8_t count1table_select;
  61. int region_size[3]; /* number of huffman codes in each region */
  62. int preflag;
  63. int short_start, long_end; /* long/short band indexes */
  64. uint8_t scale_factors[40];
  65. int32_t sb_hybrid[SBLIMIT * 18]; /* 576 samples */
  66. } GranuleDef;
  67. #include "mpegaudiodata.h"
  68. #include "mpegaudiodectab.h"
  69. static void compute_antialias_integer(MPADecodeContext *s, GranuleDef *g);
  70. static void compute_antialias_float(MPADecodeContext *s, GranuleDef *g);
  71. /* vlc structure for decoding layer 3 huffman tables */
  72. static VLC huff_vlc[16];
  73. static VLC huff_quad_vlc[2];
  74. /* computed from band_size_long */
  75. static uint16_t band_index_long[9][23];
  76. /* XXX: free when all decoders are closed */
  77. #define TABLE_4_3_SIZE (8191 + 16)*4
  78. static int8_t table_4_3_exp[TABLE_4_3_SIZE];
  79. static uint32_t table_4_3_value[TABLE_4_3_SIZE];
  80. static uint32_t exp_table[512];
  81. static uint32_t expval_table[512][16];
  82. /* intensity stereo coef table */
  83. static int32_t is_table[2][16];
  84. static int32_t is_table_lsf[2][2][16];
  85. static int32_t csa_table[8][4];
  86. static float csa_table_float[8][4];
  87. static int32_t mdct_win[8][36];
  88. /* lower 2 bits: modulo 3, higher bits: shift */
  89. static uint16_t scale_factor_modshift[64];
  90. /* [i][j]: 2^(-j/3) * FRAC_ONE * 2^(i+2) / (2^(i+2) - 1) */
  91. static int32_t scale_factor_mult[15][3];
  92. /* mult table for layer 2 group quantization */
  93. #define SCALE_GEN(v) \
  94. { FIXR(1.0 * (v)), FIXR(0.7937005259 * (v)), FIXR(0.6299605249 * (v)) }
  95. static const int32_t scale_factor_mult2[3][3] = {
  96. SCALE_GEN(4.0 / 3.0), /* 3 steps */
  97. SCALE_GEN(4.0 / 5.0), /* 5 steps */
  98. SCALE_GEN(4.0 / 9.0), /* 9 steps */
  99. };
  100. static DECLARE_ALIGNED_16(MPA_INT, window[512]);
  101. /**
  102. * Convert region offsets to region sizes and truncate
  103. * size to big_values.
  104. */
  105. void ff_region_offset2size(GranuleDef *g){
  106. int i, k, j=0;
  107. g->region_size[2] = (576 / 2);
  108. for(i=0;i<3;i++) {
  109. k = FFMIN(g->region_size[i], g->big_values);
  110. g->region_size[i] = k - j;
  111. j = k;
  112. }
  113. }
  114. void ff_init_short_region(MPADecodeContext *s, GranuleDef *g){
  115. if (g->block_type == 2)
  116. g->region_size[0] = (36 / 2);
  117. else {
  118. if (s->sample_rate_index <= 2)
  119. g->region_size[0] = (36 / 2);
  120. else if (s->sample_rate_index != 8)
  121. g->region_size[0] = (54 / 2);
  122. else
  123. g->region_size[0] = (108 / 2);
  124. }
  125. g->region_size[1] = (576 / 2);
  126. }
  127. void ff_init_long_region(MPADecodeContext *s, GranuleDef *g, int ra1, int ra2){
  128. int l;
  129. g->region_size[0] =
  130. band_index_long[s->sample_rate_index][ra1 + 1] >> 1;
  131. /* should not overflow */
  132. l = FFMIN(ra1 + ra2 + 2, 22);
  133. g->region_size[1] =
  134. band_index_long[s->sample_rate_index][l] >> 1;
  135. }
  136. void ff_compute_band_indexes(MPADecodeContext *s, GranuleDef *g){
  137. if (g->block_type == 2) {
  138. if (g->switch_point) {
  139. /* if switched mode, we handle the first 36 samples as
  140. long blocks. For 8000 Hz, we handle the first 48
  141. exponents as long blocks (XXX: check this!) */
  142. if (s->sample_rate_index <= 2)
  143. g->long_end = 8;
  144. else if (s->sample_rate_index != 8)
  145. g->long_end = 6;
  146. else
  147. g->long_end = 4; /* 8000 Hz */
  148. g->short_start = 2 + (s->sample_rate_index != 8);
  149. } else {
  150. g->long_end = 0;
  151. g->short_start = 0;
  152. }
  153. } else {
  154. g->short_start = 13;
  155. g->long_end = 22;
  156. }
  157. }
  158. /* layer 1 unscaling */
  159. /* n = number of bits of the mantissa minus 1 */
  160. static inline int l1_unscale(int n, int mant, int scale_factor)
  161. {
  162. int shift, mod;
  163. int64_t val;
  164. shift = scale_factor_modshift[scale_factor];
  165. mod = shift & 3;
  166. shift >>= 2;
  167. val = MUL64(mant + (-1 << n) + 1, scale_factor_mult[n-1][mod]);
  168. shift += n;
  169. /* NOTE: at this point, 1 <= shift <= 21 + 15 */
  170. return (int)((val + (1LL << (shift - 1))) >> shift);
  171. }
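/* In terms of the spec, l1_unscale() computes, in FRAC_BITS fixed point, roughly
   (mant - 2^n + 1)/2^n * 2^(n+1)/(2^(n+1)-1) * 2.0 * 2^(-scale_factor/3):
   the (n+1)-bit mantissa is re-centred, expanded to the full range and
   multiplied by the layer 1/2 scale factor; scale_factor is split as
   3*shift + mod, the 2^(-shift) part ending up in the final right shift and the
   2^(-mod/3) part in scale_factor_mult[][mod]. */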
  172. static inline int l2_unscale_group(int steps, int mant, int scale_factor)
  173. {
  174. int shift, mod, val;
  175. shift = scale_factor_modshift[scale_factor];
  176. mod = shift & 3;
  177. shift >>= 2;
  178. val = (mant - (steps >> 1)) * scale_factor_mult2[steps >> 2][mod];
  179. /* NOTE: at this point, 0 <= shift <= 21 */
  180. if (shift > 0)
  181. val = (val + (1 << (shift - 1))) >> shift;
  182. return val;
  183. }
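/* l2_unscale_group() is the grouped-quantization variant: the 3-, 5- or 9-step
   value is re-centred with (mant - steps/2) and rescaled by
   (4/steps) * 2^(-mod/3) via scale_factor_mult2[] above, the 2^(-shift) part of
   the scale factor again being applied as a right shift. */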
  184. /* compute value^(4/3) * 2^(exponent/4). It is normalized to FRAC_BITS */
  185. static inline int l3_unscale(int value, int exponent)
  186. {
  187. unsigned int m;
  188. int e;
  189. e = table_4_3_exp [4*value + (exponent&3)];
  190. m = table_4_3_value[4*value + (exponent&3)];
  191. e -= (exponent >> 2);
  192. assert(e>=1);
  193. if (e > 31)
  194. return 0;
  195. m = (m + (1 << (e-1))) >> e;
  196. return m;
  197. }
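/* Layer 3 requantization: table_4_3_value/table_4_3_exp (filled in
   decode_init()) store value^(4/3) * 2^((exponent&3)/4) as a mantissa and a
   binary exponent; the remaining power of two, exponent>>2, is folded into that
   exponent and applied as a rounded right shift above. */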
  198. /* all integer n^(4/3) computation code */
  199. #define DEV_ORDER 13
  200. #define POW_FRAC_BITS 24
  201. #define POW_FRAC_ONE (1 << POW_FRAC_BITS)
  202. #define POW_FIX(a) ((int)((a) * POW_FRAC_ONE))
  203. #define POW_MULL(a,b) (((int64_t)(a) * (int64_t)(b)) >> POW_FRAC_BITS)
  204. static int dev_4_3_coefs[DEV_ORDER];
  205. #if 0 /* unused */
  206. static int pow_mult3[3] = {
  207. POW_FIX(1.0),
  208. POW_FIX(1.25992104989487316476),
  209. POW_FIX(1.58740105196819947474),
  210. };
  211. #endif
  212. static void int_pow_init(void)
  213. {
  214. int i, a;
  215. a = POW_FIX(1.0);
  216. for(i=0;i<DEV_ORDER;i++) {
  217. a = POW_MULL(a, POW_FIX(4.0 / 3.0) - i * POW_FIX(1.0)) / (i + 1);
  218. dev_4_3_coefs[i] = a;
  219. }
  220. }
  221. #if 0 /* unused, remove? */
  222. /* return the mantissa and the binary exponent */
  223. static int int_pow(int i, int *exp_ptr)
  224. {
  225. int e, er, eq, j;
  226. int a, a1;
  227. /* renormalize */
  228. a = i;
  229. e = POW_FRAC_BITS;
  230. while (a < (1 << (POW_FRAC_BITS - 1))) {
  231. a = a << 1;
  232. e--;
  233. }
  234. a -= (1 << POW_FRAC_BITS);
  235. a1 = 0;
  236. for(j = DEV_ORDER - 1; j >= 0; j--)
  237. a1 = POW_MULL(a, dev_4_3_coefs[j] + a1);
  238. a = (1 << POW_FRAC_BITS) + a1;
  239. /* exponent compute (exact) */
  240. e = e * 4;
  241. er = e % 3;
  242. eq = e / 3;
  243. a = POW_MULL(a, pow_mult3[er]);
  244. while (a >= 2 * POW_FRAC_ONE) {
  245. a = a >> 1;
  246. eq++;
  247. }
  248. /* convert to float */
  249. while (a < POW_FRAC_ONE) {
  250. a = a << 1;
  251. eq--;
  252. }
  253. /* now POW_FRAC_ONE <= a < 2 * POW_FRAC_ONE */
  254. #if POW_FRAC_BITS > FRAC_BITS
  255. a = (a + (1 << (POW_FRAC_BITS - FRAC_BITS - 1))) >> (POW_FRAC_BITS - FRAC_BITS);
  256. /* correct overflow */
  257. if (a >= 2 * (1 << FRAC_BITS)) {
  258. a = a >> 1;
  259. eq++;
  260. }
  261. #endif
  262. *exp_ptr = eq;
  263. return a;
  264. }
  265. #endif
  266. static int decode_init(AVCodecContext * avctx)
  267. {
  268. MPADecodeContext *s = avctx->priv_data;
  269. static int init=0;
  270. int i, j, k;
  271. s->avctx = avctx;
  272. #if defined(USE_HIGHPRECISION) && defined(CONFIG_AUDIO_NONSHORT)
  273. avctx->sample_fmt= SAMPLE_FMT_S32;
  274. #else
  275. avctx->sample_fmt= SAMPLE_FMT_S16;
  276. #endif
  277. s->error_resilience= avctx->error_resilience;
  278. if(avctx->antialias_algo != FF_AA_FLOAT)
  279. s->compute_antialias= compute_antialias_integer;
  280. else
  281. s->compute_antialias= compute_antialias_float;
  282. if (!init && !avctx->parse_only) {
  283. /* scale factors table for layer 1/2 */
  284. for(i=0;i<64;i++) {
  285. int shift, mod;
  286. /* 1.0 (i = 3) is normalized to 2 ^ FRAC_BITS */
  287. shift = (i / 3);
  288. mod = i % 3;
  289. scale_factor_modshift[i] = mod | (shift << 2);
  290. }
  291. /* scale factor multiply for layer 1 */
  292. for(i=0;i<15;i++) {
  293. int n, norm;
  294. n = i + 2;
  295. norm = ((INT64_C(1) << n) * FRAC_ONE) / ((1 << n) - 1);
  296. scale_factor_mult[i][0] = MULL(FIXR(1.0 * 2.0), norm);
  297. scale_factor_mult[i][1] = MULL(FIXR(0.7937005259 * 2.0), norm);
  298. scale_factor_mult[i][2] = MULL(FIXR(0.6299605249 * 2.0), norm);
  299. dprintf(avctx, "%d: norm=%x s=%x %x %x\n",
  300. i, norm,
  301. scale_factor_mult[i][0],
  302. scale_factor_mult[i][1],
  303. scale_factor_mult[i][2]);
  304. }
  305. ff_mpa_synth_init(window);
  306. /* huffman decode tables */
  307. for(i=1;i<16;i++) {
  308. const HuffTable *h = &mpa_huff_tables[i];
  309. int xsize, x, y;
  310. unsigned int n;
  311. uint8_t tmp_bits [512];
  312. uint16_t tmp_codes[512];
  313. memset(tmp_bits , 0, sizeof(tmp_bits ));
  314. memset(tmp_codes, 0, sizeof(tmp_codes));
  315. xsize = h->xsize;
  316. n = xsize * xsize;
  317. j = 0;
  318. for(x=0;x<xsize;x++) {
  319. for(y=0;y<xsize;y++){
  320. tmp_bits [(x << 5) | y | ((x&&y)<<4)]= h->bits [j ];
  321. tmp_codes[(x << 5) | y | ((x&&y)<<4)]= h->codes[j++];
  322. }
  323. }
  324. /* XXX: fail test */
  325. init_vlc(&huff_vlc[i], 7, 512,
  326. tmp_bits, 1, 1, tmp_codes, 2, 2, 1);
  327. }
  328. for(i=0;i<2;i++) {
  329. init_vlc(&huff_quad_vlc[i], i == 0 ? 7 : 4, 16,
  330. mpa_quad_bits[i], 1, 1, mpa_quad_codes[i], 1, 1, 1);
  331. }
  332. for(i=0;i<9;i++) {
  333. k = 0;
  334. for(j=0;j<22;j++) {
  335. band_index_long[i][j] = k;
  336. k += band_size_long[i][j];
  337. }
  338. band_index_long[i][22] = k;
  339. }
  340. /* compute n ^ (4/3) and store it in mantissa/exp format */
  341. int_pow_init();
  342. for(i=1;i<TABLE_4_3_SIZE;i++) {
  343. double f, fm;
  344. int e, m;
  345. f = pow((double)(i/4), 4.0 / 3.0) * pow(2, (i&3)*0.25);
  346. fm = frexp(f, &e);
  347. m = (uint32_t)(fm*(1LL<<31) + 0.5);
  348. e+= FRAC_BITS - 31 + 5 - 100;
  349. /* normalized to FRAC_BITS */
  350. table_4_3_value[i] = m;
  351. // av_log(NULL, AV_LOG_DEBUG, "%d %d %f\n", i, m, pow((double)i, 4.0 / 3.0));
  352. table_4_3_exp[i] = -e;
  353. }
  354. for(i=0; i<512*16; i++){
  355. int exponent= (i>>4);
  356. double f= pow(i&15, 4.0 / 3.0) * pow(2, (exponent-400)*0.25 + FRAC_BITS + 5);
  357. expval_table[exponent][i&15]= llrint(f);
  358. if((i&15)==1)
  359. exp_table[exponent]= llrint(f);
  360. }
  361. for(i=0;i<7;i++) {
  362. float f;
  363. int v;
  364. if (i != 6) {
  365. f = tan((double)i * M_PI / 12.0);
  366. v = FIXR(f / (1.0 + f));
  367. } else {
  368. v = FIXR(1.0);
  369. }
  370. is_table[0][i] = v;
  371. is_table[1][6 - i] = v;
  372. }
  373. /* invalid values */
  374. for(i=7;i<16;i++)
  375. is_table[0][i] = is_table[1][i] = 0.0;
  376. for(i=0;i<16;i++) {
  377. double f;
  378. int e, k;
  379. for(j=0;j<2;j++) {
  380. e = -(j + 1) * ((i + 1) >> 1);
  381. f = pow(2.0, e / 4.0);
  382. k = i & 1;
  383. is_table_lsf[j][k ^ 1][i] = FIXR(f);
  384. is_table_lsf[j][k][i] = FIXR(1.0);
  385. dprintf(avctx, "is_table_lsf %d %d: %x %x\n",
  386. i, j, is_table_lsf[j][0][i], is_table_lsf[j][1][i]);
  387. }
  388. }
  389. for(i=0;i<8;i++) {
  390. float ci, cs, ca;
  391. ci = ci_table[i];
  392. cs = 1.0 / sqrt(1.0 + ci * ci);
  393. ca = cs * ci;
  394. csa_table[i][0] = FIXHR(cs/4);
  395. csa_table[i][1] = FIXHR(ca/4);
  396. csa_table[i][2] = FIXHR(ca/4) + FIXHR(cs/4);
  397. csa_table[i][3] = FIXHR(ca/4) - FIXHR(cs/4);
  398. csa_table_float[i][0] = cs;
  399. csa_table_float[i][1] = ca;
  400. csa_table_float[i][2] = ca + cs;
  401. csa_table_float[i][3] = ca - cs;
  402. // printf("%d %d %d %d\n", FIX(cs), FIX(cs-1), FIX(ca), FIX(cs)-FIX(ca));
  403. // av_log(NULL, AV_LOG_DEBUG,"%f %f %f %f\n", cs, ca, ca+cs, ca-cs);
  404. }
  405. /* compute mdct windows */
  406. for(i=0;i<36;i++) {
  407. for(j=0; j<4; j++){
  408. double d;
  409. if(j==2 && i%3 != 1)
  410. continue;
  411. d= sin(M_PI * (i + 0.5) / 36.0);
  412. if(j==1){
  413. if (i>=30) d= 0;
  414. else if(i>=24) d= sin(M_PI * (i - 18 + 0.5) / 12.0);
  415. else if(i>=18) d= 1;
  416. }else if(j==3){
  417. if (i< 6) d= 0;
  418. else if(i< 12) d= sin(M_PI * (i - 6 + 0.5) / 12.0);
  419. else if(i< 18) d= 1;
  420. }
  421. //merge last stage of imdct into the window coefficients
  422. d*= 0.5 / cos(M_PI*(2*i + 19)/72);
  423. if(j==2)
  424. mdct_win[j][i/3] = FIXHR((d / (1<<5)));
  425. else
  426. mdct_win[j][i ] = FIXHR((d / (1<<5)));
  427. // av_log(NULL, AV_LOG_DEBUG, "%2d %d %f\n", i,j,d / (1<<5));
  428. }
  429. }
  430. /* NOTE: we do frequency inversion after the MDCT by changing
  431. the sign of the right window coefficients */
  432. for(j=0;j<4;j++) {
  433. for(i=0;i<36;i+=2) {
  434. mdct_win[j + 4][i] = mdct_win[j][i];
  435. mdct_win[j + 4][i + 1] = -mdct_win[j][i + 1];
  436. }
  437. }
  438. #if defined(DEBUG)
  439. for(j=0;j<8;j++) {
  440. av_log(avctx, AV_LOG_DEBUG, "win%d=\n", j);
  441. for(i=0;i<36;i++)
  442. av_log(avctx, AV_LOG_DEBUG, "%f, ", (double)mdct_win[j][i] / FRAC_ONE);
  443. av_log(avctx, AV_LOG_DEBUG, "\n");
  444. }
  445. #endif
  446. init = 1;
  447. }
  448. #ifdef DEBUG
  449. s->frame_count = 0;
  450. #endif
  451. if (avctx->codec_id == CODEC_ID_MP3ADU)
  452. s->adu_mode = 1;
  453. return 0;
  454. }
  455. /* tab[i][j] = 1.0 / (2.0 * cos(pi*(2*i+1) / 2^(6 - j))) */
  456. /* cos(i*pi/64) */
  457. #define COS0_0 FIXHR(0.50060299823519630134/2)
  458. #define COS0_1 FIXHR(0.50547095989754365998/2)
  459. #define COS0_2 FIXHR(0.51544730992262454697/2)
  460. #define COS0_3 FIXHR(0.53104259108978417447/2)
  461. #define COS0_4 FIXHR(0.55310389603444452782/2)
  462. #define COS0_5 FIXHR(0.58293496820613387367/2)
  463. #define COS0_6 FIXHR(0.62250412303566481615/2)
  464. #define COS0_7 FIXHR(0.67480834145500574602/2)
  465. #define COS0_8 FIXHR(0.74453627100229844977/2)
  466. #define COS0_9 FIXHR(0.83934964541552703873/2)
  467. #define COS0_10 FIXHR(0.97256823786196069369/2)
  468. #define COS0_11 FIXHR(1.16943993343288495515/4)
  469. #define COS0_12 FIXHR(1.48416461631416627724/4)
  470. #define COS0_13 FIXHR(2.05778100995341155085/8)
  471. #define COS0_14 FIXHR(3.40760841846871878570/8)
  472. #define COS0_15 FIXHR(10.19000812354805681150/32)
  473. #define COS1_0 FIXHR(0.50241928618815570551/2)
  474. #define COS1_1 FIXHR(0.52249861493968888062/2)
  475. #define COS1_2 FIXHR(0.56694403481635770368/2)
  476. #define COS1_3 FIXHR(0.64682178335999012954/2)
  477. #define COS1_4 FIXHR(0.78815462345125022473/2)
  478. #define COS1_5 FIXHR(1.06067768599034747134/4)
  479. #define COS1_6 FIXHR(1.72244709823833392782/4)
  480. #define COS1_7 FIXHR(5.10114861868916385802/16)
  481. #define COS2_0 FIXHR(0.50979557910415916894/2)
  482. #define COS2_1 FIXHR(0.60134488693504528054/2)
  483. #define COS2_2 FIXHR(0.89997622313641570463/2)
  484. #define COS2_3 FIXHR(2.56291544774150617881/8)
  485. #define COS3_0 FIXHR(0.54119610014619698439/2)
  486. #define COS3_1 FIXHR(1.30656296487637652785/4)
  487. #define COS4_0 FIXHR(0.70710678118654752439/2)
  488. /* butterfly operator */
  489. #define BF(a, b, c, s)\
  490. {\
  491. tmp0 = tab[a] + tab[b];\
  492. tmp1 = tab[a] - tab[b];\
  493. tab[a] = tmp0;\
  494. tab[b] = MULH(tmp1<<(s), c);\
  495. }
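/* BF(a,b,c,s) is one DCT butterfly: tab[a] becomes the sum and tab[b] the
   difference scaled by c = 1/(2*cos(...)) through MULH; the pre-shift s
   compensates for the COSx_y constants above having been pre-divided by
   2, 4, ... to fit into 32 bits. */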
  496. #define BF1(a, b, c, d)\
  497. {\
  498. BF(a, b, COS4_0, 1);\
  499. BF(c, d,-COS4_0, 1);\
  500. tab[c] += tab[d];\
  501. }
  502. #define BF2(a, b, c, d)\
  503. {\
  504. BF(a, b, COS4_0, 1);\
  505. BF(c, d,-COS4_0, 1);\
  506. tab[c] += tab[d];\
  507. tab[a] += tab[c];\
  508. tab[c] += tab[b];\
  509. tab[b] += tab[d];\
  510. }
  511. #define ADD(a, b) tab[a] += tab[b]
  512. /* DCT32 without 1/sqrt(2) coef zero scaling. */
  513. static void dct32(int32_t *out, int32_t *tab)
  514. {
  515. int tmp0, tmp1;
  516. /* pass 1 */
  517. BF( 0, 31, COS0_0 , 1);
  518. BF(15, 16, COS0_15, 5);
  519. /* pass 2 */
  520. BF( 0, 15, COS1_0 , 1);
  521. BF(16, 31,-COS1_0 , 1);
  522. /* pass 1 */
  523. BF( 7, 24, COS0_7 , 1);
  524. BF( 8, 23, COS0_8 , 1);
  525. /* pass 2 */
  526. BF( 7, 8, COS1_7 , 4);
  527. BF(23, 24,-COS1_7 , 4);
  528. /* pass 3 */
  529. BF( 0, 7, COS2_0 , 1);
  530. BF( 8, 15,-COS2_0 , 1);
  531. BF(16, 23, COS2_0 , 1);
  532. BF(24, 31,-COS2_0 , 1);
  533. /* pass 1 */
  534. BF( 3, 28, COS0_3 , 1);
  535. BF(12, 19, COS0_12, 2);
  536. /* pass 2 */
  537. BF( 3, 12, COS1_3 , 1);
  538. BF(19, 28,-COS1_3 , 1);
  539. /* pass 1 */
  540. BF( 4, 27, COS0_4 , 1);
  541. BF(11, 20, COS0_11, 2);
  542. /* pass 2 */
  543. BF( 4, 11, COS1_4 , 1);
  544. BF(20, 27,-COS1_4 , 1);
  545. /* pass 3 */
  546. BF( 3, 4, COS2_3 , 3);
  547. BF(11, 12,-COS2_3 , 3);
  548. BF(19, 20, COS2_3 , 3);
  549. BF(27, 28,-COS2_3 , 3);
  550. /* pass 4 */
  551. BF( 0, 3, COS3_0 , 1);
  552. BF( 4, 7,-COS3_0 , 1);
  553. BF( 8, 11, COS3_0 , 1);
  554. BF(12, 15,-COS3_0 , 1);
  555. BF(16, 19, COS3_0 , 1);
  556. BF(20, 23,-COS3_0 , 1);
  557. BF(24, 27, COS3_0 , 1);
  558. BF(28, 31,-COS3_0 , 1);
  559. /* pass 1 */
  560. BF( 1, 30, COS0_1 , 1);
  561. BF(14, 17, COS0_14, 3);
  562. /* pass 2 */
  563. BF( 1, 14, COS1_1 , 1);
  564. BF(17, 30,-COS1_1 , 1);
  565. /* pass 1 */
  566. BF( 6, 25, COS0_6 , 1);
  567. BF( 9, 22, COS0_9 , 1);
  568. /* pass 2 */
  569. BF( 6, 9, COS1_6 , 2);
  570. BF(22, 25,-COS1_6 , 2);
  571. /* pass 3 */
  572. BF( 1, 6, COS2_1 , 1);
  573. BF( 9, 14,-COS2_1 , 1);
  574. BF(17, 22, COS2_1 , 1);
  575. BF(25, 30,-COS2_1 , 1);
  576. /* pass 1 */
  577. BF( 2, 29, COS0_2 , 1);
  578. BF(13, 18, COS0_13, 3);
  579. /* pass 2 */
  580. BF( 2, 13, COS1_2 , 1);
  581. BF(18, 29,-COS1_2 , 1);
  582. /* pass 1 */
  583. BF( 5, 26, COS0_5 , 1);
  584. BF(10, 21, COS0_10, 1);
  585. /* pass 2 */
  586. BF( 5, 10, COS1_5 , 2);
  587. BF(21, 26,-COS1_5 , 2);
  588. /* pass 3 */
  589. BF( 2, 5, COS2_2 , 1);
  590. BF(10, 13,-COS2_2 , 1);
  591. BF(18, 21, COS2_2 , 1);
  592. BF(26, 29,-COS2_2 , 1);
  593. /* pass 4 */
  594. BF( 1, 2, COS3_1 , 2);
  595. BF( 5, 6,-COS3_1 , 2);
  596. BF( 9, 10, COS3_1 , 2);
  597. BF(13, 14,-COS3_1 , 2);
  598. BF(17, 18, COS3_1 , 2);
  599. BF(21, 22,-COS3_1 , 2);
  600. BF(25, 26, COS3_1 , 2);
  601. BF(29, 30,-COS3_1 , 2);
  602. /* pass 5 */
  603. BF1( 0, 1, 2, 3);
  604. BF2( 4, 5, 6, 7);
  605. BF1( 8, 9, 10, 11);
  606. BF2(12, 13, 14, 15);
  607. BF1(16, 17, 18, 19);
  608. BF2(20, 21, 22, 23);
  609. BF1(24, 25, 26, 27);
  610. BF2(28, 29, 30, 31);
  611. /* pass 6 */
  612. ADD( 8, 12);
  613. ADD(12, 10);
  614. ADD(10, 14);
  615. ADD(14, 9);
  616. ADD( 9, 13);
  617. ADD(13, 11);
  618. ADD(11, 15);
  619. out[ 0] = tab[0];
  620. out[16] = tab[1];
  621. out[ 8] = tab[2];
  622. out[24] = tab[3];
  623. out[ 4] = tab[4];
  624. out[20] = tab[5];
  625. out[12] = tab[6];
  626. out[28] = tab[7];
  627. out[ 2] = tab[8];
  628. out[18] = tab[9];
  629. out[10] = tab[10];
  630. out[26] = tab[11];
  631. out[ 6] = tab[12];
  632. out[22] = tab[13];
  633. out[14] = tab[14];
  634. out[30] = tab[15];
  635. ADD(24, 28);
  636. ADD(28, 26);
  637. ADD(26, 30);
  638. ADD(30, 25);
  639. ADD(25, 29);
  640. ADD(29, 27);
  641. ADD(27, 31);
  642. out[ 1] = tab[16] + tab[24];
  643. out[17] = tab[17] + tab[25];
  644. out[ 9] = tab[18] + tab[26];
  645. out[25] = tab[19] + tab[27];
  646. out[ 5] = tab[20] + tab[28];
  647. out[21] = tab[21] + tab[29];
  648. out[13] = tab[22] + tab[30];
  649. out[29] = tab[23] + tab[31];
  650. out[ 3] = tab[24] + tab[20];
  651. out[19] = tab[25] + tab[21];
  652. out[11] = tab[26] + tab[22];
  653. out[27] = tab[27] + tab[23];
  654. out[ 7] = tab[28] + tab[18];
  655. out[23] = tab[29] + tab[19];
  656. out[15] = tab[30] + tab[17];
  657. out[31] = tab[31];
  658. }
  659. #if FRAC_BITS <= 15
  660. static inline int round_sample(int *sum)
  661. {
  662. int sum1;
  663. sum1 = (*sum) >> OUT_SHIFT;
  664. *sum &= (1<<OUT_SHIFT)-1;
  665. if (sum1 < OUT_MIN)
  666. sum1 = OUT_MIN;
  667. else if (sum1 > OUT_MAX)
  668. sum1 = OUT_MAX;
  669. return sum1;
  670. }
  671. /* signed 16x16 -> 32 multiply add accumulate */
  672. #define MACS(rt, ra, rb) MAC16(rt, ra, rb)
  673. /* signed 16x16 -> 32 multiply */
  674. #define MULS(ra, rb) MUL16(ra, rb)
  675. #else
  676. static inline int round_sample(int64_t *sum)
  677. {
  678. int sum1;
  679. sum1 = (int)((*sum) >> OUT_SHIFT);
  680. *sum &= (1<<OUT_SHIFT)-1;
  681. if (sum1 < OUT_MIN)
  682. sum1 = OUT_MIN;
  683. else if (sum1 > OUT_MAX)
  684. sum1 = OUT_MAX;
  685. return sum1;
  686. }
  687. # define MULS(ra, rb) MUL64(ra, rb)
  688. #endif
  689. #define SUM8(sum, op, w, p) \
  690. { \
  691. sum op MULS((w)[0 * 64], p[0 * 64]);\
  692. sum op MULS((w)[1 * 64], p[1 * 64]);\
  693. sum op MULS((w)[2 * 64], p[2 * 64]);\
  694. sum op MULS((w)[3 * 64], p[3 * 64]);\
  695. sum op MULS((w)[4 * 64], p[4 * 64]);\
  696. sum op MULS((w)[5 * 64], p[5 * 64]);\
  697. sum op MULS((w)[6 * 64], p[6 * 64]);\
  698. sum op MULS((w)[7 * 64], p[7 * 64]);\
  699. }
  700. #define SUM8P2(sum1, op1, sum2, op2, w1, w2, p) \
  701. { \
  702. int tmp;\
  703. tmp = p[0 * 64];\
  704. sum1 op1 MULS((w1)[0 * 64], tmp);\
  705. sum2 op2 MULS((w2)[0 * 64], tmp);\
  706. tmp = p[1 * 64];\
  707. sum1 op1 MULS((w1)[1 * 64], tmp);\
  708. sum2 op2 MULS((w2)[1 * 64], tmp);\
  709. tmp = p[2 * 64];\
  710. sum1 op1 MULS((w1)[2 * 64], tmp);\
  711. sum2 op2 MULS((w2)[2 * 64], tmp);\
  712. tmp = p[3 * 64];\
  713. sum1 op1 MULS((w1)[3 * 64], tmp);\
  714. sum2 op2 MULS((w2)[3 * 64], tmp);\
  715. tmp = p[4 * 64];\
  716. sum1 op1 MULS((w1)[4 * 64], tmp);\
  717. sum2 op2 MULS((w2)[4 * 64], tmp);\
  718. tmp = p[5 * 64];\
  719. sum1 op1 MULS((w1)[5 * 64], tmp);\
  720. sum2 op2 MULS((w2)[5 * 64], tmp);\
  721. tmp = p[6 * 64];\
  722. sum1 op1 MULS((w1)[6 * 64], tmp);\
  723. sum2 op2 MULS((w2)[6 * 64], tmp);\
  724. tmp = p[7 * 64];\
  725. sum1 op1 MULS((w1)[7 * 64], tmp);\
  726. sum2 op2 MULS((w2)[7 * 64], tmp);\
  727. }
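/* SUM8 and SUM8P2 are the polyphase synthesis dot products: 8 window
   coefficients multiplied with 8 samples taken at a stride of 64 from the
   512-entry ring buffer. SUM8P2 derives two output samples from the same sample
   loads, which is what lets ff_mpa_synth_filter() below compute two samples per
   iteration. */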
  728. void ff_mpa_synth_init(MPA_INT *window)
  729. {
  730. int i;
  731. /* max = 18760, max sum over all 16 coefs : 44736 */
  732. for(i=0;i<257;i++) {
  733. int v;
  734. v = ff_mpa_enwindow[i];
  735. #if WFRAC_BITS < 16
  736. v = (v + (1 << (16 - WFRAC_BITS - 1))) >> (16 - WFRAC_BITS);
  737. #endif
  738. window[i] = v;
  739. if ((i & 63) != 0)
  740. v = -v;
  741. if (i != 0)
  742. window[512 - i] = v;
  743. }
  744. }
  745. /* 32 sub band synthesis filter. Input: 32 sub band samples, Output:
  746. 32 samples. */
  747. /* XXX: optimize by avoiding ring buffer usage */
  748. void ff_mpa_synth_filter(MPA_INT *synth_buf_ptr, int *synth_buf_offset,
  749. MPA_INT *window, int *dither_state,
  750. OUT_INT *samples, int incr,
  751. int32_t sb_samples[SBLIMIT])
  752. {
  753. int32_t tmp[32];
  754. register MPA_INT *synth_buf;
  755. register const MPA_INT *w, *w2, *p;
  756. int j, offset, v;
  757. OUT_INT *samples2;
  758. #if FRAC_BITS <= 15
  759. int sum, sum2;
  760. #else
  761. int64_t sum, sum2;
  762. #endif
  763. dct32(tmp, sb_samples);
  764. offset = *synth_buf_offset;
  765. synth_buf = synth_buf_ptr + offset;
  766. for(j=0;j<32;j++) {
  767. v = tmp[j];
  768. #if FRAC_BITS <= 15
  769. /* NOTE: can cause a loss of precision with very high amplitude
  770. sound */
  771. v = av_clip_int16(v);
  772. #endif
  773. synth_buf[j] = v;
  774. }
  775. /* copy to avoid wrap */
  776. memcpy(synth_buf + 512, synth_buf, 32 * sizeof(MPA_INT));
  777. samples2 = samples + 31 * incr;
  778. w = window;
  779. w2 = window + 31;
  780. sum = *dither_state;
  781. p = synth_buf + 16;
  782. SUM8(sum, +=, w, p);
  783. p = synth_buf + 48;
  784. SUM8(sum, -=, w + 32, p);
  785. *samples = round_sample(&sum);
  786. samples += incr;
  787. w++;
  788. /* we calculate two samples at the same time to avoid one memory
  789. access per two samples */
  790. for(j=1;j<16;j++) {
  791. sum2 = 0;
  792. p = synth_buf + 16 + j;
  793. SUM8P2(sum, +=, sum2, -=, w, w2, p);
  794. p = synth_buf + 48 - j;
  795. SUM8P2(sum, -=, sum2, -=, w + 32, w2 + 32, p);
  796. *samples = round_sample(&sum);
  797. samples += incr;
  798. sum += sum2;
  799. *samples2 = round_sample(&sum);
  800. samples2 -= incr;
  801. w++;
  802. w2--;
  803. }
  804. p = synth_buf + 32;
  805. SUM8(sum, -=, w + 32, p);
  806. *samples = round_sample(&sum);
  807. *dither_state= sum;
  808. offset = (offset - 32) & 511;
  809. *synth_buf_offset = offset;
  810. }
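/* Note that round_sample() leaves the bits below OUT_SHIFT in the accumulator,
   and the final remainder is stored in *dither_state for the next call, so the
   rounding error is fed back rather than discarded. */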
  811. #define C3 FIXHR(0.86602540378443864676/2)
  812. /* 0.5 / cos(pi*(2*i+1)/36) */
  813. static const int icos36[9] = {
  814. FIXR(0.50190991877167369479),
  815. FIXR(0.51763809020504152469), //0
  816. FIXR(0.55168895948124587824),
  817. FIXR(0.61038729438072803416),
  818. FIXR(0.70710678118654752439), //1
  819. FIXR(0.87172339781054900991),
  820. FIXR(1.18310079157624925896),
  821. FIXR(1.93185165257813657349), //2
  822. FIXR(5.73685662283492756461),
  823. };
  824. /* 0.5 / cos(pi*(2*i+1)/36) */
  825. static const int icos36h[9] = {
  826. FIXHR(0.50190991877167369479/2),
  827. FIXHR(0.51763809020504152469/2), //0
  828. FIXHR(0.55168895948124587824/2),
  829. FIXHR(0.61038729438072803416/2),
  830. FIXHR(0.70710678118654752439/2), //1
  831. FIXHR(0.87172339781054900991/2),
  832. FIXHR(1.18310079157624925896/4),
  833. FIXHR(1.93185165257813657349/4), //2
  834. // FIXHR(5.73685662283492756461),
  835. };
  836. /* 12-point IMDCT. We compute it "by hand" by factorizing obvious
  837. cases. */
  838. static void imdct12(int *out, int *in)
  839. {
  840. int in0, in1, in2, in3, in4, in5, t1, t2;
  841. in0= in[0*3];
  842. in1= in[1*3] + in[0*3];
  843. in2= in[2*3] + in[1*3];
  844. in3= in[3*3] + in[2*3];
  845. in4= in[4*3] + in[3*3];
  846. in5= in[5*3] + in[4*3];
  847. in5 += in3;
  848. in3 += in1;
  849. in2= MULH(2*in2, C3);
  850. in3= MULH(4*in3, C3);
  851. t1 = in0 - in4;
  852. t2 = MULH(2*(in1 - in5), icos36h[4]);
  853. out[ 7]=
  854. out[10]= t1 + t2;
  855. out[ 1]=
  856. out[ 4]= t1 - t2;
  857. in0 += in4>>1;
  858. in4 = in0 + in2;
  859. in5 += 2*in1;
  860. in1 = MULH(in5 + in3, icos36h[1]);
  861. out[ 8]=
  862. out[ 9]= in4 + in1;
  863. out[ 2]=
  864. out[ 3]= in4 - in1;
  865. in0 -= in2;
  866. in5 = MULH(2*(in5 - in3), icos36h[7]);
  867. out[ 0]=
  868. out[ 5]= in0 - in5;
  869. out[ 6]=
  870. out[11]= in0 + in5;
  871. }
  872. /* cos(pi*i/18) */
  873. #define C1 FIXHR(0.98480775301220805936/2)
  874. #define C2 FIXHR(0.93969262078590838405/2)
  875. #define C3 FIXHR(0.86602540378443864676/2)
  876. #define C4 FIXHR(0.76604444311897803520/2)
  877. #define C5 FIXHR(0.64278760968653932632/2)
  878. #define C6 FIXHR(0.5/2)
  879. #define C7 FIXHR(0.34202014332566873304/2)
  880. #define C8 FIXHR(0.17364817766693034885/2)
  881. /* using a Lee-like decomposition followed by a hand-coded 9-point DCT */
  882. static void imdct36(int *out, int *buf, int *in, int *win)
  883. {
  884. int i, j, t0, t1, t2, t3, s0, s1, s2, s3;
  885. int tmp[18], *tmp1, *in1;
  886. for(i=17;i>=1;i--)
  887. in[i] += in[i-1];
  888. for(i=17;i>=3;i-=2)
  889. in[i] += in[i-2];
  890. for(j=0;j<2;j++) {
  891. tmp1 = tmp + j;
  892. in1 = in + j;
  893. #if 0
  894. //more accurate but slower
  895. int64_t t0, t1, t2, t3;
  896. t2 = in1[2*4] + in1[2*8] - in1[2*2];
  897. t3 = (in1[2*0] + (int64_t)(in1[2*6]>>1))<<32;
  898. t1 = in1[2*0] - in1[2*6];
  899. tmp1[ 6] = t1 - (t2>>1);
  900. tmp1[16] = t1 + t2;
  901. t0 = MUL64(2*(in1[2*2] + in1[2*4]), C2);
  902. t1 = MUL64( in1[2*4] - in1[2*8] , -2*C8);
  903. t2 = MUL64(2*(in1[2*2] + in1[2*8]), -C4);
  904. tmp1[10] = (t3 - t0 - t2) >> 32;
  905. tmp1[ 2] = (t3 + t0 + t1) >> 32;
  906. tmp1[14] = (t3 + t2 - t1) >> 32;
  907. tmp1[ 4] = MULH(2*(in1[2*5] + in1[2*7] - in1[2*1]), -C3);
  908. t2 = MUL64(2*(in1[2*1] + in1[2*5]), C1);
  909. t3 = MUL64( in1[2*5] - in1[2*7] , -2*C7);
  910. t0 = MUL64(2*in1[2*3], C3);
  911. t1 = MUL64(2*(in1[2*1] + in1[2*7]), -C5);
  912. tmp1[ 0] = (t2 + t3 + t0) >> 32;
  913. tmp1[12] = (t2 + t1 - t0) >> 32;
  914. tmp1[ 8] = (t3 - t1 - t0) >> 32;
  915. #else
  916. t2 = in1[2*4] + in1[2*8] - in1[2*2];
  917. t3 = in1[2*0] + (in1[2*6]>>1);
  918. t1 = in1[2*0] - in1[2*6];
  919. tmp1[ 6] = t1 - (t2>>1);
  920. tmp1[16] = t1 + t2;
  921. t0 = MULH(2*(in1[2*2] + in1[2*4]), C2);
  922. t1 = MULH( in1[2*4] - in1[2*8] , -2*C8);
  923. t2 = MULH(2*(in1[2*2] + in1[2*8]), -C4);
  924. tmp1[10] = t3 - t0 - t2;
  925. tmp1[ 2] = t3 + t0 + t1;
  926. tmp1[14] = t3 + t2 - t1;
  927. tmp1[ 4] = MULH(2*(in1[2*5] + in1[2*7] - in1[2*1]), -C3);
  928. t2 = MULH(2*(in1[2*1] + in1[2*5]), C1);
  929. t3 = MULH( in1[2*5] - in1[2*7] , -2*C7);
  930. t0 = MULH(2*in1[2*3], C3);
  931. t1 = MULH(2*(in1[2*1] + in1[2*7]), -C5);
  932. tmp1[ 0] = t2 + t3 + t0;
  933. tmp1[12] = t2 + t1 - t0;
  934. tmp1[ 8] = t3 - t1 - t0;
  935. #endif
  936. }
  937. i = 0;
  938. for(j=0;j<4;j++) {
  939. t0 = tmp[i];
  940. t1 = tmp[i + 2];
  941. s0 = t1 + t0;
  942. s2 = t1 - t0;
  943. t2 = tmp[i + 1];
  944. t3 = tmp[i + 3];
  945. s1 = MULH(2*(t3 + t2), icos36h[j]);
  946. s3 = MULL(t3 - t2, icos36[8 - j]);
  947. t0 = s0 + s1;
  948. t1 = s0 - s1;
  949. out[(9 + j)*SBLIMIT] = MULH(t1, win[9 + j]) + buf[9 + j];
  950. out[(8 - j)*SBLIMIT] = MULH(t1, win[8 - j]) + buf[8 - j];
  951. buf[9 + j] = MULH(t0, win[18 + 9 + j]);
  952. buf[8 - j] = MULH(t0, win[18 + 8 - j]);
  953. t0 = s2 + s3;
  954. t1 = s2 - s3;
  955. out[(9 + 8 - j)*SBLIMIT] = MULH(t1, win[9 + 8 - j]) + buf[9 + 8 - j];
  956. out[( j)*SBLIMIT] = MULH(t1, win[ j]) + buf[ j];
  957. buf[9 + 8 - j] = MULH(t0, win[18 + 9 + 8 - j]);
  958. buf[ + j] = MULH(t0, win[18 + j]);
  959. i += 4;
  960. }
  961. s0 = tmp[16];
  962. s1 = MULH(2*tmp[17], icos36h[4]);
  963. t0 = s0 + s1;
  964. t1 = s0 - s1;
  965. out[(9 + 4)*SBLIMIT] = MULH(t1, win[9 + 4]) + buf[9 + 4];
  966. out[(8 - 4)*SBLIMIT] = MULH(t1, win[8 - 4]) + buf[8 - 4];
  967. buf[9 + 4] = MULH(t0, win[18 + 9 + 4]);
  968. buf[8 - 4] = MULH(t0, win[18 + 8 - 4]);
  969. }
  970. /* return the number of decoded frames */
  971. static int mp_decode_layer1(MPADecodeContext *s)
  972. {
  973. int bound, i, v, n, ch, j, mant;
  974. uint8_t allocation[MPA_MAX_CHANNELS][SBLIMIT];
  975. uint8_t scale_factors[MPA_MAX_CHANNELS][SBLIMIT];
  976. if (s->mode == MPA_JSTEREO)
  977. bound = (s->mode_ext + 1) * 4;
  978. else
  979. bound = SBLIMIT;
  980. /* allocation bits */
  981. for(i=0;i<bound;i++) {
  982. for(ch=0;ch<s->nb_channels;ch++) {
  983. allocation[ch][i] = get_bits(&s->gb, 4);
  984. }
  985. }
  986. for(i=bound;i<SBLIMIT;i++) {
  987. allocation[0][i] = get_bits(&s->gb, 4);
  988. }
  989. /* scale factors */
  990. for(i=0;i<bound;i++) {
  991. for(ch=0;ch<s->nb_channels;ch++) {
  992. if (allocation[ch][i])
  993. scale_factors[ch][i] = get_bits(&s->gb, 6);
  994. }
  995. }
  996. for(i=bound;i<SBLIMIT;i++) {
  997. if (allocation[0][i]) {
  998. scale_factors[0][i] = get_bits(&s->gb, 6);
  999. scale_factors[1][i] = get_bits(&s->gb, 6);
  1000. }
  1001. }
  1002. /* compute samples */
  1003. for(j=0;j<12;j++) {
  1004. for(i=0;i<bound;i++) {
  1005. for(ch=0;ch<s->nb_channels;ch++) {
  1006. n = allocation[ch][i];
  1007. if (n) {
  1008. mant = get_bits(&s->gb, n + 1);
  1009. v = l1_unscale(n, mant, scale_factors[ch][i]);
  1010. } else {
  1011. v = 0;
  1012. }
  1013. s->sb_samples[ch][j][i] = v;
  1014. }
  1015. }
  1016. for(i=bound;i<SBLIMIT;i++) {
  1017. n = allocation[0][i];
  1018. if (n) {
  1019. mant = get_bits(&s->gb, n + 1);
  1020. v = l1_unscale(n, mant, scale_factors[0][i]);
  1021. s->sb_samples[0][j][i] = v;
  1022. v = l1_unscale(n, mant, scale_factors[1][i]);
  1023. s->sb_samples[1][j][i] = v;
  1024. } else {
  1025. s->sb_samples[0][j][i] = 0;
  1026. s->sb_samples[1][j][i] = 0;
  1027. }
  1028. }
  1029. }
  1030. return 12;
  1031. }
  1032. static int mp_decode_layer2(MPADecodeContext *s)
  1033. {
  1034. int sblimit; /* number of used subbands */
  1035. const unsigned char *alloc_table;
  1036. int table, bit_alloc_bits, i, j, ch, bound, v;
  1037. unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT];
  1038. unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT];
  1039. unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3], *sf;
  1040. int scale, qindex, bits, steps, k, l, m, b;
  1041. /* select decoding table */
  1042. table = ff_mpa_l2_select_table(s->bit_rate / 1000, s->nb_channels,
  1043. s->sample_rate, s->lsf);
  1044. sblimit = ff_mpa_sblimit_table[table];
  1045. alloc_table = ff_mpa_alloc_tables[table];
  1046. if (s->mode == MPA_JSTEREO)
  1047. bound = (s->mode_ext + 1) * 4;
  1048. else
  1049. bound = sblimit;
  1050. dprintf(s->avctx, "bound=%d sblimit=%d\n", bound, sblimit);
  1051. /* sanity check */
  1052. if( bound > sblimit ) bound = sblimit;
  1053. /* parse bit allocation */
  1054. j = 0;
  1055. for(i=0;i<bound;i++) {
  1056. bit_alloc_bits = alloc_table[j];
  1057. for(ch=0;ch<s->nb_channels;ch++) {
  1058. bit_alloc[ch][i] = get_bits(&s->gb, bit_alloc_bits);
  1059. }
  1060. j += 1 << bit_alloc_bits;
  1061. }
  1062. for(i=bound;i<sblimit;i++) {
  1063. bit_alloc_bits = alloc_table[j];
  1064. v = get_bits(&s->gb, bit_alloc_bits);
  1065. bit_alloc[0][i] = v;
  1066. bit_alloc[1][i] = v;
  1067. j += 1 << bit_alloc_bits;
  1068. }
  1069. #ifdef DEBUG
  1070. {
  1071. for(ch=0;ch<s->nb_channels;ch++) {
  1072. for(i=0;i<sblimit;i++)
  1073. dprintf(s->avctx, " %d", bit_alloc[ch][i]);
  1074. dprintf(s->avctx, "\n");
  1075. }
  1076. }
  1077. #endif
  1078. /* scale codes */
  1079. for(i=0;i<sblimit;i++) {
  1080. for(ch=0;ch<s->nb_channels;ch++) {
  1081. if (bit_alloc[ch][i])
  1082. scale_code[ch][i] = get_bits(&s->gb, 2);
  1083. }
  1084. }
  1085. /* scale factors */
  1086. for(i=0;i<sblimit;i++) {
  1087. for(ch=0;ch<s->nb_channels;ch++) {
  1088. if (bit_alloc[ch][i]) {
  1089. sf = scale_factors[ch][i];
  1090. switch(scale_code[ch][i]) {
  1091. default:
  1092. case 0:
  1093. sf[0] = get_bits(&s->gb, 6);
  1094. sf[1] = get_bits(&s->gb, 6);
  1095. sf[2] = get_bits(&s->gb, 6);
  1096. break;
  1097. case 2:
  1098. sf[0] = get_bits(&s->gb, 6);
  1099. sf[1] = sf[0];
  1100. sf[2] = sf[0];
  1101. break;
  1102. case 1:
  1103. sf[0] = get_bits(&s->gb, 6);
  1104. sf[2] = get_bits(&s->gb, 6);
  1105. sf[1] = sf[0];
  1106. break;
  1107. case 3:
  1108. sf[0] = get_bits(&s->gb, 6);
  1109. sf[2] = get_bits(&s->gb, 6);
  1110. sf[1] = sf[2];
  1111. break;
  1112. }
  1113. }
  1114. }
  1115. }
  1116. #ifdef DEBUG
  1117. for(ch=0;ch<s->nb_channels;ch++) {
  1118. for(i=0;i<sblimit;i++) {
  1119. if (bit_alloc[ch][i]) {
  1120. sf = scale_factors[ch][i];
  1121. dprintf(s->avctx, " %d %d %d", sf[0], sf[1], sf[2]);
  1122. } else {
  1123. dprintf(s->avctx, " -");
  1124. }
  1125. }
  1126. dprintf(s->avctx, "\n");
  1127. }
  1128. #endif
  1129. /* samples */
  1130. for(k=0;k<3;k++) {
  1131. for(l=0;l<12;l+=3) {
  1132. j = 0;
  1133. for(i=0;i<bound;i++) {
  1134. bit_alloc_bits = alloc_table[j];
  1135. for(ch=0;ch<s->nb_channels;ch++) {
  1136. b = bit_alloc[ch][i];
  1137. if (b) {
  1138. scale = scale_factors[ch][i][k];
  1139. qindex = alloc_table[j+b];
  1140. bits = ff_mpa_quant_bits[qindex];
  1141. if (bits < 0) {
  1142. /* 3 values at the same time */
  1143. v = get_bits(&s->gb, -bits);
  1144. steps = ff_mpa_quant_steps[qindex];
  1145. s->sb_samples[ch][k * 12 + l + 0][i] =
  1146. l2_unscale_group(steps, v % steps, scale);
  1147. v = v / steps;
  1148. s->sb_samples[ch][k * 12 + l + 1][i] =
  1149. l2_unscale_group(steps, v % steps, scale);
  1150. v = v / steps;
  1151. s->sb_samples[ch][k * 12 + l + 2][i] =
  1152. l2_unscale_group(steps, v, scale);
  1153. } else {
  1154. for(m=0;m<3;m++) {
  1155. v = get_bits(&s->gb, bits);
  1156. v = l1_unscale(bits - 1, v, scale);
  1157. s->sb_samples[ch][k * 12 + l + m][i] = v;
  1158. }
  1159. }
  1160. } else {
  1161. s->sb_samples[ch][k * 12 + l + 0][i] = 0;
  1162. s->sb_samples[ch][k * 12 + l + 1][i] = 0;
  1163. s->sb_samples[ch][k * 12 + l + 2][i] = 0;
  1164. }
  1165. }
  1166. /* next subband in alloc table */
  1167. j += 1 << bit_alloc_bits;
  1168. }
  1169. /* XXX: find a way to avoid this duplication of code */
  1170. for(i=bound;i<sblimit;i++) {
  1171. bit_alloc_bits = alloc_table[j];
  1172. b = bit_alloc[0][i];
  1173. if (b) {
  1174. int mant, scale0, scale1;
  1175. scale0 = scale_factors[0][i][k];
  1176. scale1 = scale_factors[1][i][k];
  1177. qindex = alloc_table[j+b];
  1178. bits = ff_mpa_quant_bits[qindex];
  1179. if (bits < 0) {
  1180. /* 3 values at the same time */
  1181. v = get_bits(&s->gb, -bits);
  1182. steps = ff_mpa_quant_steps[qindex];
  1183. mant = v % steps;
  1184. v = v / steps;
  1185. s->sb_samples[0][k * 12 + l + 0][i] =
  1186. l2_unscale_group(steps, mant, scale0);
  1187. s->sb_samples[1][k * 12 + l + 0][i] =
  1188. l2_unscale_group(steps, mant, scale1);
  1189. mant = v % steps;
  1190. v = v / steps;
  1191. s->sb_samples[0][k * 12 + l + 1][i] =
  1192. l2_unscale_group(steps, mant, scale0);
  1193. s->sb_samples[1][k * 12 + l + 1][i] =
  1194. l2_unscale_group(steps, mant, scale1);
  1195. s->sb_samples[0][k * 12 + l + 2][i] =
  1196. l2_unscale_group(steps, v, scale0);
  1197. s->sb_samples[1][k * 12 + l + 2][i] =
  1198. l2_unscale_group(steps, v, scale1);
  1199. } else {
  1200. for(m=0;m<3;m++) {
  1201. mant = get_bits(&s->gb, bits);
  1202. s->sb_samples[0][k * 12 + l + m][i] =
  1203. l1_unscale(bits - 1, mant, scale0);
  1204. s->sb_samples[1][k * 12 + l + m][i] =
  1205. l1_unscale(bits - 1, mant, scale1);
  1206. }
  1207. }
  1208. } else {
  1209. s->sb_samples[0][k * 12 + l + 0][i] = 0;
  1210. s->sb_samples[0][k * 12 + l + 1][i] = 0;
  1211. s->sb_samples[0][k * 12 + l + 2][i] = 0;
  1212. s->sb_samples[1][k * 12 + l + 0][i] = 0;
  1213. s->sb_samples[1][k * 12 + l + 1][i] = 0;
  1214. s->sb_samples[1][k * 12 + l + 2][i] = 0;
  1215. }
  1216. /* next subband in alloc table */
  1217. j += 1 << bit_alloc_bits;
  1218. }
  1219. /* fill remaining samples to zero */
  1220. for(i=sblimit;i<SBLIMIT;i++) {
  1221. for(ch=0;ch<s->nb_channels;ch++) {
  1222. s->sb_samples[ch][k * 12 + l + 0][i] = 0;
  1223. s->sb_samples[ch][k * 12 + l + 1][i] = 0;
  1224. s->sb_samples[ch][k * 12 + l + 2][i] = 0;
  1225. }
  1226. }
  1227. }
  1228. }
  1229. return 3 * 12;
  1230. }
  1231. static inline void lsf_sf_expand(int *slen,
  1232. int sf, int n1, int n2, int n3)
  1233. {
  1234. if (n3) {
  1235. slen[3] = sf % n3;
  1236. sf /= n3;
  1237. } else {
  1238. slen[3] = 0;
  1239. }
  1240. if (n2) {
  1241. slen[2] = sf % n2;
  1242. sf /= n2;
  1243. } else {
  1244. slen[2] = 0;
  1245. }
  1246. slen[1] = sf % n1;
  1247. sf /= n1;
  1248. slen[0] = sf;
  1249. }
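/* This unpacks scalefac_compress as a mixed-radix number, least significant
   digit first, with the bases (n1, n2, n3) chosen by the caller: e.g. with
   (n1, n2, n3) = (4, 4, 0), sf = 27 yields slen = {1, 2, 3, 0} since
   27 == (1*4 + 2)*4 + 3. */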
  1250. static void exponents_from_scale_factors(MPADecodeContext *s,
  1251. GranuleDef *g,
  1252. int16_t *exponents)
  1253. {
  1254. const uint8_t *bstab, *pretab;
  1255. int len, i, j, k, l, v0, shift, gain, gains[3];
  1256. int16_t *exp_ptr;
  1257. exp_ptr = exponents;
  1258. gain = g->global_gain - 210;
  1259. shift = g->scalefac_scale + 1;
  1260. bstab = band_size_long[s->sample_rate_index];
  1261. pretab = mpa_pretab[g->preflag];
  1262. for(i=0;i<g->long_end;i++) {
  1263. v0 = gain - ((g->scale_factors[i] + pretab[i]) << shift) + 400;
  1264. len = bstab[i];
  1265. for(j=len;j>0;j--)
  1266. *exp_ptr++ = v0;
  1267. }
  1268. if (g->short_start < 13) {
  1269. bstab = band_size_short[s->sample_rate_index];
  1270. gains[0] = gain - (g->subblock_gain[0] << 3);
  1271. gains[1] = gain - (g->subblock_gain[1] << 3);
  1272. gains[2] = gain - (g->subblock_gain[2] << 3);
  1273. k = g->long_end;
  1274. for(i=g->short_start;i<13;i++) {
  1275. len = bstab[i];
  1276. for(l=0;l<3;l++) {
  1277. v0 = gains[l] - (g->scale_factors[k++] << shift) + 400;
  1278. for(j=len;j>0;j--)
  1279. *exp_ptr++ = v0;
  1280. }
  1281. }
  1282. }
  1283. }
  1284. /* handle n = 0 too */
  1285. static inline int get_bitsz(GetBitContext *s, int n)
  1286. {
  1287. if (n == 0)
  1288. return 0;
  1289. else
  1290. return get_bits(s, n);
  1291. }
  1292. static void switch_buffer(MPADecodeContext *s, int *pos, int *end_pos, int *end_pos2){
  1293. if(s->in_gb.buffer && *pos >= s->gb.size_in_bits){
  1294. s->gb= s->in_gb;
  1295. s->in_gb.buffer=NULL;
  1296. assert((get_bits_count(&s->gb) & 7) == 0);
  1297. skip_bits_long(&s->gb, *pos - *end_pos);
  1298. *end_pos2=
  1299. *end_pos= *end_pos2 + get_bits_count(&s->gb) - *pos;
  1300. *pos= get_bits_count(&s->gb);
  1301. }
  1302. }
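/* Bit reservoir handling, in short: when the reservoir is in use, s->gb reads
   the bytes saved from previous frames while s->in_gb holds the current frame;
   once the read position passes the end of the saved data, reading switches to
   the current frame's buffer and the end-of-granule positions are re-based to
   the new bit counter. */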
  1303. static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
  1304. int16_t *exponents, int end_pos2)
  1305. {
  1306. int s_index;
  1307. int i;
  1308. int last_pos, bits_left;
  1309. VLC *vlc;
  1310. int end_pos= FFMIN(end_pos2, s->gb.size_in_bits);
  1311. /* low frequencies (called big values) */
  1312. s_index = 0;
  1313. for(i=0;i<3;i++) {
  1314. int j, k, l, linbits;
  1315. j = g->region_size[i];
  1316. if (j == 0)
  1317. continue;
  1318. /* select vlc table */
  1319. k = g->table_select[i];
  1320. l = mpa_huff_data[k][0];
  1321. linbits = mpa_huff_data[k][1];
  1322. vlc = &huff_vlc[l];
  1323. if(!l){
  1324. memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*2*j);
  1325. s_index += 2*j;
  1326. continue;
  1327. }
  1328. /* read huffcode and compute each couple */
  1329. for(;j>0;j--) {
  1330. int exponent, x, y, v;
  1331. int pos= get_bits_count(&s->gb);
  1332. if (pos >= end_pos){
  1333. // av_log(NULL, AV_LOG_ERROR, "pos: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
  1334. switch_buffer(s, &pos, &end_pos, &end_pos2);
  1335. // av_log(NULL, AV_LOG_ERROR, "new pos: %d %d\n", pos, end_pos);
  1336. if(pos >= end_pos)
  1337. break;
  1338. }
  1339. y = get_vlc2(&s->gb, vlc->table, 7, 3);
  1340. if(!y){
  1341. g->sb_hybrid[s_index ] =
  1342. g->sb_hybrid[s_index+1] = 0;
  1343. s_index += 2;
  1344. continue;
  1345. }
  1346. exponent= exponents[s_index];
  1347. dprintf(s->avctx, "region=%d n=%d x=%d y=%d exp=%d\n",
  1348. i, g->region_size[i] - j, x, y, exponent);
  1349. if(y&16){
  1350. x = y >> 5;
  1351. y = y & 0x0f;
  1352. if (x < 15){
  1353. v = expval_table[ exponent ][ x ];
  1354. // v = expval_table[ (exponent&3) ][ x ] >> FFMIN(0 - (exponent>>2), 31);
  1355. }else{
  1356. x += get_bitsz(&s->gb, linbits);
  1357. v = l3_unscale(x, exponent);
  1358. }
  1359. if (get_bits1(&s->gb))
  1360. v = -v;
  1361. g->sb_hybrid[s_index] = v;
  1362. if (y < 15){
  1363. v = expval_table[ exponent ][ y ];
  1364. }else{
  1365. y += get_bitsz(&s->gb, linbits);
  1366. v = l3_unscale(y, exponent);
  1367. }
  1368. if (get_bits1(&s->gb))
  1369. v = -v;
  1370. g->sb_hybrid[s_index+1] = v;
  1371. }else{
  1372. x = y >> 5;
  1373. y = y & 0x0f;
  1374. x += y;
  1375. if (x < 15){
  1376. v = expval_table[ exponent ][ x ];
  1377. }else{
  1378. x += get_bitsz(&s->gb, linbits);
  1379. v = l3_unscale(x, exponent);
  1380. }
  1381. if (get_bits1(&s->gb))
  1382. v = -v;
  1383. g->sb_hybrid[s_index+!!y] = v;
  1384. g->sb_hybrid[s_index+ !y] = 0;
  1385. }
  1386. s_index+=2;
  1387. }
  1388. }
  1389. /* high frequencies */
  1390. vlc = &huff_quad_vlc[g->count1table_select];
  1391. last_pos=0;
  1392. while (s_index <= 572) {
  1393. int pos, code;
  1394. pos = get_bits_count(&s->gb);
  1395. if (pos >= end_pos) {
  1396. if (pos > end_pos2 && last_pos){
  1397. /* some encoders generate an incorrect size for this
  1398. part. We must go back into the data */
  1399. s_index -= 4;
  1400. skip_bits_long(&s->gb, last_pos - pos);
  1401. av_log(NULL, AV_LOG_INFO, "overread, skip %d enddists: %d %d\n", last_pos - pos, end_pos-pos, end_pos2-pos);
  1402. if(s->error_resilience >= FF_ER_COMPLIANT)
  1403. s_index=0;
  1404. break;
  1405. }
  1406. // av_log(NULL, AV_LOG_ERROR, "pos2: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
  1407. switch_buffer(s, &pos, &end_pos, &end_pos2);
  1408. // av_log(NULL, AV_LOG_ERROR, "new pos2: %d %d %d\n", pos, end_pos, s_index);
  1409. if(pos >= end_pos)
  1410. break;
  1411. }
  1412. last_pos= pos;
  1413. code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1);
  1414. dprintf(s->avctx, "t=%d code=%d\n", g->count1table_select, code);
  1415. g->sb_hybrid[s_index+0]=
  1416. g->sb_hybrid[s_index+1]=
  1417. g->sb_hybrid[s_index+2]=
  1418. g->sb_hybrid[s_index+3]= 0;
  1419. while(code){
  1420. static const int idxtab[16]={3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0};
  1421. int v;
  1422. int pos= s_index+idxtab[code];
  1423. code ^= 8>>idxtab[code];
  1424. v = exp_table[ exponents[pos] ];
  1425. // v = exp_table[ (exponents[pos]&3) ] >> FFMIN(0 - (exponents[pos]>>2), 31);
  1426. if(get_bits1(&s->gb))
  1427. v = -v;
  1428. g->sb_hybrid[pos] = v;
  1429. }
  1430. s_index+=4;
  1431. }
  1432. /* skip extension bits */
  1433. bits_left = end_pos2 - get_bits_count(&s->gb);
  1434. //av_log(NULL, AV_LOG_ERROR, "left:%d buf:%p\n", bits_left, s->in_gb.buffer);
  1435. if (bits_left < 0/* || bits_left > 500*/) {
  1436. av_log(NULL, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
  1437. s_index=0;
  1438. }else if(bits_left > 0 && s->error_resilience >= FF_ER_AGGRESSIVE){
  1439. av_log(NULL, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
  1440. s_index=0;
  1441. }
  1442. memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*(576 - s_index));
  1443. skip_bits_long(&s->gb, bits_left);
  1444. i= get_bits_count(&s->gb);
  1445. switch_buffer(s, &i, &end_pos, &end_pos2);
  1446. return 0;
  1447. }
  1448. /* Reorder short blocks from bitstream order to interleaved order. It
  1449. would be faster to do it while parsing, but the code would be far more
  1450. complicated */
  1451. static void reorder_block(MPADecodeContext *s, GranuleDef *g)
  1452. {
  1453. int i, j, len;
  1454. int32_t *ptr, *dst, *ptr1;
  1455. int32_t tmp[576];
  1456. if (g->block_type != 2)
  1457. return;
  1458. if (g->switch_point) {
  1459. if (s->sample_rate_index != 8) {
  1460. ptr = g->sb_hybrid + 36;
  1461. } else {
  1462. ptr = g->sb_hybrid + 48;
  1463. }
  1464. } else {
  1465. ptr = g->sb_hybrid;
  1466. }
  1467. for(i=g->short_start;i<13;i++) {
  1468. len = band_size_short[s->sample_rate_index][i];
  1469. ptr1 = ptr;
  1470. dst = tmp;
  1471. for(j=len;j>0;j--) {
  1472. *dst++ = ptr[0*len];
  1473. *dst++ = ptr[1*len];
  1474. *dst++ = ptr[2*len];
  1475. ptr++;
  1476. }
  1477. ptr+=2*len;
  1478. memcpy(ptr1, tmp, len * 3 * sizeof(*ptr1));
  1479. }
  1480. }
  1481. #define ISQRT2 FIXR(0.70710678118654752440)
  1482. static void compute_stereo(MPADecodeContext *s,
  1483. GranuleDef *g0, GranuleDef *g1)
  1484. {
  1485. int i, j, k, l;
  1486. int32_t v1, v2;
  1487. int sf_max, tmp0, tmp1, sf, len, non_zero_found;
  1488. int32_t (*is_tab)[16];
  1489. int32_t *tab0, *tab1;
  1490. int non_zero_found_short[3];
  1491. /* intensity stereo */
  1492. if (s->mode_ext & MODE_EXT_I_STEREO) {
  1493. if (!s->lsf) {
  1494. is_tab = is_table;
  1495. sf_max = 7;
  1496. } else {
  1497. is_tab = is_table_lsf[g1->scalefac_compress & 1];
  1498. sf_max = 16;
  1499. }
  1500. tab0 = g0->sb_hybrid + 576;
  1501. tab1 = g1->sb_hybrid + 576;
  1502. non_zero_found_short[0] = 0;
  1503. non_zero_found_short[1] = 0;
  1504. non_zero_found_short[2] = 0;
  1505. k = (13 - g1->short_start) * 3 + g1->long_end - 3;
  1506. for(i = 12;i >= g1->short_start;i--) {
  1507. /* for last band, use previous scale factor */
  1508. if (i != 11)
  1509. k -= 3;
  1510. len = band_size_short[s->sample_rate_index][i];
  1511. for(l=2;l>=0;l--) {
  1512. tab0 -= len;
  1513. tab1 -= len;
  1514. if (!non_zero_found_short[l]) {
  1515. /* test if non zero band. if so, stop doing i-stereo */
  1516. for(j=0;j<len;j++) {
  1517. if (tab1[j] != 0) {
  1518. non_zero_found_short[l] = 1;
  1519. goto found1;
  1520. }
  1521. }
  1522. sf = g1->scale_factors[k + l];
  1523. if (sf >= sf_max)
  1524. goto found1;
  1525. v1 = is_tab[0][sf];
  1526. v2 = is_tab[1][sf];
  1527. for(j=0;j<len;j++) {
  1528. tmp0 = tab0[j];
  1529. tab0[j] = MULL(tmp0, v1);
  1530. tab1[j] = MULL(tmp0, v2);
  1531. }
  1532. } else {
  1533. found1:
  1534. if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1535. /* lower part of the spectrum : do ms stereo
  1536. if enabled */
  1537. for(j=0;j<len;j++) {
  1538. tmp0 = tab0[j];
  1539. tmp1 = tab1[j];
  1540. tab0[j] = MULL(tmp0 + tmp1, ISQRT2);
  1541. tab1[j] = MULL(tmp0 - tmp1, ISQRT2);
  1542. }
  1543. }
  1544. }
  1545. }
  1546. }
  1547. non_zero_found = non_zero_found_short[0] |
  1548. non_zero_found_short[1] |
  1549. non_zero_found_short[2];
  1550. for(i = g1->long_end - 1;i >= 0;i--) {
  1551. len = band_size_long[s->sample_rate_index][i];
  1552. tab0 -= len;
  1553. tab1 -= len;
  1554. /* test if non zero band. if so, stop doing i-stereo */
  1555. if (!non_zero_found) {
  1556. for(j=0;j<len;j++) {
  1557. if (tab1[j] != 0) {
  1558. non_zero_found = 1;
  1559. goto found2;
  1560. }
  1561. }
  1562. /* for last band, use previous scale factor */
  1563. k = (i == 21) ? 20 : i;
  1564. sf = g1->scale_factors[k];
  1565. if (sf >= sf_max)
  1566. goto found2;
  1567. v1 = is_tab[0][sf];
  1568. v2 = is_tab[1][sf];
  1569. for(j=0;j<len;j++) {
  1570. tmp0 = tab0[j];
  1571. tab0[j] = MULL(tmp0, v1);
  1572. tab1[j] = MULL(tmp0, v2);
  1573. }
  1574. } else {
  1575. found2:
  1576. if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1577. /* lower part of the spectrum : do ms stereo
  1578. if enabled */
  1579. for(j=0;j<len;j++) {
  1580. tmp0 = tab0[j];
  1581. tmp1 = tab1[j];
  1582. tab0[j] = MULL(tmp0 + tmp1, ISQRT2);
  1583. tab1[j] = MULL(tmp0 - tmp1, ISQRT2);
  1584. }
  1585. }
  1586. }
  1587. }
  1588. } else if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1589. /* ms stereo ONLY */
  1590. /* NOTE: the 1/sqrt(2) normalization factor is included in the
  1591. global gain */
  1592. tab0 = g0->sb_hybrid;
  1593. tab1 = g1->sb_hybrid;
  1594. for(i=0;i<576;i++) {
  1595. tmp0 = tab0[i];
  1596. tmp1 = tab1[i];
  1597. tab0[i] = tmp0 + tmp1;
  1598. tab1[i] = tmp0 - tmp1;
  1599. }
  1600. }
  1601. }
  1602. static void compute_antialias_integer(MPADecodeContext *s,
  1603. GranuleDef *g)
  1604. {
  1605. int32_t *ptr, *csa;
  1606. int n, i;
  1607. /* we antialias only "long" bands */
  1608. if (g->block_type == 2) {
  1609. if (!g->switch_point)
  1610. return;
  1611. /* XXX: check this for 8000Hz case */
  1612. n = 1;
  1613. } else {
  1614. n = SBLIMIT - 1;
  1615. }
  1616. ptr = g->sb_hybrid + 18;
  1617. for(i = n;i > 0;i--) {
  1618. int tmp0, tmp1, tmp2;
  1619. csa = &csa_table[0][0];
  1620. #define INT_AA(j) \
  1621. tmp0 = ptr[-1-j];\
  1622. tmp1 = ptr[ j];\
  1623. tmp2= MULH(tmp0 + tmp1, csa[0+4*j]);\
  1624. ptr[-1-j] = 4*(tmp2 - MULH(tmp1, csa[2+4*j]));\
  1625. ptr[ j] = 4*(tmp2 + MULH(tmp0, csa[3+4*j]));
  1626. INT_AA(0)
  1627. INT_AA(1)
  1628. INT_AA(2)
  1629. INT_AA(3)
  1630. INT_AA(4)
  1631. INT_AA(5)
  1632. INT_AA(6)
  1633. INT_AA(7)
  1634. ptr += 18;
  1635. }
  1636. }
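/* The integer butterflies above produce the usual antialias pair
   (cs*a - ca*b, ca*a + cs*b) with three MULH multiplies instead of four, by
   sharing MULH(a + b, cs) and using the precomputed ca+cs and ca-cs entries of
   csa_table; the final *4 undoes the /4 scaling of the table entries. */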
  1637. static void compute_antialias_float(MPADecodeContext *s,
  1638. GranuleDef *g)
  1639. {
  1640. int32_t *ptr;
  1641. int n, i;
  1642. /* we antialias only "long" bands */
  1643. if (g->block_type == 2) {
  1644. if (!g->switch_point)
  1645. return;
  1646. /* XXX: check this for 8000Hz case */
  1647. n = 1;
  1648. } else {
  1649. n = SBLIMIT - 1;
  1650. }
  1651. ptr = g->sb_hybrid + 18;
  1652. for(i = n;i > 0;i--) {
  1653. float tmp0, tmp1;
  1654. float *csa = &csa_table_float[0][0];
  1655. #define FLOAT_AA(j)\
  1656. tmp0= ptr[-1-j];\
  1657. tmp1= ptr[ j];\
  1658. ptr[-1-j] = lrintf(tmp0 * csa[0+4*j] - tmp1 * csa[1+4*j]);\
  1659. ptr[ j] = lrintf(tmp0 * csa[1+4*j] + tmp1 * csa[0+4*j]);
  1660. FLOAT_AA(0)
  1661. FLOAT_AA(1)
  1662. FLOAT_AA(2)
  1663. FLOAT_AA(3)
  1664. FLOAT_AA(4)
  1665. FLOAT_AA(5)
  1666. FLOAT_AA(6)
  1667. FLOAT_AA(7)
  1668. ptr += 18;
  1669. }
  1670. }
  1671. static void compute_imdct(MPADecodeContext *s,
  1672. GranuleDef *g,
  1673. int32_t *sb_samples,
  1674. int32_t *mdct_buf)
  1675. {
  1676. int32_t *ptr, *win, *win1, *buf, *out_ptr, *ptr1;
  1677. int32_t out2[12];
  1678. int i, j, mdct_long_end, v, sblimit;
  1679. /* find last non zero block */
  1680. ptr = g->sb_hybrid + 576;
  1681. ptr1 = g->sb_hybrid + 2 * 18;
  1682. while (ptr >= ptr1) {
  1683. ptr -= 6;
  1684. v = ptr[0] | ptr[1] | ptr[2] | ptr[3] | ptr[4] | ptr[5];
  1685. if (v != 0)
  1686. break;
  1687. }
  1688. sblimit = ((ptr - g->sb_hybrid) / 18) + 1;
  1689. if (g->block_type == 2) {
  1690. /* XXX: check for 8000 Hz */
  1691. if (g->switch_point)
  1692. mdct_long_end = 2;
  1693. else
  1694. mdct_long_end = 0;
  1695. } else {
  1696. mdct_long_end = sblimit;
  1697. }
  1698. buf = mdct_buf;
  1699. ptr = g->sb_hybrid;
  1700. for(j=0;j<mdct_long_end;j++) {
  1701. /* apply window & overlap with previous buffer */
  1702. out_ptr = sb_samples + j;
  1703. /* select window */
  1704. if (g->switch_point && j < 2)
  1705. win1 = mdct_win[0];
  1706. else
  1707. win1 = mdct_win[g->block_type];
  1708. /* select frequency inversion */
  1709. win = win1 + ((4 * 36) & -(j & 1));
  1710. imdct36(out_ptr, buf, ptr, win);
  1711. out_ptr += 18*SBLIMIT;
  1712. ptr += 18;
  1713. buf += 18;
  1714. }
  1715. for(j=mdct_long_end;j<sblimit;j++) {
  1716. /* select frequency inversion */
  1717. win = mdct_win[2] + ((4 * 36) & -(j & 1));
  1718. out_ptr = sb_samples + j;
  1719. for(i=0; i<6; i++){
  1720. *out_ptr = buf[i];
  1721. out_ptr += SBLIMIT;
  1722. }
  1723. imdct12(out2, ptr + 0);
  1724. for(i=0;i<6;i++) {
  1725. *out_ptr = MULH(out2[i], win[i]) + buf[i + 6*1];
  1726. buf[i + 6*2] = MULH(out2[i + 6], win[i + 6]);
  1727. out_ptr += SBLIMIT;
  1728. }
  1729. imdct12(out2, ptr + 1);
  1730. for(i=0;i<6;i++) {
  1731. *out_ptr = MULH(out2[i], win[i]) + buf[i + 6*2];
  1732. buf[i + 6*0] = MULH(out2[i + 6], win[i + 6]);
  1733. out_ptr += SBLIMIT;
  1734. }
  1735. imdct12(out2, ptr + 2);
  1736. for(i=0;i<6;i++) {
  1737. buf[i + 6*0] = MULH(out2[i], win[i]) + buf[i + 6*0];
  1738. buf[i + 6*1] = MULH(out2[i + 6], win[i + 6]);
  1739. buf[i + 6*2] = 0;
  1740. }
  1741. ptr += 18;
  1742. buf += 18;
  1743. }
  1744. /* zero bands */
  1745. for(j=sblimit;j<SBLIMIT;j++) {
  1746. /* overlap */
  1747. out_ptr = sb_samples + j;
  1748. for(i=0;i<18;i++) {
  1749. *out_ptr = buf[i];
  1750. buf[i] = 0;
  1751. out_ptr += SBLIMIT;
  1752. }
  1753. buf += 18;
  1754. }
  1755. }
  1756. #if defined(DEBUG)
  1757. void sample_dump(int fnum, int32_t *tab, int n)
  1758. {
  1759. static FILE *files[16], *f;
  1760. char buf[512];
  1761. int i;
  1762. int32_t v;
  1763. f = files[fnum];
  1764. if (!f) {
  1765. snprintf(buf, sizeof(buf), "/tmp/out%d.%s.pcm",
  1766. fnum,
  1767. #ifdef USE_HIGHPRECISION
  1768. "hp"
  1769. #else
  1770. "lp"
  1771. #endif
  1772. );
  1773. f = fopen(buf, "w");
  1774. if (!f)
  1775. return;
  1776. files[fnum] = f;
  1777. }
  1778. if (fnum == 0) {
  1779. static int pos = 0;
  1780. av_log(NULL, AV_LOG_DEBUG, "pos=%d\n", pos);
  1781. for(i=0;i<n;i++) {
  1782. av_log(NULL, AV_LOG_DEBUG, " %0.4f", (double)tab[i] / FRAC_ONE);
  1783. if ((i % 18) == 17)
  1784. av_log(NULL, AV_LOG_DEBUG, "\n");
  1785. }
  1786. pos += n;
  1787. }
  1788. for(i=0;i<n;i++) {
  1789. /* normalize to 23 frac bits */
  1790. v = tab[i] << (23 - FRAC_BITS);
  1791. fwrite(&v, 1, sizeof(int32_t), f);
  1792. }
  1793. }
  1794. #endif
  1795. /* main layer3 decoding function */
  1796. static int mp_decode_layer3(MPADecodeContext *s)
  1797. {
  1798. int nb_granules, main_data_begin, private_bits;
  1799. int gr, ch, blocksplit_flag, i, j, k, n, bits_pos;
  1800. GranuleDef granules[2][2], *g;
  1801. int16_t exponents[576];
  1802. /* read side info */
  1803. if (s->lsf) {
  1804. main_data_begin = get_bits(&s->gb, 8);
  1805. private_bits = get_bits(&s->gb, s->nb_channels);
  1806. nb_granules = 1;
  1807. } else {
  1808. main_data_begin = get_bits(&s->gb, 9);
  1809. if (s->nb_channels == 2)
  1810. private_bits = get_bits(&s->gb, 3);
  1811. else
  1812. private_bits = get_bits(&s->gb, 5);
  1813. nb_granules = 2;
  1814. for(ch=0;ch<s->nb_channels;ch++) {
  1815. granules[ch][0].scfsi = 0; /* all scale factors are transmitted */
  1816. granules[ch][1].scfsi = get_bits(&s->gb, 4);
  1817. }
  1818. }
  1819. for(gr=0;gr<nb_granules;gr++) {
  1820. for(ch=0;ch<s->nb_channels;ch++) {
  1821. dprintf(s->avctx, "gr=%d ch=%d: side_info\n", gr, ch);
  1822. g = &granules[ch][gr];
  1823. g->part2_3_length = get_bits(&s->gb, 12);
  1824. g->big_values = get_bits(&s->gb, 9);
  1825. if(g->big_values > 288){
  1826. av_log(s->avctx, AV_LOG_ERROR, "big_values too big\n");
  1827. return -1;
  1828. }
  1829. g->global_gain = get_bits(&s->gb, 8);
1830. /* if only MS stereo (and not intensity stereo) is selected, we
1831. precompute the 1/sqrt(2) renormalization factor */
  1832. if ((s->mode_ext & (MODE_EXT_MS_STEREO | MODE_EXT_I_STEREO)) ==
  1833. MODE_EXT_MS_STEREO)
  1834. g->global_gain -= 2;
  1835. if (s->lsf)
  1836. g->scalefac_compress = get_bits(&s->gb, 9);
  1837. else
  1838. g->scalefac_compress = get_bits(&s->gb, 4);
  1839. blocksplit_flag = get_bits1(&s->gb);
  1840. if (blocksplit_flag) {
  1841. g->block_type = get_bits(&s->gb, 2);
  1842. if (g->block_type == 0){
  1843. av_log(NULL, AV_LOG_ERROR, "invalid block type\n");
  1844. return -1;
  1845. }
  1846. g->switch_point = get_bits1(&s->gb);
  1847. for(i=0;i<2;i++)
  1848. g->table_select[i] = get_bits(&s->gb, 5);
  1849. for(i=0;i<3;i++)
  1850. g->subblock_gain[i] = get_bits(&s->gb, 3);
  1851. ff_init_short_region(s, g);
  1852. } else {
  1853. int region_address1, region_address2;
  1854. g->block_type = 0;
  1855. g->switch_point = 0;
  1856. for(i=0;i<3;i++)
  1857. g->table_select[i] = get_bits(&s->gb, 5);
  1858. /* compute huffman coded region sizes */
  1859. region_address1 = get_bits(&s->gb, 4);
  1860. region_address2 = get_bits(&s->gb, 3);
  1861. dprintf(s->avctx, "region1=%d region2=%d\n",
  1862. region_address1, region_address2);
  1863. ff_init_long_region(s, g, region_address1, region_address2);
  1864. }
  1865. ff_region_offset2size(g);
  1866. ff_compute_band_indexes(s, g);
  1867. g->preflag = 0;
  1868. if (!s->lsf)
  1869. g->preflag = get_bits1(&s->gb);
  1870. g->scalefac_scale = get_bits1(&s->gb);
  1871. g->count1table_select = get_bits1(&s->gb);
  1872. dprintf(s->avctx, "block_type=%d switch_point=%d\n",
  1873. g->block_type, g->switch_point);
  1874. }
  1875. }
  1876. if (!s->adu_mode) {
  1877. const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3);
  1878. assert((get_bits_count(&s->gb) & 7) == 0);
  1879. /* now we get bits from the main_data_begin offset */
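/* bit reservoir: main_data_begin is a byte offset counting backwards from
   this frame's side info into data buffered from previous frames (last_buf).
   The current bitstream is parked in in_gb and decoding resumes inside
   last_buf, main_data_begin bytes before its end. */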
  1880. dprintf(s->avctx, "seekback: %d\n", main_data_begin);
  1881. //av_log(NULL, AV_LOG_ERROR, "backstep:%d, lastbuf:%d\n", main_data_begin, s->last_buf_size);
  1882. memcpy(s->last_buf + s->last_buf_size, ptr, EXTRABYTES);
  1883. s->in_gb= s->gb;
  1884. init_get_bits(&s->gb, s->last_buf, s->last_buf_size*8);
  1885. skip_bits_long(&s->gb, 8*(s->last_buf_size - main_data_begin));
  1886. }
  1887. for(gr=0;gr<nb_granules;gr++) {
  1888. for(ch=0;ch<s->nb_channels;ch++) {
  1889. g = &granules[ch][gr];
  1890. if(get_bits_count(&s->gb)<0){
  1891. av_log(NULL, AV_LOG_ERROR, "mdb:%d, lastbuf:%d skipping granule %d\n",
  1892. main_data_begin, s->last_buf_size, gr);
  1893. skip_bits_long(&s->gb, g->part2_3_length);
  1894. memset(g->sb_hybrid, 0, sizeof(g->sb_hybrid));
  1895. if(get_bits_count(&s->gb) >= s->gb.size_in_bits && s->in_gb.buffer){
  1896. skip_bits_long(&s->in_gb, get_bits_count(&s->gb) - s->gb.size_in_bits);
  1897. s->gb= s->in_gb;
  1898. s->in_gb.buffer=NULL;
  1899. }
  1900. continue;
  1901. }
  1902. bits_pos = get_bits_count(&s->gb);
  1903. if (!s->lsf) {
  1904. uint8_t *sc;
  1905. int slen, slen1, slen2;
  1906. /* MPEG1 scale factors */
  1907. slen1 = slen_table[0][g->scalefac_compress];
  1908. slen2 = slen_table[1][g->scalefac_compress];
  1909. dprintf(s->avctx, "slen1=%d slen2=%d\n", slen1, slen2);
  1910. if (g->block_type == 2) {
  1911. n = g->switch_point ? 17 : 18;
  1912. j = 0;
  1913. if(slen1){
  1914. for(i=0;i<n;i++)
  1915. g->scale_factors[j++] = get_bits(&s->gb, slen1);
  1916. }else{
  1917. for(i=0;i<n;i++)
  1918. g->scale_factors[j++] = 0;
  1919. }
  1920. if(slen2){
  1921. for(i=0;i<18;i++)
  1922. g->scale_factors[j++] = get_bits(&s->gb, slen2);
  1923. for(i=0;i<3;i++)
  1924. g->scale_factors[j++] = 0;
  1925. }else{
  1926. for(i=0;i<21;i++)
  1927. g->scale_factors[j++] = 0;
  1928. }
  1929. } else {
  1930. sc = granules[ch][0].scale_factors;
  1931. j = 0;
  1932. for(k=0;k<4;k++) {
  1933. n = (k == 0 ? 6 : 5);
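/* the four scfsi bits cover scale-factor groups of 6, 5, 5 and 5 bands;
   a set bit means granule 1 reuses the values read for granule 0 */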
  1934. if ((g->scfsi & (0x8 >> k)) == 0) {
  1935. slen = (k < 2) ? slen1 : slen2;
  1936. if(slen){
  1937. for(i=0;i<n;i++)
  1938. g->scale_factors[j++] = get_bits(&s->gb, slen);
  1939. }else{
  1940. for(i=0;i<n;i++)
  1941. g->scale_factors[j++] = 0;
  1942. }
  1943. } else {
  1944. /* simply copy from last granule */
  1945. for(i=0;i<n;i++) {
  1946. g->scale_factors[j] = sc[j];
  1947. j++;
  1948. }
  1949. }
  1950. }
  1951. g->scale_factors[j++] = 0;
  1952. }
  1953. #if defined(DEBUG)
  1954. {
  1955. dprintf(s->avctx, "scfsi=%x gr=%d ch=%d scale_factors:\n",
  1956. g->scfsi, gr, ch);
  1957. for(i=0;i<j;i++)
  1958. dprintf(s->avctx, " %d", g->scale_factors[i]);
  1959. dprintf(s->avctx, "\n");
  1960. }
  1961. #endif
  1962. } else {
  1963. int tindex, tindex2, slen[4], sl, sf;
  1964. /* LSF scale factors */
  1965. if (g->block_type == 2) {
  1966. tindex = g->switch_point ? 2 : 1;
  1967. } else {
  1968. tindex = 0;
  1969. }
  1970. sf = g->scalefac_compress;
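/* LSF scale factors: lsf_sf_expand() splits scalefac_compress into up to four
   slen bit widths; the thresholds below pick the partition (row tindex2 of
   lsf_nsf_table) and, for the last "normal" range, also set preflag */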
  1971. if ((s->mode_ext & MODE_EXT_I_STEREO) && ch == 1) {
  1972. /* intensity stereo case */
  1973. sf >>= 1;
  1974. if (sf < 180) {
  1975. lsf_sf_expand(slen, sf, 6, 6, 0);
  1976. tindex2 = 3;
  1977. } else if (sf < 244) {
  1978. lsf_sf_expand(slen, sf - 180, 4, 4, 0);
  1979. tindex2 = 4;
  1980. } else {
  1981. lsf_sf_expand(slen, sf - 244, 3, 0, 0);
  1982. tindex2 = 5;
  1983. }
  1984. } else {
  1985. /* normal case */
  1986. if (sf < 400) {
  1987. lsf_sf_expand(slen, sf, 5, 4, 4);
  1988. tindex2 = 0;
  1989. } else if (sf < 500) {
  1990. lsf_sf_expand(slen, sf - 400, 5, 4, 0);
  1991. tindex2 = 1;
  1992. } else {
  1993. lsf_sf_expand(slen, sf - 500, 3, 0, 0);
  1994. tindex2 = 2;
  1995. g->preflag = 1;
  1996. }
  1997. }
  1998. j = 0;
  1999. for(k=0;k<4;k++) {
  2000. n = lsf_nsf_table[tindex2][tindex][k];
  2001. sl = slen[k];
  2002. if(sl){
  2003. for(i=0;i<n;i++)
  2004. g->scale_factors[j++] = get_bits(&s->gb, sl);
  2005. }else{
  2006. for(i=0;i<n;i++)
  2007. g->scale_factors[j++] = 0;
  2008. }
  2009. }
  2010. /* XXX: should compute exact size */
  2011. for(;j<40;j++)
  2012. g->scale_factors[j] = 0;
  2013. #if defined(DEBUG)
  2014. {
  2015. dprintf(s->avctx, "gr=%d ch=%d scale_factors:\n",
  2016. gr, ch);
  2017. for(i=0;i<40;i++)
  2018. dprintf(s->avctx, " %d", g->scale_factors[i]);
  2019. dprintf(s->avctx, "\n");
  2020. }
  2021. #endif
  2022. }
  2023. exponents_from_scale_factors(s, g, exponents);
  2024. /* read Huffman coded residue */
  2025. huffman_decode(s, g, exponents, bits_pos + g->part2_3_length);
  2026. #if defined(DEBUG)
  2027. sample_dump(0, g->sb_hybrid, 576);
  2028. #endif
  2029. } /* ch */
  2030. if (s->nb_channels == 2)
  2031. compute_stereo(s, &granules[0][gr], &granules[1][gr]);
  2032. for(ch=0;ch<s->nb_channels;ch++) {
  2033. g = &granules[ch][gr];
  2034. reorder_block(s, g);
  2035. #if defined(DEBUG)
  2036. sample_dump(0, g->sb_hybrid, 576);
  2037. #endif
  2038. s->compute_antialias(s, g);
  2039. #if defined(DEBUG)
  2040. sample_dump(1, g->sb_hybrid, 576);
  2041. #endif
  2042. compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
  2043. #if defined(DEBUG)
  2044. sample_dump(2, &s->sb_samples[ch][18 * gr][0], 576);
  2045. #endif
  2046. }
  2047. } /* gr */
  2048. if(get_bits_count(&s->gb)<0)
  2049. skip_bits_long(&s->gb, -get_bits_count(&s->gb));
  2050. return nb_granules * 18;
  2051. }
  2052. static int mp_decode_frame(MPADecodeContext *s,
  2053. OUT_INT *samples, const uint8_t *buf, int buf_size)
  2054. {
  2055. int i, nb_frames, ch;
  2056. OUT_INT *samples_ptr;
  2057. init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE)*8);
  2058. /* skip error protection field */
  2059. if (s->error_protection)
  2060. skip_bits(&s->gb, 16);
  2061. dprintf(s->avctx, "frame %d:\n", s->frame_count);
  2062. switch(s->layer) {
  2063. case 1:
  2064. s->avctx->frame_size = 384;
  2065. nb_frames = mp_decode_layer1(s);
  2066. break;
  2067. case 2:
  2068. s->avctx->frame_size = 1152;
  2069. nb_frames = mp_decode_layer2(s);
  2070. break;
  2071. case 3:
  2072. s->avctx->frame_size = s->lsf ? 576 : 1152;
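/* fall through: layer 3 continues into the default branch below */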
  2073. default:
  2074. nb_frames = mp_decode_layer3(s);
  2075. s->last_buf_size=0;
  2076. if(s->in_gb.buffer){
  2077. align_get_bits(&s->gb);
  2078. i= (s->gb.size_in_bits - get_bits_count(&s->gb))>>3;
  2079. if(i >= 0 && i <= BACKSTEP_SIZE){
  2080. memmove(s->last_buf, s->gb.buffer + (get_bits_count(&s->gb)>>3), i);
  2081. s->last_buf_size=i;
  2082. }else
  2083. av_log(NULL, AV_LOG_ERROR, "invalid old backstep %d\n", i);
  2084. s->gb= s->in_gb;
  2085. s->in_gb.buffer= NULL;
  2086. }
  2087. align_get_bits(&s->gb);
  2088. assert((get_bits_count(&s->gb) & 7) == 0);
  2089. i= (s->gb.size_in_bits - get_bits_count(&s->gb))>>3;
  2090. if(i<0 || i > BACKSTEP_SIZE || nb_frames<0){
  2091. av_log(NULL, AV_LOG_ERROR, "invalid new backstep %d\n", i);
  2092. i= FFMIN(BACKSTEP_SIZE, buf_size - HEADER_SIZE);
  2093. }
  2094. assert(i <= buf_size - HEADER_SIZE && i>= 0);
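/* save the tail of this frame so that the next frame's main_data_begin can
   reach back into it (bit reservoir); i was clamped to BACKSTEP_SIZE above */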
  2095. memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i);
  2096. s->last_buf_size += i;
  2097. break;
  2098. }
  2099. #if defined(DEBUG)
  2100. for(i=0;i<nb_frames;i++) {
  2101. for(ch=0;ch<s->nb_channels;ch++) {
  2102. int j;
  2103. dprintf(s->avctx, "%d-%d:", i, ch);
  2104. for(j=0;j<SBLIMIT;j++)
  2105. dprintf(s->avctx, " %0.6f", (double)s->sb_samples[ch][i][j] / FRAC_ONE);
  2106. dprintf(s->avctx, "\n");
  2107. }
  2108. }
  2109. #endif
  2110. /* apply the synthesis filter */
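/* each ff_mpa_synth_filter() call turns one 32-sample subband vector into 32
   output samples per channel; writing through samples_ptr with a stride of
   nb_channels interleaves the channels in place */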
  2111. for(ch=0;ch<s->nb_channels;ch++) {
  2112. samples_ptr = samples + ch;
  2113. for(i=0;i<nb_frames;i++) {
  2114. ff_mpa_synth_filter(s->synth_buf[ch], &(s->synth_buf_offset[ch]),
  2115. window, &s->dither_state,
  2116. samples_ptr, s->nb_channels,
  2117. s->sb_samples[ch][i]);
  2118. samples_ptr += 32 * s->nb_channels;
  2119. }
  2120. }
  2121. #ifdef DEBUG
  2122. s->frame_count++;
  2123. #endif
  2124. return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels;
  2125. }
  2126. static int decode_frame(AVCodecContext * avctx,
  2127. void *data, int *data_size,
  2128. const uint8_t * buf, int buf_size)
  2129. {
  2130. MPADecodeContext *s = avctx->priv_data;
  2131. uint32_t header;
  2132. int out_size;
  2133. OUT_INT *out_samples = data;
  2134. retry:
  2135. if(buf_size < HEADER_SIZE)
  2136. return -1;
  2137. header = AV_RB32(buf);
  2138. if(ff_mpa_check_header(header) < 0){
  2139. buf++;
  2140. // buf_size--;
2141. av_log(avctx, AV_LOG_ERROR, "Header missing, skipping one byte.\n");
  2142. goto retry;
  2143. }
  2144. if (ff_mpegaudio_decode_header(s, header) == 1) {
  2145. /* free format: prepare to compute frame size */
  2146. s->frame_size = -1;
  2147. return -1;
  2148. }
  2149. /* update codec info */
  2150. avctx->channels = s->nb_channels;
  2151. avctx->bit_rate = s->bit_rate;
  2152. avctx->sub_id = s->layer;
  2153. if(s->frame_size<=0 || s->frame_size > buf_size){
  2154. av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
  2155. return -1;
  2156. }else if(s->frame_size < buf_size){
  2157. av_log(avctx, AV_LOG_ERROR, "incorrect frame size\n");
  2158. buf_size= s->frame_size;
  2159. }
  2160. out_size = mp_decode_frame(s, out_samples, buf, buf_size);
  2161. if(out_size>=0){
  2162. *data_size = out_size;
  2163. avctx->sample_rate = s->sample_rate;
  2164. //FIXME maybe move the other codec info stuff from above here too
  2165. }else
  2166. av_log(avctx, AV_LOG_DEBUG, "Error while decoding MPEG audio frame.\n"); //FIXME return -1 / but also return the number of bytes consumed
  2167. s->frame_size = 0;
  2168. return buf_size;
  2169. }
  2170. static void flush(AVCodecContext *avctx){
  2171. MPADecodeContext *s = avctx->priv_data;
  2172. memset(s->synth_buf, 0, sizeof(s->synth_buf));
  2173. s->last_buf_size= 0;
  2174. }
  2175. #ifdef CONFIG_MP3ADU_DECODER
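/* ADU (Application Data Unit) frames carry their main data immediately after
   the side info, so adu_mode skips the bit-reservoir seekback in
   mp_decode_layer3() and each packet can be decoded independently */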
  2176. static int decode_frame_adu(AVCodecContext * avctx,
  2177. void *data, int *data_size,
  2178. const uint8_t * buf, int buf_size)
  2179. {
  2180. MPADecodeContext *s = avctx->priv_data;
  2181. uint32_t header;
  2182. int len, out_size;
  2183. OUT_INT *out_samples = data;
  2184. len = buf_size;
  2185. // Discard too short frames
  2186. if (buf_size < HEADER_SIZE) {
  2187. *data_size = 0;
  2188. return buf_size;
  2189. }
  2190. if (len > MPA_MAX_CODED_FRAME_SIZE)
  2191. len = MPA_MAX_CODED_FRAME_SIZE;
  2192. // Get header and restore sync word
  2193. header = AV_RB32(buf) | 0xffe00000;
  2194. if (ff_mpa_check_header(header) < 0) { // Bad header, discard frame
  2195. *data_size = 0;
  2196. return buf_size;
  2197. }
  2198. ff_mpegaudio_decode_header(s, header);
  2199. /* update codec info */
  2200. avctx->sample_rate = s->sample_rate;
  2201. avctx->channels = s->nb_channels;
  2202. avctx->bit_rate = s->bit_rate;
  2203. avctx->sub_id = s->layer;
  2204. s->frame_size = len;
  2205. if (avctx->parse_only) {
  2206. out_size = buf_size;
  2207. } else {
  2208. out_size = mp_decode_frame(s, out_samples, buf, buf_size);
  2209. }
  2210. *data_size = out_size;
  2211. return buf_size;
  2212. }
  2213. #endif /* CONFIG_MP3ADU_DECODER */
  2214. #ifdef CONFIG_MP3ON4_DECODER
  2215. /**
  2216. * Context for MP3On4 decoder
  2217. */
  2218. typedef struct MP3On4DecodeContext {
  2219. int frames; ///< number of mp3 frames per block (number of mp3 decoder instances)
  2220. int syncword; ///< syncword patch
  2221. const uint8_t *coff; ///< channels offsets in output buffer
  2222. MPADecodeContext *mp3decctx[5]; ///< MPADecodeContext for every decoder instance
  2223. } MP3On4DecodeContext;
  2224. #include "mpeg4audio.h"
2225. /* The following arrays are indexed by channel config number (passed via codecdata) */
  2226. static const uint8_t mp3Frames[8] = {0,1,1,2,3,3,4,5}; /* number of mp3 decoder instances */
  2227. /* offsets into output buffer, assume output order is FL FR BL BR C LFE */
  2228. static const uint8_t chan_offset[8][5] = {
  2229. {0},
  2230. {0}, // C
  2231. {0}, // FLR
  2232. {2,0}, // C FLR
  2233. {2,0,3}, // C FLR BS
  2234. {4,0,2}, // C FLR BLRS
  2235. {4,0,2,5}, // C FLR BLRS LFE
  2236. {4,0,2,6,5}, // C FLR BLRS BLR LFE
  2237. };
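/* e.g. chan_config 3 ("C FLR") uses 2 decoder instances: the mono C sub-frame
   is interleaved starting at output channel 2 and the stereo FL/FR sub-frame
   at channels 0-1, so each output sample group is FL FR C */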
  2238. static int decode_init_mp3on4(AVCodecContext * avctx)
  2239. {
  2240. MP3On4DecodeContext *s = avctx->priv_data;
  2241. MPEG4AudioConfig cfg;
  2242. int i;
  2243. if ((avctx->extradata_size < 2) || (avctx->extradata == NULL)) {
  2244. av_log(avctx, AV_LOG_ERROR, "Codec extradata missing or too short.\n");
  2245. return -1;
  2246. }
  2247. ff_mpeg4audio_get_config(&cfg, avctx->extradata, avctx->extradata_size);
  2248. if (!cfg.chan_config || cfg.chan_config > 7) {
  2249. av_log(avctx, AV_LOG_ERROR, "Invalid channel config number.\n");
  2250. return -1;
  2251. }
  2252. s->frames = mp3Frames[cfg.chan_config];
  2253. s->coff = chan_offset[cfg.chan_config];
  2254. avctx->channels = ff_mpeg4audio_channels[cfg.chan_config];
  2255. if (cfg.sample_rate < 16000)
  2256. s->syncword = 0xffe00000;
  2257. else
  2258. s->syncword = 0xfff00000;
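/* mp3on4 sub-frames only keep the low 20 bits of the MPEG audio header; the
   sync pattern chosen here (11 sync bits for sub-16 kHz rates, presumably
   because those are MPEG-2.5, 12 bits otherwise) is OR-ed back onto each
   sub-frame header in decode_frame_mp3on4() */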
2259. /* Init the first mp3 decoder in the standard way, so that all tables get built.
2260. * We replace avctx->priv_data with the context of the first decoder so that
2261. * decode_init() does not have to be changed.
2262. * The other decoders are initialized below by copying data from the first context.
2263. */
  2264. // Allocate zeroed memory for the first decoder context
  2265. s->mp3decctx[0] = av_mallocz(sizeof(MPADecodeContext));
2266. // Put the decoder context in place to make decode_init() happy
  2267. avctx->priv_data = s->mp3decctx[0];
  2268. decode_init(avctx);
  2269. // Restore mp3on4 context pointer
  2270. avctx->priv_data = s;
  2271. s->mp3decctx[0]->adu_mode = 1; // Set adu mode
2272. /* Create a separate context for each sub-frame (the first one is already set up).
2273. * Each sub-frame carries 1 or 2 channels - up to 5 sub-frames are allowed
  2274. */
  2275. for (i = 1; i < s->frames; i++) {
  2276. s->mp3decctx[i] = av_mallocz(sizeof(MPADecodeContext));
  2277. s->mp3decctx[i]->compute_antialias = s->mp3decctx[0]->compute_antialias;
  2278. s->mp3decctx[i]->adu_mode = 1;
  2279. s->mp3decctx[i]->avctx = avctx;
  2280. }
  2281. return 0;
  2282. }
  2283. static int decode_close_mp3on4(AVCodecContext * avctx)
  2284. {
  2285. MP3On4DecodeContext *s = avctx->priv_data;
  2286. int i;
  2287. for (i = 0; i < s->frames; i++)
  2288. if (s->mp3decctx[i])
  2289. av_free(s->mp3decctx[i]);
  2290. return 0;
  2291. }
  2292. static int decode_frame_mp3on4(AVCodecContext * avctx,
  2293. void *data, int *data_size,
  2294. const uint8_t * buf, int buf_size)
  2295. {
  2296. MP3On4DecodeContext *s = avctx->priv_data;
  2297. MPADecodeContext *m;
  2298. int fsize, len = buf_size, out_size = 0;
  2299. uint32_t header;
  2300. OUT_INT *out_samples = data;
  2301. OUT_INT decoded_buf[MPA_FRAME_SIZE * MPA_MAX_CHANNELS];
  2302. OUT_INT *outptr, *bp;
  2303. int fr, j, n;
  2304. *data_size = 0;
  2305. // Discard too short frames
  2306. if (buf_size < HEADER_SIZE)
  2307. return -1;
2308. // If there is only one decoder, interleaving is not needed
  2309. outptr = s->frames == 1 ? out_samples : decoded_buf;
  2310. avctx->bit_rate = 0;
  2311. for (fr = 0; fr < s->frames; fr++) {
  2312. fsize = AV_RB16(buf) >> 4;
  2313. fsize = FFMIN3(fsize, len, MPA_MAX_CODED_FRAME_SIZE);
  2314. m = s->mp3decctx[fr];
  2315. assert (m != NULL);
  2316. header = (AV_RB32(buf) & 0x000fffff) | s->syncword; // patch header
  2317. if (ff_mpa_check_header(header) < 0) // Bad header, discard block
  2318. break;
  2319. ff_mpegaudio_decode_header(m, header);
  2320. out_size += mp_decode_frame(m, outptr, buf, fsize);
  2321. buf += fsize;
  2322. len -= fsize;
  2323. if(s->frames > 1) {
  2324. n = m->avctx->frame_size*m->nb_channels;
  2325. /* interleave output data */
  2326. bp = out_samples + s->coff[fr];
  2327. if(m->nb_channels == 1) {
  2328. for(j = 0; j < n; j++) {
  2329. *bp = decoded_buf[j];
  2330. bp += avctx->channels;
  2331. }
  2332. } else {
  2333. for(j = 0; j < n; j++) {
  2334. bp[0] = decoded_buf[j++];
  2335. bp[1] = decoded_buf[j];
  2336. bp += avctx->channels;
  2337. }
  2338. }
  2339. }
  2340. avctx->bit_rate += m->bit_rate;
  2341. }
  2342. /* update codec info */
  2343. avctx->sample_rate = s->mp3decctx[0]->sample_rate;
  2344. *data_size = out_size;
  2345. return buf_size;
  2346. }
  2347. #endif /* CONFIG_MP3ON4_DECODER */
  2348. #ifdef CONFIG_MP2_DECODER
  2349. AVCodec mp2_decoder =
  2350. {
  2351. "mp2",
  2352. CODEC_TYPE_AUDIO,
  2353. CODEC_ID_MP2,
  2354. sizeof(MPADecodeContext),
  2355. decode_init,
  2356. NULL,
  2357. NULL,
  2358. decode_frame,
  2359. CODEC_CAP_PARSE_ONLY,
  2360. .flush= flush,
  2361. .long_name= NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
  2362. };
  2363. #endif
  2364. #ifdef CONFIG_MP3_DECODER
  2365. AVCodec mp3_decoder =
  2366. {
  2367. "mp3",
  2368. CODEC_TYPE_AUDIO,
  2369. CODEC_ID_MP3,
  2370. sizeof(MPADecodeContext),
  2371. decode_init,
  2372. NULL,
  2373. NULL,
  2374. decode_frame,
  2375. CODEC_CAP_PARSE_ONLY,
  2376. .flush= flush,
  2377. .long_name= NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
  2378. };
  2379. #endif
  2380. #ifdef CONFIG_MP3ADU_DECODER
  2381. AVCodec mp3adu_decoder =
  2382. {
  2383. "mp3adu",
  2384. CODEC_TYPE_AUDIO,
  2385. CODEC_ID_MP3ADU,
  2386. sizeof(MPADecodeContext),
  2387. decode_init,
  2388. NULL,
  2389. NULL,
  2390. decode_frame_adu,
  2391. CODEC_CAP_PARSE_ONLY,
  2392. .flush= flush,
  2393. .long_name= NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
  2394. };
  2395. #endif
  2396. #ifdef CONFIG_MP3ON4_DECODER
  2397. AVCodec mp3on4_decoder =
  2398. {
  2399. "mp3on4",
  2400. CODEC_TYPE_AUDIO,
  2401. CODEC_ID_MP3ON4,
  2402. sizeof(MP3On4DecodeContext),
  2403. decode_init_mp3on4,
  2404. NULL,
  2405. decode_close_mp3on4,
  2406. decode_frame_mp3on4,
  2407. .flush= flush,
  2408. .long_name= NULL_IF_CONFIG_SMALL("MP3onMP4"),
  2409. };
  2410. #endif