  1. /*
  2. * MPEG Audio decoder
  3. * Copyright (c) 2001, 2002 Fabrice Bellard
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * MPEG Audio decoder
  24. */
  25. #include "libavutil/attributes.h"
  26. #include "libavutil/avassert.h"
  27. #include "libavutil/channel_layout.h"
  28. #include "libavutil/float_dsp.h"
  29. #include "libavutil/libm.h"
  30. #include "avcodec.h"
  31. #include "get_bits.h"
  32. #include "internal.h"
  33. #include "mathops.h"
  34. #include "mpegaudiodsp.h"
  35. /*
  36. * TODO:
  37. * - test lsf / mpeg25 extensively.
  38. */
  39. #include "mpegaudio.h"
  40. #include "mpegaudiodecheader.h"
  41. #define BACKSTEP_SIZE 512
  42. #define EXTRABYTES 24
  43. #define LAST_BUF_SIZE 2 * BACKSTEP_SIZE + EXTRABYTES
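/* Note: BACKSTEP_SIZE covers the Layer III bit reservoir. main_data_begin is a
 * 9-bit field (8 bits in LSF streams), so a frame can point back at most
 * 511 bytes into previously buffered data. */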
  44. /* layer 3 "granule" */
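/* A Layer III frame carries two such granules per channel (only one in MPEG-2
 * LSF streams); each granule holds 576 frequency-domain samples (sb_hybrid). */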
  45. typedef struct GranuleDef {
  46. uint8_t scfsi;
  47. int part2_3_length;
  48. int big_values;
  49. int global_gain;
  50. int scalefac_compress;
  51. uint8_t block_type;
  52. uint8_t switch_point;
  53. int table_select[3];
  54. int subblock_gain[3];
  55. uint8_t scalefac_scale;
  56. uint8_t count1table_select;
  57. int region_size[3]; /* number of huffman codes in each region */
  58. int preflag;
  59. int short_start, long_end; /* long/short band indexes */
  60. uint8_t scale_factors[40];
  61. DECLARE_ALIGNED(16, INTFLOAT, sb_hybrid)[SBLIMIT * 18]; /* 576 samples */
  62. } GranuleDef;
  63. typedef struct MPADecodeContext {
  64. MPA_DECODE_HEADER
  65. uint8_t last_buf[LAST_BUF_SIZE];
  66. int last_buf_size;
  67. int extrasize;
  68. /* next header (used in free format parsing) */
  69. uint32_t free_format_next_header;
  70. GetBitContext gb;
  71. GetBitContext in_gb;
  72. DECLARE_ALIGNED(32, MPA_INT, synth_buf)[MPA_MAX_CHANNELS][512 * 2];
  73. int synth_buf_offset[MPA_MAX_CHANNELS];
  74. DECLARE_ALIGNED(32, INTFLOAT, sb_samples)[MPA_MAX_CHANNELS][36][SBLIMIT];
  75. INTFLOAT mdct_buf[MPA_MAX_CHANNELS][SBLIMIT * 18]; /* previous samples, for layer 3 MDCT */
  76. GranuleDef granules[2][2]; /* Used in Layer 3 */
  77. int adu_mode; ///< 0 for standard mp3, 1 for adu formatted mp3
  78. int dither_state;
  79. int err_recognition;
  80. AVCodecContext* avctx;
  81. MPADSPContext mpadsp;
  82. AVFloatDSPContext *fdsp;
  83. AVFrame *frame;
  84. } MPADecodeContext;
  85. #define HEADER_SIZE 4
  86. #include "mpegaudiodata.h"
  87. #include "mpegaudiodectab.h"
  88. /* vlc structure for decoding layer 3 huffman tables */
  89. static VLC huff_vlc[16];
  90. static VLC_TYPE huff_vlc_tables[
  91. 0 + 128 + 128 + 128 + 130 + 128 + 154 + 166 +
  92. 142 + 204 + 190 + 170 + 542 + 460 + 662 + 414
  93. ][2];
  94. static const int huff_vlc_tables_sizes[16] = {
  95. 0, 128, 128, 128, 130, 128, 154, 166,
  96. 142, 204, 190, 170, 542, 460, 662, 414
  97. };
  98. static VLC huff_quad_vlc[2];
  99. static VLC_TYPE huff_quad_vlc_tables[128+16][2];
  100. static const int huff_quad_vlc_tables_sizes[2] = { 128, 16 };
  101. /* computed from band_size_long */
  102. static uint16_t band_index_long[9][23];
  103. #include "mpegaudio_tablegen.h"
  104. /* intensity stereo coef table */
  105. static INTFLOAT is_table[2][16];
  106. static INTFLOAT is_table_lsf[2][2][16];
  107. static INTFLOAT csa_table[8][4];
  108. static int16_t division_tab3[1<<6 ];
  109. static int16_t division_tab5[1<<8 ];
  110. static int16_t division_tab9[1<<11];
  111. static int16_t * const division_tabs[4] = {
  112. division_tab3, division_tab5, NULL, division_tab9
  113. };
  114. /* lower 2 bits: modulo 3, higher bits: shift */
  115. static uint16_t scale_factor_modshift[64];
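/* Layer 1/2 scale factors are spaced by a factor of 2^(1/3); splitting the
 * 6-bit index into (index / 3, index % 3) turns the 2^(-index/3) scaling into
 * a plain shift plus one of three fractional multipliers. */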
  116. /* [i][j]: 2^(-j/3) * FRAC_ONE * 2^(i+2) / (2^(i+2) - 1) */
  117. static int32_t scale_factor_mult[15][3];
  118. /* mult table for layer 2 group quantization */
  119. #define SCALE_GEN(v) \
  120. { FIXR_OLD(1.0 * (v)), FIXR_OLD(0.7937005259 * (v)), FIXR_OLD(0.6299605249 * (v)) }
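/* 0.7937005259 = 2^(-1/3) and 0.6299605249 = 2^(-2/3), i.e. the three
 * fractional steps of the scale factor table. */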
  121. static const int32_t scale_factor_mult2[3][3] = {
  122. SCALE_GEN(4.0 / 3.0), /* 3 steps */
  123. SCALE_GEN(4.0 / 5.0), /* 5 steps */
  124. SCALE_GEN(4.0 / 9.0), /* 9 steps */
  125. };
  126. /**
  127. * Convert region offsets to region sizes and truncate
  128. * size to big_values.
  129. */
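/* Worked example: with region boundaries {36, 90} (counted in value pairs) and
 * big_values = 60, the sizes become {36, 24, 0}: region 1 is truncated at
 * big_values and region 2 is empty. */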
  130. static void region_offset2size(GranuleDef *g)
  131. {
  132. int i, k, j = 0;
  133. g->region_size[2] = 576 / 2;
  134. for (i = 0; i < 3; i++) {
  135. k = FFMIN(g->region_size[i], g->big_values);
  136. g->region_size[i] = k - j;
  137. j = k;
  138. }
  139. }
  140. static void init_short_region(MPADecodeContext *s, GranuleDef *g)
  141. {
  142. if (g->block_type == 2) {
  143. if (s->sample_rate_index != 8)
  144. g->region_size[0] = (36 / 2);
  145. else
  146. g->region_size[0] = (72 / 2);
  147. } else {
  148. if (s->sample_rate_index <= 2)
  149. g->region_size[0] = (36 / 2);
  150. else if (s->sample_rate_index != 8)
  151. g->region_size[0] = (54 / 2);
  152. else
  153. g->region_size[0] = (108 / 2);
  154. }
  155. g->region_size[1] = (576 / 2);
  156. }
  157. static void init_long_region(MPADecodeContext *s, GranuleDef *g,
  158. int ra1, int ra2)
  159. {
  160. int l;
  161. g->region_size[0] = band_index_long[s->sample_rate_index][ra1 + 1] >> 1;
  162. /* should not overflow */
  163. l = FFMIN(ra1 + ra2 + 2, 22);
  164. g->region_size[1] = band_index_long[s->sample_rate_index][ l] >> 1;
  165. }
  166. static void compute_band_indexes(MPADecodeContext *s, GranuleDef *g)
  167. {
  168. if (g->block_type == 2) {
  169. if (g->switch_point) {
  170. if(s->sample_rate_index == 8)
  171. avpriv_request_sample(s->avctx, "switch point in 8khz");
  172. /* in switched mode, we handle the first 36 samples as
  173. long blocks. For 8000 Hz, we handle the first 72
  174. exponents as long blocks */
  175. if (s->sample_rate_index <= 2)
  176. g->long_end = 8;
  177. else
  178. g->long_end = 6;
  179. g->short_start = 3;
  180. } else {
  181. g->long_end = 0;
  182. g->short_start = 0;
  183. }
  184. } else {
  185. g->short_start = 13;
  186. g->long_end = 22;
  187. }
  188. }
  189. /* layer 1 unscaling */
  190. /* n = number of bits of the mantissa minus 1 */
  191. static inline int l1_unscale(int n, int mant, int scale_factor)
  192. {
  193. int shift, mod;
  194. int64_t val;
  195. shift = scale_factor_modshift[scale_factor];
  196. mod = shift & 3;
  197. shift >>= 2;
  198. val = MUL64((int)(mant + (-1U << n) + 1), scale_factor_mult[n-1][mod]);
  199. shift += n;
  200. /* NOTE: at this point, 1 <= shift <= 21 + 15 */
  201. return (int)((val + (1LL << (shift - 1))) >> shift);
  202. }
  203. static inline int l2_unscale_group(int steps, int mant, int scale_factor)
  204. {
  205. int shift, mod, val;
  206. shift = scale_factor_modshift[scale_factor];
  207. mod = shift & 3;
  208. shift >>= 2;
  209. val = (mant - (steps >> 1)) * scale_factor_mult2[steps >> 2][mod];
  210. /* NOTE: at this point, 0 <= shift <= 21 */
  211. if (shift > 0)
  212. val = (val + (1 << (shift - 1))) >> shift;
  213. return val;
  214. }
  215. /* compute value^(4/3) * 2^(exponent/4). The result is normalized to FRAC_BITS */
  216. static inline int l3_unscale(int value, int exponent)
  217. {
  218. unsigned int m;
  219. int e;
  220. e = table_4_3_exp [4 * value + (exponent & 3)];
  221. m = table_4_3_value[4 * value + (exponent & 3)];
  222. e -= exponent >> 2;
  223. #ifdef DEBUG
  224. if(e < 1)
  225. av_log(NULL, AV_LOG_WARNING, "l3_unscale: e is %d\n", e);
  226. #endif
  227. if (e > (SUINT)31)
  228. return 0;
  229. m = (m + (1 << (e - 1))) >> e;
  230. return m;
  231. }
  232. static av_cold void decode_init_static(void)
  233. {
  234. int i, j, k;
  235. int offset;
  236. /* scale factors table for layer 1/2 */
  237. for (i = 0; i < 64; i++) {
  238. int shift, mod;
  239. /* 1.0 (i = 3) is normalized to 2 ^ FRAC_BITS */
  240. shift = i / 3;
  241. mod = i % 3;
  242. scale_factor_modshift[i] = mod | (shift << 2);
  243. }
  244. /* scale factor multiply for layer 1 */
  245. for (i = 0; i < 15; i++) {
  246. int n, norm;
  247. n = i + 2;
  248. norm = ((INT64_C(1) << n) * FRAC_ONE) / ((1 << n) - 1);
  249. scale_factor_mult[i][0] = MULLx(norm, FIXR(1.0 * 2.0), FRAC_BITS);
  250. scale_factor_mult[i][1] = MULLx(norm, FIXR(0.7937005259 * 2.0), FRAC_BITS);
  251. scale_factor_mult[i][2] = MULLx(norm, FIXR(0.6299605249 * 2.0), FRAC_BITS);
  252. ff_dlog(NULL, "%d: norm=%x s=%x %x %x\n", i, norm,
  253. scale_factor_mult[i][0],
  254. scale_factor_mult[i][1],
  255. scale_factor_mult[i][2]);
  256. }
  257. RENAME(ff_mpa_synth_init)(RENAME(ff_mpa_synth_window));
  258. /* huffman decode tables */
  259. offset = 0;
  260. for (i = 1; i < 16; i++) {
  261. const HuffTable *h = &mpa_huff_tables[i];
  262. int xsize, x, y;
  263. uint8_t tmp_bits [512] = { 0 };
  264. uint16_t tmp_codes[512] = { 0 };
  265. xsize = h->xsize;
  266. j = 0;
  267. for (x = 0; x < xsize; x++) {
  268. for (y = 0; y < xsize; y++) {
  269. tmp_bits [(x << 5) | y | ((x&&y)<<4)]= h->bits [j ];
  270. tmp_codes[(x << 5) | y | ((x&&y)<<4)]= h->codes[j++];
  271. }
  272. }
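/* The packing above mirrors the unpacking in huffman_decode(): x sits in the
 * upper bits (code >> 5), y in the low 4 bits, and bit 4 is set when both x
 * and y are non-zero. */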
  273. /* XXX: fail test */
  274. huff_vlc[i].table = huff_vlc_tables+offset;
  275. huff_vlc[i].table_allocated = huff_vlc_tables_sizes[i];
  276. init_vlc(&huff_vlc[i], 7, 512,
  277. tmp_bits, 1, 1, tmp_codes, 2, 2,
  278. INIT_VLC_USE_NEW_STATIC);
  279. offset += huff_vlc_tables_sizes[i];
  280. }
  281. av_assert0(offset == FF_ARRAY_ELEMS(huff_vlc_tables));
  282. offset = 0;
  283. for (i = 0; i < 2; i++) {
  284. huff_quad_vlc[i].table = huff_quad_vlc_tables+offset;
  285. huff_quad_vlc[i].table_allocated = huff_quad_vlc_tables_sizes[i];
  286. init_vlc(&huff_quad_vlc[i], i == 0 ? 7 : 4, 16,
  287. mpa_quad_bits[i], 1, 1, mpa_quad_codes[i], 1, 1,
  288. INIT_VLC_USE_NEW_STATIC);
  289. offset += huff_quad_vlc_tables_sizes[i];
  290. }
  291. av_assert0(offset == FF_ARRAY_ELEMS(huff_quad_vlc_tables));
  292. for (i = 0; i < 9; i++) {
  293. k = 0;
  294. for (j = 0; j < 22; j++) {
  295. band_index_long[i][j] = k;
  296. k += band_size_long[i][j];
  297. }
  298. band_index_long[i][22] = k;
  299. }
  300. /* compute n ^ (4/3) and store it in mantissa/exp format */
  301. mpegaudio_tableinit();
  302. for (i = 0; i < 4; i++) {
  303. if (ff_mpa_quant_bits[i] < 0) {
  304. for (j = 0; j < (1 << (-ff_mpa_quant_bits[i]+1)); j++) {
  305. int val1, val2, val3, steps;
  306. int val = j;
  307. steps = ff_mpa_quant_steps[i];
  308. val1 = val % steps;
  309. val /= steps;
  310. val2 = val % steps;
  311. val3 = val / steps;
  312. division_tabs[i][j] = val1 + (val2 << 4) + (val3 << 8);
  313. }
  314. }
  315. }
  316. for (i = 0; i < 7; i++) {
  317. float f;
  318. INTFLOAT v;
  319. if (i != 6) {
  320. f = tan((double)i * M_PI / 12.0);
  321. v = FIXR(f / (1.0 + f));
  322. } else {
  323. v = FIXR(1.0);
  324. }
  325. is_table[0][ i] = v;
  326. is_table[1][6 - i] = v;
  327. }
  328. /* invalid values */
  329. for (i = 7; i < 16; i++)
  330. is_table[0][i] = is_table[1][i] = 0.0;
  331. for (i = 0; i < 16; i++) {
  332. double f;
  333. int e, k;
  334. for (j = 0; j < 2; j++) {
  335. e = -(j + 1) * ((i + 1) >> 1);
  336. f = exp2(e / 4.0);
  337. k = i & 1;
  338. is_table_lsf[j][k ^ 1][i] = FIXR(f);
  339. is_table_lsf[j][k ][i] = FIXR(1.0);
  340. ff_dlog(NULL, "is_table_lsf %d %d: %f %f\n",
  341. i, j, (float) is_table_lsf[j][0][i],
  342. (float) is_table_lsf[j][1][i]);
  343. }
  344. }
  345. for (i = 0; i < 8; i++) {
  346. double ci, cs, ca;
  347. ci = ci_table[i];
  348. cs = 1.0 / sqrt(1.0 + ci * ci);
  349. ca = cs * ci;
  350. #if !USE_FLOATS
  351. csa_table[i][0] = FIXHR(cs/4);
  352. csa_table[i][1] = FIXHR(ca/4);
  353. csa_table[i][2] = FIXHR(ca/4) + FIXHR(cs/4);
  354. csa_table[i][3] = FIXHR(ca/4) - FIXHR(cs/4);
  355. #else
  356. csa_table[i][0] = cs;
  357. csa_table[i][1] = ca;
  358. csa_table[i][2] = ca + cs;
  359. csa_table[i][3] = ca - cs;
  360. #endif
  361. }
  362. }
  363. #if USE_FLOATS
  364. static av_cold int decode_close(AVCodecContext * avctx)
  365. {
  366. MPADecodeContext *s = avctx->priv_data;
  367. av_freep(&s->fdsp);
  368. return 0;
  369. }
  370. #endif
  371. static av_cold int decode_init(AVCodecContext * avctx)
  372. {
  373. static int initialized_tables = 0;
  374. MPADecodeContext *s = avctx->priv_data;
  375. if (!initialized_tables) {
  376. decode_init_static();
  377. initialized_tables = 1;
  378. }
  379. s->avctx = avctx;
  380. #if USE_FLOATS
  381. s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
  382. if (!s->fdsp)
  383. return AVERROR(ENOMEM);
  384. #endif
  385. ff_mpadsp_init(&s->mpadsp);
  386. if (avctx->request_sample_fmt == OUT_FMT &&
  387. avctx->codec_id != AV_CODEC_ID_MP3ON4)
  388. avctx->sample_fmt = OUT_FMT;
  389. else
  390. avctx->sample_fmt = OUT_FMT_P;
  391. s->err_recognition = avctx->err_recognition;
  392. if (avctx->codec_id == AV_CODEC_ID_MP3ADU)
  393. s->adu_mode = 1;
  394. return 0;
  395. }
  396. #define C3 FIXHR(0.86602540378443864676/2)
  397. #define C4 FIXHR(0.70710678118654752439/2) //0.5 / cos(pi*(9)/36)
  398. #define C5 FIXHR(0.51763809020504152469/2) //0.5 / cos(pi*(5)/36)
  399. #define C6 FIXHR(1.93185165257813657349/4) //0.5 / cos(pi*(15)/36)
  400. /* 12-point IMDCT. We compute it "by hand" by factoring the obvious
  401. cases. */
  402. static void imdct12(INTFLOAT *out, SUINTFLOAT *in)
  403. {
  404. SUINTFLOAT in0, in1, in2, in3, in4, in5, t1, t2;
  405. in0 = in[0*3];
  406. in1 = in[1*3] + in[0*3];
  407. in2 = in[2*3] + in[1*3];
  408. in3 = in[3*3] + in[2*3];
  409. in4 = in[4*3] + in[3*3];
  410. in5 = in[5*3] + in[4*3];
  411. in5 += in3;
  412. in3 += in1;
  413. in2 = MULH3(in2, C3, 2);
  414. in3 = MULH3(in3, C3, 4);
  415. t1 = in0 - in4;
  416. t2 = MULH3(in1 - in5, C4, 2);
  417. out[ 7] =
  418. out[10] = t1 + t2;
  419. out[ 1] =
  420. out[ 4] = t1 - t2;
  421. in0 += SHR(in4, 1);
  422. in4 = in0 + in2;
  423. in5 += 2*in1;
  424. in1 = MULH3(in5 + in3, C5, 1);
  425. out[ 8] =
  426. out[ 9] = in4 + in1;
  427. out[ 2] =
  428. out[ 3] = in4 - in1;
  429. in0 -= in2;
  430. in5 = MULH3(in5 - in3, C6, 2);
  431. out[ 0] =
  432. out[ 5] = in0 - in5;
  433. out[ 6] =
  434. out[11] = in0 + in5;
  435. }
  436. /* return the number of decoded frames */
  437. static int mp_decode_layer1(MPADecodeContext *s)
  438. {
  439. int bound, i, v, n, ch, j, mant;
  440. uint8_t allocation[MPA_MAX_CHANNELS][SBLIMIT];
  441. uint8_t scale_factors[MPA_MAX_CHANNELS][SBLIMIT];
  442. if (s->mode == MPA_JSTEREO)
  443. bound = (s->mode_ext + 1) * 4;
  444. else
  445. bound = SBLIMIT;
  446. /* allocation bits */
  447. for (i = 0; i < bound; i++) {
  448. for (ch = 0; ch < s->nb_channels; ch++) {
  449. allocation[ch][i] = get_bits(&s->gb, 4);
  450. }
  451. }
  452. for (i = bound; i < SBLIMIT; i++)
  453. allocation[0][i] = get_bits(&s->gb, 4);
  454. /* scale factors */
  455. for (i = 0; i < bound; i++) {
  456. for (ch = 0; ch < s->nb_channels; ch++) {
  457. if (allocation[ch][i])
  458. scale_factors[ch][i] = get_bits(&s->gb, 6);
  459. }
  460. }
  461. for (i = bound; i < SBLIMIT; i++) {
  462. if (allocation[0][i]) {
  463. scale_factors[0][i] = get_bits(&s->gb, 6);
  464. scale_factors[1][i] = get_bits(&s->gb, 6);
  465. }
  466. }
  467. /* compute samples */
  468. for (j = 0; j < 12; j++) {
  469. for (i = 0; i < bound; i++) {
  470. for (ch = 0; ch < s->nb_channels; ch++) {
  471. n = allocation[ch][i];
  472. if (n) {
  473. mant = get_bits(&s->gb, n + 1);
  474. v = l1_unscale(n, mant, scale_factors[ch][i]);
  475. } else {
  476. v = 0;
  477. }
  478. s->sb_samples[ch][j][i] = v;
  479. }
  480. }
  481. for (i = bound; i < SBLIMIT; i++) {
  482. n = allocation[0][i];
  483. if (n) {
  484. mant = get_bits(&s->gb, n + 1);
  485. v = l1_unscale(n, mant, scale_factors[0][i]);
  486. s->sb_samples[0][j][i] = v;
  487. v = l1_unscale(n, mant, scale_factors[1][i]);
  488. s->sb_samples[1][j][i] = v;
  489. } else {
  490. s->sb_samples[0][j][i] = 0;
  491. s->sb_samples[1][j][i] = 0;
  492. }
  493. }
  494. }
  495. return 12;
  496. }
  497. static int mp_decode_layer2(MPADecodeContext *s)
  498. {
  499. int sblimit; /* number of used subbands */
  500. const unsigned char *alloc_table;
  501. int table, bit_alloc_bits, i, j, ch, bound, v;
  502. unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT];
  503. unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT];
  504. unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3], *sf;
  505. int scale, qindex, bits, steps, k, l, m, b;
  506. /* select decoding table */
  507. table = ff_mpa_l2_select_table(s->bit_rate / 1000, s->nb_channels,
  508. s->sample_rate, s->lsf);
  509. sblimit = ff_mpa_sblimit_table[table];
  510. alloc_table = ff_mpa_alloc_tables[table];
  511. if (s->mode == MPA_JSTEREO)
  512. bound = (s->mode_ext + 1) * 4;
  513. else
  514. bound = sblimit;
  515. ff_dlog(s->avctx, "bound=%d sblimit=%d\n", bound, sblimit);
  516. /* sanity check */
  517. if (bound > sblimit)
  518. bound = sblimit;
  519. /* parse bit allocation */
  520. j = 0;
  521. for (i = 0; i < bound; i++) {
  522. bit_alloc_bits = alloc_table[j];
  523. for (ch = 0; ch < s->nb_channels; ch++)
  524. bit_alloc[ch][i] = get_bits(&s->gb, bit_alloc_bits);
  525. j += 1 << bit_alloc_bits;
  526. }
  527. for (i = bound; i < sblimit; i++) {
  528. bit_alloc_bits = alloc_table[j];
  529. v = get_bits(&s->gb, bit_alloc_bits);
  530. bit_alloc[0][i] = v;
  531. bit_alloc[1][i] = v;
  532. j += 1 << bit_alloc_bits;
  533. }
  534. /* scale codes */
  535. for (i = 0; i < sblimit; i++) {
  536. for (ch = 0; ch < s->nb_channels; ch++) {
  537. if (bit_alloc[ch][i])
  538. scale_code[ch][i] = get_bits(&s->gb, 2);
  539. }
  540. }
  541. /* scale factors */
  542. for (i = 0; i < sblimit; i++) {
  543. for (ch = 0; ch < s->nb_channels; ch++) {
  544. if (bit_alloc[ch][i]) {
  545. sf = scale_factors[ch][i];
  546. switch (scale_code[ch][i]) {
  547. default:
  548. case 0:
  549. sf[0] = get_bits(&s->gb, 6);
  550. sf[1] = get_bits(&s->gb, 6);
  551. sf[2] = get_bits(&s->gb, 6);
  552. break;
  553. case 2:
  554. sf[0] = get_bits(&s->gb, 6);
  555. sf[1] = sf[0];
  556. sf[2] = sf[0];
  557. break;
  558. case 1:
  559. sf[0] = get_bits(&s->gb, 6);
  560. sf[2] = get_bits(&s->gb, 6);
  561. sf[1] = sf[0];
  562. break;
  563. case 3:
  564. sf[0] = get_bits(&s->gb, 6);
  565. sf[2] = get_bits(&s->gb, 6);
  566. sf[1] = sf[2];
  567. break;
  568. }
  569. }
  570. }
  571. }
  572. /* samples */
  573. for (k = 0; k < 3; k++) {
  574. for (l = 0; l < 12; l += 3) {
  575. j = 0;
  576. for (i = 0; i < bound; i++) {
  577. bit_alloc_bits = alloc_table[j];
  578. for (ch = 0; ch < s->nb_channels; ch++) {
  579. b = bit_alloc[ch][i];
  580. if (b) {
  581. scale = scale_factors[ch][i][k];
  582. qindex = alloc_table[j+b];
  583. bits = ff_mpa_quant_bits[qindex];
  584. if (bits < 0) {
  585. int v2;
  586. /* 3 values at the same time */
  587. v = get_bits(&s->gb, -bits);
  588. v2 = division_tabs[qindex][v];
  589. steps = ff_mpa_quant_steps[qindex];
  590. s->sb_samples[ch][k * 12 + l + 0][i] =
  591. l2_unscale_group(steps, v2 & 15, scale);
  592. s->sb_samples[ch][k * 12 + l + 1][i] =
  593. l2_unscale_group(steps, (v2 >> 4) & 15, scale);
  594. s->sb_samples[ch][k * 12 + l + 2][i] =
  595. l2_unscale_group(steps, v2 >> 8 , scale);
  596. } else {
  597. for (m = 0; m < 3; m++) {
  598. v = get_bits(&s->gb, bits);
  599. v = l1_unscale(bits - 1, v, scale);
  600. s->sb_samples[ch][k * 12 + l + m][i] = v;
  601. }
  602. }
  603. } else {
  604. s->sb_samples[ch][k * 12 + l + 0][i] = 0;
  605. s->sb_samples[ch][k * 12 + l + 1][i] = 0;
  606. s->sb_samples[ch][k * 12 + l + 2][i] = 0;
  607. }
  608. }
  609. /* next subband in alloc table */
  610. j += 1 << bit_alloc_bits;
  611. }
  612. /* XXX: find a way to avoid this duplication of code */
  613. for (i = bound; i < sblimit; i++) {
  614. bit_alloc_bits = alloc_table[j];
  615. b = bit_alloc[0][i];
  616. if (b) {
  617. int mant, scale0, scale1;
  618. scale0 = scale_factors[0][i][k];
  619. scale1 = scale_factors[1][i][k];
  620. qindex = alloc_table[j+b];
  621. bits = ff_mpa_quant_bits[qindex];
  622. if (bits < 0) {
  623. /* 3 values at the same time */
  624. v = get_bits(&s->gb, -bits);
  625. steps = ff_mpa_quant_steps[qindex];
  626. mant = v % steps;
  627. v = v / steps;
  628. s->sb_samples[0][k * 12 + l + 0][i] =
  629. l2_unscale_group(steps, mant, scale0);
  630. s->sb_samples[1][k * 12 + l + 0][i] =
  631. l2_unscale_group(steps, mant, scale1);
  632. mant = v % steps;
  633. v = v / steps;
  634. s->sb_samples[0][k * 12 + l + 1][i] =
  635. l2_unscale_group(steps, mant, scale0);
  636. s->sb_samples[1][k * 12 + l + 1][i] =
  637. l2_unscale_group(steps, mant, scale1);
  638. s->sb_samples[0][k * 12 + l + 2][i] =
  639. l2_unscale_group(steps, v, scale0);
  640. s->sb_samples[1][k * 12 + l + 2][i] =
  641. l2_unscale_group(steps, v, scale1);
  642. } else {
  643. for (m = 0; m < 3; m++) {
  644. mant = get_bits(&s->gb, bits);
  645. s->sb_samples[0][k * 12 + l + m][i] =
  646. l1_unscale(bits - 1, mant, scale0);
  647. s->sb_samples[1][k * 12 + l + m][i] =
  648. l1_unscale(bits - 1, mant, scale1);
  649. }
  650. }
  651. } else {
  652. s->sb_samples[0][k * 12 + l + 0][i] = 0;
  653. s->sb_samples[0][k * 12 + l + 1][i] = 0;
  654. s->sb_samples[0][k * 12 + l + 2][i] = 0;
  655. s->sb_samples[1][k * 12 + l + 0][i] = 0;
  656. s->sb_samples[1][k * 12 + l + 1][i] = 0;
  657. s->sb_samples[1][k * 12 + l + 2][i] = 0;
  658. }
  659. /* next subband in alloc table */
  660. j += 1 << bit_alloc_bits;
  661. }
  662. /* fill remaining samples to zero */
  663. for (i = sblimit; i < SBLIMIT; i++) {
  664. for (ch = 0; ch < s->nb_channels; ch++) {
  665. s->sb_samples[ch][k * 12 + l + 0][i] = 0;
  666. s->sb_samples[ch][k * 12 + l + 1][i] = 0;
  667. s->sb_samples[ch][k * 12 + l + 2][i] = 0;
  668. }
  669. }
  670. }
  671. }
  672. return 3 * 12;
  673. }
  674. #define SPLIT(dst,sf,n) \
  675. if (n == 3) { \
  676. int m = (sf * 171) >> 9; \
  677. dst = sf - 3 * m; \
  678. sf = m; \
  679. } else if (n == 4) { \
  680. dst = sf & 3; \
  681. sf >>= 2; \
  682. } else if (n == 5) { \
  683. int m = (sf * 205) >> 10; \
  684. dst = sf - 5 * m; \
  685. sf = m; \
  686. } else if (n == 6) { \
  687. int m = (sf * 171) >> 10; \
  688. dst = sf - 6 * m; \
  689. sf = m; \
  690. } else { \
  691. dst = 0; \
  692. }
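/* The multiply/shift pairs above are exact integer divisions for the small
 * scalefac_compress values used here: (sf * 171) >> 9 == sf / 3,
 * (sf * 205) >> 10 == sf / 5 and (sf * 171) >> 10 == sf / 6 for sf < 512. */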
  693. static av_always_inline void lsf_sf_expand(int *slen, int sf, int n1, int n2,
  694. int n3)
  695. {
  696. SPLIT(slen[3], sf, n3)
  697. SPLIT(slen[2], sf, n2)
  698. SPLIT(slen[1], sf, n1)
  699. slen[0] = sf;
  700. }
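/* The exponents computed below are expressed in steps of 2^(1/4):
 * global_gain - 210 minus the (scale factor + pretab) term shifted by
 * scalefac_scale + 1, plus a constant bias of 400 that the exp_table /
 * expval_table lookups and l3_unscale() expect. */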
  701. static void exponents_from_scale_factors(MPADecodeContext *s, GranuleDef *g,
  702. int16_t *exponents)
  703. {
  704. const uint8_t *bstab, *pretab;
  705. int len, i, j, k, l, v0, shift, gain, gains[3];
  706. int16_t *exp_ptr;
  707. exp_ptr = exponents;
  708. gain = g->global_gain - 210;
  709. shift = g->scalefac_scale + 1;
  710. bstab = band_size_long[s->sample_rate_index];
  711. pretab = mpa_pretab[g->preflag];
  712. for (i = 0; i < g->long_end; i++) {
  713. v0 = gain - ((g->scale_factors[i] + pretab[i]) << shift) + 400;
  714. len = bstab[i];
  715. for (j = len; j > 0; j--)
  716. *exp_ptr++ = v0;
  717. }
  718. if (g->short_start < 13) {
  719. bstab = band_size_short[s->sample_rate_index];
  720. gains[0] = gain - (g->subblock_gain[0] << 3);
  721. gains[1] = gain - (g->subblock_gain[1] << 3);
  722. gains[2] = gain - (g->subblock_gain[2] << 3);
  723. k = g->long_end;
  724. for (i = g->short_start; i < 13; i++) {
  725. len = bstab[i];
  726. for (l = 0; l < 3; l++) {
  727. v0 = gains[l] - (g->scale_factors[k++] << shift) + 400;
  728. for (j = len; j > 0; j--)
  729. *exp_ptr++ = v0;
  730. }
  731. }
  732. }
  733. }
  734. static void switch_buffer(MPADecodeContext *s, int *pos, int *end_pos,
  735. int *end_pos2)
  736. {
  737. if (s->in_gb.buffer && *pos >= s->gb.size_in_bits - s->extrasize * 8) {
  738. s->gb = s->in_gb;
  739. s->in_gb.buffer = NULL;
  740. s->extrasize = 0;
  741. av_assert2((get_bits_count(&s->gb) & 7) == 0);
  742. skip_bits_long(&s->gb, *pos - *end_pos);
  743. *end_pos2 =
  744. *end_pos = *end_pos2 + get_bits_count(&s->gb) - *pos;
  745. *pos = get_bits_count(&s->gb);
  746. }
  747. }
  748. /* The following is optimized code for
  749. INTFLOAT v = *src
  750. if(get_bits1(&s->gb))
  751. v = -v;
  752. *dst = v;
  753. */
  754. #if USE_FLOATS
  755. #define READ_FLIP_SIGN(dst,src) \
  756. v = AV_RN32A(src) ^ (get_bits1(&s->gb) << 31); \
  757. AV_WN32A(dst, v);
  758. #else
  759. #define READ_FLIP_SIGN(dst,src) \
  760. v = -get_bits1(&s->gb); \
  761. *(dst) = (*(src) ^ v) - v;
  762. #endif
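/* Branchless sign flip: the float variant XORs the read bit into the IEEE-754
 * sign bit, the fixed-point variant uses v = 0 or -1 and the identity
 * (x ^ -1) - (-1) == -x. */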
  763. static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
  764. int16_t *exponents, int end_pos2)
  765. {
  766. int s_index;
  767. int i;
  768. int last_pos, bits_left;
  769. VLC *vlc;
  770. int end_pos = FFMIN(end_pos2, s->gb.size_in_bits - s->extrasize * 8);
  771. /* low frequencies (called big values) */
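/* Each code in this region decodes a pair (x, y). A value of 15 is an escape:
 * 'linbits' extra bits extend the magnitude, and every non-zero value is
 * followed by a sign bit. */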
  772. s_index = 0;
  773. for (i = 0; i < 3; i++) {
  774. int j, k, l, linbits;
  775. j = g->region_size[i];
  776. if (j == 0)
  777. continue;
  778. /* select vlc table */
  779. k = g->table_select[i];
  780. l = mpa_huff_data[k][0];
  781. linbits = mpa_huff_data[k][1];
  782. vlc = &huff_vlc[l];
  783. if (!l) {
  784. memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * 2 * j);
  785. s_index += 2 * j;
  786. continue;
  787. }
  788. /* read huffcode and compute each couple */
  789. for (; j > 0; j--) {
  790. int exponent, x, y;
  791. int v;
  792. int pos = get_bits_count(&s->gb);
  793. if (pos >= end_pos){
  794. switch_buffer(s, &pos, &end_pos, &end_pos2);
  795. if (pos >= end_pos)
  796. break;
  797. }
  798. y = get_vlc2(&s->gb, vlc->table, 7, 3);
  799. if (!y) {
  800. g->sb_hybrid[s_index ] =
  801. g->sb_hybrid[s_index+1] = 0;
  802. s_index += 2;
  803. continue;
  804. }
  805. exponent= exponents[s_index];
  806. ff_dlog(s->avctx, "region=%d n=%d x=%d y=%d exp=%d\n",
  807. i, g->region_size[i] - j, x, y, exponent);
  808. if (y & 16) {
  809. x = y >> 5;
  810. y = y & 0x0f;
  811. if (x < 15) {
  812. READ_FLIP_SIGN(g->sb_hybrid + s_index, RENAME(expval_table)[exponent] + x)
  813. } else {
  814. x += get_bitsz(&s->gb, linbits);
  815. v = l3_unscale(x, exponent);
  816. if (get_bits1(&s->gb))
  817. v = -v;
  818. g->sb_hybrid[s_index] = v;
  819. }
  820. if (y < 15) {
  821. READ_FLIP_SIGN(g->sb_hybrid + s_index + 1, RENAME(expval_table)[exponent] + y)
  822. } else {
  823. y += get_bitsz(&s->gb, linbits);
  824. v = l3_unscale(y, exponent);
  825. if (get_bits1(&s->gb))
  826. v = -v;
  827. g->sb_hybrid[s_index+1] = v;
  828. }
  829. } else {
  830. x = y >> 5;
  831. y = y & 0x0f;
  832. x += y;
  833. if (x < 15) {
  834. READ_FLIP_SIGN(g->sb_hybrid + s_index + !!y, RENAME(expval_table)[exponent] + x)
  835. } else {
  836. x += get_bitsz(&s->gb, linbits);
  837. v = l3_unscale(x, exponent);
  838. if (get_bits1(&s->gb))
  839. v = -v;
  840. g->sb_hybrid[s_index+!!y] = v;
  841. }
  842. g->sb_hybrid[s_index + !y] = 0;
  843. }
  844. s_index += 2;
  845. }
  846. }
  847. /* high frequencies */
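/* The count1 region codes quadruples of values restricted to 0 and +/-1,
 * using one of two tables selected by count1table_select. */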
  848. vlc = &huff_quad_vlc[g->count1table_select];
  849. last_pos = 0;
  850. while (s_index <= 572) {
  851. int pos, code;
  852. pos = get_bits_count(&s->gb);
  853. if (pos >= end_pos) {
  854. if (pos > end_pos2 && last_pos) {
  855. /* some encoders generate an incorrect size for this
  856. part. We must go back into the data */
  857. s_index -= 4;
  858. skip_bits_long(&s->gb, last_pos - pos);
  859. av_log(s->avctx, AV_LOG_INFO, "overread, skip %d enddists: %d %d\n", last_pos - pos, end_pos-pos, end_pos2-pos);
  860. if(s->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT))
  861. s_index=0;
  862. break;
  863. }
  864. switch_buffer(s, &pos, &end_pos, &end_pos2);
  865. if (pos >= end_pos)
  866. break;
  867. }
  868. last_pos = pos;
  869. code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1);
  870. ff_dlog(s->avctx, "t=%d code=%d\n", g->count1table_select, code);
  871. g->sb_hybrid[s_index+0] =
  872. g->sb_hybrid[s_index+1] =
  873. g->sb_hybrid[s_index+2] =
  874. g->sb_hybrid[s_index+3] = 0;
  875. while (code) {
  876. static const int idxtab[16] = { 3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0 };
  877. int v;
  878. int pos = s_index + idxtab[code];
  879. code ^= 8 >> idxtab[code];
  880. READ_FLIP_SIGN(g->sb_hybrid + pos, RENAME(exp_table)+exponents[pos])
  881. }
  882. s_index += 4;
  883. }
  884. /* skip extension bits */
  885. bits_left = end_pos2 - get_bits_count(&s->gb);
  886. if (bits_left < 0 && (s->err_recognition & (AV_EF_BUFFER|AV_EF_COMPLIANT))) {
  887. av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
  888. s_index=0;
  889. } else if (bits_left > 0 && (s->err_recognition & (AV_EF_BUFFER|AV_EF_AGGRESSIVE))) {
  890. av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
  891. s_index = 0;
  892. }
  893. memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * (576 - s_index));
  894. skip_bits_long(&s->gb, bits_left);
  895. i = get_bits_count(&s->gb);
  896. switch_buffer(s, &i, &end_pos, &end_pos2);
  897. return 0;
  898. }
  899. /* Reorder short blocks from bitstream order to interleaved order. It
  900. would be faster to do this while parsing, but the code would be far more
  901. complicated. */
  902. static void reorder_block(MPADecodeContext *s, GranuleDef *g)
  903. {
  904. int i, j, len;
  905. INTFLOAT *ptr, *dst, *ptr1;
  906. INTFLOAT tmp[576];
  907. if (g->block_type != 2)
  908. return;
  909. if (g->switch_point) {
  910. if (s->sample_rate_index != 8)
  911. ptr = g->sb_hybrid + 36;
  912. else
  913. ptr = g->sb_hybrid + 72;
  914. } else {
  915. ptr = g->sb_hybrid;
  916. }
  917. for (i = g->short_start; i < 13; i++) {
  918. len = band_size_short[s->sample_rate_index][i];
  919. ptr1 = ptr;
  920. dst = tmp;
  921. for (j = len; j > 0; j--) {
  922. *dst++ = ptr[0*len];
  923. *dst++ = ptr[1*len];
  924. *dst++ = ptr[2*len];
  925. ptr++;
  926. }
  927. ptr += 2 * len;
  928. memcpy(ptr1, tmp, len * 3 * sizeof(*ptr1));
  929. }
  930. }
  931. #define ISQRT2 FIXR(0.70710678118654752440)
  932. static void compute_stereo(MPADecodeContext *s, GranuleDef *g0, GranuleDef *g1)
  933. {
  934. int i, j, k, l;
  935. int sf_max, sf, len, non_zero_found;
  936. INTFLOAT (*is_tab)[16], *tab0, *tab1, tmp0, tmp1, v1, v2;
  937. int non_zero_found_short[3];
  938. /* intensity stereo */
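/* Intensity stereo bands are those where the right channel is entirely zero;
 * both channels are then rebuilt from the left (combined) spectrum scaled by
 * is_tab[0][sf] and is_tab[1][sf]. Scanning runs from the highest band down
 * (tracked per window for short blocks); once a non-zero right-channel band is
 * found, the remaining lower bands fall back to MS stereo when enabled. */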
  939. if (s->mode_ext & MODE_EXT_I_STEREO) {
  940. if (!s->lsf) {
  941. is_tab = is_table;
  942. sf_max = 7;
  943. } else {
  944. is_tab = is_table_lsf[g1->scalefac_compress & 1];
  945. sf_max = 16;
  946. }
  947. tab0 = g0->sb_hybrid + 576;
  948. tab1 = g1->sb_hybrid + 576;
  949. non_zero_found_short[0] = 0;
  950. non_zero_found_short[1] = 0;
  951. non_zero_found_short[2] = 0;
  952. k = (13 - g1->short_start) * 3 + g1->long_end - 3;
  953. for (i = 12; i >= g1->short_start; i--) {
  954. /* for last band, use previous scale factor */
  955. if (i != 11)
  956. k -= 3;
  957. len = band_size_short[s->sample_rate_index][i];
  958. for (l = 2; l >= 0; l--) {
  959. tab0 -= len;
  960. tab1 -= len;
  961. if (!non_zero_found_short[l]) {
  962. /* test if the band is non-zero; if so, stop doing i-stereo */
  963. for (j = 0; j < len; j++) {
  964. if (tab1[j] != 0) {
  965. non_zero_found_short[l] = 1;
  966. goto found1;
  967. }
  968. }
  969. sf = g1->scale_factors[k + l];
  970. if (sf >= sf_max)
  971. goto found1;
  972. v1 = is_tab[0][sf];
  973. v2 = is_tab[1][sf];
  974. for (j = 0; j < len; j++) {
  975. tmp0 = tab0[j];
  976. tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
  977. tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
  978. }
  979. } else {
  980. found1:
  981. if (s->mode_ext & MODE_EXT_MS_STEREO) {
  982. /* lower part of the spectrum: do MS stereo
  983. if enabled */
  984. for (j = 0; j < len; j++) {
  985. tmp0 = tab0[j];
  986. tmp1 = tab1[j];
  987. tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
  988. tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
  989. }
  990. }
  991. }
  992. }
  993. }
  994. non_zero_found = non_zero_found_short[0] |
  995. non_zero_found_short[1] |
  996. non_zero_found_short[2];
  997. for (i = g1->long_end - 1;i >= 0;i--) {
  998. len = band_size_long[s->sample_rate_index][i];
  999. tab0 -= len;
  1000. tab1 -= len;
  1001. /* test if the band is non-zero; if so, stop doing i-stereo */
  1002. if (!non_zero_found) {
  1003. for (j = 0; j < len; j++) {
  1004. if (tab1[j] != 0) {
  1005. non_zero_found = 1;
  1006. goto found2;
  1007. }
  1008. }
  1009. /* for last band, use previous scale factor */
  1010. k = (i == 21) ? 20 : i;
  1011. sf = g1->scale_factors[k];
  1012. if (sf >= sf_max)
  1013. goto found2;
  1014. v1 = is_tab[0][sf];
  1015. v2 = is_tab[1][sf];
  1016. for (j = 0; j < len; j++) {
  1017. tmp0 = tab0[j];
  1018. tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
  1019. tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
  1020. }
  1021. } else {
  1022. found2:
  1023. if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1024. /* lower part of the spectrum: do MS stereo
  1025. if enabled */
  1026. for (j = 0; j < len; j++) {
  1027. tmp0 = tab0[j];
  1028. tmp1 = tab1[j];
  1029. tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
  1030. tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
  1031. }
  1032. }
  1033. }
  1034. }
  1035. } else if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1036. /* ms stereo ONLY */
  1037. /* NOTE: the 1/sqrt(2) normalization factor is included in the
  1038. global gain */
  1039. #if USE_FLOATS
  1040. s->fdsp->butterflies_float(g0->sb_hybrid, g1->sb_hybrid, 576);
  1041. #else
  1042. tab0 = g0->sb_hybrid;
  1043. tab1 = g1->sb_hybrid;
  1044. for (i = 0; i < 576; i++) {
  1045. tmp0 = tab0[i];
  1046. tmp1 = tab1[i];
  1047. tab0[i] = tmp0 + tmp1;
  1048. tab1[i] = tmp0 - tmp1;
  1049. }
  1050. #endif
  1051. }
  1052. }
  1053. #if USE_FLOATS
  1054. #if HAVE_MIPSFPU
  1055. # include "mips/compute_antialias_float.h"
  1056. #endif /* HAVE_MIPSFPU */
  1057. #else
  1058. #if HAVE_MIPSDSP
  1059. # include "mips/compute_antialias_fixed.h"
  1060. #endif /* HAVE_MIPSDSP */
  1061. #endif /* USE_FLOATS */
  1062. #ifndef compute_antialias
  1063. #if USE_FLOATS
  1064. #define AA(j) do { \
  1065. float tmp0 = ptr[-1-j]; \
  1066. float tmp1 = ptr[ j]; \
  1067. ptr[-1-j] = tmp0 * csa_table[j][0] - tmp1 * csa_table[j][1]; \
  1068. ptr[ j] = tmp0 * csa_table[j][1] + tmp1 * csa_table[j][0]; \
  1069. } while (0)
  1070. #else
  1071. #define AA(j) do { \
  1072. SUINT tmp0 = ptr[-1-j]; \
  1073. SUINT tmp1 = ptr[ j]; \
  1074. SUINT tmp2 = MULH(tmp0 + tmp1, csa_table[j][0]); \
  1075. ptr[-1-j] = 4 * (tmp2 - MULH(tmp1, csa_table[j][2])); \
  1076. ptr[ j] = 4 * (tmp2 + MULH(tmp0, csa_table[j][3])); \
  1077. } while (0)
  1078. #endif
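/* The fixed-point AA() computes the same butterfly as the float version with
 * three multiplies: with a = ptr[-1-j], b = ptr[j],
 * (a + b) * cs - b * (ca + cs) == a * cs - b * ca and
 * (a + b) * cs + a * (ca - cs) == a * ca + b * cs. */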
  1079. static void compute_antialias(MPADecodeContext *s, GranuleDef *g)
  1080. {
  1081. INTFLOAT *ptr;
  1082. int n, i;
  1083. /* we antialias only "long" bands */
  1084. if (g->block_type == 2) {
  1085. if (!g->switch_point)
  1086. return;
  1087. /* XXX: check this for 8000Hz case */
  1088. n = 1;
  1089. } else {
  1090. n = SBLIMIT - 1;
  1091. }
  1092. ptr = g->sb_hybrid + 18;
  1093. for (i = n; i > 0; i--) {
  1094. AA(0);
  1095. AA(1);
  1096. AA(2);
  1097. AA(3);
  1098. AA(4);
  1099. AA(5);
  1100. AA(6);
  1101. AA(7);
  1102. ptr += 18;
  1103. }
  1104. }
  1105. #endif /* compute_antialias */
  1106. static void compute_imdct(MPADecodeContext *s, GranuleDef *g,
  1107. INTFLOAT *sb_samples, INTFLOAT *mdct_buf)
  1108. {
  1109. INTFLOAT *win, *out_ptr, *ptr, *buf, *ptr1;
  1110. INTFLOAT out2[12];
  1111. int i, j, mdct_long_end, sblimit;
  1112. /* find last non zero block */
  1113. ptr = g->sb_hybrid + 576;
  1114. ptr1 = g->sb_hybrid + 2 * 18;
  1115. while (ptr >= ptr1) {
  1116. int32_t *p;
  1117. ptr -= 6;
  1118. p = (int32_t*)ptr;
  1119. if (p[0] | p[1] | p[2] | p[3] | p[4] | p[5])
  1120. break;
  1121. }
  1122. sblimit = ((ptr - g->sb_hybrid) / 18) + 1;
  1123. if (g->block_type == 2) {
  1124. /* XXX: check for 8000 Hz */
  1125. if (g->switch_point)
  1126. mdct_long_end = 2;
  1127. else
  1128. mdct_long_end = 0;
  1129. } else {
  1130. mdct_long_end = sblimit;
  1131. }
  1132. s->mpadsp.RENAME(imdct36_blocks)(sb_samples, mdct_buf, g->sb_hybrid,
  1133. mdct_long_end, g->switch_point,
  1134. g->block_type);
  1135. buf = mdct_buf + 4*18*(mdct_long_end >> 2) + (mdct_long_end & 3);
  1136. ptr = g->sb_hybrid + 18 * mdct_long_end;
  1137. for (j = mdct_long_end; j < sblimit; j++) {
  1138. /* select frequency inversion */
  1139. win = RENAME(ff_mdct_win)[2 + (4 & -(j & 1))];
  1140. out_ptr = sb_samples + j;
  1141. for (i = 0; i < 6; i++) {
  1142. *out_ptr = buf[4*i];
  1143. out_ptr += SBLIMIT;
  1144. }
  1145. imdct12(out2, ptr + 0);
  1146. for (i = 0; i < 6; i++) {
  1147. *out_ptr = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*1)];
  1148. buf[4*(i + 6*2)] = MULH3(out2[i + 6], win[i + 6], 1);
  1149. out_ptr += SBLIMIT;
  1150. }
  1151. imdct12(out2, ptr + 1);
  1152. for (i = 0; i < 6; i++) {
  1153. *out_ptr = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*2)];
  1154. buf[4*(i + 6*0)] = MULH3(out2[i + 6], win[i + 6], 1);
  1155. out_ptr += SBLIMIT;
  1156. }
  1157. imdct12(out2, ptr + 2);
  1158. for (i = 0; i < 6; i++) {
  1159. buf[4*(i + 6*0)] = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*0)];
  1160. buf[4*(i + 6*1)] = MULH3(out2[i + 6], win[i + 6], 1);
  1161. buf[4*(i + 6*2)] = 0;
  1162. }
  1163. ptr += 18;
  1164. buf += (j&3) != 3 ? 1 : (4*18-3);
  1165. }
  1166. /* zero bands */
  1167. for (j = sblimit; j < SBLIMIT; j++) {
  1168. /* overlap */
  1169. out_ptr = sb_samples + j;
  1170. for (i = 0; i < 18; i++) {
  1171. *out_ptr = buf[4*i];
  1172. buf[4*i] = 0;
  1173. out_ptr += SBLIMIT;
  1174. }
  1175. buf += (j&3) != 3 ? 1 : (4*18-3);
  1176. }
  1177. }
  1178. /* main layer3 decoding function */
  1179. static int mp_decode_layer3(MPADecodeContext *s)
  1180. {
  1181. int nb_granules, main_data_begin;
  1182. int gr, ch, blocksplit_flag, i, j, k, n, bits_pos;
  1183. GranuleDef *g;
  1184. int16_t exponents[576]; //FIXME try INTFLOAT
  1185. /* read side info */
  1186. if (s->lsf) {
  1187. main_data_begin = get_bits(&s->gb, 8);
  1188. skip_bits(&s->gb, s->nb_channels);
  1189. nb_granules = 1;
  1190. } else {
  1191. main_data_begin = get_bits(&s->gb, 9);
  1192. if (s->nb_channels == 2)
  1193. skip_bits(&s->gb, 3);
  1194. else
  1195. skip_bits(&s->gb, 5);
  1196. nb_granules = 2;
  1197. for (ch = 0; ch < s->nb_channels; ch++) {
  1198. s->granules[ch][0].scfsi = 0;/* all scale factors are transmitted */
  1199. s->granules[ch][1].scfsi = get_bits(&s->gb, 4);
  1200. }
  1201. }
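/* main_data_begin is the backward offset, in bytes, into the bit reservoir at
 * which this frame's main data (scale factors + Huffman data) starts; 0 means
 * the main data follows the side info directly. */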
  1202. for (gr = 0; gr < nb_granules; gr++) {
  1203. for (ch = 0; ch < s->nb_channels; ch++) {
  1204. ff_dlog(s->avctx, "gr=%d ch=%d: side_info\n", gr, ch);
  1205. g = &s->granules[ch][gr];
  1206. g->part2_3_length = get_bits(&s->gb, 12);
  1207. g->big_values = get_bits(&s->gb, 9);
  1208. if (g->big_values > 288) {
  1209. av_log(s->avctx, AV_LOG_ERROR, "big_values too big\n");
  1210. return AVERROR_INVALIDDATA;
  1211. }
  1212. g->global_gain = get_bits(&s->gb, 8);
  1213. /* if MS stereo only is selected, we precompute the
  1214. 1/sqrt(2) renormalization factor */
  1215. if ((s->mode_ext & (MODE_EXT_MS_STEREO | MODE_EXT_I_STEREO)) ==
  1216. MODE_EXT_MS_STEREO)
  1217. g->global_gain -= 2;
  1218. if (s->lsf)
  1219. g->scalefac_compress = get_bits(&s->gb, 9);
  1220. else
  1221. g->scalefac_compress = get_bits(&s->gb, 4);
  1222. blocksplit_flag = get_bits1(&s->gb);
  1223. if (blocksplit_flag) {
  1224. g->block_type = get_bits(&s->gb, 2);
  1225. if (g->block_type == 0) {
  1226. av_log(s->avctx, AV_LOG_ERROR, "invalid block type\n");
  1227. return AVERROR_INVALIDDATA;
  1228. }
  1229. g->switch_point = get_bits1(&s->gb);
  1230. for (i = 0; i < 2; i++)
  1231. g->table_select[i] = get_bits(&s->gb, 5);
  1232. for (i = 0; i < 3; i++)
  1233. g->subblock_gain[i] = get_bits(&s->gb, 3);
  1234. init_short_region(s, g);
  1235. } else {
  1236. int region_address1, region_address2;
  1237. g->block_type = 0;
  1238. g->switch_point = 0;
  1239. for (i = 0; i < 3; i++)
  1240. g->table_select[i] = get_bits(&s->gb, 5);
  1241. /* compute huffman coded region sizes */
  1242. region_address1 = get_bits(&s->gb, 4);
  1243. region_address2 = get_bits(&s->gb, 3);
  1244. ff_dlog(s->avctx, "region1=%d region2=%d\n",
  1245. region_address1, region_address2);
  1246. init_long_region(s, g, region_address1, region_address2);
  1247. }
  1248. region_offset2size(g);
  1249. compute_band_indexes(s, g);
  1250. g->preflag = 0;
  1251. if (!s->lsf)
  1252. g->preflag = get_bits1(&s->gb);
  1253. g->scalefac_scale = get_bits1(&s->gb);
  1254. g->count1table_select = get_bits1(&s->gb);
  1255. ff_dlog(s->avctx, "block_type=%d switch_point=%d\n",
  1256. g->block_type, g->switch_point);
  1257. }
  1258. }
  1259. if (!s->adu_mode) {
  1260. int skip;
  1261. const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3);
  1262. s->extrasize = av_clip((get_bits_left(&s->gb) >> 3) - s->extrasize, 0,
  1263. FFMAX(0, LAST_BUF_SIZE - s->last_buf_size));
  1264. av_assert1((get_bits_count(&s->gb) & 7) == 0);
  1265. /* now we get bits from the main_data_begin offset */
  1266. ff_dlog(s->avctx, "seekback:%d, lastbuf:%d\n",
  1267. main_data_begin, s->last_buf_size);
  1268. memcpy(s->last_buf + s->last_buf_size, ptr, s->extrasize);
  1269. s->in_gb = s->gb;
  1270. init_get_bits(&s->gb, s->last_buf, (s->last_buf_size + s->extrasize) * 8);
  1271. s->last_buf_size <<= 3;
  1272. for (gr = 0; gr < nb_granules && (s->last_buf_size >> 3) < main_data_begin; gr++) {
  1273. for (ch = 0; ch < s->nb_channels; ch++) {
  1274. g = &s->granules[ch][gr];
  1275. s->last_buf_size += g->part2_3_length;
  1276. memset(g->sb_hybrid, 0, sizeof(g->sb_hybrid));
  1277. compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
  1278. }
  1279. }
  1280. skip = s->last_buf_size - 8 * main_data_begin;
  1281. if (skip >= s->gb.size_in_bits - s->extrasize * 8 && s->in_gb.buffer) {
  1282. skip_bits_long(&s->in_gb, skip - s->gb.size_in_bits + s->extrasize * 8);
  1283. s->gb = s->in_gb;
  1284. s->in_gb.buffer = NULL;
  1285. s->extrasize = 0;
  1286. } else {
  1287. skip_bits_long(&s->gb, skip);
  1288. }
  1289. } else {
  1290. gr = 0;
  1291. s->extrasize = 0;
  1292. }
  1293. for (; gr < nb_granules; gr++) {
  1294. for (ch = 0; ch < s->nb_channels; ch++) {
  1295. g = &s->granules[ch][gr];
  1296. bits_pos = get_bits_count(&s->gb);
  1297. if (!s->lsf) {
  1298. uint8_t *sc;
  1299. int slen, slen1, slen2;
  1300. /* MPEG-1 scale factors */
  1301. slen1 = slen_table[0][g->scalefac_compress];
  1302. slen2 = slen_table[1][g->scalefac_compress];
  1303. ff_dlog(s->avctx, "slen1=%d slen2=%d\n", slen1, slen2);
  1304. if (g->block_type == 2) {
  1305. n = g->switch_point ? 17 : 18;
  1306. j = 0;
  1307. if (slen1) {
  1308. for (i = 0; i < n; i++)
  1309. g->scale_factors[j++] = get_bits(&s->gb, slen1);
  1310. } else {
  1311. for (i = 0; i < n; i++)
  1312. g->scale_factors[j++] = 0;
  1313. }
  1314. if (slen2) {
  1315. for (i = 0; i < 18; i++)
  1316. g->scale_factors[j++] = get_bits(&s->gb, slen2);
  1317. for (i = 0; i < 3; i++)
  1318. g->scale_factors[j++] = 0;
  1319. } else {
  1320. for (i = 0; i < 21; i++)
  1321. g->scale_factors[j++] = 0;
  1322. }
  1323. } else {
  1324. sc = s->granules[ch][0].scale_factors;
  1325. j = 0;
  1326. for (k = 0; k < 4; k++) {
  1327. n = k == 0 ? 6 : 5;
  1328. if ((g->scfsi & (0x8 >> k)) == 0) {
  1329. slen = (k < 2) ? slen1 : slen2;
  1330. if (slen) {
  1331. for (i = 0; i < n; i++)
  1332. g->scale_factors[j++] = get_bits(&s->gb, slen);
  1333. } else {
  1334. for (i = 0; i < n; i++)
  1335. g->scale_factors[j++] = 0;
  1336. }
  1337. } else {
  1338. /* simply copy from last granule */
  1339. for (i = 0; i < n; i++) {
  1340. g->scale_factors[j] = sc[j];
  1341. j++;
  1342. }
  1343. }
  1344. }
  1345. g->scale_factors[j++] = 0;
  1346. }
  1347. } else {
  1348. int tindex, tindex2, slen[4], sl, sf;
  1349. /* LSF scale factors */
  1350. if (g->block_type == 2)
  1351. tindex = g->switch_point ? 2 : 1;
  1352. else
  1353. tindex = 0;
  1354. sf = g->scalefac_compress;
  1355. if ((s->mode_ext & MODE_EXT_I_STEREO) && ch == 1) {
  1356. /* intensity stereo case */
  1357. sf >>= 1;
  1358. if (sf < 180) {
  1359. lsf_sf_expand(slen, sf, 6, 6, 0);
  1360. tindex2 = 3;
  1361. } else if (sf < 244) {
  1362. lsf_sf_expand(slen, sf - 180, 4, 4, 0);
  1363. tindex2 = 4;
  1364. } else {
  1365. lsf_sf_expand(slen, sf - 244, 3, 0, 0);
  1366. tindex2 = 5;
  1367. }
  1368. } else {
  1369. /* normal case */
  1370. if (sf < 400) {
  1371. lsf_sf_expand(slen, sf, 5, 4, 4);
  1372. tindex2 = 0;
  1373. } else if (sf < 500) {
  1374. lsf_sf_expand(slen, sf - 400, 5, 4, 0);
  1375. tindex2 = 1;
  1376. } else {
  1377. lsf_sf_expand(slen, sf - 500, 3, 0, 0);
  1378. tindex2 = 2;
  1379. g->preflag = 1;
  1380. }
  1381. }
  1382. j = 0;
  1383. for (k = 0; k < 4; k++) {
  1384. n = lsf_nsf_table[tindex2][tindex][k];
  1385. sl = slen[k];
  1386. if (sl) {
  1387. for (i = 0; i < n; i++)
  1388. g->scale_factors[j++] = get_bits(&s->gb, sl);
  1389. } else {
  1390. for (i = 0; i < n; i++)
  1391. g->scale_factors[j++] = 0;
  1392. }
  1393. }
  1394. /* XXX: should compute exact size */
  1395. for (; j < 40; j++)
  1396. g->scale_factors[j] = 0;
  1397. }
  1398. exponents_from_scale_factors(s, g, exponents);
  1399. /* read Huffman coded residue */
  1400. huffman_decode(s, g, exponents, bits_pos + g->part2_3_length);
  1401. } /* ch */
  1402. if (s->mode == MPA_JSTEREO)
  1403. compute_stereo(s, &s->granules[0][gr], &s->granules[1][gr]);
  1404. for (ch = 0; ch < s->nb_channels; ch++) {
  1405. g = &s->granules[ch][gr];
  1406. reorder_block(s, g);
  1407. compute_antialias(s, g);
  1408. compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
  1409. }
  1410. } /* gr */
  1411. if (get_bits_count(&s->gb) < 0)
  1412. skip_bits_long(&s->gb, -get_bits_count(&s->gb));
  1413. return nb_granules * 18;
  1414. }
  1415. static int mp_decode_frame(MPADecodeContext *s, OUT_INT **samples,
  1416. const uint8_t *buf, int buf_size)
  1417. {
  1418. int i, nb_frames, ch, ret;
  1419. OUT_INT *samples_ptr;
  1420. init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE) * 8);
  1421. /* skip error protection field */
  1422. if (s->error_protection)
  1423. skip_bits(&s->gb, 16);
  1424. switch(s->layer) {
  1425. case 1:
  1426. s->avctx->frame_size = 384;
  1427. nb_frames = mp_decode_layer1(s);
  1428. break;
  1429. case 2:
  1430. s->avctx->frame_size = 1152;
  1431. nb_frames = mp_decode_layer2(s);
  1432. break;
  1433. case 3:
  1434. s->avctx->frame_size = s->lsf ? 576 : 1152;
  1435. default:
  1436. nb_frames = mp_decode_layer3(s);
  1437. s->last_buf_size=0;
  1438. if (s->in_gb.buffer) {
  1439. align_get_bits(&s->gb);
  1440. i = (get_bits_left(&s->gb) >> 3) - s->extrasize;
  1441. if (i >= 0 && i <= BACKSTEP_SIZE) {
  1442. memmove(s->last_buf, s->gb.buffer + (get_bits_count(&s->gb)>>3), i);
  1443. s->last_buf_size=i;
  1444. } else
  1445. av_log(s->avctx, AV_LOG_ERROR, "invalid old backstep %d\n", i);
  1446. s->gb = s->in_gb;
  1447. s->in_gb.buffer = NULL;
  1448. s->extrasize = 0;
  1449. }
  1450. align_get_bits(&s->gb);
  1451. av_assert1((get_bits_count(&s->gb) & 7) == 0);
  1452. i = (get_bits_left(&s->gb) >> 3) - s->extrasize;
  1453. if (i < 0 || i > BACKSTEP_SIZE || nb_frames < 0) {
  1454. if (i < 0)
  1455. av_log(s->avctx, AV_LOG_ERROR, "invalid new backstep %d\n", i);
  1456. i = FFMIN(BACKSTEP_SIZE, buf_size - HEADER_SIZE);
  1457. }
  1458. av_assert1(i <= buf_size - HEADER_SIZE && i >= 0);
  1459. memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i);
  1460. s->last_buf_size += i;
  1461. }
  1462. if(nb_frames < 0)
  1463. return nb_frames;
  1464. /* get output buffer */
  1465. if (!samples) {
  1466. av_assert0(s->frame);
  1467. s->frame->nb_samples = s->avctx->frame_size;
  1468. if ((ret = ff_get_buffer(s->avctx, s->frame, 0)) < 0)
  1469. return ret;
  1470. samples = (OUT_INT **)s->frame->extended_data;
  1471. }
  1472. /* apply the synthesis filter */
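/* Each call to the polyphase synthesis filter consumes one block of 32
 * subband samples and produces 32 PCM samples per channel, hence the
 * nb_frames * 32 samples per channel accounted for in the return value. */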
  1473. for (ch = 0; ch < s->nb_channels; ch++) {
  1474. int sample_stride;
  1475. if (s->avctx->sample_fmt == OUT_FMT_P) {
  1476. samples_ptr = samples[ch];
  1477. sample_stride = 1;
  1478. } else {
  1479. samples_ptr = samples[0] + ch;
  1480. sample_stride = s->nb_channels;
  1481. }
  1482. for (i = 0; i < nb_frames; i++) {
  1483. RENAME(ff_mpa_synth_filter)(&s->mpadsp, s->synth_buf[ch],
  1484. &(s->synth_buf_offset[ch]),
  1485. RENAME(ff_mpa_synth_window),
  1486. &s->dither_state, samples_ptr,
  1487. sample_stride, s->sb_samples[ch][i]);
  1488. samples_ptr += 32 * sample_stride;
  1489. }
  1490. }
  1491. return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels;
  1492. }
  1493. static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr,
  1494. AVPacket *avpkt)
  1495. {
  1496. const uint8_t *buf = avpkt->data;
  1497. int buf_size = avpkt->size;
  1498. MPADecodeContext *s = avctx->priv_data;
  1499. uint32_t header;
  1500. int ret;
  1501. int skipped = 0;
  1502. while(buf_size && !*buf){
  1503. buf++;
  1504. buf_size--;
  1505. skipped++;
  1506. }
  1507. if (buf_size < HEADER_SIZE)
  1508. return AVERROR_INVALIDDATA;
  1509. header = AV_RB32(buf);
  1510. if (header>>8 == AV_RB32("TAG")>>8) {
  1511. av_log(avctx, AV_LOG_DEBUG, "discarding ID3 tag\n");
  1512. return buf_size + skipped;
  1513. }
  1514. ret = avpriv_mpegaudio_decode_header((MPADecodeHeader *)s, header);
  1515. if (ret < 0) {
  1516. av_log(avctx, AV_LOG_ERROR, "Header missing\n");
  1517. return AVERROR_INVALIDDATA;
  1518. } else if (ret == 1) {
  1519. /* free format: prepare to compute frame size */
  1520. s->frame_size = -1;
  1521. return AVERROR_INVALIDDATA;
  1522. }
  1523. /* update codec info */
  1524. avctx->channels = s->nb_channels;
  1525. avctx->channel_layout = s->nb_channels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
  1526. if (!avctx->bit_rate)
  1527. avctx->bit_rate = s->bit_rate;
  1528. if (s->frame_size <= 0) {
  1529. av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
  1530. return AVERROR_INVALIDDATA;
  1531. } else if (s->frame_size < buf_size) {
  1532. av_log(avctx, AV_LOG_DEBUG, "incorrect frame size - multiple frames in buffer?\n");
  1533. buf_size= s->frame_size;
  1534. }
  1535. s->frame = data;
  1536. ret = mp_decode_frame(s, NULL, buf, buf_size);
  1537. if (ret >= 0) {
  1538. s->frame->nb_samples = avctx->frame_size;
  1539. *got_frame_ptr = 1;
  1540. avctx->sample_rate = s->sample_rate;
  1541. //FIXME maybe move the other codec info stuff from above here too
  1542. } else {
  1543. av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n");
  1544. /* Only return an error if the bad frame makes up the whole packet or
  1545. * the error is related to buffer management.
  1546. * If there is more data in the packet, just consume the bad frame
  1547. * instead of returning an error, which would discard the whole
  1548. * packet. */
  1549. *got_frame_ptr = 0;
  1550. if (buf_size == avpkt->size || ret != AVERROR_INVALIDDATA)
  1551. return ret;
  1552. }
  1553. s->frame_size = 0;
  1554. return buf_size + skipped;
  1555. }
  1556. static void mp_flush(MPADecodeContext *ctx)
  1557. {
  1558. memset(ctx->synth_buf, 0, sizeof(ctx->synth_buf));
  1559. memset(ctx->mdct_buf, 0, sizeof(ctx->mdct_buf));
  1560. ctx->last_buf_size = 0;
  1561. ctx->dither_state = 0;
  1562. }
  1563. static void flush(AVCodecContext *avctx)
  1564. {
  1565. mp_flush(avctx->priv_data);
  1566. }
  1567. #if CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER
  1568. static int decode_frame_adu(AVCodecContext *avctx, void *data,
  1569. int *got_frame_ptr, AVPacket *avpkt)
  1570. {
  1571. const uint8_t *buf = avpkt->data;
  1572. int buf_size = avpkt->size;
  1573. MPADecodeContext *s = avctx->priv_data;
  1574. uint32_t header;
  1575. int len, ret;
  1576. int av_unused out_size;
  1577. len = buf_size;
  1578. // Discard too short frames
  1579. if (buf_size < HEADER_SIZE) {
  1580. av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
  1581. return AVERROR_INVALIDDATA;
  1582. }
  1583. if (len > MPA_MAX_CODED_FRAME_SIZE)
  1584. len = MPA_MAX_CODED_FRAME_SIZE;
  1585. // Get header and restore sync word
  1586. header = AV_RB32(buf) | 0xffe00000;
  1587. ret = avpriv_mpegaudio_decode_header((MPADecodeHeader *)s, header);
  1588. if (ret < 0) {
  1589. av_log(avctx, AV_LOG_ERROR, "Invalid frame header\n");
  1590. return ret;
  1591. }
  1592. /* update codec info */
  1593. avctx->sample_rate = s->sample_rate;
  1594. avctx->channels = s->nb_channels;
  1595. avctx->channel_layout = s->nb_channels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
  1596. if (!avctx->bit_rate)
  1597. avctx->bit_rate = s->bit_rate;
  1598. s->frame_size = len;
  1599. s->frame = data;
  1600. ret = mp_decode_frame(s, NULL, buf, buf_size);
  1601. if (ret < 0) {
  1602. av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n");
  1603. return ret;
  1604. }
  1605. *got_frame_ptr = 1;
  1606. return buf_size;
  1607. }
  1608. #endif /* CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER */
  1609. #if CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER
  1610. /**
  1611. * Context for MP3On4 decoder
  1612. */
  1613. typedef struct MP3On4DecodeContext {
  1614. int frames; ///< number of mp3 frames per block (number of mp3 decoder instances)
  1615. int syncword; ///< syncword patch
  1616. const uint8_t *coff; ///< channel offsets in output buffer
  1617. MPADecodeContext *mp3decctx[5]; ///< MPADecodeContext for every decoder instance
  1618. } MP3On4DecodeContext;
  1619. #include "mpeg4audio.h"
  1620. /* Next 3 arrays are indexed by channel config number (passed via codecdata) */
  1621. /* number of mp3 decoder instances */
  1622. static const uint8_t mp3Frames[8] = { 0, 1, 1, 2, 3, 3, 4, 5 };
  1623. /* offsets into output buffer, assume output order is FL FR C LFE BL BR SL SR */
  1624. static const uint8_t chan_offset[8][5] = {
  1625. { 0 },
  1626. { 0 }, // C
  1627. { 0 }, // FLR
  1628. { 2, 0 }, // C FLR
  1629. { 2, 0, 3 }, // C FLR BS
  1630. { 2, 0, 3 }, // C FLR BLRS
  1631. { 2, 0, 4, 3 }, // C FLR BLRS LFE
  1632. { 2, 0, 6, 4, 3 }, // C FLR BLRS BLR LFE
  1633. };
  1634. /* mp3on4 channel layouts */
  1635. static const int16_t chan_layout[8] = {
  1636. 0,
  1637. AV_CH_LAYOUT_MONO,
  1638. AV_CH_LAYOUT_STEREO,
  1639. AV_CH_LAYOUT_SURROUND,
  1640. AV_CH_LAYOUT_4POINT0,
  1641. AV_CH_LAYOUT_5POINT0,
  1642. AV_CH_LAYOUT_5POINT1,
  1643. AV_CH_LAYOUT_7POINT1
  1644. };
  1645. static av_cold int decode_close_mp3on4(AVCodecContext * avctx)
  1646. {
  1647. MP3On4DecodeContext *s = avctx->priv_data;
  1648. int i;
  1649. if (s->mp3decctx[0])
  1650. av_freep(&s->mp3decctx[0]->fdsp);
  1651. for (i = 0; i < s->frames; i++)
  1652. av_freep(&s->mp3decctx[i]);
  1653. return 0;
  1654. }
  1655. static av_cold int decode_init_mp3on4(AVCodecContext * avctx)
  1656. {
  1657. MP3On4DecodeContext *s = avctx->priv_data;
  1658. MPEG4AudioConfig cfg;
  1659. int i;
  1660. if ((avctx->extradata_size < 2) || !avctx->extradata) {
  1661. av_log(avctx, AV_LOG_ERROR, "Codec extradata missing or too short.\n");
  1662. return AVERROR_INVALIDDATA;
  1663. }
  1664. avpriv_mpeg4audio_get_config(&cfg, avctx->extradata,
  1665. avctx->extradata_size * 8, 1);
  1666. if (!cfg.chan_config || cfg.chan_config > 7) {
  1667. av_log(avctx, AV_LOG_ERROR, "Invalid channel config number.\n");
  1668. return AVERROR_INVALIDDATA;
  1669. }
  1670. s->frames = mp3Frames[cfg.chan_config];
  1671. s->coff = chan_offset[cfg.chan_config];
  1672. avctx->channels = ff_mpeg4audio_channels[cfg.chan_config];
  1673. avctx->channel_layout = chan_layout[cfg.chan_config];
  1674. if (cfg.sample_rate < 16000)
  1675. s->syncword = 0xffe00000;
  1676. else
  1677. s->syncword = 0xfff00000;
  1678. /* Init the first mp3 decoder in the standard way, so that all tables get built.
  1679. * We replace avctx->priv_data with the context of the first decoder so that
  1680. * decode_init() does not have to be changed.
  1681. * The other decoders are initialized here by copying data from the first context.
  1682. */
  1683. // Allocate zeroed memory for the first decoder context
  1684. s->mp3decctx[0] = av_mallocz(sizeof(MPADecodeContext));
  1685. if (!s->mp3decctx[0])
  1686. goto alloc_fail;
  1687. // Put decoder context in place to make init_decode() happy
  1688. avctx->priv_data = s->mp3decctx[0];
  1689. decode_init(avctx);
  1690. // Restore mp3on4 context pointer
  1691. avctx->priv_data = s;
  1692. s->mp3decctx[0]->adu_mode = 1; // Set adu mode
  1693. /* Create a separate codec/context for each frame (first is already ok).
  1694. * Each frame is 1 or 2 channels - up to 5 frames allowed
  1695. */
  1696. for (i = 1; i < s->frames; i++) {
  1697. s->mp3decctx[i] = av_mallocz(sizeof(MPADecodeContext));
  1698. if (!s->mp3decctx[i])
  1699. goto alloc_fail;
  1700. s->mp3decctx[i]->adu_mode = 1;
  1701. s->mp3decctx[i]->avctx = avctx;
  1702. s->mp3decctx[i]->mpadsp = s->mp3decctx[0]->mpadsp;
  1703. s->mp3decctx[i]->fdsp = s->mp3decctx[0]->fdsp;
  1704. }
  1705. return 0;
  1706. alloc_fail:
  1707. decode_close_mp3on4(avctx);
  1708. return AVERROR(ENOMEM);
  1709. }
  1710. static void flush_mp3on4(AVCodecContext *avctx)
  1711. {
  1712. int i;
  1713. MP3On4DecodeContext *s = avctx->priv_data;
  1714. for (i = 0; i < s->frames; i++)
  1715. mp_flush(s->mp3decctx[i]);
  1716. }
  1717. static int decode_frame_mp3on4(AVCodecContext *avctx, void *data,
  1718. int *got_frame_ptr, AVPacket *avpkt)
  1719. {
  1720. AVFrame *frame = data;
  1721. const uint8_t *buf = avpkt->data;
  1722. int buf_size = avpkt->size;
  1723. MP3On4DecodeContext *s = avctx->priv_data;
  1724. MPADecodeContext *m;
  1725. int fsize, len = buf_size, out_size = 0;
  1726. uint32_t header;
  1727. OUT_INT **out_samples;
  1728. OUT_INT *outptr[2];
  1729. int fr, ch, ret;
  1730. /* get output buffer */
  1731. frame->nb_samples = MPA_FRAME_SIZE;
  1732. if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
  1733. return ret;
  1734. out_samples = (OUT_INT **)frame->extended_data;
  1735. // Discard too short frames
  1736. if (buf_size < HEADER_SIZE)
  1737. return AVERROR_INVALIDDATA;
  1738. avctx->bit_rate = 0;
  1739. ch = 0;
  1740. for (fr = 0; fr < s->frames; fr++) {
  1741. fsize = AV_RB16(buf) >> 4;
  1742. fsize = FFMIN3(fsize, len, MPA_MAX_CODED_FRAME_SIZE);
  1743. m = s->mp3decctx[fr];
  1744. av_assert1(m);
  1745. if (fsize < HEADER_SIZE) {
  1746. av_log(avctx, AV_LOG_ERROR, "Frame size smaller than header size\n");
  1747. return AVERROR_INVALIDDATA;
  1748. }
  1749. header = (AV_RB32(buf) & 0x000fffff) | s->syncword; // patch header
  1750. ret = avpriv_mpegaudio_decode_header((MPADecodeHeader *)m, header);
  1751. if (ret < 0) {
  1752. av_log(avctx, AV_LOG_ERROR, "Bad header, discard block\n");
  1753. return AVERROR_INVALIDDATA;
  1754. }
  1755. if (ch + m->nb_channels > avctx->channels ||
  1756. s->coff[fr] + m->nb_channels > avctx->channels) {
  1757. av_log(avctx, AV_LOG_ERROR, "frame channel count exceeds codec "
  1758. "channel count\n");
  1759. return AVERROR_INVALIDDATA;
  1760. }
  1761. ch += m->nb_channels;
  1762. outptr[0] = out_samples[s->coff[fr]];
  1763. if (m->nb_channels > 1)
  1764. outptr[1] = out_samples[s->coff[fr] + 1];
  1765. if ((ret = mp_decode_frame(m, outptr, buf, fsize)) < 0) {
  1766. av_log(avctx, AV_LOG_ERROR, "failed to decode channel %d\n", ch);
  1767. memset(outptr[0], 0, MPA_FRAME_SIZE*sizeof(OUT_INT));
  1768. if (m->nb_channels > 1)
  1769. memset(outptr[1], 0, MPA_FRAME_SIZE*sizeof(OUT_INT));
  1770. ret = m->nb_channels * MPA_FRAME_SIZE*sizeof(OUT_INT);
  1771. }
  1772. out_size += ret;
  1773. buf += fsize;
  1774. len -= fsize;
  1775. avctx->bit_rate += m->bit_rate;
  1776. }
  1777. if (ch != avctx->channels) {
  1778. av_log(avctx, AV_LOG_ERROR, "failed to decode all channels\n");
  1779. return AVERROR_INVALIDDATA;
  1780. }
  1781. /* update codec info */
  1782. avctx->sample_rate = s->mp3decctx[0]->sample_rate;
  1783. frame->nb_samples = out_size / (avctx->channels * sizeof(OUT_INT));
  1784. *got_frame_ptr = 1;
  1785. return buf_size;
  1786. }
  1787. #endif /* CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER */