  1. /*
  2. * MPEG Audio decoder
  3. * Copyright (c) 2001, 2002 Fabrice Bellard
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * MPEG Audio decoder
  24. */
  25. #include "libavutil/attributes.h"
  26. #include "libavutil/avassert.h"
  27. #include "libavutil/channel_layout.h"
  28. #include "libavutil/float_dsp.h"
  29. #include "libavutil/libm.h"
  30. #include "avcodec.h"
  31. #include "get_bits.h"
  32. #include "internal.h"
  33. #include "mathops.h"
  34. #include "mpegaudiodsp.h"
  35. /*
  36. * TODO:
  37. * - test lsf / mpeg25 extensively.
  38. */
  39. #include "mpegaudio.h"
  40. #include "mpegaudiodecheader.h"
  41. #define BACKSTEP_SIZE 512
  42. #define EXTRABYTES 24
  43. #define LAST_BUF_SIZE (2 * BACKSTEP_SIZE + EXTRABYTES)
  44. /* layer 3 "granule" */
  45. typedef struct GranuleDef {
  46. uint8_t scfsi;
  47. int part2_3_length;
  48. int big_values;
  49. int global_gain;
  50. int scalefac_compress;
  51. uint8_t block_type;
  52. uint8_t switch_point;
  53. int table_select[3];
  54. int subblock_gain[3];
  55. uint8_t scalefac_scale;
  56. uint8_t count1table_select;
  57. int region_size[3]; /* number of huffman codes in each region */
  58. int preflag;
  59. int short_start, long_end; /* long/short band indexes */
  60. uint8_t scale_factors[40];
  61. DECLARE_ALIGNED(16, INTFLOAT, sb_hybrid)[SBLIMIT * 18]; /* 576 samples */
  62. } GranuleDef;
  63. typedef struct MPADecodeContext {
  64. MPA_DECODE_HEADER
  65. uint8_t last_buf[LAST_BUF_SIZE];
  66. int last_buf_size;
  67. /* next header (used in free format parsing) */
  68. uint32_t free_format_next_header;
  69. GetBitContext gb;
  70. GetBitContext in_gb;
  71. DECLARE_ALIGNED(32, MPA_INT, synth_buf)[MPA_MAX_CHANNELS][512 * 2];
  72. int synth_buf_offset[MPA_MAX_CHANNELS];
  73. DECLARE_ALIGNED(32, INTFLOAT, sb_samples)[MPA_MAX_CHANNELS][36][SBLIMIT];
  74. INTFLOAT mdct_buf[MPA_MAX_CHANNELS][SBLIMIT * 18]; /* previous samples, for layer 3 MDCT */
  75. GranuleDef granules[2][2]; /* Used in Layer 3 */
  76. int adu_mode; ///< 0 for standard mp3, 1 for adu formatted mp3
  77. int dither_state;
  78. int err_recognition;
  79. AVCodecContext* avctx;
  80. MPADSPContext mpadsp;
  81. AVFloatDSPContext fdsp;
  82. AVFrame *frame;
  83. } MPADecodeContext;
  84. #define HEADER_SIZE 4
  85. #include "mpegaudiodata.h"
  86. #include "mpegaudiodectab.h"
  87. /* vlc structure for decoding layer 3 huffman tables */
  88. static VLC huff_vlc[16];
  89. static VLC_TYPE huff_vlc_tables[
  90. 0 + 128 + 128 + 128 + 130 + 128 + 154 + 166 +
  91. 142 + 204 + 190 + 170 + 542 + 460 + 662 + 414
  92. ][2];
  93. static const int huff_vlc_tables_sizes[16] = {
  94. 0, 128, 128, 128, 130, 128, 154, 166,
  95. 142, 204, 190, 170, 542, 460, 662, 414
  96. };
  97. static VLC huff_quad_vlc[2];
  98. static VLC_TYPE huff_quad_vlc_tables[128+16][2];
  99. static const int huff_quad_vlc_tables_sizes[2] = { 128, 16 };
  100. /* computed from band_size_long */
  101. static uint16_t band_index_long[9][23];
  102. #include "mpegaudio_tablegen.h"
  103. /* intensity stereo coef table */
  104. static INTFLOAT is_table[2][16];
  105. static INTFLOAT is_table_lsf[2][2][16];
  106. static INTFLOAT csa_table[8][4];
  107. static int16_t division_tab3[1<<6 ];
  108. static int16_t division_tab5[1<<8 ];
  109. static int16_t division_tab9[1<<11];
  110. static int16_t * const division_tabs[4] = {
  111. division_tab3, division_tab5, NULL, division_tab9
  112. };
  113. /* lower 2 bits: modulo 3, higher bits: shift */
  114. static uint16_t scale_factor_modshift[64];
  115. /* [i][j]: 2^(-j/3) * FRAC_ONE * 2^(i+2) / (2^(i+2) - 1) */
  116. static int32_t scale_factor_mult[15][3];
  117. /* mult table for layer 2 group quantization */
  118. #define SCALE_GEN(v) \
  119. { FIXR_OLD(1.0 * (v)), FIXR_OLD(0.7937005259 * (v)), FIXR_OLD(0.6299605249 * (v)) }
  120. static const int32_t scale_factor_mult2[3][3] = {
  121. SCALE_GEN(4.0 / 3.0), /* 3 steps */
  122. SCALE_GEN(4.0 / 5.0), /* 5 steps */
  123. SCALE_GEN(4.0 / 9.0), /* 9 steps */
  124. };
  125. /**
  126. * Convert region offsets to region sizes and truncate
  127. * size to big_values.
  128. */
  129. static void region_offset2size(GranuleDef *g)
  130. {
  131. int i, k, j = 0;
  132. g->region_size[2] = 576 / 2;
  133. for (i = 0; i < 3; i++) {
  134. k = FFMIN(g->region_size[i], g->big_values);
  135. g->region_size[i] = k - j;
  136. j = k;
  137. }
  138. }
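/* Huffman region sizes used when window switching is signalled: region 0
   depends on block type and sample rate, region 1 covers the rest of the
   spectrum. */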
  139. static void init_short_region(MPADecodeContext *s, GranuleDef *g)
  140. {
  141. if (g->block_type == 2) {
  142. if (s->sample_rate_index != 8)
  143. g->region_size[0] = (36 / 2);
  144. else
  145. g->region_size[0] = (72 / 2);
  146. } else {
  147. if (s->sample_rate_index <= 2)
  148. g->region_size[0] = (36 / 2);
  149. else if (s->sample_rate_index != 8)
  150. g->region_size[0] = (54 / 2);
  151. else
  152. g->region_size[0] = (108 / 2);
  153. }
  154. g->region_size[1] = (576 / 2);
  155. }
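/* Huffman region sizes for long blocks, computed from the two
   region_address fields and the long-block band index table. */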
  156. static void init_long_region(MPADecodeContext *s, GranuleDef *g,
  157. int ra1, int ra2)
  158. {
  159. int l;
  160. g->region_size[0] = band_index_long[s->sample_rate_index][ra1 + 1] >> 1;
  161. /* should not overflow */
  162. l = FFMIN(ra1 + ra2 + 2, 22);
  163. g->region_size[1] = band_index_long[s->sample_rate_index][ l] >> 1;
  164. }
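/* Set the long_end / short_start band indexes for this granule, depending
   on block type and switch point. */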
  165. static void compute_band_indexes(MPADecodeContext *s, GranuleDef *g)
  166. {
  167. if (g->block_type == 2) {
  168. if (g->switch_point) {
  169. if(s->sample_rate_index == 8)
  170. avpriv_request_sample(s->avctx, "switch point in 8khz");
  171. /* in switched mode, we handle the first 36 samples as
  172. long blocks. For 8000 Hz, we handle the first 72
  173. exponents as long blocks */
  174. if (s->sample_rate_index <= 2)
  175. g->long_end = 8;
  176. else
  177. g->long_end = 6;
  178. g->short_start = 3;
  179. } else {
  180. g->long_end = 0;
  181. g->short_start = 0;
  182. }
  183. } else {
  184. g->short_start = 13;
  185. g->long_end = 22;
  186. }
  187. }
  188. /* layer 1 unscaling */
  189. /* n = number of bits of the mantissa minus 1 */
  190. static inline int l1_unscale(int n, int mant, int scale_factor)
  191. {
  192. int shift, mod;
  193. int64_t val;
  194. shift = scale_factor_modshift[scale_factor];
  195. mod = shift & 3;
  196. shift >>= 2;
  197. val = MUL64(mant + (-1 << n) + 1, scale_factor_mult[n-1][mod]);
  198. shift += n;
  199. /* NOTE: at this point, 1 <= shift <= 21 + 15 */
  200. return (int)((val + (1LL << (shift - 1))) >> shift);
  201. }
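/* layer 2 unscaling for grouped quantization (3, 5 or 9 steps) */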
  202. static inline int l2_unscale_group(int steps, int mant, int scale_factor)
  203. {
  204. int shift, mod, val;
  205. shift = scale_factor_modshift[scale_factor];
  206. mod = shift & 3;
  207. shift >>= 2;
  208. val = (mant - (steps >> 1)) * scale_factor_mult2[steps >> 2][mod];
  209. /* NOTE: at this point, 0 <= shift <= 21 */
  210. if (shift > 0)
  211. val = (val + (1 << (shift - 1))) >> shift;
  212. return val;
  213. }
  214. /* compute value^(4/3) * 2^(exponent/4). It is normalized to FRAC_BITS. */
  215. static inline int l3_unscale(int value, int exponent)
  216. {
  217. unsigned int m;
  218. int e;
  219. e = table_4_3_exp [4 * value + (exponent & 3)];
  220. m = table_4_3_value[4 * value + (exponent & 3)];
  221. e -= exponent >> 2;
  222. #ifdef DEBUG
  223. if(e < 1)
  224. av_log(NULL, AV_LOG_WARNING, "l3_unscale: e is %d\n", e);
  225. #endif
  226. if (e > 31)
  227. return 0;
  228. m = (m + (1 << (e - 1))) >> e;
  229. return m;
  230. }
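/* One-time initialization of the static lookup tables (scale factors,
   Huffman VLCs, band indexes, stereo tables) shared by all decoder
   instances. */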
  231. static av_cold void decode_init_static(void)
  232. {
  233. int i, j, k;
  234. int offset;
  235. /* scale factors table for layer 1/2 */
  236. for (i = 0; i < 64; i++) {
  237. int shift, mod;
  238. /* 1.0 (i = 3) is normalized to 2 ^ FRAC_BITS */
  239. shift = i / 3;
  240. mod = i % 3;
  241. scale_factor_modshift[i] = mod | (shift << 2);
  242. }
  243. /* scale factor multiply for layer 1 */
  244. for (i = 0; i < 15; i++) {
  245. int n, norm;
  246. n = i + 2;
  247. norm = ((INT64_C(1) << n) * FRAC_ONE) / ((1 << n) - 1);
  248. scale_factor_mult[i][0] = MULLx(norm, FIXR(1.0 * 2.0), FRAC_BITS);
  249. scale_factor_mult[i][1] = MULLx(norm, FIXR(0.7937005259 * 2.0), FRAC_BITS);
  250. scale_factor_mult[i][2] = MULLx(norm, FIXR(0.6299605249 * 2.0), FRAC_BITS);
  251. av_dlog(NULL, "%d: norm=%x s=%x %x %x\n", i, norm,
  252. scale_factor_mult[i][0],
  253. scale_factor_mult[i][1],
  254. scale_factor_mult[i][2]);
  255. }
  256. RENAME(ff_mpa_synth_init)(RENAME(ff_mpa_synth_window));
  257. /* huffman decode tables */
  258. offset = 0;
  259. for (i = 1; i < 16; i++) {
  260. const HuffTable *h = &mpa_huff_tables[i];
  261. int xsize, x, y;
  262. uint8_t tmp_bits [512] = { 0 };
  263. uint16_t tmp_codes[512] = { 0 };
  264. xsize = h->xsize;
  265. j = 0;
  266. for (x = 0; x < xsize; x++) {
  267. for (y = 0; y < xsize; y++) {
  268. tmp_bits [(x << 5) | y | ((x&&y)<<4)]= h->bits [j ];
  269. tmp_codes[(x << 5) | y | ((x&&y)<<4)]= h->codes[j++];
  270. }
  271. }
  272. /* XXX: fail test */
  273. huff_vlc[i].table = huff_vlc_tables+offset;
  274. huff_vlc[i].table_allocated = huff_vlc_tables_sizes[i];
  275. init_vlc(&huff_vlc[i], 7, 512,
  276. tmp_bits, 1, 1, tmp_codes, 2, 2,
  277. INIT_VLC_USE_NEW_STATIC);
  278. offset += huff_vlc_tables_sizes[i];
  279. }
  280. av_assert0(offset == FF_ARRAY_ELEMS(huff_vlc_tables));
  281. offset = 0;
  282. for (i = 0; i < 2; i++) {
  283. huff_quad_vlc[i].table = huff_quad_vlc_tables+offset;
  284. huff_quad_vlc[i].table_allocated = huff_quad_vlc_tables_sizes[i];
  285. init_vlc(&huff_quad_vlc[i], i == 0 ? 7 : 4, 16,
  286. mpa_quad_bits[i], 1, 1, mpa_quad_codes[i], 1, 1,
  287. INIT_VLC_USE_NEW_STATIC);
  288. offset += huff_quad_vlc_tables_sizes[i];
  289. }
  290. av_assert0(offset == FF_ARRAY_ELEMS(huff_quad_vlc_tables));
  291. for (i = 0; i < 9; i++) {
  292. k = 0;
  293. for (j = 0; j < 22; j++) {
  294. band_index_long[i][j] = k;
  295. k += band_size_long[i][j];
  296. }
  297. band_index_long[i][22] = k;
  298. }
  299. /* compute n ^ (4/3) and store it in mantissa/exp format */
  300. mpegaudio_tableinit();
  301. for (i = 0; i < 4; i++) {
  302. if (ff_mpa_quant_bits[i] < 0) {
  303. for (j = 0; j < (1 << (-ff_mpa_quant_bits[i]+1)); j++) {
  304. int val1, val2, val3, steps;
  305. int val = j;
  306. steps = ff_mpa_quant_steps[i];
  307. val1 = val % steps;
  308. val /= steps;
  309. val2 = val % steps;
  310. val3 = val / steps;
  311. division_tabs[i][j] = val1 + (val2 << 4) + (val3 << 8);
  312. }
  313. }
  314. }
  315. for (i = 0; i < 7; i++) {
  316. float f;
  317. INTFLOAT v;
  318. if (i != 6) {
  319. f = tan((double)i * M_PI / 12.0);
  320. v = FIXR(f / (1.0 + f));
  321. } else {
  322. v = FIXR(1.0);
  323. }
  324. is_table[0][ i] = v;
  325. is_table[1][6 - i] = v;
  326. }
  327. /* invalid values */
  328. for (i = 7; i < 16; i++)
  329. is_table[0][i] = is_table[1][i] = 0.0;
  330. for (i = 0; i < 16; i++) {
  331. double f;
  332. int e, k;
  333. for (j = 0; j < 2; j++) {
  334. e = -(j + 1) * ((i + 1) >> 1);
  335. f = exp2(e / 4.0);
  336. k = i & 1;
  337. is_table_lsf[j][k ^ 1][i] = FIXR(f);
  338. is_table_lsf[j][k ][i] = FIXR(1.0);
  339. av_dlog(NULL, "is_table_lsf %d %d: %f %f\n",
  340. i, j, (float) is_table_lsf[j][0][i],
  341. (float) is_table_lsf[j][1][i]);
  342. }
  343. }
  344. for (i = 0; i < 8; i++) {
  345. float ci, cs, ca;
  346. ci = ci_table[i];
  347. cs = 1.0 / sqrt(1.0 + ci * ci);
  348. ca = cs * ci;
  349. #if !USE_FLOATS
  350. csa_table[i][0] = FIXHR(cs/4);
  351. csa_table[i][1] = FIXHR(ca/4);
  352. csa_table[i][2] = FIXHR(ca/4) + FIXHR(cs/4);
  353. csa_table[i][3] = FIXHR(ca/4) - FIXHR(cs/4);
  354. #else
  355. csa_table[i][0] = cs;
  356. csa_table[i][1] = ca;
  357. csa_table[i][2] = ca + cs;
  358. csa_table[i][3] = ca - cs;
  359. #endif
  360. }
  361. }
  362. static av_cold int decode_init(AVCodecContext * avctx)
  363. {
  364. static int initialized_tables = 0;
  365. MPADecodeContext *s = avctx->priv_data;
  366. if (!initialized_tables) {
  367. decode_init_static();
  368. initialized_tables = 1;
  369. }
  370. s->avctx = avctx;
  371. avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
  372. ff_mpadsp_init(&s->mpadsp);
  373. if (avctx->request_sample_fmt == OUT_FMT &&
  374. avctx->codec_id != AV_CODEC_ID_MP3ON4)
  375. avctx->sample_fmt = OUT_FMT;
  376. else
  377. avctx->sample_fmt = OUT_FMT_P;
  378. s->err_recognition = avctx->err_recognition;
  379. if (avctx->codec_id == AV_CODEC_ID_MP3ADU)
  380. s->adu_mode = 1;
  381. return 0;
  382. }
  383. #define C3 FIXHR(0.86602540378443864676/2)
  384. #define C4 FIXHR(0.70710678118654752439/2) //0.5 / cos(pi*(9)/36)
  385. #define C5 FIXHR(0.51763809020504152469/2) //0.5 / cos(pi*(5)/36)
  386. #define C6 FIXHR(1.93185165257813657349/4) //0.5 / cos(pi*(15)/36)
  387. /* 12-point IMDCT. We compute it "by hand" by factorizing the obvious
  388. cases. */
  389. static void imdct12(INTFLOAT *out, INTFLOAT *in)
  390. {
  391. INTFLOAT in0, in1, in2, in3, in4, in5, t1, t2;
  392. in0 = in[0*3];
  393. in1 = in[1*3] + in[0*3];
  394. in2 = in[2*3] + in[1*3];
  395. in3 = in[3*3] + in[2*3];
  396. in4 = in[4*3] + in[3*3];
  397. in5 = in[5*3] + in[4*3];
  398. in5 += in3;
  399. in3 += in1;
  400. in2 = MULH3(in2, C3, 2);
  401. in3 = MULH3(in3, C3, 4);
  402. t1 = in0 - in4;
  403. t2 = MULH3(in1 - in5, C4, 2);
  404. out[ 7] =
  405. out[10] = t1 + t2;
  406. out[ 1] =
  407. out[ 4] = t1 - t2;
  408. in0 += SHR(in4, 1);
  409. in4 = in0 + in2;
  410. in5 += 2*in1;
  411. in1 = MULH3(in5 + in3, C5, 1);
  412. out[ 8] =
  413. out[ 9] = in4 + in1;
  414. out[ 2] =
  415. out[ 3] = in4 - in1;
  416. in0 -= in2;
  417. in5 = MULH3(in5 - in3, C6, 2);
  418. out[ 0] =
  419. out[ 5] = in0 - in5;
  420. out[ 6] =
  421. out[11] = in0 + in5;
  422. }
  423. /* return the number of decoded frames */
  424. static int mp_decode_layer1(MPADecodeContext *s)
  425. {
  426. int bound, i, v, n, ch, j, mant;
  427. uint8_t allocation[MPA_MAX_CHANNELS][SBLIMIT];
  428. uint8_t scale_factors[MPA_MAX_CHANNELS][SBLIMIT];
  429. if (s->mode == MPA_JSTEREO)
  430. bound = (s->mode_ext + 1) * 4;
  431. else
  432. bound = SBLIMIT;
  433. /* allocation bits */
  434. for (i = 0; i < bound; i++) {
  435. for (ch = 0; ch < s->nb_channels; ch++) {
  436. allocation[ch][i] = get_bits(&s->gb, 4);
  437. }
  438. }
  439. for (i = bound; i < SBLIMIT; i++)
  440. allocation[0][i] = get_bits(&s->gb, 4);
  441. /* scale factors */
  442. for (i = 0; i < bound; i++) {
  443. for (ch = 0; ch < s->nb_channels; ch++) {
  444. if (allocation[ch][i])
  445. scale_factors[ch][i] = get_bits(&s->gb, 6);
  446. }
  447. }
  448. for (i = bound; i < SBLIMIT; i++) {
  449. if (allocation[0][i]) {
  450. scale_factors[0][i] = get_bits(&s->gb, 6);
  451. scale_factors[1][i] = get_bits(&s->gb, 6);
  452. }
  453. }
  454. /* compute samples */
  455. for (j = 0; j < 12; j++) {
  456. for (i = 0; i < bound; i++) {
  457. for (ch = 0; ch < s->nb_channels; ch++) {
  458. n = allocation[ch][i];
  459. if (n) {
  460. mant = get_bits(&s->gb, n + 1);
  461. v = l1_unscale(n, mant, scale_factors[ch][i]);
  462. } else {
  463. v = 0;
  464. }
  465. s->sb_samples[ch][j][i] = v;
  466. }
  467. }
  468. for (i = bound; i < SBLIMIT; i++) {
  469. n = allocation[0][i];
  470. if (n) {
  471. mant = get_bits(&s->gb, n + 1);
  472. v = l1_unscale(n, mant, scale_factors[0][i]);
  473. s->sb_samples[0][j][i] = v;
  474. v = l1_unscale(n, mant, scale_factors[1][i]);
  475. s->sb_samples[1][j][i] = v;
  476. } else {
  477. s->sb_samples[0][j][i] = 0;
  478. s->sb_samples[1][j][i] = 0;
  479. }
  480. }
  481. }
  482. return 12;
  483. }
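/* layer 2 decoding: parse bit allocation, scale factor selection and scale
   factors, then unscale the grouped/ungrouped subband samples */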
  484. static int mp_decode_layer2(MPADecodeContext *s)
  485. {
  486. int sblimit; /* number of used subbands */
  487. const unsigned char *alloc_table;
  488. int table, bit_alloc_bits, i, j, ch, bound, v;
  489. unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT];
  490. unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT];
  491. unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3], *sf;
  492. int scale, qindex, bits, steps, k, l, m, b;
  493. /* select decoding table */
  494. table = ff_mpa_l2_select_table(s->bit_rate / 1000, s->nb_channels,
  495. s->sample_rate, s->lsf);
  496. sblimit = ff_mpa_sblimit_table[table];
  497. alloc_table = ff_mpa_alloc_tables[table];
  498. if (s->mode == MPA_JSTEREO)
  499. bound = (s->mode_ext + 1) * 4;
  500. else
  501. bound = sblimit;
  502. av_dlog(s->avctx, "bound=%d sblimit=%d\n", bound, sblimit);
  503. /* sanity check */
  504. if (bound > sblimit)
  505. bound = sblimit;
  506. /* parse bit allocation */
  507. j = 0;
  508. for (i = 0; i < bound; i++) {
  509. bit_alloc_bits = alloc_table[j];
  510. for (ch = 0; ch < s->nb_channels; ch++)
  511. bit_alloc[ch][i] = get_bits(&s->gb, bit_alloc_bits);
  512. j += 1 << bit_alloc_bits;
  513. }
  514. for (i = bound; i < sblimit; i++) {
  515. bit_alloc_bits = alloc_table[j];
  516. v = get_bits(&s->gb, bit_alloc_bits);
  517. bit_alloc[0][i] = v;
  518. bit_alloc[1][i] = v;
  519. j += 1 << bit_alloc_bits;
  520. }
  521. /* scale codes */
  522. for (i = 0; i < sblimit; i++) {
  523. for (ch = 0; ch < s->nb_channels; ch++) {
  524. if (bit_alloc[ch][i])
  525. scale_code[ch][i] = get_bits(&s->gb, 2);
  526. }
  527. }
  528. /* scale factors */
  529. for (i = 0; i < sblimit; i++) {
  530. for (ch = 0; ch < s->nb_channels; ch++) {
  531. if (bit_alloc[ch][i]) {
  532. sf = scale_factors[ch][i];
  533. switch (scale_code[ch][i]) {
  534. default:
  535. case 0:
  536. sf[0] = get_bits(&s->gb, 6);
  537. sf[1] = get_bits(&s->gb, 6);
  538. sf[2] = get_bits(&s->gb, 6);
  539. break;
  540. case 2:
  541. sf[0] = get_bits(&s->gb, 6);
  542. sf[1] = sf[0];
  543. sf[2] = sf[0];
  544. break;
  545. case 1:
  546. sf[0] = get_bits(&s->gb, 6);
  547. sf[2] = get_bits(&s->gb, 6);
  548. sf[1] = sf[0];
  549. break;
  550. case 3:
  551. sf[0] = get_bits(&s->gb, 6);
  552. sf[2] = get_bits(&s->gb, 6);
  553. sf[1] = sf[2];
  554. break;
  555. }
  556. }
  557. }
  558. }
  559. /* samples */
  560. for (k = 0; k < 3; k++) {
  561. for (l = 0; l < 12; l += 3) {
  562. j = 0;
  563. for (i = 0; i < bound; i++) {
  564. bit_alloc_bits = alloc_table[j];
  565. for (ch = 0; ch < s->nb_channels; ch++) {
  566. b = bit_alloc[ch][i];
  567. if (b) {
  568. scale = scale_factors[ch][i][k];
  569. qindex = alloc_table[j+b];
  570. bits = ff_mpa_quant_bits[qindex];
  571. if (bits < 0) {
  572. int v2;
  573. /* 3 values at the same time */
  574. v = get_bits(&s->gb, -bits);
  575. v2 = division_tabs[qindex][v];
  576. steps = ff_mpa_quant_steps[qindex];
  577. s->sb_samples[ch][k * 12 + l + 0][i] =
  578. l2_unscale_group(steps, v2 & 15, scale);
  579. s->sb_samples[ch][k * 12 + l + 1][i] =
  580. l2_unscale_group(steps, (v2 >> 4) & 15, scale);
  581. s->sb_samples[ch][k * 12 + l + 2][i] =
  582. l2_unscale_group(steps, v2 >> 8 , scale);
  583. } else {
  584. for (m = 0; m < 3; m++) {
  585. v = get_bits(&s->gb, bits);
  586. v = l1_unscale(bits - 1, v, scale);
  587. s->sb_samples[ch][k * 12 + l + m][i] = v;
  588. }
  589. }
  590. } else {
  591. s->sb_samples[ch][k * 12 + l + 0][i] = 0;
  592. s->sb_samples[ch][k * 12 + l + 1][i] = 0;
  593. s->sb_samples[ch][k * 12 + l + 2][i] = 0;
  594. }
  595. }
  596. /* next subband in alloc table */
  597. j += 1 << bit_alloc_bits;
  598. }
  599. /* XXX: find a way to avoid this duplication of code */
  600. for (i = bound; i < sblimit; i++) {
  601. bit_alloc_bits = alloc_table[j];
  602. b = bit_alloc[0][i];
  603. if (b) {
  604. int mant, scale0, scale1;
  605. scale0 = scale_factors[0][i][k];
  606. scale1 = scale_factors[1][i][k];
  607. qindex = alloc_table[j+b];
  608. bits = ff_mpa_quant_bits[qindex];
  609. if (bits < 0) {
  610. /* 3 values at the same time */
  611. v = get_bits(&s->gb, -bits);
  612. steps = ff_mpa_quant_steps[qindex];
  613. mant = v % steps;
  614. v = v / steps;
  615. s->sb_samples[0][k * 12 + l + 0][i] =
  616. l2_unscale_group(steps, mant, scale0);
  617. s->sb_samples[1][k * 12 + l + 0][i] =
  618. l2_unscale_group(steps, mant, scale1);
  619. mant = v % steps;
  620. v = v / steps;
  621. s->sb_samples[0][k * 12 + l + 1][i] =
  622. l2_unscale_group(steps, mant, scale0);
  623. s->sb_samples[1][k * 12 + l + 1][i] =
  624. l2_unscale_group(steps, mant, scale1);
  625. s->sb_samples[0][k * 12 + l + 2][i] =
  626. l2_unscale_group(steps, v, scale0);
  627. s->sb_samples[1][k * 12 + l + 2][i] =
  628. l2_unscale_group(steps, v, scale1);
  629. } else {
  630. for (m = 0; m < 3; m++) {
  631. mant = get_bits(&s->gb, bits);
  632. s->sb_samples[0][k * 12 + l + m][i] =
  633. l1_unscale(bits - 1, mant, scale0);
  634. s->sb_samples[1][k * 12 + l + m][i] =
  635. l1_unscale(bits - 1, mant, scale1);
  636. }
  637. }
  638. } else {
  639. s->sb_samples[0][k * 12 + l + 0][i] = 0;
  640. s->sb_samples[0][k * 12 + l + 1][i] = 0;
  641. s->sb_samples[0][k * 12 + l + 2][i] = 0;
  642. s->sb_samples[1][k * 12 + l + 0][i] = 0;
  643. s->sb_samples[1][k * 12 + l + 1][i] = 0;
  644. s->sb_samples[1][k * 12 + l + 2][i] = 0;
  645. }
  646. /* next subband in alloc table */
  647. j += 1 << bit_alloc_bits;
  648. }
  649. /* fill remaining samples to zero */
  650. for (i = sblimit; i < SBLIMIT; i++) {
  651. for (ch = 0; ch < s->nb_channels; ch++) {
  652. s->sb_samples[ch][k * 12 + l + 0][i] = 0;
  653. s->sb_samples[ch][k * 12 + l + 1][i] = 0;
  654. s->sb_samples[ch][k * 12 + l + 2][i] = 0;
  655. }
  656. }
  657. }
  658. }
  659. return 3 * 12;
  660. }
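/* Extract one slen field from the combined LSF scalefac_compress value:
   dst = sf % n and sf /= n, with the division done by multiply/shift. */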
  661. #define SPLIT(dst,sf,n) \
  662. if (n == 3) { \
  663. int m = (sf * 171) >> 9; \
  664. dst = sf - 3 * m; \
  665. sf = m; \
  666. } else if (n == 4) { \
  667. dst = sf & 3; \
  668. sf >>= 2; \
  669. } else if (n == 5) { \
  670. int m = (sf * 205) >> 10; \
  671. dst = sf - 5 * m; \
  672. sf = m; \
  673. } else if (n == 6) { \
  674. int m = (sf * 171) >> 10; \
  675. dst = sf - 6 * m; \
  676. sf = m; \
  677. } else { \
  678. dst = 0; \
  679. }
  680. static av_always_inline void lsf_sf_expand(int *slen, int sf, int n1, int n2,
  681. int n3)
  682. {
  683. SPLIT(slen[3], sf, n3)
  684. SPLIT(slen[2], sf, n2)
  685. SPLIT(slen[1], sf, n1)
  686. slen[0] = sf;
  687. }
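/* Convert the scale factors (plus preflag, subblock gains and global gain)
   into one exponent per spectral coefficient. */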
  688. static void exponents_from_scale_factors(MPADecodeContext *s, GranuleDef *g,
  689. int16_t *exponents)
  690. {
  691. const uint8_t *bstab, *pretab;
  692. int len, i, j, k, l, v0, shift, gain, gains[3];
  693. int16_t *exp_ptr;
  694. exp_ptr = exponents;
  695. gain = g->global_gain - 210;
  696. shift = g->scalefac_scale + 1;
  697. bstab = band_size_long[s->sample_rate_index];
  698. pretab = mpa_pretab[g->preflag];
  699. for (i = 0; i < g->long_end; i++) {
  700. v0 = gain - ((g->scale_factors[i] + pretab[i]) << shift) + 400;
  701. len = bstab[i];
  702. for (j = len; j > 0; j--)
  703. *exp_ptr++ = v0;
  704. }
  705. if (g->short_start < 13) {
  706. bstab = band_size_short[s->sample_rate_index];
  707. gains[0] = gain - (g->subblock_gain[0] << 3);
  708. gains[1] = gain - (g->subblock_gain[1] << 3);
  709. gains[2] = gain - (g->subblock_gain[2] << 3);
  710. k = g->long_end;
  711. for (i = g->short_start; i < 13; i++) {
  712. len = bstab[i];
  713. for (l = 0; l < 3; l++) {
  714. v0 = gains[l] - (g->scale_factors[k++] << shift) + 400;
  715. for (j = len; j > 0; j--)
  716. *exp_ptr++ = v0;
  717. }
  718. }
  719. }
  720. }
  721. /* handle n = 0 too */
  722. static inline int get_bitsz(GetBitContext *s, int n)
  723. {
  724. return n ? get_bits(s, n) : 0;
  725. }
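/* Switch from the bit reservoir buffer back to the current frame's
   bitstream (saved in in_gb) once the reservoir data is exhausted,
   adjusting the end positions accordingly. */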
  726. static void switch_buffer(MPADecodeContext *s, int *pos, int *end_pos,
  727. int *end_pos2)
  728. {
  729. if (s->in_gb.buffer && *pos >= s->gb.size_in_bits) {
  730. s->gb = s->in_gb;
  731. s->in_gb.buffer = NULL;
  732. av_assert2((get_bits_count(&s->gb) & 7) == 0);
  733. skip_bits_long(&s->gb, *pos - *end_pos);
  734. *end_pos2 =
  735. *end_pos = *end_pos2 + get_bits_count(&s->gb) - *pos;
  736. *pos = get_bits_count(&s->gb);
  737. }
  738. }
  739. /* The following is optimized code for:
  740. INTFLOAT v = *src
  741. if(get_bits1(&s->gb))
  742. v = -v;
  743. *dst = v;
  744. */
  745. #if USE_FLOATS
  746. #define READ_FLIP_SIGN(dst,src) \
  747. v = AV_RN32A(src) ^ (get_bits1(&s->gb) << 31); \
  748. AV_WN32A(dst, v);
  749. #else
  750. #define READ_FLIP_SIGN(dst,src) \
  751. v = -get_bits1(&s->gb); \
  752. *(dst) = (*(src) ^ v) - v;
  753. #endif
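/* Decode the Huffman coded spectral data of one granule: the big_values
   regions, then the count1 (quadruples) region, writing dequantized values
   into g->sb_hybrid. */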
  754. static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
  755. int16_t *exponents, int end_pos2)
  756. {
  757. int s_index;
  758. int i;
  759. int last_pos, bits_left;
  760. VLC *vlc;
  761. int end_pos = FFMIN(end_pos2, s->gb.size_in_bits);
  762. /* low frequencies (called big values) */
  763. s_index = 0;
  764. for (i = 0; i < 3; i++) {
  765. int j, k, l, linbits;
  766. j = g->region_size[i];
  767. if (j == 0)
  768. continue;
  769. /* select vlc table */
  770. k = g->table_select[i];
  771. l = mpa_huff_data[k][0];
  772. linbits = mpa_huff_data[k][1];
  773. vlc = &huff_vlc[l];
  774. if (!l) {
  775. memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * 2 * j);
  776. s_index += 2 * j;
  777. continue;
  778. }
  779. /* read huffcode and compute each couple */
  780. for (; j > 0; j--) {
  781. int exponent, x, y;
  782. int v;
  783. int pos = get_bits_count(&s->gb);
  784. if (pos >= end_pos){
  785. switch_buffer(s, &pos, &end_pos, &end_pos2);
  786. if (pos >= end_pos)
  787. break;
  788. }
  789. y = get_vlc2(&s->gb, vlc->table, 7, 3);
  790. if (!y) {
  791. g->sb_hybrid[s_index ] =
  792. g->sb_hybrid[s_index+1] = 0;
  793. s_index += 2;
  794. continue;
  795. }
  796. exponent= exponents[s_index];
  797. av_dlog(s->avctx, "region=%d n=%d x=%d y=%d exp=%d\n",
  798. i, g->region_size[i] - j, x, y, exponent);
  799. if (y & 16) {
  800. x = y >> 5;
  801. y = y & 0x0f;
  802. if (x < 15) {
  803. READ_FLIP_SIGN(g->sb_hybrid + s_index, RENAME(expval_table)[exponent] + x)
  804. } else {
  805. x += get_bitsz(&s->gb, linbits);
  806. v = l3_unscale(x, exponent);
  807. if (get_bits1(&s->gb))
  808. v = -v;
  809. g->sb_hybrid[s_index] = v;
  810. }
  811. if (y < 15) {
  812. READ_FLIP_SIGN(g->sb_hybrid + s_index + 1, RENAME(expval_table)[exponent] + y)
  813. } else {
  814. y += get_bitsz(&s->gb, linbits);
  815. v = l3_unscale(y, exponent);
  816. if (get_bits1(&s->gb))
  817. v = -v;
  818. g->sb_hybrid[s_index+1] = v;
  819. }
  820. } else {
  821. x = y >> 5;
  822. y = y & 0x0f;
  823. x += y;
  824. if (x < 15) {
  825. READ_FLIP_SIGN(g->sb_hybrid + s_index + !!y, RENAME(expval_table)[exponent] + x)
  826. } else {
  827. x += get_bitsz(&s->gb, linbits);
  828. v = l3_unscale(x, exponent);
  829. if (get_bits1(&s->gb))
  830. v = -v;
  831. g->sb_hybrid[s_index+!!y] = v;
  832. }
  833. g->sb_hybrid[s_index + !y] = 0;
  834. }
  835. s_index += 2;
  836. }
  837. }
  838. /* high frequencies */
  839. vlc = &huff_quad_vlc[g->count1table_select];
  840. last_pos = 0;
  841. while (s_index <= 572) {
  842. int pos, code;
  843. pos = get_bits_count(&s->gb);
  844. if (pos >= end_pos) {
  845. if (pos > end_pos2 && last_pos) {
  846. /* some encoders generate an incorrect size for this
  847. part. We must go back into the data */
  848. s_index -= 4;
  849. skip_bits_long(&s->gb, last_pos - pos);
  850. av_log(s->avctx, AV_LOG_INFO, "overread, skip %d enddists: %d %d\n", last_pos - pos, end_pos-pos, end_pos2-pos);
  851. if(s->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT))
  852. s_index=0;
  853. break;
  854. }
  855. switch_buffer(s, &pos, &end_pos, &end_pos2);
  856. if (pos >= end_pos)
  857. break;
  858. }
  859. last_pos = pos;
  860. code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1);
  861. av_dlog(s->avctx, "t=%d code=%d\n", g->count1table_select, code);
  862. g->sb_hybrid[s_index+0] =
  863. g->sb_hybrid[s_index+1] =
  864. g->sb_hybrid[s_index+2] =
  865. g->sb_hybrid[s_index+3] = 0;
  866. while (code) {
  867. static const int idxtab[16] = { 3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0 };
  868. int v;
  869. int pos = s_index + idxtab[code];
  870. code ^= 8 >> idxtab[code];
  871. READ_FLIP_SIGN(g->sb_hybrid + pos, RENAME(exp_table)+exponents[pos])
  872. }
  873. s_index += 4;
  874. }
  875. /* skip extension bits */
  876. bits_left = end_pos2 - get_bits_count(&s->gb);
  877. if (bits_left < 0 && (s->err_recognition & (AV_EF_BUFFER|AV_EF_COMPLIANT))) {
  878. av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
  879. s_index=0;
  880. } else if (bits_left > 0 && (s->err_recognition & (AV_EF_BUFFER|AV_EF_AGGRESSIVE))) {
  881. av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
  882. s_index = 0;
  883. }
  884. memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * (576 - s_index));
  885. skip_bits_long(&s->gb, bits_left);
  886. i = get_bits_count(&s->gb);
  887. switch_buffer(s, &i, &end_pos, &end_pos2);
  888. return 0;
  889. }
  890. /* Reorder short blocks from bitstream order to interleaved order. It
  891. would be faster to do it in parsing, but the code would be far more
  892. complicated */
  893. static void reorder_block(MPADecodeContext *s, GranuleDef *g)
  894. {
  895. int i, j, len;
  896. INTFLOAT *ptr, *dst, *ptr1;
  897. INTFLOAT tmp[576];
  898. if (g->block_type != 2)
  899. return;
  900. if (g->switch_point) {
  901. if (s->sample_rate_index != 8)
  902. ptr = g->sb_hybrid + 36;
  903. else
  904. ptr = g->sb_hybrid + 72;
  905. } else {
  906. ptr = g->sb_hybrid;
  907. }
  908. for (i = g->short_start; i < 13; i++) {
  909. len = band_size_short[s->sample_rate_index][i];
  910. ptr1 = ptr;
  911. dst = tmp;
  912. for (j = len; j > 0; j--) {
  913. *dst++ = ptr[0*len];
  914. *dst++ = ptr[1*len];
  915. *dst++ = ptr[2*len];
  916. ptr++;
  917. }
  918. ptr += 2 * len;
  919. memcpy(ptr1, tmp, len * 3 * sizeof(*ptr1));
  920. }
  921. }
  922. #define ISQRT2 FIXR(0.70710678118654752440)
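/* Layer 3 joint stereo processing: intensity stereo on the upper bands
   and/or mid/side stereo, depending on mode_ext. */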
  923. static void compute_stereo(MPADecodeContext *s, GranuleDef *g0, GranuleDef *g1)
  924. {
  925. int i, j, k, l;
  926. int sf_max, sf, len, non_zero_found;
  927. INTFLOAT (*is_tab)[16], *tab0, *tab1, tmp0, tmp1, v1, v2;
  928. int non_zero_found_short[3];
  929. /* intensity stereo */
  930. if (s->mode_ext & MODE_EXT_I_STEREO) {
  931. if (!s->lsf) {
  932. is_tab = is_table;
  933. sf_max = 7;
  934. } else {
  935. is_tab = is_table_lsf[g1->scalefac_compress & 1];
  936. sf_max = 16;
  937. }
  938. tab0 = g0->sb_hybrid + 576;
  939. tab1 = g1->sb_hybrid + 576;
  940. non_zero_found_short[0] = 0;
  941. non_zero_found_short[1] = 0;
  942. non_zero_found_short[2] = 0;
  943. k = (13 - g1->short_start) * 3 + g1->long_end - 3;
  944. for (i = 12; i >= g1->short_start; i--) {
  945. /* for last band, use previous scale factor */
  946. if (i != 11)
  947. k -= 3;
  948. len = band_size_short[s->sample_rate_index][i];
  949. for (l = 2; l >= 0; l--) {
  950. tab0 -= len;
  951. tab1 -= len;
  952. if (!non_zero_found_short[l]) {
  953. /* test if non zero band. if so, stop doing i-stereo */
  954. for (j = 0; j < len; j++) {
  955. if (tab1[j] != 0) {
  956. non_zero_found_short[l] = 1;
  957. goto found1;
  958. }
  959. }
  960. sf = g1->scale_factors[k + l];
  961. if (sf >= sf_max)
  962. goto found1;
  963. v1 = is_tab[0][sf];
  964. v2 = is_tab[1][sf];
  965. for (j = 0; j < len; j++) {
  966. tmp0 = tab0[j];
  967. tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
  968. tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
  969. }
  970. } else {
  971. found1:
  972. if (s->mode_ext & MODE_EXT_MS_STEREO) {
  973. /* lower part of the spectrum : do ms stereo
  974. if enabled */
  975. for (j = 0; j < len; j++) {
  976. tmp0 = tab0[j];
  977. tmp1 = tab1[j];
  978. tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
  979. tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
  980. }
  981. }
  982. }
  983. }
  984. }
  985. non_zero_found = non_zero_found_short[0] |
  986. non_zero_found_short[1] |
  987. non_zero_found_short[2];
  988. for (i = g1->long_end - 1;i >= 0;i--) {
  989. len = band_size_long[s->sample_rate_index][i];
  990. tab0 -= len;
  991. tab1 -= len;
  992. /* test if non zero band. if so, stop doing i-stereo */
  993. if (!non_zero_found) {
  994. for (j = 0; j < len; j++) {
  995. if (tab1[j] != 0) {
  996. non_zero_found = 1;
  997. goto found2;
  998. }
  999. }
  1000. /* for last band, use previous scale factor */
  1001. k = (i == 21) ? 20 : i;
  1002. sf = g1->scale_factors[k];
  1003. if (sf >= sf_max)
  1004. goto found2;
  1005. v1 = is_tab[0][sf];
  1006. v2 = is_tab[1][sf];
  1007. for (j = 0; j < len; j++) {
  1008. tmp0 = tab0[j];
  1009. tab0[j] = MULLx(tmp0, v1, FRAC_BITS);
  1010. tab1[j] = MULLx(tmp0, v2, FRAC_BITS);
  1011. }
  1012. } else {
  1013. found2:
  1014. if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1015. /* lower part of the spectrum : do ms stereo
  1016. if enabled */
  1017. for (j = 0; j < len; j++) {
  1018. tmp0 = tab0[j];
  1019. tmp1 = tab1[j];
  1020. tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS);
  1021. tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS);
  1022. }
  1023. }
  1024. }
  1025. }
  1026. } else if (s->mode_ext & MODE_EXT_MS_STEREO) {
  1027. /* ms stereo ONLY */
  1028. /* NOTE: the 1/sqrt(2) normalization factor is included in the
  1029. global gain */
  1030. #if USE_FLOATS
  1031. s->fdsp.butterflies_float(g0->sb_hybrid, g1->sb_hybrid, 576);
  1032. #else
  1033. tab0 = g0->sb_hybrid;
  1034. tab1 = g1->sb_hybrid;
  1035. for (i = 0; i < 576; i++) {
  1036. tmp0 = tab0[i];
  1037. tmp1 = tab1[i];
  1038. tab0[i] = tmp0 + tmp1;
  1039. tab1[i] = tmp0 - tmp1;
  1040. }
  1041. #endif
  1042. }
  1043. }
  1044. #if USE_FLOATS
  1045. #if HAVE_MIPSFPU
  1046. # include "mips/compute_antialias_float.h"
  1047. #endif /* HAVE_MIPSFPU */
  1048. #else
  1049. #if HAVE_MIPSDSPR1
  1050. # include "mips/compute_antialias_fixed.h"
  1051. #endif /* HAVE_MIPSDSPR1 */
  1052. #endif /* USE_FLOATS */
  1053. #ifndef compute_antialias
  1054. #if USE_FLOATS
  1055. #define AA(j) do { \
  1056. float tmp0 = ptr[-1-j]; \
  1057. float tmp1 = ptr[ j]; \
  1058. ptr[-1-j] = tmp0 * csa_table[j][0] - tmp1 * csa_table[j][1]; \
  1059. ptr[ j] = tmp0 * csa_table[j][1] + tmp1 * csa_table[j][0]; \
  1060. } while (0)
  1061. #else
  1062. #define AA(j) do { \
  1063. int tmp0 = ptr[-1-j]; \
  1064. int tmp1 = ptr[ j]; \
  1065. int tmp2 = MULH(tmp0 + tmp1, csa_table[j][0]); \
  1066. ptr[-1-j] = 4 * (tmp2 - MULH(tmp1, csa_table[j][2])); \
  1067. ptr[ j] = 4 * (tmp2 + MULH(tmp0, csa_table[j][3])); \
  1068. } while (0)
  1069. #endif
  1070. static void compute_antialias(MPADecodeContext *s, GranuleDef *g)
  1071. {
  1072. INTFLOAT *ptr;
  1073. int n, i;
  1074. /* we antialias only "long" bands */
  1075. if (g->block_type == 2) {
  1076. if (!g->switch_point)
  1077. return;
  1078. /* XXX: check this for 8000Hz case */
  1079. n = 1;
  1080. } else {
  1081. n = SBLIMIT - 1;
  1082. }
  1083. ptr = g->sb_hybrid + 18;
  1084. for (i = n; i > 0; i--) {
  1085. AA(0);
  1086. AA(1);
  1087. AA(2);
  1088. AA(3);
  1089. AA(4);
  1090. AA(5);
  1091. AA(6);
  1092. AA(7);
  1093. ptr += 18;
  1094. }
  1095. }
  1096. #endif /* compute_antialias */
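/* Hybrid synthesis for one granule: 36-point IMDCT for long blocks and
   12-point IMDCT for short blocks, with windowing and overlap-add into
   sb_samples. */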
  1097. static void compute_imdct(MPADecodeContext *s, GranuleDef *g,
  1098. INTFLOAT *sb_samples, INTFLOAT *mdct_buf)
  1099. {
  1100. INTFLOAT *win, *out_ptr, *ptr, *buf, *ptr1;
  1101. INTFLOAT out2[12];
  1102. int i, j, mdct_long_end, sblimit;
  1103. /* find last non zero block */
  1104. ptr = g->sb_hybrid + 576;
  1105. ptr1 = g->sb_hybrid + 2 * 18;
  1106. while (ptr >= ptr1) {
  1107. int32_t *p;
  1108. ptr -= 6;
  1109. p = (int32_t*)ptr;
  1110. if (p[0] | p[1] | p[2] | p[3] | p[4] | p[5])
  1111. break;
  1112. }
  1113. sblimit = ((ptr - g->sb_hybrid) / 18) + 1;
  1114. if (g->block_type == 2) {
  1115. /* XXX: check for 8000 Hz */
  1116. if (g->switch_point)
  1117. mdct_long_end = 2;
  1118. else
  1119. mdct_long_end = 0;
  1120. } else {
  1121. mdct_long_end = sblimit;
  1122. }
  1123. s->mpadsp.RENAME(imdct36_blocks)(sb_samples, mdct_buf, g->sb_hybrid,
  1124. mdct_long_end, g->switch_point,
  1125. g->block_type);
  1126. buf = mdct_buf + 4*18*(mdct_long_end >> 2) + (mdct_long_end & 3);
  1127. ptr = g->sb_hybrid + 18 * mdct_long_end;
  1128. for (j = mdct_long_end; j < sblimit; j++) {
  1129. /* select frequency inversion */
  1130. win = RENAME(ff_mdct_win)[2 + (4 & -(j & 1))];
  1131. out_ptr = sb_samples + j;
  1132. for (i = 0; i < 6; i++) {
  1133. *out_ptr = buf[4*i];
  1134. out_ptr += SBLIMIT;
  1135. }
  1136. imdct12(out2, ptr + 0);
  1137. for (i = 0; i < 6; i++) {
  1138. *out_ptr = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*1)];
  1139. buf[4*(i + 6*2)] = MULH3(out2[i + 6], win[i + 6], 1);
  1140. out_ptr += SBLIMIT;
  1141. }
  1142. imdct12(out2, ptr + 1);
  1143. for (i = 0; i < 6; i++) {
  1144. *out_ptr = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*2)];
  1145. buf[4*(i + 6*0)] = MULH3(out2[i + 6], win[i + 6], 1);
  1146. out_ptr += SBLIMIT;
  1147. }
  1148. imdct12(out2, ptr + 2);
  1149. for (i = 0; i < 6; i++) {
  1150. buf[4*(i + 6*0)] = MULH3(out2[i ], win[i ], 1) + buf[4*(i + 6*0)];
  1151. buf[4*(i + 6*1)] = MULH3(out2[i + 6], win[i + 6], 1);
  1152. buf[4*(i + 6*2)] = 0;
  1153. }
  1154. ptr += 18;
  1155. buf += (j&3) != 3 ? 1 : (4*18-3);
  1156. }
  1157. /* zero bands */
  1158. for (j = sblimit; j < SBLIMIT; j++) {
  1159. /* overlap */
  1160. out_ptr = sb_samples + j;
  1161. for (i = 0; i < 18; i++) {
  1162. *out_ptr = buf[4*i];
  1163. buf[4*i] = 0;
  1164. out_ptr += SBLIMIT;
  1165. }
  1166. buf += (j&3) != 3 ? 1 : (4*18-3);
  1167. }
  1168. }
  1169. /* main layer3 decoding function */
  1170. static int mp_decode_layer3(MPADecodeContext *s)
  1171. {
  1172. int nb_granules, main_data_begin;
  1173. int gr, ch, blocksplit_flag, i, j, k, n, bits_pos;
  1174. GranuleDef *g;
  1175. int16_t exponents[576]; //FIXME try INTFLOAT
  1176. /* read side info */
  1177. if (s->lsf) {
  1178. main_data_begin = get_bits(&s->gb, 8);
  1179. skip_bits(&s->gb, s->nb_channels);
  1180. nb_granules = 1;
  1181. } else {
  1182. main_data_begin = get_bits(&s->gb, 9);
  1183. if (s->nb_channels == 2)
  1184. skip_bits(&s->gb, 3);
  1185. else
  1186. skip_bits(&s->gb, 5);
  1187. nb_granules = 2;
  1188. for (ch = 0; ch < s->nb_channels; ch++) {
  1189. s->granules[ch][0].scfsi = 0;/* all scale factors are transmitted */
  1190. s->granules[ch][1].scfsi = get_bits(&s->gb, 4);
  1191. }
  1192. }
  1193. for (gr = 0; gr < nb_granules; gr++) {
  1194. for (ch = 0; ch < s->nb_channels; ch++) {
  1195. av_dlog(s->avctx, "gr=%d ch=%d: side_info\n", gr, ch);
  1196. g = &s->granules[ch][gr];
  1197. g->part2_3_length = get_bits(&s->gb, 12);
  1198. g->big_values = get_bits(&s->gb, 9);
  1199. if (g->big_values > 288) {
  1200. av_log(s->avctx, AV_LOG_ERROR, "big_values too big\n");
  1201. return AVERROR_INVALIDDATA;
  1202. }
  1203. g->global_gain = get_bits(&s->gb, 8);
  1204. /* if MS stereo only is selected, we precompute the
  1205. 1/sqrt(2) renormalization factor */
  1206. if ((s->mode_ext & (MODE_EXT_MS_STEREO | MODE_EXT_I_STEREO)) ==
  1207. MODE_EXT_MS_STEREO)
  1208. g->global_gain -= 2;
  1209. if (s->lsf)
  1210. g->scalefac_compress = get_bits(&s->gb, 9);
  1211. else
  1212. g->scalefac_compress = get_bits(&s->gb, 4);
  1213. blocksplit_flag = get_bits1(&s->gb);
  1214. if (blocksplit_flag) {
  1215. g->block_type = get_bits(&s->gb, 2);
  1216. if (g->block_type == 0) {
  1217. av_log(s->avctx, AV_LOG_ERROR, "invalid block type\n");
  1218. return AVERROR_INVALIDDATA;
  1219. }
  1220. g->switch_point = get_bits1(&s->gb);
  1221. for (i = 0; i < 2; i++)
  1222. g->table_select[i] = get_bits(&s->gb, 5);
  1223. for (i = 0; i < 3; i++)
  1224. g->subblock_gain[i] = get_bits(&s->gb, 3);
  1225. init_short_region(s, g);
  1226. } else {
  1227. int region_address1, region_address2;
  1228. g->block_type = 0;
  1229. g->switch_point = 0;
  1230. for (i = 0; i < 3; i++)
  1231. g->table_select[i] = get_bits(&s->gb, 5);
  1232. /* compute huffman coded region sizes */
  1233. region_address1 = get_bits(&s->gb, 4);
  1234. region_address2 = get_bits(&s->gb, 3);
  1235. av_dlog(s->avctx, "region1=%d region2=%d\n",
  1236. region_address1, region_address2);
  1237. init_long_region(s, g, region_address1, region_address2);
  1238. }
  1239. region_offset2size(g);
  1240. compute_band_indexes(s, g);
  1241. g->preflag = 0;
  1242. if (!s->lsf)
  1243. g->preflag = get_bits1(&s->gb);
  1244. g->scalefac_scale = get_bits1(&s->gb);
  1245. g->count1table_select = get_bits1(&s->gb);
  1246. av_dlog(s->avctx, "block_type=%d switch_point=%d\n",
  1247. g->block_type, g->switch_point);
  1248. }
  1249. }
  1250. if (!s->adu_mode) {
  1251. int skip;
  1252. const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3);
  1253. int extrasize = av_clip(get_bits_left(&s->gb) >> 3, 0, EXTRABYTES);
  1254. av_assert1((get_bits_count(&s->gb) & 7) == 0);
  1255. /* now we get bits from the main_data_begin offset */
  1256. av_dlog(s->avctx, "seekback:%d, lastbuf:%d\n",
  1257. main_data_begin, s->last_buf_size);
  1258. memcpy(s->last_buf + s->last_buf_size, ptr, extrasize);
  1259. s->in_gb = s->gb;
  1260. init_get_bits(&s->gb, s->last_buf, s->last_buf_size*8);
  1261. #if !UNCHECKED_BITSTREAM_READER
  1262. s->gb.size_in_bits_plus8 += FFMAX(extrasize, LAST_BUF_SIZE - s->last_buf_size) * 8;
  1263. #endif
  1264. s->last_buf_size <<= 3;
  1265. for (gr = 0; gr < nb_granules && (s->last_buf_size >> 3) < main_data_begin; gr++) {
  1266. for (ch = 0; ch < s->nb_channels; ch++) {
  1267. g = &s->granules[ch][gr];
  1268. s->last_buf_size += g->part2_3_length;
  1269. memset(g->sb_hybrid, 0, sizeof(g->sb_hybrid));
  1270. compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
  1271. }
  1272. }
  1273. skip = s->last_buf_size - 8 * main_data_begin;
  1274. if (skip >= s->gb.size_in_bits && s->in_gb.buffer) {
  1275. skip_bits_long(&s->in_gb, skip - s->gb.size_in_bits);
  1276. s->gb = s->in_gb;
  1277. s->in_gb.buffer = NULL;
  1278. } else {
  1279. skip_bits_long(&s->gb, skip);
  1280. }
  1281. } else {
  1282. gr = 0;
  1283. }
  1284. for (; gr < nb_granules; gr++) {
  1285. for (ch = 0; ch < s->nb_channels; ch++) {
  1286. g = &s->granules[ch][gr];
  1287. bits_pos = get_bits_count(&s->gb);
  1288. if (!s->lsf) {
  1289. uint8_t *sc;
  1290. int slen, slen1, slen2;
  1291. /* MPEG1 scale factors */
  1292. slen1 = slen_table[0][g->scalefac_compress];
  1293. slen2 = slen_table[1][g->scalefac_compress];
  1294. av_dlog(s->avctx, "slen1=%d slen2=%d\n", slen1, slen2);
  1295. if (g->block_type == 2) {
  1296. n = g->switch_point ? 17 : 18;
  1297. j = 0;
  1298. if (slen1) {
  1299. for (i = 0; i < n; i++)
  1300. g->scale_factors[j++] = get_bits(&s->gb, slen1);
  1301. } else {
  1302. for (i = 0; i < n; i++)
  1303. g->scale_factors[j++] = 0;
  1304. }
  1305. if (slen2) {
  1306. for (i = 0; i < 18; i++)
  1307. g->scale_factors[j++] = get_bits(&s->gb, slen2);
  1308. for (i = 0; i < 3; i++)
  1309. g->scale_factors[j++] = 0;
  1310. } else {
  1311. for (i = 0; i < 21; i++)
  1312. g->scale_factors[j++] = 0;
  1313. }
  1314. } else {
  1315. sc = s->granules[ch][0].scale_factors;
  1316. j = 0;
  1317. for (k = 0; k < 4; k++) {
  1318. n = k == 0 ? 6 : 5;
  1319. if ((g->scfsi & (0x8 >> k)) == 0) {
  1320. slen = (k < 2) ? slen1 : slen2;
  1321. if (slen) {
  1322. for (i = 0; i < n; i++)
  1323. g->scale_factors[j++] = get_bits(&s->gb, slen);
  1324. } else {
  1325. for (i = 0; i < n; i++)
  1326. g->scale_factors[j++] = 0;
  1327. }
  1328. } else {
  1329. /* simply copy from last granule */
  1330. for (i = 0; i < n; i++) {
  1331. g->scale_factors[j] = sc[j];
  1332. j++;
  1333. }
  1334. }
  1335. }
  1336. g->scale_factors[j++] = 0;
  1337. }
  1338. } else {
  1339. int tindex, tindex2, slen[4], sl, sf;
  1340. /* LSF scale factors */
  1341. if (g->block_type == 2)
  1342. tindex = g->switch_point ? 2 : 1;
  1343. else
  1344. tindex = 0;
  1345. sf = g->scalefac_compress;
  1346. if ((s->mode_ext & MODE_EXT_I_STEREO) && ch == 1) {
  1347. /* intensity stereo case */
  1348. sf >>= 1;
  1349. if (sf < 180) {
  1350. lsf_sf_expand(slen, sf, 6, 6, 0);
  1351. tindex2 = 3;
  1352. } else if (sf < 244) {
  1353. lsf_sf_expand(slen, sf - 180, 4, 4, 0);
  1354. tindex2 = 4;
  1355. } else {
  1356. lsf_sf_expand(slen, sf - 244, 3, 0, 0);
  1357. tindex2 = 5;
  1358. }
  1359. } else {
  1360. /* normal case */
  1361. if (sf < 400) {
  1362. lsf_sf_expand(slen, sf, 5, 4, 4);
  1363. tindex2 = 0;
  1364. } else if (sf < 500) {
  1365. lsf_sf_expand(slen, sf - 400, 5, 4, 0);
  1366. tindex2 = 1;
  1367. } else {
  1368. lsf_sf_expand(slen, sf - 500, 3, 0, 0);
  1369. tindex2 = 2;
  1370. g->preflag = 1;
  1371. }
  1372. }
  1373. j = 0;
  1374. for (k = 0; k < 4; k++) {
  1375. n = lsf_nsf_table[tindex2][tindex][k];
  1376. sl = slen[k];
  1377. if (sl) {
  1378. for (i = 0; i < n; i++)
  1379. g->scale_factors[j++] = get_bits(&s->gb, sl);
  1380. } else {
  1381. for (i = 0; i < n; i++)
  1382. g->scale_factors[j++] = 0;
  1383. }
  1384. }
  1385. /* XXX: should compute exact size */
  1386. for (; j < 40; j++)
  1387. g->scale_factors[j] = 0;
  1388. }
  1389. exponents_from_scale_factors(s, g, exponents);
  1390. /* read Huffman coded residue */
  1391. huffman_decode(s, g, exponents, bits_pos + g->part2_3_length);
  1392. } /* ch */
  1393. if (s->mode == MPA_JSTEREO)
  1394. compute_stereo(s, &s->granules[0][gr], &s->granules[1][gr]);
  1395. for (ch = 0; ch < s->nb_channels; ch++) {
  1396. g = &s->granules[ch][gr];
  1397. reorder_block(s, g);
  1398. compute_antialias(s, g);
  1399. compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]);
  1400. }
  1401. } /* gr */
  1402. if (get_bits_count(&s->gb) < 0)
  1403. skip_bits_long(&s->gb, -get_bits_count(&s->gb));
  1404. return nb_granules * 18;
  1405. }
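/* Decode one frame following the 4-byte header: layer-specific subband
   decoding, then the polyphase synthesis filter per channel. */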
  1406. static int mp_decode_frame(MPADecodeContext *s, OUT_INT **samples,
  1407. const uint8_t *buf, int buf_size)
  1408. {
  1409. int i, nb_frames, ch, ret;
  1410. OUT_INT *samples_ptr;
  1411. init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE) * 8);
  1412. /* skip error protection field */
  1413. if (s->error_protection)
  1414. skip_bits(&s->gb, 16);
  1415. switch(s->layer) {
  1416. case 1:
  1417. s->avctx->frame_size = 384;
  1418. nb_frames = mp_decode_layer1(s);
  1419. break;
  1420. case 2:
  1421. s->avctx->frame_size = 1152;
  1422. nb_frames = mp_decode_layer2(s);
  1423. break;
  1424. case 3:
  1425. s->avctx->frame_size = s->lsf ? 576 : 1152;
  1426. default:
  1427. nb_frames = mp_decode_layer3(s);
  1428. s->last_buf_size=0;
  1429. if (s->in_gb.buffer) {
  1430. align_get_bits(&s->gb);
  1431. i = get_bits_left(&s->gb)>>3;
  1432. if (i >= 0 && i <= BACKSTEP_SIZE) {
  1433. memmove(s->last_buf, s->gb.buffer + (get_bits_count(&s->gb)>>3), i);
  1434. s->last_buf_size=i;
  1435. } else
  1436. av_log(s->avctx, AV_LOG_ERROR, "invalid old backstep %d\n", i);
  1437. s->gb = s->in_gb;
  1438. s->in_gb.buffer = NULL;
  1439. }
  1440. align_get_bits(&s->gb);
  1441. av_assert1((get_bits_count(&s->gb) & 7) == 0);
  1442. i = get_bits_left(&s->gb) >> 3;
  1443. if (i < 0 || i > BACKSTEP_SIZE || nb_frames < 0) {
  1444. if (i < 0)
  1445. av_log(s->avctx, AV_LOG_ERROR, "invalid new backstep %d\n", i);
  1446. i = FFMIN(BACKSTEP_SIZE, buf_size - HEADER_SIZE);
  1447. }
  1448. av_assert1(i <= buf_size - HEADER_SIZE && i >= 0);
  1449. memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i);
  1450. s->last_buf_size += i;
  1451. }
  1452. if(nb_frames < 0)
  1453. return nb_frames;
  1454. /* get output buffer */
  1455. if (!samples) {
  1456. av_assert0(s->frame != NULL);
  1457. s->frame->nb_samples = s->avctx->frame_size;
  1458. if ((ret = ff_get_buffer(s->avctx, s->frame, 0)) < 0)
  1459. return ret;
  1460. samples = (OUT_INT **)s->frame->extended_data;
  1461. }
  1462. /* apply the synthesis filter */
  1463. for (ch = 0; ch < s->nb_channels; ch++) {
  1464. int sample_stride;
  1465. if (s->avctx->sample_fmt == OUT_FMT_P) {
  1466. samples_ptr = samples[ch];
  1467. sample_stride = 1;
  1468. } else {
  1469. samples_ptr = samples[0] + ch;
  1470. sample_stride = s->nb_channels;
  1471. }
  1472. for (i = 0; i < nb_frames; i++) {
  1473. RENAME(ff_mpa_synth_filter)(&s->mpadsp, s->synth_buf[ch],
  1474. &(s->synth_buf_offset[ch]),
  1475. RENAME(ff_mpa_synth_window),
  1476. &s->dither_state, samples_ptr,
  1477. sample_stride, s->sb_samples[ch][i]);
  1478. samples_ptr += 32 * sample_stride;
  1479. }
  1480. }
  1481. return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels;
  1482. }
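/* AVCodec decode callback: skip leading zero bytes, validate the MPEG audio
   header, update codec parameters and decode one frame. */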
  1483. static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr,
  1484. AVPacket *avpkt)
  1485. {
  1486. const uint8_t *buf = avpkt->data;
  1487. int buf_size = avpkt->size;
  1488. MPADecodeContext *s = avctx->priv_data;
  1489. uint32_t header;
  1490. int ret;
  1491. while(buf_size && !*buf){
  1492. buf++;
  1493. buf_size--;
  1494. }
  1495. if (buf_size < HEADER_SIZE)
  1496. return AVERROR_INVALIDDATA;
  1497. header = AV_RB32(buf);
  1498. if (header>>8 == AV_RB32("TAG")>>8) {
  1499. av_log(avctx, AV_LOG_DEBUG, "discarding ID3 tag\n");
  1500. return buf_size;
  1501. }
  1502. if (ff_mpa_check_header(header) < 0) {
  1503. av_log(avctx, AV_LOG_ERROR, "Header missing\n");
  1504. return AVERROR_INVALIDDATA;
  1505. }
  1506. if (avpriv_mpegaudio_decode_header((MPADecodeHeader *)s, header) == 1) {
  1507. /* free format: prepare to compute frame size */
  1508. s->frame_size = -1;
  1509. return AVERROR_INVALIDDATA;
  1510. }
  1511. /* update codec info */
  1512. avctx->channels = s->nb_channels;
  1513. avctx->channel_layout = s->nb_channels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
  1514. if (!avctx->bit_rate)
  1515. avctx->bit_rate = s->bit_rate;
  1516. if (s->frame_size <= 0 || s->frame_size > buf_size) {
  1517. av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
  1518. return AVERROR_INVALIDDATA;
  1519. } else if (s->frame_size < buf_size) {
  1520. av_log(avctx, AV_LOG_DEBUG, "incorrect frame size - multiple frames in buffer?\n");
  1521. buf_size= s->frame_size;
  1522. }
  1523. s->frame = data;
  1524. ret = mp_decode_frame(s, NULL, buf, buf_size);
  1525. if (ret >= 0) {
  1526. s->frame->nb_samples = avctx->frame_size;
  1527. *got_frame_ptr = 1;
  1528. avctx->sample_rate = s->sample_rate;
  1529. //FIXME maybe move the other codec info stuff from above here too
  1530. } else {
  1531. av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n");
  1532. /* Only return an error if the bad frame makes up the whole packet or
  1533. * the error is related to buffer management.
  1534. * If there is more data in the packet, just consume the bad frame
  1535. * instead of returning an error, which would discard the whole
  1536. * packet. */
  1537. *got_frame_ptr = 0;
  1538. if (buf_size == avpkt->size || ret != AVERROR_INVALIDDATA)
  1539. return ret;
  1540. }
  1541. s->frame_size = 0;
  1542. return buf_size;
  1543. }
  1544. static void mp_flush(MPADecodeContext *ctx)
  1545. {
  1546. memset(ctx->synth_buf, 0, sizeof(ctx->synth_buf));
  1547. ctx->last_buf_size = 0;
  1548. }
  1549. static void flush(AVCodecContext *avctx)
  1550. {
  1551. mp_flush(avctx->priv_data);
  1552. }
  1553. #if CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER
  1554. static int decode_frame_adu(AVCodecContext *avctx, void *data,
  1555. int *got_frame_ptr, AVPacket *avpkt)
  1556. {
  1557. const uint8_t *buf = avpkt->data;
  1558. int buf_size = avpkt->size;
  1559. MPADecodeContext *s = avctx->priv_data;
  1560. uint32_t header;
  1561. int len, ret;
  1562. int av_unused out_size;
  1563. len = buf_size;
  1564. // Discard too short frames
  1565. if (buf_size < HEADER_SIZE) {
  1566. av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
  1567. return AVERROR_INVALIDDATA;
  1568. }
  1569. if (len > MPA_MAX_CODED_FRAME_SIZE)
  1570. len = MPA_MAX_CODED_FRAME_SIZE;
  1571. // Get header and restore sync word
  1572. header = AV_RB32(buf) | 0xffe00000;
  1573. if (ff_mpa_check_header(header) < 0) { // Bad header, discard frame
  1574. av_log(avctx, AV_LOG_ERROR, "Invalid frame header\n");
  1575. return AVERROR_INVALIDDATA;
  1576. }
  1577. avpriv_mpegaudio_decode_header((MPADecodeHeader *)s, header);
  1578. /* update codec info */
  1579. avctx->sample_rate = s->sample_rate;
  1580. avctx->channels = s->nb_channels;
  1581. avctx->channel_layout = s->nb_channels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
  1582. if (!avctx->bit_rate)
  1583. avctx->bit_rate = s->bit_rate;
  1584. s->frame_size = len;
  1585. s->frame = data;
  1586. ret = mp_decode_frame(s, NULL, buf, buf_size);
  1587. if (ret < 0) {
  1588. av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n");
  1589. return ret;
  1590. }
  1591. *got_frame_ptr = 1;
  1592. return buf_size;
  1593. }
  1594. #endif /* CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER */
  1595. #if CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER
  1596. /**
  1597. * Context for MP3On4 decoder
  1598. */
  1599. typedef struct MP3On4DecodeContext {
  1600. int frames; ///< number of mp3 frames per block (number of mp3 decoder instances)
  1601. int syncword; ///< syncword patch
  1602. const uint8_t *coff; ///< channel offsets in output buffer
  1603. MPADecodeContext *mp3decctx[5]; ///< MPADecodeContext for every decoder instance
  1604. } MP3On4DecodeContext;
  1605. #include "mpeg4audio.h"
  1606. /* Next 3 arrays are indexed by channel config number (passed via codecdata) */
  1607. /* number of mp3 decoder instances */
  1608. static const uint8_t mp3Frames[8] = { 0, 1, 1, 2, 3, 3, 4, 5 };
  1609. /* offsets into output buffer, assume output order is FL FR C LFE BL BR SL SR */
  1610. static const uint8_t chan_offset[8][5] = {
  1611. { 0 },
  1612. { 0 }, // C
  1613. { 0 }, // FLR
  1614. { 2, 0 }, // C FLR
  1615. { 2, 0, 3 }, // C FLR BS
  1616. { 2, 0, 3 }, // C FLR BLRS
  1617. { 2, 0, 4, 3 }, // C FLR BLRS LFE
  1618. { 2, 0, 6, 4, 3 }, // C FLR BLRS BLR LFE
  1619. };
  1620. /* mp3on4 channel layouts */
  1621. static const int16_t chan_layout[8] = {
  1622. 0,
  1623. AV_CH_LAYOUT_MONO,
  1624. AV_CH_LAYOUT_STEREO,
  1625. AV_CH_LAYOUT_SURROUND,
  1626. AV_CH_LAYOUT_4POINT0,
  1627. AV_CH_LAYOUT_5POINT0,
  1628. AV_CH_LAYOUT_5POINT1,
  1629. AV_CH_LAYOUT_7POINT1
  1630. };
  1631. static av_cold int decode_close_mp3on4(AVCodecContext * avctx)
  1632. {
  1633. MP3On4DecodeContext *s = avctx->priv_data;
  1634. int i;
  1635. for (i = 0; i < s->frames; i++)
  1636. av_free(s->mp3decctx[i]);
  1637. return 0;
  1638. }
  1639. static av_cold int decode_init_mp3on4(AVCodecContext * avctx)
  1640. {
  1641. MP3On4DecodeContext *s = avctx->priv_data;
  1642. MPEG4AudioConfig cfg;
  1643. int i;
  1644. if ((avctx->extradata_size < 2) || (avctx->extradata == NULL)) {
  1645. av_log(avctx, AV_LOG_ERROR, "Codec extradata missing or too short.\n");
  1646. return AVERROR_INVALIDDATA;
  1647. }
  1648. avpriv_mpeg4audio_get_config(&cfg, avctx->extradata,
  1649. avctx->extradata_size * 8, 1);
  1650. if (!cfg.chan_config || cfg.chan_config > 7) {
  1651. av_log(avctx, AV_LOG_ERROR, "Invalid channel config number.\n");
  1652. return AVERROR_INVALIDDATA;
  1653. }
  1654. s->frames = mp3Frames[cfg.chan_config];
  1655. s->coff = chan_offset[cfg.chan_config];
  1656. avctx->channels = ff_mpeg4audio_channels[cfg.chan_config];
  1657. avctx->channel_layout = chan_layout[cfg.chan_config];
  1658. if (cfg.sample_rate < 16000)
  1659. s->syncword = 0xffe00000;
  1660. else
  1661. s->syncword = 0xfff00000;
  1662. /* Init the first mp3 decoder in the standard way, so that all tables get built.
  1663. * We replace avctx->priv_data with the context of the first decoder so that
  1664. * decode_init() does not have to be changed.
  1665. * Other decoders will be initialized here by copying data from the first context.
  1666. */
  1667. // Allocate zeroed memory for the first decoder context
  1668. s->mp3decctx[0] = av_mallocz(sizeof(MPADecodeContext));
  1669. if (!s->mp3decctx[0])
  1670. goto alloc_fail;
  1671. // Put decoder context in place to make decode_init() happy
  1672. avctx->priv_data = s->mp3decctx[0];
  1673. decode_init(avctx);
  1674. // Restore mp3on4 context pointer
  1675. avctx->priv_data = s;
  1676. s->mp3decctx[0]->adu_mode = 1; // Set adu mode
  1677. /* Create a separate codec/context for each frame (first is already ok).
  1678. * Each frame is 1 or 2 channels - up to 5 frames allowed
  1679. */
  1680. for (i = 1; i < s->frames; i++) {
  1681. s->mp3decctx[i] = av_mallocz(sizeof(MPADecodeContext));
  1682. if (!s->mp3decctx[i])
  1683. goto alloc_fail;
  1684. s->mp3decctx[i]->adu_mode = 1;
  1685. s->mp3decctx[i]->avctx = avctx;
  1686. s->mp3decctx[i]->mpadsp = s->mp3decctx[0]->mpadsp;
  1687. }
  1688. return 0;
  1689. alloc_fail:
  1690. decode_close_mp3on4(avctx);
  1691. return AVERROR(ENOMEM);
  1692. }
  1693. static void flush_mp3on4(AVCodecContext *avctx)
  1694. {
  1695. int i;
  1696. MP3On4DecodeContext *s = avctx->priv_data;
  1697. for (i = 0; i < s->frames; i++)
  1698. mp_flush(s->mp3decctx[i]);
  1699. }
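/* Decode one MP3onMP4 block: each sub-frame is a patched standalone mp3
   frame decoded into its channel offset in the output buffer. */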
  1700. static int decode_frame_mp3on4(AVCodecContext *avctx, void *data,
  1701. int *got_frame_ptr, AVPacket *avpkt)
  1702. {
  1703. AVFrame *frame = data;
  1704. const uint8_t *buf = avpkt->data;
  1705. int buf_size = avpkt->size;
  1706. MP3On4DecodeContext *s = avctx->priv_data;
  1707. MPADecodeContext *m;
  1708. int fsize, len = buf_size, out_size = 0;
  1709. uint32_t header;
  1710. OUT_INT **out_samples;
  1711. OUT_INT *outptr[2];
  1712. int fr, ch, ret;
  1713. /* get output buffer */
  1714. frame->nb_samples = MPA_FRAME_SIZE;
  1715. if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
  1716. return ret;
  1717. out_samples = (OUT_INT **)frame->extended_data;
  1718. // Discard too short frames
  1719. if (buf_size < HEADER_SIZE)
  1720. return AVERROR_INVALIDDATA;
  1721. avctx->bit_rate = 0;
  1722. ch = 0;
  1723. for (fr = 0; fr < s->frames; fr++) {
  1724. fsize = AV_RB16(buf) >> 4;
  1725. fsize = FFMIN3(fsize, len, MPA_MAX_CODED_FRAME_SIZE);
  1726. m = s->mp3decctx[fr];
  1727. av_assert1(m);
  1728. if (fsize < HEADER_SIZE) {
  1729. av_log(avctx, AV_LOG_ERROR, "Frame size smaller than header size\n");
  1730. return AVERROR_INVALIDDATA;
  1731. }
  1732. header = (AV_RB32(buf) & 0x000fffff) | s->syncword; // patch header
  1733. if (ff_mpa_check_header(header) < 0) {
  1734. av_log(avctx, AV_LOG_ERROR, "Bad header, discard block\n");
  1735. return AVERROR_INVALIDDATA;
  1736. }
  1737. avpriv_mpegaudio_decode_header((MPADecodeHeader *)m, header);
  1738. if (ch + m->nb_channels > avctx->channels ||
  1739. s->coff[fr] + m->nb_channels > avctx->channels) {
  1740. av_log(avctx, AV_LOG_ERROR, "frame channel count exceeds codec "
  1741. "channel count\n");
  1742. return AVERROR_INVALIDDATA;
  1743. }
  1744. ch += m->nb_channels;
  1745. outptr[0] = out_samples[s->coff[fr]];
  1746. if (m->nb_channels > 1)
  1747. outptr[1] = out_samples[s->coff[fr] + 1];
  1748. if ((ret = mp_decode_frame(m, outptr, buf, fsize)) < 0)
  1749. return ret;
  1750. out_size += ret;
  1751. buf += fsize;
  1752. len -= fsize;
  1753. avctx->bit_rate += m->bit_rate;
  1754. }
  1755. /* update codec info */
  1756. avctx->sample_rate = s->mp3decctx[0]->sample_rate;
  1757. frame->nb_samples = out_size / (avctx->channels * sizeof(OUT_INT));
  1758. *got_frame_ptr = 1;
  1759. return buf_size;
  1760. }
  1761. #endif /* CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER */