You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2143 lines
75KB

  1. /*
  2. * AAC decoder
  3. * Copyright (c) 2005-2006 Oded Shimon ( ods15 ods15 dyndns org )
  4. * Copyright (c) 2006-2007 Maxim Gavrilov ( maxim.gavrilov gmail com )
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * AAC decoder
  25. * @author Oded Shimon ( ods15 ods15 dyndns org )
  26. * @author Maxim Gavrilov ( maxim.gavrilov gmail com )
  27. */
  28. /*
  29. * supported tools
  30. *
  31. * Support? Name
  32. * N (code in SoC repo) gain control
  33. * Y block switching
  34. * Y window shapes - standard
  35. * N window shapes - Low Delay
  36. * Y filterbank - standard
  37. * N (code in SoC repo) filterbank - Scalable Sample Rate
  38. * Y Temporal Noise Shaping
  39. * N (code in SoC repo) Long Term Prediction
  40. * Y intensity stereo
  41. * Y channel coupling
  42. * Y frequency domain prediction
  43. * Y Perceptual Noise Substitution
  44. * Y Mid/Side stereo
  45. * N Scalable Inverse AAC Quantization
  46. * N Frequency Selective Switch
  47. * N upsampling filter
  48. * Y quantization & coding - AAC
  49. * N quantization & coding - TwinVQ
  50. * N quantization & coding - BSAC
  51. * N AAC Error Resilience tools
  52. * N Error Resilience payload syntax
  53. * N Error Protection tool
  54. * N CELP
  55. * N Silence Compression
  56. * N HVXC
  57. * N HVXC 4kbits/s VR
  58. * N Structured Audio tools
  59. * N Structured Audio Sample Bank Format
  60. * N MIDI
  61. * N Harmonic and Individual Lines plus Noise
  62. * N Text-To-Speech Interface
  63. * Y Spectral Band Replication
  64. * Y (not in this code) Layer-1
  65. * Y (not in this code) Layer-2
  66. * Y (not in this code) Layer-3
  67. * N SinuSoidal Coding (Transient, Sinusoid, Noise)
  68. * Y Parametric Stereo
  69. * N Direct Stream Transfer
  70. *
  71. * Note: - HE AAC v1 comprises LC AAC with Spectral Band Replication.
  72. * - HE AAC v2 comprises LC AAC with Spectral Band Replication and
  73. Parametric Stereo.
  74. */
  75. #include "avcodec.h"
  76. #include "internal.h"
  77. #include "get_bits.h"
  78. #include "dsputil.h"
  79. #include "fft.h"
  80. #include "lpc.h"
  81. #include "aac.h"
  82. #include "aactab.h"
  83. #include "aacdectab.h"
  84. #include "cbrt_tablegen.h"
  85. #include "sbr.h"
  86. #include "aacsbr.h"
  87. #include "mpeg4audio.h"
  88. #include "aac_parser.h"
  89. #include <assert.h>
  90. #include <errno.h>
  91. #include <math.h>
  92. #include <string.h>
  93. #if ARCH_ARM
  94. # include "arm/aac.h"
  95. #endif
/* Union for reinterpreting a float's bit pattern as a 32-bit integer and
 * back (type punning through a union, valid in C99). Used by the VMUL*S
 * helpers below to flip a float's sign bit with an integer XOR instead of
 * a floating-point multiply. */
union float754 {
    float f;
    uint32_t i;
};
static VLC vlc_scalefactors;   // Huffman table for differential scalefactors
static VLC vlc_spectral[11];   // Huffman tables for the 11 spectral codebooks
// Shared error message for every "ran out of bitstream" condition.
static const char overread_err[] = "Input buffer exhausted before END element found\n";
  103. static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
  104. {
  105. /* Some buggy encoders appear to set all elem_ids to zero and rely on
  106. channels always occurring in the same order. This is expressly forbidden
  107. by the spec but we will try to work around it.
  108. */
  109. int err_printed = 0;
  110. while (ac->tags_seen_this_frame[type][elem_id] && elem_id < MAX_ELEM_ID) {
  111. if (ac->output_configured < OC_LOCKED && !err_printed) {
  112. av_log(ac->avctx, AV_LOG_WARNING, "Duplicate channel tag found, attempting to remap.\n");
  113. err_printed = 1;
  114. }
  115. elem_id++;
  116. }
  117. if (elem_id == MAX_ELEM_ID)
  118. return NULL;
  119. ac->tags_seen_this_frame[type][elem_id] = 1;
  120. if (ac->tag_che_map[type][elem_id]) {
  121. return ac->tag_che_map[type][elem_id];
  122. }
  123. if (ac->tags_mapped >= tags_per_config[ac->m4ac.chan_config]) {
  124. return NULL;
  125. }
  126. switch (ac->m4ac.chan_config) {
  127. case 7:
  128. if (ac->tags_mapped == 3 && type == TYPE_CPE) {
  129. ac->tags_mapped++;
  130. return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2];
  131. }
  132. case 6:
  133. /* Some streams incorrectly code 5.1 audio as SCE[0] CPE[0] CPE[1] SCE[1]
  134. instead of SCE[0] CPE[0] CPE[1] LFE[0]. If we seem to have
  135. encountered such a stream, transfer the LFE[0] element to the SCE[1]'s mapping */
  136. if (ac->tags_mapped == tags_per_config[ac->m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
  137. ac->tags_mapped++;
  138. return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
  139. }
  140. case 5:
  141. if (ac->tags_mapped == 2 && type == TYPE_CPE) {
  142. ac->tags_mapped++;
  143. return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
  144. }
  145. case 4:
  146. if (ac->tags_mapped == 2 && ac->m4ac.chan_config == 4 && type == TYPE_SCE) {
  147. ac->tags_mapped++;
  148. return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
  149. }
  150. case 3:
  151. case 2:
  152. if (ac->tags_mapped == (ac->m4ac.chan_config != 2) && type == TYPE_CPE) {
  153. ac->tags_mapped++;
  154. return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][0];
  155. } else if (ac->m4ac.chan_config == 2) {
  156. return NULL;
  157. }
  158. case 1:
  159. if (!ac->tags_mapped && type == TYPE_SCE) {
  160. ac->tags_mapped++;
  161. return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][0];
  162. }
  163. default:
  164. return NULL;
  165. }
  166. }
  167. /**
  168. * Check for the channel element in the current channel position configuration.
  169. * If it exists, make sure the appropriate element is allocated and map the
  170. * channel order to match the internal FFmpeg channel layout.
  171. *
  172. * @param che_pos current channel position configuration
  173. * @param type channel element type
  174. * @param id channel element id
  175. * @param channels count of the number of channels in the configuration
  176. *
  177. * @return Returns error status. 0 - OK, !0 - error
  178. */
  179. static av_cold int che_configure(AACContext *ac,
  180. enum ChannelPosition che_pos[4][MAX_ELEM_ID],
  181. int type, int id,
  182. int *channels)
  183. {
  184. if (che_pos[type][id]) {
  185. if (!ac->che[type][id] && !(ac->che[type][id] = av_mallocz(sizeof(ChannelElement))))
  186. return AVERROR(ENOMEM);
  187. ff_aac_sbr_ctx_init(&ac->che[type][id]->sbr);
  188. if (type != TYPE_CCE) {
  189. ac->output_data[(*channels)++] = ac->che[type][id]->ch[0].ret;
  190. if (type == TYPE_CPE ||
  191. (type == TYPE_SCE && ac->m4ac.ps == 1)) {
  192. ac->output_data[(*channels)++] = ac->che[type][id]->ch[1].ret;
  193. }
  194. }
  195. } else {
  196. if (ac->che[type][id])
  197. ff_aac_sbr_ctx_close(&ac->che[type][id]->sbr);
  198. av_freep(&ac->che[type][id]);
  199. }
  200. return 0;
  201. }
  202. /**
  203. * Configure output channel order based on the current program configuration element.
  204. *
  205. * @param che_pos current channel position configuration
  206. * @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
  207. *
  208. * @return Returns error status. 0 - OK, !0 - error
  209. */
static av_cold int output_configure(AACContext *ac,
                                    enum ChannelPosition che_pos[4][MAX_ELEM_ID],
                                    enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
                                    int channel_config, enum OCStatus oc_type)
{
    AVCodecContext *avctx = ac->avctx;
    int i, type, channels = 0, ret;

    // Adopt the new channel position configuration (no-op if the caller
    // passed the same array for both parameters).
    if (new_che_pos != che_pos)
        memcpy(che_pos, new_che_pos, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));

    if (channel_config) {
        // Known default configuration: allocate exactly the elements of
        // the layout table and reset the tag map so elements are remapped
        // as they are seen in the next frame.
        for (i = 0; i < tags_per_config[channel_config]; i++) {
            if ((ret = che_configure(ac, che_pos,
                                     aac_channel_layout_map[channel_config - 1][i][0],
                                     aac_channel_layout_map[channel_config - 1][i][1],
                                     &channels)))
                return ret;
        }
        memset(ac->tag_che_map, 0, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
        ac->tags_mapped = 0;
        avctx->channel_layout = aac_channel_layout[channel_config - 1];
    } else {
        /* Allocate or free elements depending on if they are in the
         * current program configuration.
         *
         * Set up default 1:1 output mapping.
         *
         * For a 5.1 stream the output order will be:
         * [ Center ] [ Front Left ] [ Front Right ] [ LFE ] [ Surround Left ] [ Surround Right ]
         */
        for (i = 0; i < MAX_ELEM_ID; i++) {
            for (type = 0; type < 4; type++) {
                if ((ret = che_configure(ac, che_pos, type, i, &channels)))
                    return ret;
            }
        }
        // 1:1 mapping: every allocated element maps to itself, and all
        // tags are considered mapped up front.
        memcpy(ac->tag_che_map, ac->che, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
        ac->tags_mapped = 4 * MAX_ELEM_ID;
        avctx->channel_layout = 0;
    }

    avctx->channels = channels;
    ac->output_configured = oc_type;
    return 0;
}
  253. /**
  254. * Decode an array of 4 bit element IDs, optionally interleaved with a stereo/mono switching bit.
  255. *
  256. * @param cpe_map Stereo (Channel Pair Element) map, NULL if stereo bit is not present.
  257. * @param sce_map mono (Single Channel Element) map
  258. * @param type speaker type/position for these channels
  259. */
  260. static void decode_channel_map(enum ChannelPosition *cpe_map,
  261. enum ChannelPosition *sce_map,
  262. enum ChannelPosition type,
  263. GetBitContext *gb, int n)
  264. {
  265. while (n--) {
  266. enum ChannelPosition *map = cpe_map && get_bits1(gb) ? cpe_map : sce_map; // stereo or mono map
  267. map[get_bits(gb, 4)] = type;
  268. }
  269. }
  270. /**
  271. * Decode program configuration element; reference: table 4.2.
  272. *
  273. * @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
  274. *
  275. * @return Returns error status. 0 - OK, !0 - error
  276. */
static int decode_pce(AACContext *ac, enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
                      GetBitContext *gb)
{
    int num_front, num_side, num_back, num_lfe, num_assoc_data, num_cc, sampling_index;
    int comment_len;

    skip_bits(gb, 2);  // object_type

    // The PCE carries its own sampling rate index; warn when it disagrees
    // with the one from the container-level configuration.
    sampling_index = get_bits(gb, 4);
    if (ac->m4ac.sampling_index != sampling_index)
        av_log(ac->avctx, AV_LOG_WARNING, "Sample rate index in program config element does not match the sample rate index configured by the container.\n");

    // Element counts per speaker group.
    num_front      = get_bits(gb, 4);
    num_side       = get_bits(gb, 4);
    num_back       = get_bits(gb, 4);
    num_lfe        = get_bits(gb, 2);
    num_assoc_data = get_bits(gb, 3);
    num_cc         = get_bits(gb, 4);

    // Optional mixdown descriptors are parsed but ignored.
    if (get_bits1(gb))
        skip_bits(gb, 4); // mono_mixdown_tag
    if (get_bits1(gb))
        skip_bits(gb, 4); // stereo_mixdown_tag
    if (get_bits1(gb))
        skip_bits(gb, 3); // mixdown_coeff_index and pseudo_surround

    decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_FRONT, gb, num_front);
    decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_SIDE,  gb, num_side );
    decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_BACK,  gb, num_back );
    decode_channel_map(NULL,                  new_che_pos[TYPE_LFE], AAC_CHANNEL_LFE,   gb, num_lfe  );

    skip_bits_long(gb, 4 * num_assoc_data);  // associated data elements, unused

    decode_channel_map(new_che_pos[TYPE_CCE], new_che_pos[TYPE_CCE], AAC_CHANNEL_CC, gb, num_cc);

    align_get_bits(gb);

    /* comment field, first byte is length */
    comment_len = get_bits(gb, 8) * 8;
    if (get_bits_left(gb) < comment_len) {
        av_log(ac->avctx, AV_LOG_ERROR, overread_err);
        return -1;
    }
    skip_bits_long(gb, comment_len);
    return 0;
}
  314. /**
  315. * Set up channel positions based on a default channel configuration
  316. * as specified in table 1.17.
  317. *
  318. * @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
  319. *
  320. * @return Returns error status. 0 - OK, !0 - error
  321. */
static av_cold int set_default_channel_config(AACContext *ac,
                                              enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
                                              int channel_config)
{
    // Only configurations 1-7 are defined by table 1.17; 0 means "PCE
    // follows" and is handled by the caller.
    if (channel_config < 1 || channel_config > 7) {
        av_log(ac->avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n",
               channel_config);
        return -1;
    }

    /* default channel configurations:
     *
     * 1ch : front center (mono)
     * 2ch : L + R (stereo)
     * 3ch : front center + L + R
     * 4ch : front center + L + R + back center
     * 5ch : front center + L + R + back stereo
     * 6ch : front center + L + R + back stereo + LFE
     * 7ch : front center + L + R + outer front left + outer front right + back stereo + LFE
     */

    // Each larger configuration is built up cumulatively from the
    // smaller ones via the conditions below.
    if (channel_config != 2)
        new_che_pos[TYPE_SCE][0] = AAC_CHANNEL_FRONT; // front center (or mono)
    if (channel_config > 1)
        new_che_pos[TYPE_CPE][0] = AAC_CHANNEL_FRONT; // L + R (or stereo)
    if (channel_config == 4)
        new_che_pos[TYPE_SCE][1] = AAC_CHANNEL_BACK;  // back center
    if (channel_config > 4)
        new_che_pos[TYPE_CPE][(channel_config == 7) + 1]
            = AAC_CHANNEL_BACK;                       // back stereo
    if (channel_config > 5)
        new_che_pos[TYPE_LFE][0] = AAC_CHANNEL_LFE;   // LFE
    if (channel_config == 7)
        new_che_pos[TYPE_CPE][1] = AAC_CHANNEL_FRONT; // outer front left + outer front right

    return 0;
}
  356. /**
  357. * Decode GA "General Audio" specific configuration; reference: table 4.1.
  358. *
  359. * @return Returns error status. 0 - OK, !0 - error
  360. */
static int decode_ga_specific_config(AACContext *ac, GetBitContext *gb,
                                     int channel_config)
{
    enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
    int extension_flag, ret;

    // Only the 1024-sample frame length is supported.
    if (get_bits1(gb)) { // frameLengthFlag
        av_log_missing_feature(ac->avctx, "960/120 MDCT window is", 1);
        return -1;
    }

    if (get_bits1(gb))       // dependsOnCoreCoder
        skip_bits(gb, 14);   // coreCoderDelay
    extension_flag = get_bits1(gb);

    if (ac->m4ac.object_type == AOT_AAC_SCALABLE ||
        ac->m4ac.object_type == AOT_ER_AAC_SCALABLE)
        skip_bits(gb, 3);    // layerNr

    memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
    if (channel_config == 0) {
        // channel_config 0 means the channel layout is described by an
        // in-band program config element.
        skip_bits(gb, 4);    // element_instance_tag
        if ((ret = decode_pce(ac, new_che_pos, gb)))
            return ret;
    } else {
        if ((ret = set_default_channel_config(ac, new_che_pos, channel_config)))
            return ret;
    }
    if ((ret = output_configure(ac, ac->che_pos, new_che_pos, channel_config, OC_GLOBAL_HDR)))
        return ret;

    if (extension_flag) {
        // Extension payloads are parsed for bitstream position only.
        switch (ac->m4ac.object_type) {
        case AOT_ER_BSAC:
            skip_bits(gb, 5);    // numOfSubFrame
            skip_bits(gb, 11);   // layer_length
            break;
        case AOT_ER_AAC_LC:
        case AOT_ER_AAC_LTP:
        case AOT_ER_AAC_SCALABLE:
        case AOT_ER_AAC_LD:
            skip_bits(gb, 3);  /* aacSectionDataResilienceFlag
                                * aacScalefactorDataResilienceFlag
                                * aacSpectralDataResilienceFlag
                                */
            break;
        }
        skip_bits1(gb);    // extensionFlag3 (TBD in version 3)
    }
    return 0;
}
  407. /**
  408. * Decode audio specific configuration; reference: table 1.13.
  409. *
  410. * @param data pointer to AVCodecContext extradata
  411. * @param data_size size of AVCCodecContext extradata
  412. *
  413. * @return Returns error status. 0 - OK, !0 - error
  414. */
static int decode_audio_specific_config(AACContext *ac, void *data,
                                        int data_size)
{
    GetBitContext gb;
    int i;

    init_get_bits(&gb, data, data_size * 8);

    // Parse the common MPEG-4 audio config header; 'i' is the number of
    // bits it consumed, which we then skip in our own bit reader.
    if ((i = ff_mpeg4audio_get_config(&ac->m4ac, data, data_size)) < 0)
        return -1;
    if (ac->m4ac.sampling_index > 12) {
        av_log(ac->avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", ac->m4ac.sampling_index);
        return -1;
    }
    // SBR signalled but PS left implicit (-1): assume HE-AAC v2 (PS on).
    if (ac->m4ac.sbr == 1 && ac->m4ac.ps == -1)
        ac->m4ac.ps = 1;

    skip_bits_long(&gb, i);

    switch (ac->m4ac.object_type) {
    case AOT_AAC_MAIN:
    case AOT_AAC_LC:
        if (decode_ga_specific_config(ac, &gb, ac->m4ac.chan_config))
            return -1;
        break;
    default:
        av_log(ac->avctx, AV_LOG_ERROR, "Audio object type %s%d is not supported.\n",
               ac->m4ac.sbr == 1? "SBR+" : "", ac->m4ac.object_type);
        return -1;
    }
    return 0;
}
  443. /**
  444. * linear congruential pseudorandom number generator
  445. *
  446. * @param previous_val pointer to the current state of the generator
  447. *
  448. * @return Returns a 32-bit pseudorandom integer
  449. */
  450. static av_always_inline int lcg_random(int previous_val)
  451. {
  452. return previous_val * 1664525 + 1013904223;
  453. }
/* Reset one predictor to its initial state: history samples and
 * correlations zeroed, variances set to one. */
static av_always_inline void reset_predict_state(PredictorState *ps)
{
    ps->r0   = 0.0f;
    ps->r1   = 0.0f;
    ps->cor0 = 0.0f;
    ps->cor1 = 0.0f;
    ps->var0 = 1.0f;
    ps->var1 = 1.0f;
}
  463. static void reset_all_predictors(PredictorState *ps)
  464. {
  465. int i;
  466. for (i = 0; i < MAX_PREDICTORS; i++)
  467. reset_predict_state(&ps[i]);
  468. }
/* Reset one predictor reset group: predictors group_num-1, group_num+29,
 * group_num+59, ... — i.e. every 30th predictor starting from the
 * 1-based group number. */
static void reset_predictor_group(PredictorState *ps, int group_num)
{
    int i;
    for (i = group_num - 1; i < MAX_PREDICTORS; i += 30)
        reset_predict_state(&ps[i]);
}
/* Build one static spectral VLC from the codebook arrays; 'num' selects
 * the spectral codebook (0-10), 'size' is the number of VLC table
 * entries to reserve for it. */
#define AAC_INIT_VLC_STATIC(num, size) \
    INIT_VLC_STATIC(&vlc_spectral[num], 8, ff_aac_spectral_sizes[num], \
        ff_aac_spectral_bits[num], sizeof( ff_aac_spectral_bits[num][0]), sizeof( ff_aac_spectral_bits[num][0]), \
        ff_aac_spectral_codes[num], sizeof(ff_aac_spectral_codes[num][0]), sizeof(ff_aac_spectral_codes[num][0]), \
        size);
/**
 * Initialize the AAC decoder: parse extradata, build the static VLC and
 * math tables, and set up the MDCT contexts and windows.
 *
 * @return 0 on success, -1 if the extradata configuration is invalid
 */
static av_cold int aac_decode_init(AVCodecContext *avctx)
{
    AACContext *ac = avctx->priv_data;

    ac->avctx = avctx;
    ac->m4ac.sample_rate = avctx->sample_rate;

    // Out-of-band AudioSpecificConfig, if the container provided one.
    if (avctx->extradata_size > 0) {
        if (decode_audio_specific_config(ac, avctx->extradata, avctx->extradata_size))
            return -1;
    }

    avctx->sample_fmt = SAMPLE_FMT_S16;

    // Static Huffman tables for the 11 spectral codebooks.
    AAC_INIT_VLC_STATIC( 0, 304);
    AAC_INIT_VLC_STATIC( 1, 270);
    AAC_INIT_VLC_STATIC( 2, 550);
    AAC_INIT_VLC_STATIC( 3, 300);
    AAC_INIT_VLC_STATIC( 4, 328);
    AAC_INIT_VLC_STATIC( 5, 294);
    AAC_INIT_VLC_STATIC( 6, 306);
    AAC_INIT_VLC_STATIC( 7, 268);
    AAC_INIT_VLC_STATIC( 8, 510);
    AAC_INIT_VLC_STATIC( 9, 366);
    AAC_INIT_VLC_STATIC(10, 462);

    ff_aac_sbr_init();

    dsputil_init(&ac->dsp, avctx);

    ac->random_state = 0x1f2e3d4c;  // PNS noise generator seed

    // -1024 - Compensate wrong IMDCT method.
    // 32768 - Required to scale values to the correct range for the bias method
    // for float to int16 conversion.

    // The C float_to_int16 path uses the add-bias trick; optimized
    // (SIMD) paths use plain scaling with a different scalefactor offset.
    if (ac->dsp.float_to_int16_interleave == ff_float_to_int16_interleave_c) {
        ac->add_bias  = 385.0f;
        ac->sf_scale  = 1. / (-1024. * 32768.);
        ac->sf_offset = 0;
    } else {
        ac->add_bias  = 0.0f;
        ac->sf_scale  = 1. / -1024.;
        ac->sf_offset = 60;
    }

    ff_aac_tableinit();

    INIT_VLC_STATIC(&vlc_scalefactors,7,FF_ARRAY_ELEMS(ff_aac_scalefactor_code),
                    ff_aac_scalefactor_bits, sizeof(ff_aac_scalefactor_bits[0]), sizeof(ff_aac_scalefactor_bits[0]),
                    ff_aac_scalefactor_code, sizeof(ff_aac_scalefactor_code[0]), sizeof(ff_aac_scalefactor_code[0]),
                    352);

    // MDCTs for long (2048-point) and short (256-point) windows.
    ff_mdct_init(&ac->mdct,       11, 1, 1.0);
    ff_mdct_init(&ac->mdct_small,  8, 1, 1.0);
    // window initialization
    ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
    ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
    ff_init_ff_sine_windows(10);
    ff_init_ff_sine_windows( 7);

    cbrt_tableinit();

    return 0;
}
  531. /**
  532. * Skip data_stream_element; reference: table 4.10.
  533. */
static int skip_data_stream_element(AACContext *ac, GetBitContext *gb)
{
    int byte_align = get_bits1(gb);
    int count = get_bits(gb, 8);

    // A count of 255 is an escape: a second byte extends the length.
    if (count == 255)
        count += get_bits(gb, 8);
    if (byte_align)
        align_get_bits(gb);

    // Validate before skipping so a truncated packet is reported rather
    // than silently over-reading.
    if (get_bits_left(gb) < 8 * count) {
        av_log(ac->avctx, AV_LOG_ERROR, overread_err);
        return -1;
    }
    skip_bits_long(gb, 8 * count);
    return 0;
}
/* Decode the main-profile prediction data for one ICS: optional reset
 * group (1-30) plus one prediction_used flag per scalefactor band, up to
 * the sample-rate-dependent maximum predictor band count. */
static int decode_prediction(AACContext *ac, IndividualChannelStream *ics,
                             GetBitContext *gb)
{
    int sfb;

    if (get_bits1(gb)) {  // predictor_reset flag
        ics->predictor_reset_group = get_bits(gb, 5);
        if (ics->predictor_reset_group == 0 || ics->predictor_reset_group > 30) {
            av_log(ac->avctx, AV_LOG_ERROR, "Invalid Predictor Reset Group.\n");
            return -1;
        }
    }
    for (sfb = 0; sfb < FFMIN(ics->max_sfb, ff_aac_pred_sfb_max[ac->m4ac.sampling_index]); sfb++) {
        ics->prediction_used[sfb] = get_bits1(gb);
    }
    return 0;
}
  565. /**
  566. * Decode Individual Channel Stream info; reference: table 4.6.
  567. *
  568. * @param common_window Channels have independent [0], or shared [1], Individual Channel Stream information.
  569. */
static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
                           GetBitContext *gb, int common_window)
{
    if (get_bits1(gb)) {
        av_log(ac->avctx, AV_LOG_ERROR, "Reserved bit set.\n");
        // Zero the ICS so stale state from a previous frame cannot be
        // used after this error.
        memset(ics, 0, sizeof(IndividualChannelStream));
        return -1;
    }
    // Keep the previous frame's window parameters in slot [1]; they are
    // needed for the overlap-add of transitional windows.
    ics->window_sequence[1] = ics->window_sequence[0];
    ics->window_sequence[0] = get_bits(gb, 2);
    ics->use_kb_window[1]   = ics->use_kb_window[0];
    ics->use_kb_window[0]   = get_bits1(gb);
    ics->num_window_groups  = 1;
    ics->group_len[0]       = 1;
    if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
        int i;
        ics->max_sfb = get_bits(gb, 4);
        // 7 grouping bits: a set bit extends the current group by one
        // window, a clear bit starts a new group.
        for (i = 0; i < 7; i++) {
            if (get_bits1(gb)) {
                ics->group_len[ics->num_window_groups - 1]++;
            } else {
                ics->num_window_groups++;
                ics->group_len[ics->num_window_groups - 1] = 1;
            }
        }
        ics->num_windows    = 8;
        ics->swb_offset     = ff_swb_offset_128[ac->m4ac.sampling_index];
        ics->num_swb        = ff_aac_num_swb_128[ac->m4ac.sampling_index];
        ics->tns_max_bands  = ff_tns_max_bands_128[ac->m4ac.sampling_index];
        ics->predictor_present = 0;
    } else {
        ics->max_sfb        = get_bits(gb, 6);
        ics->num_windows    = 1;
        ics->swb_offset     = ff_swb_offset_1024[ac->m4ac.sampling_index];
        ics->num_swb        = ff_aac_num_swb_1024[ac->m4ac.sampling_index];
        ics->tns_max_bands  = ff_tns_max_bands_1024[ac->m4ac.sampling_index];
        ics->predictor_present = get_bits1(gb);
        ics->predictor_reset_group = 0;
        if (ics->predictor_present) {
            // Prediction data is only valid for AAC Main; for LC it is a
            // hard error and for other profiles (LTP) it is unsupported.
            if (ac->m4ac.object_type == AOT_AAC_MAIN) {
                if (decode_prediction(ac, ics, gb)) {
                    memset(ics, 0, sizeof(IndividualChannelStream));
                    return -1;
                }
            } else if (ac->m4ac.object_type == AOT_AAC_LC) {
                av_log(ac->avctx, AV_LOG_ERROR, "Prediction is not allowed in AAC-LC.\n");
                memset(ics, 0, sizeof(IndividualChannelStream));
                return -1;
            } else {
                av_log_missing_feature(ac->avctx, "Predictor bit set but LTP is", 1);
                memset(ics, 0, sizeof(IndividualChannelStream));
                return -1;
            }
        }
    }
    if (ics->max_sfb > ics->num_swb) {
        av_log(ac->avctx, AV_LOG_ERROR,
               "Number of scalefactor bands in group (%d) exceeds limit (%d).\n",
               ics->max_sfb, ics->num_swb);
        memset(ics, 0, sizeof(IndividualChannelStream));
        return -1;
    }
    return 0;
}
  634. /**
  635. * Decode band types (section_data payload); reference: table 4.46.
  636. *
  637. * @param band_type array of the used band type
  638. * @param band_type_run_end array of the last scalefactor band of a band type run
  639. *
  640. * @return Returns error status. 0 - OK, !0 - error
  641. */
static int decode_band_types(AACContext *ac, enum BandType band_type[120],
                             int band_type_run_end[120], GetBitContext *gb,
                             IndividualChannelStream *ics)
{
    int g, idx = 0;
    // Section length fields are 3 bits wide for short-window frames,
    // 5 bits otherwise.
    const int bits = (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) ? 3 : 5;

    for (g = 0; g < ics->num_window_groups; g++) {
        int k = 0;
        while (k < ics->max_sfb) {
            uint8_t sect_end = k;
            int sect_len_incr;
            int sect_band_type = get_bits(gb, 4);
            if (sect_band_type == 12) {  // 12 is a reserved band type
                av_log(ac->avctx, AV_LOG_ERROR, "invalid band type\n");
                return -1;
            }
            // The all-ones length value is an escape: keep accumulating
            // and reading further length fields.
            while ((sect_len_incr = get_bits(gb, bits)) == (1 << bits) - 1)
                sect_end += sect_len_incr;
            sect_end += sect_len_incr;
            if (get_bits_left(gb) < 0) {
                av_log(ac->avctx, AV_LOG_ERROR, overread_err);
                return -1;
            }
            if (sect_end > ics->max_sfb) {
                av_log(ac->avctx, AV_LOG_ERROR,
                       "Number of bands (%d) exceeds limit (%d).\n",
                       sect_end, ics->max_sfb);
                return -1;
            }
            // Record the band type and the run end for every band in the
            // section.
            for (; k < sect_end; k++) {
                band_type        [idx]   = sect_band_type;
                band_type_run_end[idx++] = sect_end;
            }
        }
    }
    return 0;
}
  679. /**
  680. * Decode scalefactors; reference: table 4.47.
  681. *
  682. * @param global_gain first scalefactor value as scalefactors are differentially coded
  683. * @param band_type array of the used band type
  684. * @param band_type_run_end array of the last scalefactor band of a band type run
  685. * @param sf array of scalefactors or intensity stereo positions
  686. *
  687. * @return Returns error status. 0 - OK, !0 - error
  688. */
static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
                               unsigned int global_gain,
                               IndividualChannelStream *ics,
                               enum BandType band_type[120],
                               int band_type_run_end[120])
{
    const int sf_offset = ac->sf_offset + (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE ? 12 : 0);
    int g, i, idx = 0;
    // Running values: [0] ordinary scalefactor, [1] noise gain,
    // [2] intensity stereo position. All are differentially coded.
    int offset[3] = { global_gain, global_gain - 90, 100 };
    int noise_flag = 1;
    static const char *sf_str[3] = { "Global gain", "Noise gain", "Intensity stereo position" };

    for (g = 0; g < ics->num_window_groups; g++) {
        for (i = 0; i < ics->max_sfb;) {
            int run_end = band_type_run_end[idx];
            if (band_type[idx] == ZERO_BT) {
                for (; i < run_end; i++, idx++)
                    sf[idx] = 0.;
            } else if ((band_type[idx] == INTENSITY_BT) || (band_type[idx] == INTENSITY_BT2)) {
                for (; i < run_end; i++, idx++) {
                    offset[2] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
                    // The unsigned compare also rejects negative values.
                    if (offset[2] > 255U) {
                        av_log(ac->avctx, AV_LOG_ERROR,
                               "%s (%d) out of range.\n", sf_str[2], offset[2]);
                        return -1;
                    }
                    sf[idx] = ff_aac_pow2sf_tab[-offset[2] + 300];
                }
            } else if (band_type[idx] == NOISE_BT) {
                for (; i < run_end; i++, idx++) {
                    // The first noise band carries a raw 9-bit delta; all
                    // following ones use the scalefactor VLC.
                    if (noise_flag-- > 0)
                        offset[1] += get_bits(gb, 9) - 256;
                    else
                        offset[1] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
                    if (offset[1] > 255U) {
                        av_log(ac->avctx, AV_LOG_ERROR,
                               "%s (%d) out of range.\n", sf_str[1], offset[1]);
                        return -1;
                    }
                    sf[idx] = -ff_aac_pow2sf_tab[offset[1] + sf_offset + 100];
                }
            } else {
                for (; i < run_end; i++, idx++) {
                    offset[0] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
                    if (offset[0] > 255U) {
                        av_log(ac->avctx, AV_LOG_ERROR,
                               "%s (%d) out of range.\n", sf_str[0], offset[0]);
                        return -1;
                    }
                    sf[idx] = -ff_aac_pow2sf_tab[ offset[0] + sf_offset];
                }
            }
        }
    }
    return 0;
}
  744. /**
  745. * Decode pulse data; reference: table 4.7.
  746. */
static int decode_pulses(Pulse *pulse, GetBitContext *gb,
                         const uint16_t *swb_offset, int num_swb)
{
    int i, pulse_swb;

    pulse->num_pulse = get_bits(gb, 2) + 1;  // 1-4 pulses
    pulse_swb        = get_bits(gb, 6);
    if (pulse_swb >= num_swb)
        return -1;
    // First pulse position: start of the chosen band plus a 5-bit offset.
    pulse->pos[0]    = swb_offset[pulse_swb];
    pulse->pos[0]   += get_bits(gb, 5);
    if (pulse->pos[0] > 1023)
        return -1;
    pulse->amp[0]    = get_bits(gb, 4);
    // Remaining pulses are coded as offsets from the previous one.
    for (i = 1; i < pulse->num_pulse; i++) {
        pulse->pos[i] = get_bits(gb, 5) + pulse->pos[i - 1];
        if (pulse->pos[i] > 1023)
            return -1;
        pulse->amp[i] = get_bits(gb, 4);
    }
    return 0;
}
  768. /**
  769. * Decode Temporal Noise Shaping data; reference: table 4.48.
  770. *
  771. * @return Returns error status. 0 - OK, !0 - error
  772. */
static int decode_tns(AACContext *ac, TemporalNoiseShaping *tns,
                      GetBitContext *gb, const IndividualChannelStream *ics)
{
    int w, filt, i, coef_len, coef_res, coef_compress;
    const int is8 = ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE;
    // Maximum filter order: 7 for short windows, 20 for Main, 12 otherwise.
    const int tns_max_order = is8 ? 7 : ac->m4ac.object_type == AOT_AAC_MAIN ? 20 : 12;

    for (w = 0; w < ics->num_windows; w++) {
        // Per-window filter count; field widths shrink for short windows.
        if ((tns->n_filt[w] = get_bits(gb, 2 - is8))) {
            coef_res = get_bits1(gb);

            for (filt = 0; filt < tns->n_filt[w]; filt++) {
                int tmp2_idx;
                tns->length[w][filt] = get_bits(gb, 6 - 2 * is8);

                if ((tns->order[w][filt] = get_bits(gb, 5 - 2 * is8)) > tns_max_order) {
                    av_log(ac->avctx, AV_LOG_ERROR, "TNS filter order %d is greater than maximum %d.\n",
                           tns->order[w][filt], tns_max_order);
                    tns->order[w][filt] = 0;
                    return -1;
                }

                if (tns->order[w][filt]) {
                    tns->direction[w][filt] = get_bits1(gb);
                    coef_compress = get_bits1(gb);
                    // Coefficient field width and the dequant table are
                    // both selected by (coef_compress, coef_res).
                    coef_len = coef_res + 3 - coef_compress;
                    tmp2_idx = 2 * coef_compress + coef_res;

                    for (i = 0; i < tns->order[w][filt]; i++)
                        tns->coef[w][filt][i] = tns_tmp2_map[tmp2_idx][get_bits(gb, coef_len)];
                }
            }
        }
    }
    return 0;
}
  804. /**
  805. * Decode Mid/Side data; reference: table 4.54.
  806. *
  807. * @param ms_present Indicates mid/side stereo presence. [0] mask is all 0s;
  808. * [1] mask is decoded from bitstream; [2] mask is all 1s;
  809. * [3] reserved for scalable AAC
  810. */
  811. static void decode_mid_side_stereo(ChannelElement *cpe, GetBitContext *gb,
  812. int ms_present)
  813. {
  814. int idx;
  815. if (ms_present == 1) {
  816. for (idx = 0; idx < cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb; idx++)
  817. cpe->ms_mask[idx] = get_bits1(gb);
  818. } else if (ms_present == 2) {
  819. memset(cpe->ms_mask, 1, cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb * sizeof(cpe->ms_mask[0]));
  820. }
  821. }
  822. #ifndef VMUL2
  823. static inline float *VMUL2(float *dst, const float *v, unsigned idx,
  824. const float *scale)
  825. {
  826. float s = *scale;
  827. *dst++ = v[idx & 15] * s;
  828. *dst++ = v[idx>>4 & 15] * s;
  829. return dst;
  830. }
  831. #endif
  832. #ifndef VMUL4
  833. static inline float *VMUL4(float *dst, const float *v, unsigned idx,
  834. const float *scale)
  835. {
  836. float s = *scale;
  837. *dst++ = v[idx & 3] * s;
  838. *dst++ = v[idx>>2 & 3] * s;
  839. *dst++ = v[idx>>4 & 3] * s;
  840. *dst++ = v[idx>>6 & 3] * s;
  841. return dst;
  842. }
  843. #endif
  844. #ifndef VMUL2S
  845. static inline float *VMUL2S(float *dst, const float *v, unsigned idx,
  846. unsigned sign, const float *scale)
  847. {
  848. union float754 s0, s1;
  849. s0.f = s1.f = *scale;
  850. s0.i ^= sign >> 1 << 31;
  851. s1.i ^= sign << 31;
  852. *dst++ = v[idx & 15] * s0.f;
  853. *dst++ = v[idx>>4 & 15] * s1.f;
  854. return dst;
  855. }
  856. #endif
  857. #ifndef VMUL4S
  858. static inline float *VMUL4S(float *dst, const float *v, unsigned idx,
  859. unsigned sign, const float *scale)
  860. {
  861. unsigned nz = idx >> 12;
  862. union float754 s = { .f = *scale };
  863. union float754 t;
  864. t.i = s.i ^ (sign & 1<<31);
  865. *dst++ = v[idx & 3] * t.f;
  866. sign <<= nz & 1; nz >>= 1;
  867. t.i = s.i ^ (sign & 1<<31);
  868. *dst++ = v[idx>>2 & 3] * t.f;
  869. sign <<= nz & 1; nz >>= 1;
  870. t.i = s.i ^ (sign & 1<<31);
  871. *dst++ = v[idx>>4 & 3] * t.f;
  872. sign <<= nz & 1; nz >>= 1;
  873. t.i = s.i ^ (sign & 1<<31);
  874. *dst++ = v[idx>>6 & 3] * t.f;
  875. return dst;
  876. }
  877. #endif
/**
 * Decode spectral data; reference: table 4.50.
 * Dequantize and scale spectral data; reference: 4.6.3.3.
 *
 * @param coef array of dequantized, scaled spectral data
 * @param sf array of scalefactors or intensity stereo positions
 * @param pulse_present set if pulses are present
 * @param pulse pointer to pulse data struct
 * @param band_type array of the used band type
 *
 * @return Returns error status. 0 - OK, !0 - error
 */
static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
                                       GetBitContext *gb, const float sf[120],
                                       int pulse_present, const Pulse *pulse,
                                       const IndividualChannelStream *ics,
                                       enum BandType band_type[120])
{
    int i, k, g, idx = 0;
    const int c = 1024 / ics->num_windows;   /* coefficients per window */
    const uint16_t *offsets = ics->swb_offset;
    float *coef_base = coef;                 /* kept for pulse post-processing */
    int err_idx;

    /* Clear everything above the last coded scalefactor band in each window. */
    for (g = 0; g < ics->num_windows; g++)
        memset(coef + g * 128 + offsets[ics->max_sfb], 0,
               sizeof(float) * (c - offsets[ics->max_sfb]));

    for (g = 0; g < ics->num_window_groups; g++) {
        unsigned g_len = ics->group_len[g];

        for (i = 0; i < ics->max_sfb; i++, idx++) {
            /* band_type - 1: ZERO_BT (0) wraps to a huge unsigned value so the
             * first test below catches both zero and intensity bands. */
            const unsigned cbt_m1 = band_type[idx] - 1;
            float *cfo = coef + offsets[i];
            int off_len = offsets[i + 1] - offsets[i];
            int group;

            if (cbt_m1 >= INTENSITY_BT2 - 1) {
                /* Zero or intensity-stereo band: no spectral data here. */
                for (group = 0; group < g_len; group++, cfo += 128) {
                    memset(cfo, 0, off_len * sizeof(float));
                }
            } else if (cbt_m1 == NOISE_BT - 1) {
                /* Perceptual Noise Substitution: fill with LCG noise, then
                 * normalize the band to the transmitted energy sf[idx]. */
                for (group = 0; group < g_len; group++, cfo += 128) {
                    float scale;
                    float band_energy;

                    for (k = 0; k < off_len; k++) {
                        ac->random_state = lcg_random(ac->random_state);
                        /* int -> float; exact values are irrelevant, only the
                         * spectrum shape, as the band is renormalized below */
                        cfo[k] = ac->random_state;
                    }
                    band_energy = ac->dsp.scalarproduct_float(cfo, cfo, off_len);
                    scale = sf[idx] / sqrtf(band_energy);
                    ac->dsp.vector_fmul_scalar(cfo, cfo, scale, off_len);
                }
            } else {
                /* Regular spectral codebook; cbt_m1 >> 1 selects the decode
                 * flavor: unsigned quads, signed quads, unsigned pairs, signed
                 * pairs, or the escape codebook. */
                const float *vq = ff_aac_codebook_vector_vals[cbt_m1];
                const uint16_t *cb_vector_idx = ff_aac_codebook_vector_idx[cbt_m1];
                VLC_TYPE (*vlc_tab)[2] = vlc_spectral[cbt_m1].table;
                const int cb_size = ff_aac_spectral_sizes[cbt_m1];
                OPEN_READER(re, gb);   /* switch to the cached bitstream reader */

                switch (cbt_m1 >> 1) {
                case 0:
                    /* Unsigned 4-tuples (codebooks 1-2). */
                    for (group = 0; group < g_len; group++, cfo += 128) {
                        float *cf = cfo;
                        int len = off_len;

                        do {
                            int code;
                            unsigned cb_idx;

                            UPDATE_CACHE(re, gb);
                            GET_VLC(code, re, gb, vlc_tab, 8, 2);

                            if (code >= cb_size) {
                                err_idx = code;
                                goto err_cb_overflow;
                            }

                            cb_idx = cb_vector_idx[code];
                            cf = VMUL4(cf, vq, cb_idx, sf + idx);
                        } while (len -= 4);
                    }
                    break;

                case 1:
                    /* Signed 4-tuples (codebooks 3-4): sign bits follow the VLC. */
                    for (group = 0; group < g_len; group++, cfo += 128) {
                        float *cf = cfo;
                        int len = off_len;

                        do {
                            int code;
                            unsigned nnz;
                            unsigned cb_idx;
                            uint32_t bits;

                            UPDATE_CACHE(re, gb);
                            GET_VLC(code, re, gb, vlc_tab, 8, 2);

                            if (code >= cb_size) {
                                err_idx = code;
                                goto err_cb_overflow;
                            }

#if MIN_CACHE_BITS < 20
                            UPDATE_CACHE(re, gb);
#endif
                            cb_idx = cb_vector_idx[code];
                            nnz = cb_idx >> 8 & 15;   /* number of nonzero entries */
                            bits = SHOW_UBITS(re, gb, nnz) << (32-nnz);
                            LAST_SKIP_BITS(re, gb, nnz);
                            cf = VMUL4S(cf, vq, cb_idx, bits, sf + idx);
                        } while (len -= 4);
                    }
                    break;

                case 2:
                    /* Unsigned pairs (codebooks 5-6). */
                    for (group = 0; group < g_len; group++, cfo += 128) {
                        float *cf = cfo;
                        int len = off_len;

                        do {
                            int code;
                            unsigned cb_idx;

                            UPDATE_CACHE(re, gb);
                            GET_VLC(code, re, gb, vlc_tab, 8, 2);

                            if (code >= cb_size) {
                                err_idx = code;
                                goto err_cb_overflow;
                            }

                            cb_idx = cb_vector_idx[code];
                            cf = VMUL2(cf, vq, cb_idx, sf + idx);
                        } while (len -= 2);
                    }
                    break;

                case 3:
                case 4:
                    /* Signed pairs (codebooks 7-10). */
                    for (group = 0; group < g_len; group++, cfo += 128) {
                        float *cf = cfo;
                        int len = off_len;

                        do {
                            int code;
                            unsigned nnz;
                            unsigned cb_idx;
                            unsigned sign;

                            UPDATE_CACHE(re, gb);
                            GET_VLC(code, re, gb, vlc_tab, 8, 2);

                            if (code >= cb_size) {
                                err_idx = code;
                                goto err_cb_overflow;
                            }

                            cb_idx = cb_vector_idx[code];
                            nnz = cb_idx >> 8 & 15;
                            sign = SHOW_UBITS(re, gb, nnz) << (cb_idx >> 12);
                            LAST_SKIP_BITS(re, gb, nnz);
                            cf = VMUL2S(cf, vq, cb_idx, sign, sf + idx);
                        } while (len -= 2);
                    }
                    break;

                default:
                    /* Escape codebook (11): values >= 16 are sent as escape
                     * sequences; results are written as raw IEEE bits via icf
                     * and scaled once per band afterwards. */
                    for (group = 0; group < g_len; group++, cfo += 128) {
                        float *cf = cfo;
                        uint32_t *icf = (uint32_t *) cf;
                        int len = off_len;

                        do {
                            int code;
                            unsigned nzt, nnz;
                            unsigned cb_idx;
                            uint32_t bits;
                            int j;

                            UPDATE_CACHE(re, gb);
                            GET_VLC(code, re, gb, vlc_tab, 8, 2);

                            if (!code) {
                                /* (0, 0) pair: no sign bits, no escapes. */
                                *icf++ = 0;
                                *icf++ = 0;
                                continue;
                            }

                            if (code >= cb_size) {
                                err_idx = code;
                                goto err_cb_overflow;
                            }

                            cb_idx = cb_vector_idx[code];
                            nnz = cb_idx >> 12;   /* # of sign bits to read */
                            nzt = cb_idx >> 8;    /* escape flags for the pair */
                            bits = SHOW_UBITS(re, gb, nnz) << (32-nnz);
                            LAST_SKIP_BITS(re, gb, nnz);

                            for (j = 0; j < 2; j++) {
                                if (nzt & 1<<j) {
                                    uint32_t b;
                                    int n;
                                    /* The total length of escape_sequence must be < 22 bits according
                                       to the specification (i.e. max is 111111110xxxxxxxxxxxx). */
                                    UPDATE_CACHE(re, gb);
                                    b = GET_CACHE(re, gb);
                                    b = 31 - av_log2(~b);   /* count of leading 1s */

                                    if (b > 8) {
                                        /* NOTE(review): the cached reader state is
                                         * not flushed back to gb on this path; the
                                         * frame is aborted anyway. */
                                        av_log(ac->avctx, AV_LOG_ERROR, "error in spectral data, ESC overflow\n");
                                        return -1;
                                    }

#if MIN_CACHE_BITS < 21
                                    LAST_SKIP_BITS(re, gb, b + 1);
                                    UPDATE_CACHE(re, gb);
#else
                                    SKIP_BITS(re, gb, b + 1);
#endif
                                    b += 4;   /* mantissa width: 4..12 bits */
                                    n = (1 << b) + SHOW_UBITS(re, gb, b);
                                    LAST_SKIP_BITS(re, gb, b);
                                    /* |n|^(4/3) from the table, sign from the stream */
                                    *icf++ = cbrt_tab[n] | (bits & 1<<31);
                                    bits <<= 1;
                                } else {
                                    unsigned v = ((const uint32_t*)vq)[cb_idx & 15];
                                    *icf++ = (bits & 1<<31) | v;
                                    bits <<= !!v;   /* consume a sign bit only if nonzero */
                                }
                                cb_idx >>= 4;
                            }
                        } while (len -= 2);

                        /* Apply the scalefactor to the whole band at once. */
                        ac->dsp.vector_fmul_scalar(cfo, cfo, sf[idx], off_len);
                    }
                }

                CLOSE_READER(re, gb);   /* commit the cached reader position */
            }
        }
        coef += g_len << 7;   /* advance past this window group (g_len * 128) */
    }

    if (pulse_present) {
        /* Add the pulse amplitudes in the quantized domain, then requantize
         * the affected coefficients; reference: 4.6.3.3. */
        idx = 0;
        for (i = 0; i < pulse->num_pulse; i++) {
            float co = coef_base[ pulse->pos[i] ];
            while (offsets[idx + 1] <= pulse->pos[i])
                idx++;
            if (band_type[idx] != NOISE_BT && sf[idx]) {
                float ico = -pulse->amp[i];
                if (co) {
                    /* back to quantized value, add amplitude toward/away from zero */
                    co /= sf[idx];
                    ico = co / sqrtf(sqrtf(fabsf(co))) + (co > 0 ? -ico : ico);
                }
                /* re-apply the |x|^(4/3) expansion and scalefactor */
                coef_base[ pulse->pos[i] ] = cbrtf(fabsf(ico)) * ico * sf[idx];
            }
        }
    }
    return 0;

err_cb_overflow:
    av_log(ac->avctx, AV_LOG_ERROR,
           "Read beyond end of ff_aac_codebook_vectors[%d][]. index %d >= %d\n",
           band_type[idx], err_idx, ff_aac_spectral_sizes[band_type[idx]]);
    return -1;
}
  1109. static av_always_inline float flt16_round(float pf)
  1110. {
  1111. union float754 tmp;
  1112. tmp.f = pf;
  1113. tmp.i = (tmp.i + 0x00008000U) & 0xFFFF0000U;
  1114. return tmp.f;
  1115. }
/* Reduce a float to 16 bits of IEEE-754 representation with tie-breaking. */
static av_always_inline float flt16_even(float pf)
{
    union float754 tmp;
    tmp.f = pf;
    /* NOTE(review): '>>' binds tighter than '&', so the tie-break term is
     * (tmp.i & (0x00010000U >> 16)) == (tmp.i & 1) — bit 0, not the kept
     * LSB bit 16 that textbook round-half-to-even would use. Presumably
     * matches the reference decoder; confirm before "fixing". */
    tmp.i = (tmp.i + 0x00007FFFU + (tmp.i & 0x00010000U >> 16)) & 0xFFFF0000U;
    return tmp.f;
}
  1123. static av_always_inline float flt16_trunc(float pf)
  1124. {
  1125. union float754 pun;
  1126. pun.f = pf;
  1127. pun.i &= 0xFFFF0000U;
  1128. return pun.f;
  1129. }
/**
 * One step of the AAC-Main backward-adaptive second-order lattice predictor
 * for a single spectral coefficient; all state is kept at reduced (16-bit
 * representation) precision via the flt16_* helpers.
 *
 * @param ps            per-coefficient predictor state (updated in place)
 * @param coef          spectral coefficient; prediction is added when enabled
 * @param output_enable nonzero if the predicted value is applied to *coef
 */
static av_always_inline void predict(AACContext *ac, PredictorState *ps, float *coef,
                                     int output_enable)
{
    const float a = 0.953125; // 61.0 / 64
    const float alpha = 0.90625; // 29.0 / 32
    float e0, e1;
    float pv;
    float k1, k2;

    /* Lattice coefficients from the adapted correlation/variance pairs. */
    k1 = ps->var0 > 1 ? ps->cor0 * flt16_even(a / ps->var0) : 0;
    k2 = ps->var1 > 1 ? ps->cor1 * flt16_even(a / ps->var1) : 0;
    pv = flt16_round(k1 * ps->r0 + k2 * ps->r1);
    if (output_enable)
        *coef += pv * ac->sf_scale;   /* apply prediction in output scaling */

    /* Prediction errors of the two lattice stages (e0 reads the possibly
     * updated coefficient — the adaptation tracks the decoded spectrum). */
    e0 = *coef / ac->sf_scale;
    e1 = e0 - k1 * ps->r0;

    /* Leaky (alpha) updates of correlations and variances, then registers. */
    ps->cor1 = flt16_trunc(alpha * ps->cor1 + ps->r1 * e1);
    ps->var1 = flt16_trunc(alpha * ps->var1 + 0.5 * (ps->r1 * ps->r1 + e1 * e1));
    ps->cor0 = flt16_trunc(alpha * ps->cor0 + ps->r0 * e0);
    ps->var0 = flt16_trunc(alpha * ps->var0 + 0.5 * (ps->r0 * ps->r0 + e0 * e0));
    ps->r1 = flt16_trunc(a * (ps->r0 - k1 * e0));
    ps->r0 = flt16_trunc(a * e0);
}
  1152. /**
  1153. * Apply AAC-Main style frequency domain prediction.
  1154. */
  1155. static void apply_prediction(AACContext *ac, SingleChannelElement *sce)
  1156. {
  1157. int sfb, k;
  1158. if (!sce->ics.predictor_initialized) {
  1159. reset_all_predictors(sce->predictor_state);
  1160. sce->ics.predictor_initialized = 1;
  1161. }
  1162. if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
  1163. for (sfb = 0; sfb < ff_aac_pred_sfb_max[ac->m4ac.sampling_index]; sfb++) {
  1164. for (k = sce->ics.swb_offset[sfb]; k < sce->ics.swb_offset[sfb + 1]; k++) {
  1165. predict(ac, &sce->predictor_state[k], &sce->coeffs[k],
  1166. sce->ics.predictor_present && sce->ics.prediction_used[sfb]);
  1167. }
  1168. }
  1169. if (sce->ics.predictor_reset_group)
  1170. reset_predictor_group(sce->predictor_state, sce->ics.predictor_reset_group);
  1171. } else
  1172. reset_all_predictors(sce->predictor_state);
  1173. }
/**
 * Decode an individual_channel_stream payload; reference: table 4.44.
 *
 * @param common_window Channels have independent [0], or shared [1], Individual Channel Stream information.
 * @param scale_flag scalable [1] or non-scalable [0] AAC (Unused until scalable AAC is implemented.)
 *
 * @return Returns error status. 0 - OK, !0 - error
 */
static int decode_ics(AACContext *ac, SingleChannelElement *sce,
                      GetBitContext *gb, int common_window, int scale_flag)
{
    Pulse pulse;
    TemporalNoiseShaping *tns = &sce->tns;
    IndividualChannelStream *ics = &sce->ics;
    float *out = sce->coeffs;
    int global_gain, pulse_present = 0;

    /* This assignment is to silence a GCC warning about the variable being used
     * uninitialized when in fact it always is.
     */
    pulse.num_pulse = 0;

    global_gain = get_bits(gb, 8);

    /* ics_info is read here only when it was not already shared via a
     * common_window CPE (and not scalable). */
    if (!common_window && !scale_flag) {
        if (decode_ics_info(ac, ics, gb, 0) < 0)
            return -1;
    }

    if (decode_band_types(ac, sce->band_type, sce->band_type_run_end, gb, ics) < 0)
        return -1;
    if (decode_scalefactors(ac, sce->sf, gb, global_gain, ics, sce->band_type, sce->band_type_run_end) < 0)
        return -1;

    pulse_present = 0;
    if (!scale_flag) {
        if ((pulse_present = get_bits1(gb))) {
            /* The pulse tool is only defined for long windows. */
            if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
                av_log(ac->avctx, AV_LOG_ERROR, "Pulse tool not allowed in eight short sequence.\n");
                return -1;
            }
            if (decode_pulses(&pulse, gb, ics->swb_offset, ics->num_swb)) {
                av_log(ac->avctx, AV_LOG_ERROR, "Pulse data corrupt or invalid.\n");
                return -1;
            }
        }
        if ((tns->present = get_bits1(gb)) && decode_tns(ac, tns, gb, ics))
            return -1;
        /* gain_control_data (SSR) is not supported. */
        if (get_bits1(gb)) {
            av_log_missing_feature(ac->avctx, "SSR", 1);
            return -1;
        }
    }

    if (decode_spectrum_and_dequant(ac, out, gb, sce->sf, pulse_present, &pulse, ics, sce->band_type) < 0)
        return -1;

    /* For common_window CPEs, prediction is applied by decode_cpe() instead. */
    if (ac->m4ac.object_type == AOT_AAC_MAIN && !common_window)
        apply_prediction(ac, sce);

    return 0;
}
  1228. /**
  1229. * Mid/Side stereo decoding; reference: 4.6.8.1.3.
  1230. */
  1231. static void apply_mid_side_stereo(AACContext *ac, ChannelElement *cpe)
  1232. {
  1233. const IndividualChannelStream *ics = &cpe->ch[0].ics;
  1234. float *ch0 = cpe->ch[0].coeffs;
  1235. float *ch1 = cpe->ch[1].coeffs;
  1236. int g, i, group, idx = 0;
  1237. const uint16_t *offsets = ics->swb_offset;
  1238. for (g = 0; g < ics->num_window_groups; g++) {
  1239. for (i = 0; i < ics->max_sfb; i++, idx++) {
  1240. if (cpe->ms_mask[idx] &&
  1241. cpe->ch[0].band_type[idx] < NOISE_BT && cpe->ch[1].band_type[idx] < NOISE_BT) {
  1242. for (group = 0; group < ics->group_len[g]; group++) {
  1243. ac->dsp.butterflies_float(ch0 + group * 128 + offsets[i],
  1244. ch1 + group * 128 + offsets[i],
  1245. offsets[i+1] - offsets[i]);
  1246. }
  1247. }
  1248. }
  1249. ch0 += ics->group_len[g] * 128;
  1250. ch1 += ics->group_len[g] * 128;
  1251. }
  1252. }
/**
 * intensity stereo decoding; reference: 4.6.8.2.3
 *
 * @param ms_present Indicates mid/side stereo presence. [0] mask is all 0s;
 * [1] mask is decoded from bitstream; [2] mask is all 1s;
 * [3] reserved for scalable AAC
 */
static void apply_intensity_stereo(ChannelElement *cpe, int ms_present)
{
    const IndividualChannelStream *ics = &cpe->ch[1].ics;
    SingleChannelElement *sce1 = &cpe->ch[1];
    float *coef0 = cpe->ch[0].coeffs, *coef1 = cpe->ch[1].coeffs;
    const uint16_t *offsets = ics->swb_offset;
    int g, group, i, k, idx = 0;
    int c;
    float scale;

    /* idx walks (group, sfb) flat; i is the sfb within the current group.
     * Bands are processed in runs sharing the same band type. */
    for (g = 0; g < ics->num_window_groups; g++) {
        for (i = 0; i < ics->max_sfb;) {
            if (sce1->band_type[idx] == INTENSITY_BT || sce1->band_type[idx] == INTENSITY_BT2) {
                const int bt_run_end = sce1->band_type_run_end[idx];
                for (; i < bt_run_end; i++, idx++) {
                    /* direction: +1 for INTENSITY_BT (15... wait, derived from
                     * band type: INTENSITY_BT2 -> -1, INTENSITY_BT -> +1 */
                    c = -1 + 2 * (sce1->band_type[idx] - 14);
                    /* M/S flag inverts the intensity direction (4.6.8.2.3). */
                    if (ms_present)
                        c *= 1 - 2 * cpe->ms_mask[idx];
                    /* sf[] holds the dequantized intensity position here. */
                    scale = c * sce1->sf[idx];
                    for (group = 0; group < ics->group_len[g]; group++)
                        for (k = offsets[i]; k < offsets[i + 1]; k++)
                            coef1[group * 128 + k] = scale * coef0[group * 128 + k];
                }
            } else {
                /* Non-intensity run: skip it whole. */
                int bt_run_end = sce1->band_type_run_end[idx];
                idx += bt_run_end - i;
                i = bt_run_end;
            }
        }
        coef0 += ics->group_len[g] * 128;
        coef1 += ics->group_len[g] * 128;
    }
}
/**
 * Decode a channel_pair_element; reference: table 4.4.
 *
 * @return Returns error status. 0 - OK, !0 - error
 */
static int decode_cpe(AACContext *ac, GetBitContext *gb, ChannelElement *cpe)
{
    int i, ret, common_window, ms_present = 0;

    common_window = get_bits1(gb);
    if (common_window) {
        if (decode_ics_info(ac, &cpe->ch[0].ics, gb, 1))
            return -1;
        /* Copy the shared ics_info to channel 1, preserving its previous
         * frame's window shape (use_kb_window[1]). */
        i = cpe->ch[1].ics.use_kb_window[0];
        cpe->ch[1].ics = cpe->ch[0].ics;
        cpe->ch[1].ics.use_kb_window[1] = i;
        ms_present = get_bits(gb, 2);
        if (ms_present == 3) {
            av_log(ac->avctx, AV_LOG_ERROR, "ms_present = 3 is reserved.\n");
            return -1;
        } else if (ms_present)
            decode_mid_side_stereo(cpe, gb, ms_present);
    }
    if ((ret = decode_ics(ac, &cpe->ch[0], gb, common_window, 0)))
        return ret;
    if ((ret = decode_ics(ac, &cpe->ch[1], gb, common_window, 0)))
        return ret;

    if (common_window) {
        if (ms_present)
            apply_mid_side_stereo(ac, cpe);
        /* With a common window, decode_ics() skipped prediction; do it here
         * for both channels. */
        if (ac->m4ac.object_type == AOT_AAC_MAIN) {
            apply_prediction(ac, &cpe->ch[0]);
            apply_prediction(ac, &cpe->ch[1]);
        }
    }

    apply_intensity_stereo(cpe, ms_present);
    return 0;
}
/**
 * Decode coupling_channel_element; reference: table 4.8.
 *
 * @return Returns error status. 0 - OK, !0 - error
 */
static int decode_cce(AACContext *ac, GetBitContext *gb, ChannelElement *che)
{
    int num_gain = 0;
    int c, g, sfb, ret;
    int sign;
    float scale;
    SingleChannelElement *sce = &che->ch[0];
    ChannelCoupling *coup = &che->coup;

    /* ind_sw_cce_flag: 0 -> dependent (before/between TNS), 2 -> independent
     * (after IMDCT); refined to BETWEEN_TNS_AND_IMDCT below. */
    coup->coupling_point = 2 * get_bits1(gb);
    coup->num_coupled = get_bits(gb, 3);
    for (c = 0; c <= coup->num_coupled; c++) {
        num_gain++;
        coup->type[c] = get_bits1(gb) ? TYPE_CPE : TYPE_SCE;
        coup->id_select[c] = get_bits(gb, 4);
        if (coup->type[c] == TYPE_CPE) {
            coup->ch_select[c] = get_bits(gb, 2);
            /* ch_select 3 couples both CPE channels: one extra gain list. */
            if (coup->ch_select[c] == 3)
                num_gain++;
        } else
            coup->ch_select[c] = 2;
    }
    /* cc_domain bit; ORing the high bit maps {0,1,2,3} onto the
     * CouplingPoint enum values. */
    coup->coupling_point += get_bits1(gb) || (coup->coupling_point >> 1);

    sign = get_bits(gb, 1);                                /* signed gains? */
    scale = pow(2., pow(2., (int)get_bits(gb, 2) - 3));    /* gain step size */

    if ((ret = decode_ics(ac, sce, gb, 0, 0)))
        return ret;

    for (c = 0; c < num_gain; c++) {
        int idx = 0;
        int cge = 1;        /* common gain element present */
        int gain = 0;
        float gain_cache = 1.;
        if (c) {
            /* First gain list is implicit (unity); later ones read a common
             * gain, or per-band gains when cge == 0. */
            cge = coup->coupling_point == AFTER_IMDCT ? 1 : get_bits1(gb);
            gain = cge ? get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60: 0;
            gain_cache = pow(scale, -gain);
        }
        if (coup->coupling_point == AFTER_IMDCT) {
            coup->gain[c][0] = gain_cache;
        } else {
            for (g = 0; g < sce->ics.num_window_groups; g++) {
                for (sfb = 0; sfb < sce->ics.max_sfb; sfb++, idx++) {
                    if (sce->band_type[idx] != ZERO_BT) {
                        if (!cge) {
                            /* Differential per-band gain; in signed mode the
                             * LSB carries the sign and the rest the magnitude. */
                            int t = get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
                            if (t) {
                                int s = 1;
                                t = gain += t;
                                if (sign) {
                                    s -= 2 * (t & 0x1);
                                    t >>= 1;
                                }
                                gain_cache = pow(scale, -t) * s;
                            }
                        }
                        coup->gain[c][idx] = gain_cache;
                    }
                }
            }
        }
    }
    return 0;
}
  1400. /**
  1401. * Parse whether channels are to be excluded from Dynamic Range Compression; reference: table 4.53.
  1402. *
  1403. * @return Returns number of bytes consumed.
  1404. */
  1405. static int decode_drc_channel_exclusions(DynamicRangeControl *che_drc,
  1406. GetBitContext *gb)
  1407. {
  1408. int i;
  1409. int num_excl_chan = 0;
  1410. do {
  1411. for (i = 0; i < 7; i++)
  1412. che_drc->exclude_mask[num_excl_chan++] = get_bits1(gb);
  1413. } while (num_excl_chan < MAX_CHANNELS - 7 && get_bits1(gb));
  1414. return num_excl_chan / 7;
  1415. }
/**
 * Decode dynamic range information; reference: table 4.52.
 *
 * @param cnt length of TYPE_FIL syntactic element in bytes
 *
 * @return Returns number of bytes consumed.
 */
static int decode_dynamic_range(DynamicRangeControl *che_drc,
                                GetBitContext *gb, int cnt)
{
    int n = 1;                /* bytes consumed (the type nibble round-up) */
    int drc_num_bands = 1;
    int i;

    /* pce_tag_present? */
    if (get_bits1(gb)) {
        che_drc->pce_instance_tag = get_bits(gb, 4);
        skip_bits(gb, 4); // tag_reserved_bits
        n++;
    }

    /* excluded_chns_present? */
    if (get_bits1(gb)) {
        n += decode_drc_channel_exclusions(che_drc, gb);
    }

    /* drc_bands_present? */
    if (get_bits1(gb)) {
        che_drc->band_incr = get_bits(gb, 4);
        che_drc->interpolation_scheme = get_bits(gb, 4);
        n++;
        drc_num_bands += che_drc->band_incr;
        for (i = 0; i < drc_num_bands; i++) {
            che_drc->band_top[i] = get_bits(gb, 8);
            n++;
        }
    }

    /* prog_ref_level_present? */
    if (get_bits1(gb)) {
        che_drc->prog_ref_level = get_bits(gb, 7);
        skip_bits1(gb); // prog_ref_level_reserved_bits
        n++;
    }

    /* One sign/control pair per DRC band. */
    for (i = 0; i < drc_num_bands; i++) {
        che_drc->dyn_rng_sgn[i] = get_bits1(gb);
        che_drc->dyn_rng_ctl[i] = get_bits(gb, 7);
        n++;
    }

    return n;
}
  1463. /**
  1464. * Decode extension data (incomplete); reference: table 4.51.
  1465. *
  1466. * @param cnt length of TYPE_FIL syntactic element in bytes
  1467. *
  1468. * @return Returns number of bytes consumed
  1469. */
  1470. static int decode_extension_payload(AACContext *ac, GetBitContext *gb, int cnt,
  1471. ChannelElement *che, enum RawDataBlockType elem_type)
  1472. {
  1473. int crc_flag = 0;
  1474. int res = cnt;
  1475. switch (get_bits(gb, 4)) { // extension type
  1476. case EXT_SBR_DATA_CRC:
  1477. crc_flag++;
  1478. case EXT_SBR_DATA:
  1479. if (!che) {
  1480. av_log(ac->avctx, AV_LOG_ERROR, "SBR was found before the first channel element.\n");
  1481. return res;
  1482. } else if (!ac->m4ac.sbr) {
  1483. av_log(ac->avctx, AV_LOG_ERROR, "SBR signaled to be not-present but was found in the bitstream.\n");
  1484. skip_bits_long(gb, 8 * cnt - 4);
  1485. return res;
  1486. } else if (ac->m4ac.sbr == -1 && ac->output_configured == OC_LOCKED) {
  1487. av_log(ac->avctx, AV_LOG_ERROR, "Implicit SBR was found with a first occurrence after the first frame.\n");
  1488. skip_bits_long(gb, 8 * cnt - 4);
  1489. return res;
  1490. } else if (ac->m4ac.ps == -1 && ac->output_configured < OC_LOCKED && ac->avctx->channels == 1) {
  1491. ac->m4ac.sbr = 1;
  1492. ac->m4ac.ps = 1;
  1493. output_configure(ac, ac->che_pos, ac->che_pos, ac->m4ac.chan_config, ac->output_configured);
  1494. } else {
  1495. ac->m4ac.sbr = 1;
  1496. }
  1497. res = ff_decode_sbr_extension(ac, &che->sbr, gb, crc_flag, cnt, elem_type);
  1498. break;
  1499. case EXT_DYNAMIC_RANGE:
  1500. res = decode_dynamic_range(&ac->che_drc, gb, cnt);
  1501. break;
  1502. case EXT_FILL:
  1503. case EXT_FILL_DATA:
  1504. case EXT_DATA_ELEMENT:
  1505. default:
  1506. skip_bits_long(gb, 8 * cnt - 4);
  1507. break;
  1508. };
  1509. return res;
  1510. }
/**
 * Decode Temporal Noise Shaping filter coefficients and apply all-pole filters; reference: 4.6.9.3.
 *
 * @param decode 1 if tool is used normally, 0 if tool is used in LTP.
 * @param coef spectral coefficients
 */
static void apply_tns(float coef[1024], TemporalNoiseShaping *tns,
                      IndividualChannelStream *ics, int decode)
{
    /* TNS is only applied up to min(tns_max_bands, max_sfb). */
    const int mmm = FFMIN(ics->tns_max_bands, ics->max_sfb);
    int w, filt, m, i;
    int bottom, top, order, start, end, size, inc;
    float lpc[TNS_MAX_ORDER];

    for (w = 0; w < ics->num_windows; w++) {
        /* Filters cover bands top-down: each filter spans 'length' bands
         * below the previous one's bottom. */
        bottom = ics->num_swb;
        for (filt = 0; filt < tns->n_filt[w]; filt++) {
            top = bottom;
            bottom = FFMAX(0, top - tns->length[w][filt]);
            order = tns->order[w][filt];
            if (order == 0)
                continue;

            // tns_decode_coef
            compute_lpc_coefs(tns->coef[w][filt], order, lpc, 0, 0, 0);

            start = ics->swb_offset[FFMIN(bottom, mmm)];
            end = ics->swb_offset[FFMIN( top, mmm)];
            if ((size = end - start) <= 0)
                continue;
            /* direction bit: filter runs upward or downward in frequency */
            if (tns->direction[w][filt]) {
                inc = -1;
                start = end - 1;
            } else {
                inc = 1;
            }
            start += w * 128;   /* offset into this window's coefficients */

            // ar filter
            /* In-place all-pole filter; the FFMIN(m, order) ramp handles the
             * first 'order' samples where fewer taps are available. */
            for (m = 0; m < size; m++, start += inc)
                for (i = 1; i <= FFMIN(m, order); i++)
                    coef[start] -= coef[start - i * inc] * lpc[i - 1];
        }
    }
}
/**
 * Conduct IMDCT and windowing.
 *
 * @param bias DC bias added to the output samples (0 when SBR follows)
 */
static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce, float bias)
{
    IndividualChannelStream *ics = &sce->ics;
    float *in = sce->coeffs;    /* frequency-domain input */
    float *out = sce->ret;      /* time-domain output (1024 samples) */
    float *saved = sce->saved;  /* overlap buffer from the previous frame */
    const float *swindow = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
    const float *lwindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
    const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
    float *buf = ac->buf_mdct;
    float *temp = ac->temp;
    int i;

    // imdct
    if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
        if (ics->window_sequence[1] == ONLY_LONG_SEQUENCE || ics->window_sequence[1] == LONG_STOP_SEQUENCE)
            av_log(ac->avctx, AV_LOG_WARNING,
                   "Transition from an ONLY_LONG or LONG_STOP to an EIGHT_SHORT sequence detected. "
                   "If you heard an audible artifact, please submit the sample to the FFmpeg developers.\n");
        /* Eight 128-point transforms for the short-window case. */
        for (i = 0; i < 1024; i += 128)
            ff_imdct_half(&ac->mdct_small, buf + i, in + i);
    } else
        ff_imdct_half(&ac->mdct, buf, in);

    /* window overlapping
     * NOTE: To simplify the overlapping code, all 'meaningless' short to long
     * and long to short transitions are considered to be short to short
     * transitions. This leaves just two cases (long to long and short to short)
     * with a little special sauce for EIGHT_SHORT_SEQUENCE.
     */
    if ((ics->window_sequence[1] == ONLY_LONG_SEQUENCE || ics->window_sequence[1] == LONG_STOP_SEQUENCE) &&
        (ics->window_sequence[0] == ONLY_LONG_SEQUENCE || ics->window_sequence[0] == LONG_START_SEQUENCE)) {
        /* long-to-long: one full 512-sample overlap-add */
        ac->dsp.vector_fmul_window( out, saved, buf, lwindow_prev, bias, 512);
    } else {
        /* short-to-short: flat region, then 64-sample overlaps per sub-window */
        for (i = 0; i < 448; i++)
            out[i] = saved[i] + bias;

        if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
            ac->dsp.vector_fmul_window(out + 448 + 0*128, saved + 448, buf + 0*128, swindow_prev, bias, 64);
            ac->dsp.vector_fmul_window(out + 448 + 1*128, buf + 0*128 + 64, buf + 1*128, swindow, bias, 64);
            ac->dsp.vector_fmul_window(out + 448 + 2*128, buf + 1*128 + 64, buf + 2*128, swindow, bias, 64);
            ac->dsp.vector_fmul_window(out + 448 + 3*128, buf + 2*128 + 64, buf + 3*128, swindow, bias, 64);
            /* fifth sub-window straddles the frame boundary: stage via temp */
            ac->dsp.vector_fmul_window(temp, buf + 3*128 + 64, buf + 4*128, swindow, bias, 64);
            memcpy(                    out + 448 + 4*128, temp, 64 * sizeof(float));
        } else {
            ac->dsp.vector_fmul_window(out + 448, saved + 448, buf, swindow_prev, bias, 64);
            for (i = 576; i < 1024; i++)
                out[i] = buf[i-512] + bias;
        }
    }

    // buffer update
    /* Save the second half for next frame's overlap; bias-free (bias is
     * re-added on output next frame). */
    if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
        for (i = 0; i < 64; i++)
            saved[i] = temp[64 + i] - bias;
        ac->dsp.vector_fmul_window(saved + 64,  buf + 4*128 + 64, buf + 5*128, swindow, 0, 64);
        ac->dsp.vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 0, 64);
        ac->dsp.vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 0, 64);
        memcpy(                    saved + 448, buf + 7*128 + 64,  64 * sizeof(float));
    } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
        memcpy(                    saved,       buf + 512,        448 * sizeof(float));
        memcpy(                    saved + 448, buf + 7*128 + 64,  64 * sizeof(float));
    } else { // LONG_STOP or ONLY_LONG
        memcpy(                    saved,       buf + 512,        512 * sizeof(float));
    }
}
  1617. /**
  1618. * Apply dependent channel coupling (applied before IMDCT).
  1619. *
  1620. * @param index index into coupling gain array
  1621. */
  1622. static void apply_dependent_coupling(AACContext *ac,
  1623. SingleChannelElement *target,
  1624. ChannelElement *cce, int index)
  1625. {
  1626. IndividualChannelStream *ics = &cce->ch[0].ics;
  1627. const uint16_t *offsets = ics->swb_offset;
  1628. float *dest = target->coeffs;
  1629. const float *src = cce->ch[0].coeffs;
  1630. int g, i, group, k, idx = 0;
  1631. if (ac->m4ac.object_type == AOT_AAC_LTP) {
  1632. av_log(ac->avctx, AV_LOG_ERROR,
  1633. "Dependent coupling is not supported together with LTP\n");
  1634. return;
  1635. }
  1636. for (g = 0; g < ics->num_window_groups; g++) {
  1637. for (i = 0; i < ics->max_sfb; i++, idx++) {
  1638. if (cce->ch[0].band_type[idx] != ZERO_BT) {
  1639. const float gain = cce->coup.gain[index][idx];
  1640. for (group = 0; group < ics->group_len[g]; group++) {
  1641. for (k = offsets[i]; k < offsets[i + 1]; k++) {
  1642. // XXX dsputil-ize
  1643. dest[group * 128 + k] += gain * src[group * 128 + k];
  1644. }
  1645. }
  1646. }
  1647. }
  1648. dest += ics->group_len[g] * 128;
  1649. src += ics->group_len[g] * 128;
  1650. }
  1651. }
  1652. /**
  1653. * Apply independent channel coupling (applied after IMDCT).
  1654. *
  1655. * @param index index into coupling gain array
  1656. */
  1657. static void apply_independent_coupling(AACContext *ac,
  1658. SingleChannelElement *target,
  1659. ChannelElement *cce, int index)
  1660. {
  1661. int i;
  1662. const float gain = cce->coup.gain[index][0];
  1663. const float bias = ac->add_bias;
  1664. const float *src = cce->ch[0].ret;
  1665. float *dest = target->ret;
  1666. const int len = 1024 << (ac->m4ac.sbr == 1);
  1667. for (i = 0; i < len; i++)
  1668. dest[i] += gain * (src[i] - bias);
  1669. }
/**
 * channel coupling transformation interface
 *
 * @param index index into coupling gain array
 * @param apply_coupling_method pointer to (in)dependent coupling function
 */
static void apply_channel_coupling(AACContext *ac, ChannelElement *cc,
                                   enum RawDataBlockType type, int elem_id,
                                   enum CouplingPoint coupling_point,
                                   void (*apply_coupling_method)(AACContext *ac, SingleChannelElement *target, ChannelElement *cce, int index))
{
    int i, c;

    /* Scan every CCE active at this coupling point for entries targeting the
     * given (type, elem_id) element. */
    for (i = 0; i < MAX_ELEM_ID; i++) {
        ChannelElement *cce = ac->che[TYPE_CCE][i];
        int index = 0;   /* position in the CCE's gain lists (see decode_cce) */

        if (cce && cce->coup.coupling_point == coupling_point) {
            ChannelCoupling *coup = &cce->coup;

            for (c = 0; c <= coup->num_coupled; c++) {
                if (coup->type[c] == type && coup->id_select[c] == elem_id) {
                    /* ch_select: 1 -> right only, 2 -> left/mono only,
                     * 0/3 -> both (3 uses a separate gain list per channel). */
                    if (coup->ch_select[c] != 1) {
                        apply_coupling_method(ac, &cc->ch[0], cce, index);
                        if (coup->ch_select[c] != 0)
                            index++;
                    }
                    if (coup->ch_select[c] != 2)
                        apply_coupling_method(ac, &cc->ch[1], cce, index++);
                } else
                    /* Not our target: still consume its gain list slot(s). */
                    index += 1 + (coup->ch_select[c] == 3);
            }
        }
    }
}
/**
 * Convert spectral data to float samples, applying all supported tools as appropriate.
 *
 * Processing order per element: dependent coupling (before TNS), TNS,
 * dependent coupling (between TNS and IMDCT), IMDCT + windowing, SBR,
 * then independent coupling (after IMDCT).
 */
static void spectral_to_sample(AACContext *ac)
{
    int i, type;
    /* when SBR is active the output bias is handled after the SBR stage,
     * so the IMDCT itself must not add it */
    float imdct_bias = (ac->m4ac.sbr <= 0) ? ac->add_bias : 0.0f;
    /* iterate element types in descending order (LFE, CCE, CPE, SCE) so
     * that CCEs are processed before the SCE/CPE elements they feed —
     * NOTE(review): presumably required for AFTER_IMDCT coupling; confirm */
    for (type = 3; type >= 0; type--) {
        for (i = 0; i < MAX_ELEM_ID; i++) {
            ChannelElement *che = ac->che[type][i];
            if (che) {
                /* only SCE/CPE can be coupling targets */
                if (type <= TYPE_CPE)
                    apply_channel_coupling(ac, che, type, i, BEFORE_TNS, apply_dependent_coupling);
                if (che->ch[0].tns.present)
                    apply_tns(che->ch[0].coeffs, &che->ch[0].tns, &che->ch[0].ics, 1);
                if (che->ch[1].tns.present)
                    apply_tns(che->ch[1].coeffs, &che->ch[1].tns, &che->ch[1].ics, 1);
                if (type <= TYPE_CPE)
                    apply_channel_coupling(ac, che, type, i, BETWEEN_TNS_AND_IMDCT, apply_dependent_coupling);
                /* CCEs that couple before the IMDCT never produce output
                 * themselves, so skip their transform */
                if (type != TYPE_CCE || che->coup.coupling_point == AFTER_IMDCT) {
                    imdct_and_windowing(ac, &che->ch[0], imdct_bias);
                    if (type == TYPE_CPE) {
                        imdct_and_windowing(ac, &che->ch[1], imdct_bias);
                    }
                    if (ac->m4ac.sbr > 0) {
                        ff_sbr_apply(ac, &che->sbr, type, che->ch[0].ret, che->ch[1].ret);
                    }
                }
                if (type <= TYPE_CCE)
                    apply_channel_coupling(ac, che, type, i, AFTER_IMDCT, apply_independent_coupling);
            }
        }
    }
}
/**
 * Parse an ADTS frame header and reconfigure the decoder accordingly.
 *
 * On success the MPEG-4 audio parameters in ac->m4ac are updated from the
 * header; if the output configuration is not yet locked, the channel
 * configuration signalled by the header is tried as well.
 *
 * @return header size in bits on success, negative on error
 */
static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
{
    int size;
    AACADTSHeaderInfo hdr_info;
    size = ff_aac_parse_header(gb, &hdr_info);
    if (size > 0) {
        /* header carries a channel config and output isn't locked yet:
         * install the default element layout for that config as a trial */
        if (ac->output_configured != OC_LOCKED && hdr_info.chan_config) {
            enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
            memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
            ac->m4ac.chan_config = hdr_info.chan_config;
            if (set_default_channel_config(ac, new_che_pos, hdr_info.chan_config))
                return -7;
            if (output_configure(ac, ac->che_pos, new_che_pos, hdr_info.chan_config, OC_TRIAL_FRAME))
                return -7;
        } else if (ac->output_configured != OC_LOCKED) {
            /* no channel config in the header: drop back to unconfigured */
            ac->output_configured = OC_NONE;
        }
        if (ac->output_configured != OC_LOCKED) {
            /* ADTS cannot signal SBR/PS explicitly; -1 = not (yet) known */
            ac->m4ac.sbr = -1;
            ac->m4ac.ps = -1;
        }
        ac->m4ac.sample_rate = hdr_info.sample_rate;
        ac->m4ac.sampling_index = hdr_info.sampling_index;
        ac->m4ac.object_type = hdr_info.object_type;
        if (!ac->avctx->sample_rate)
            ac->avctx->sample_rate = hdr_info.sample_rate;
        if (hdr_info.num_aac_frames == 1) {
            /* crc_absent == 0 means a 16-bit CRC follows the header */
            if (!hdr_info.crc_absent)
                skip_bits(gb, 16);
        } else {
            av_log_missing_feature(ac->avctx, "More than one AAC RDB per ADTS frame is", 0);
            return -1;
        }
    }
    return size;
}
/**
 * Decode one AAC frame (raw or ADTS-wrapped) into interleaved int16 PCM.
 *
 * @param data      output sample buffer
 * @param data_size in: capacity of data in bytes; out: bytes actually written
 * @return number of bytes consumed from the packet, or negative on error
 */
static int aac_decode_frame(AVCodecContext *avctx, void *data,
                            int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    AACContext *ac = avctx->priv_data;
    ChannelElement *che = NULL, *che_prev = NULL;
    GetBitContext gb;
    enum RawDataBlockType elem_type, elem_type_prev = TYPE_END;
    int err, elem_id, data_size_tmp;
    int buf_consumed;
    int samples = 0, multiplier;
    int buf_offset;
    init_get_bits(&gb, buf, buf_size * 8);
    /* 12-bit 0xfff syncword -> the packet starts with an ADTS header */
    if (show_bits(&gb, 12) == 0xfff) {
        if (parse_adts_frame_header(ac, &gb) < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error decoding AAC frame header.\n");
            return -1;
        }
        if (ac->m4ac.sampling_index > 12) {
            av_log(ac->avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", ac->m4ac.sampling_index);
            return -1;
        }
    }
    /* reset per-frame element-tag bookkeeping
     * (presumably consulted by get_che() — see its definition) */
    memset(ac->tags_seen_this_frame, 0, sizeof(ac->tags_seen_this_frame));
    // parse
    /* syntactic elements: 3-bit type + 4-bit instance tag, until TYPE_END */
    while ((elem_type = get_bits(&gb, 3)) != TYPE_END) {
        elem_id = get_bits(&gb, 4);
        /* SCE/CPE/CCE/LFE carry audio and need an allocated channel element */
        if (elem_type < TYPE_DSE) {
            if (!(che=get_che(ac, elem_type, elem_id))) {
                av_log(ac->avctx, AV_LOG_ERROR, "channel element %d.%d is not allocated\n",
                       elem_type, elem_id);
                return -1;
            }
            samples = 1024;
        }
        switch (elem_type) {
        case TYPE_SCE:
            err = decode_ics(ac, &che->ch[0], &gb, 0, 0);
            break;
        case TYPE_CPE:
            err = decode_cpe(ac, &gb, che);
            break;
        case TYPE_CCE:
            err = decode_cce(ac, &gb, che);
            break;
        case TYPE_LFE:
            err = decode_ics(ac, &che->ch[0], &gb, 0, 0);
            break;
        case TYPE_DSE:
            err = skip_data_stream_element(ac, &gb);
            break;
        case TYPE_PCE: {
            enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
            memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
            if ((err = decode_pce(ac, new_che_pos, &gb)))
                break;
            /* only honour a PCE while still trialling configurations */
            if (ac->output_configured > OC_TRIAL_PCE)
                av_log(avctx, AV_LOG_ERROR,
                       "Not evaluating a further program_config_element as this construct is dubious at best.\n");
            else
                err = output_configure(ac, ac->che_pos, new_che_pos, 0, OC_TRIAL_PCE);
            break;
        }
        case TYPE_FIL:
            /* escape: length 15 means an 8-bit extension count follows */
            if (elem_id == 15)
                elem_id += get_bits(&gb, 8) - 1;
            if (get_bits_left(&gb) < 8 * elem_id) {
                av_log(avctx, AV_LOG_ERROR, overread_err);
                return -1;
            }
            /* fill payloads (e.g. SBR) apply to the preceding element */
            while (elem_id > 0)
                elem_id -= decode_extension_payload(ac, &gb, elem_id, che_prev, elem_type_prev);
            err = 0; /* FIXME */
            break;
        default:
            err = -1; /* should not happen, but keeps compiler happy */
            break;
        }
        /* remember the element a following TYPE_FIL would attach to */
        che_prev = che;
        elem_type_prev = elem_type;
        if (err)
            return err;
        /* need at least 3 bits for the next element type */
        if (get_bits_left(&gb) < 3) {
            av_log(avctx, AV_LOG_ERROR, overread_err);
            return -1;
        }
    }
    spectral_to_sample(ac);
    /* SBR upsampling doubles the sample count when it raised the rate */
    multiplier = (ac->m4ac.sbr == 1) ? ac->m4ac.ext_sample_rate > ac->m4ac.sample_rate : 0;
    samples <<= multiplier;
    if (ac->output_configured < OC_LOCKED) {
        avctx->sample_rate = ac->m4ac.sample_rate << multiplier;
        avctx->frame_size = samples;
    }
    data_size_tmp = samples * avctx->channels * sizeof(int16_t);
    if (*data_size < data_size_tmp) {
        av_log(avctx, AV_LOG_ERROR,
               "Output buffer too small (%d) or trying to output too many samples (%d) for this frame.\n",
               *data_size, data_size_tmp);
        return -1;
    }
    *data_size = data_size_tmp;
    if (samples)
        ac->dsp.float_to_int16_interleave(data, (const float **)ac->output_data, samples, avctx->channels);
    /* a frame decoded cleanly: promote any trial configuration to locked */
    if (ac->output_configured)
        ac->output_configured = OC_LOCKED;
    buf_consumed = (get_bits_count(&gb) + 7) >> 3;
    /* also consume any trailing zero padding after the consumed bytes */
    for (buf_offset = buf_consumed; buf_offset < buf_size; buf_offset++)
        if (buf[buf_offset])
            break;
    return buf_size > buf_offset ? buf_consumed : buf_size;
}
  1885. static av_cold int aac_decode_close(AVCodecContext *avctx)
  1886. {
  1887. AACContext *ac = avctx->priv_data;
  1888. int i, type;
  1889. for (i = 0; i < MAX_ELEM_ID; i++) {
  1890. for (type = 0; type < 4; type++) {
  1891. if (ac->che[type][i])
  1892. ff_aac_sbr_ctx_close(&ac->che[type][i]->sbr);
  1893. av_freep(&ac->che[type][i]);
  1894. }
  1895. }
  1896. ff_mdct_end(&ac->mdct);
  1897. ff_mdct_end(&ac->mdct_small);
  1898. return 0;
  1899. }
/* Decoder registration; leading fields use the old positional AVCodec
 * initializer: name, type, id, priv_data_size, init, encode, close, decode. */
AVCodec aac_decoder = {
    "aac",
    AVMEDIA_TYPE_AUDIO,
    CODEC_ID_AAC,
    sizeof(AACContext),     /* priv_data_size */
    aac_decode_init,
    NULL,                   /* no encode callback — decoder only */
    aac_decode_close,
    aac_decode_frame,
    .long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
    /* output format list, terminated by SAMPLE_FMT_NONE */
    .sample_fmts = (const enum SampleFormat[]) {
        SAMPLE_FMT_S16, SAMPLE_FMT_NONE
    },
    .channel_layouts = aac_channel_layout,
};