You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

397 lines
15KB

  1. /*
  2. * ATRAC3+ compatible decoder
  3. *
  4. * Copyright (c) 2010-2013 Maxim Poliakovski
  5. *
  6. * This file is part of Libav.
  7. *
  8. * Libav is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * Libav is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with Libav; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * Sony ATRAC3+ compatible decoder.
  25. *
  26. * Container formats used to store its data:
  27. * RIFF WAV (.at3) and Sony OpenMG (.oma, .aa3).
  28. *
  29. * Technical description of this codec can be found here:
  30. * http://wiki.multimedia.cx/index.php?title=ATRAC3plus
  31. *
  32. * Kudos to Benjamin Larsson and Michael Karcher
  33. * for their precious technical help!
  34. */
  35. #include <stdint.h>
  36. #include <string.h>
  37. #include "libavutil/channel_layout.h"
  38. #include "libavutil/float_dsp.h"
  39. #include "avcodec.h"
  40. #include "get_bits.h"
  41. #include "internal.h"
  42. #include "atrac.h"
  43. #include "atrac3plus.h"
/** Global decoder context, stored in AVCodecContext.priv_data. */
typedef struct ATRAC3PContext {
    GetBitContext gb;       ///< bitstream reader for the packet being decoded
    AVFloatDSPContext fdsp; ///< float DSP helpers (vector multiply etc.)

    DECLARE_ALIGNED(32, float, samples)[2][ATRAC3P_FRAME_SAMPLES];  ///< quantized MDCT spectrum
    DECLARE_ALIGNED(32, float, mdct_buf)[2][ATRAC3P_FRAME_SAMPLES]; ///< output of the IMDCT
    DECLARE_ALIGNED(32, float, time_buf)[2][ATRAC3P_FRAME_SAMPLES]; ///< output of the gain compensation
    DECLARE_ALIGNED(32, float, outp_buf)[2][ATRAC3P_FRAME_SAMPLES]; ///< decoded samples after IPQF synthesis

    AtracGCContext gainc_ctx;   ///< gain compensation context
    FFTContext mdct_ctx;        ///< IMDCT context (set up by ff_atrac3p_init_imdct())
    FFTContext ipqf_dct_ctx;    ///< IDCT context used by IPQF

    Atrac3pChanUnitCtx *ch_units;   ///< global channel units, one per channel block

    int num_channel_blocks;     ///< number of channel blocks
    uint8_t channel_blocks[5];  ///< channel configuration descriptor (CH_UNIT_* per block)
    uint64_t my_channel_layout; ///< current channel layout
} ATRAC3PContext;
  59. static av_cold int atrac3p_decode_close(AVCodecContext *avctx)
  60. {
  61. av_free(((ATRAC3PContext *)(avctx->priv_data))->ch_units);
  62. return 0;
  63. }
  64. static av_cold int set_channel_params(ATRAC3PContext *ctx,
  65. AVCodecContext *avctx)
  66. {
  67. memset(ctx->channel_blocks, 0, sizeof(ctx->channel_blocks));
  68. switch (avctx->channels) {
  69. case 1:
  70. if (avctx->channel_layout != AV_CH_FRONT_LEFT)
  71. avctx->channel_layout = AV_CH_LAYOUT_MONO;
  72. ctx->num_channel_blocks = 1;
  73. ctx->channel_blocks[0] = CH_UNIT_MONO;
  74. break;
  75. case 2:
  76. avctx->channel_layout = AV_CH_LAYOUT_STEREO;
  77. ctx->num_channel_blocks = 1;
  78. ctx->channel_blocks[0] = CH_UNIT_STEREO;
  79. break;
  80. case 3:
  81. avctx->channel_layout = AV_CH_LAYOUT_SURROUND;
  82. ctx->num_channel_blocks = 2;
  83. ctx->channel_blocks[0] = CH_UNIT_STEREO;
  84. ctx->channel_blocks[1] = CH_UNIT_MONO;
  85. break;
  86. case 4:
  87. avctx->channel_layout = AV_CH_LAYOUT_4POINT0;
  88. ctx->num_channel_blocks = 3;
  89. ctx->channel_blocks[0] = CH_UNIT_STEREO;
  90. ctx->channel_blocks[1] = CH_UNIT_MONO;
  91. ctx->channel_blocks[2] = CH_UNIT_MONO;
  92. break;
  93. case 6:
  94. avctx->channel_layout = AV_CH_LAYOUT_5POINT1_BACK;
  95. ctx->num_channel_blocks = 4;
  96. ctx->channel_blocks[0] = CH_UNIT_STEREO;
  97. ctx->channel_blocks[1] = CH_UNIT_MONO;
  98. ctx->channel_blocks[2] = CH_UNIT_STEREO;
  99. ctx->channel_blocks[3] = CH_UNIT_MONO;
  100. break;
  101. case 7:
  102. avctx->channel_layout = AV_CH_LAYOUT_6POINT1_BACK;
  103. ctx->num_channel_blocks = 5;
  104. ctx->channel_blocks[0] = CH_UNIT_STEREO;
  105. ctx->channel_blocks[1] = CH_UNIT_MONO;
  106. ctx->channel_blocks[2] = CH_UNIT_STEREO;
  107. ctx->channel_blocks[3] = CH_UNIT_MONO;
  108. ctx->channel_blocks[4] = CH_UNIT_MONO;
  109. break;
  110. case 8:
  111. avctx->channel_layout = AV_CH_LAYOUT_7POINT1;
  112. ctx->num_channel_blocks = 5;
  113. ctx->channel_blocks[0] = CH_UNIT_STEREO;
  114. ctx->channel_blocks[1] = CH_UNIT_MONO;
  115. ctx->channel_blocks[2] = CH_UNIT_STEREO;
  116. ctx->channel_blocks[3] = CH_UNIT_STEREO;
  117. ctx->channel_blocks[4] = CH_UNIT_MONO;
  118. break;
  119. default:
  120. av_log(avctx, AV_LOG_ERROR,
  121. "Unsupported channel count: %d!\n", avctx->channels);
  122. return AVERROR_INVALIDDATA;
  123. }
  124. return 0;
  125. }
  126. static av_cold int atrac3p_decode_init(AVCodecContext *avctx)
  127. {
  128. ATRAC3PContext *ctx = avctx->priv_data;
  129. int i, ch, ret;
  130. if (!avctx->block_align) {
  131. av_log(avctx, AV_LOG_ERROR, "block_align is not set\n");
  132. return AVERROR(EINVAL);
  133. }
  134. avpriv_float_dsp_init(&ctx->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
  135. /* initialize IPQF */
  136. ff_mdct_init(&ctx->ipqf_dct_ctx, 5, 1, 32.0 / 32768.0);
  137. ff_atrac3p_init_imdct(avctx, &ctx->mdct_ctx);
  138. ff_atrac_init_gain_compensation(&ctx->gainc_ctx, 6, 2);
  139. ff_atrac3p_init_wave_synth();
  140. if ((ret = set_channel_params(ctx, avctx)) < 0)
  141. return ret;
  142. ctx->my_channel_layout = avctx->channel_layout;
  143. ctx->ch_units = av_mallocz(sizeof(*ctx->ch_units) *
  144. ctx->num_channel_blocks);
  145. if (!ctx->ch_units) {
  146. atrac3p_decode_close(avctx);
  147. return AVERROR(ENOMEM);
  148. }
  149. for (i = 0; i < ctx->num_channel_blocks; i++) {
  150. for (ch = 0; ch < 2; ch++) {
  151. ctx->ch_units[i].channels[ch].ch_num = ch;
  152. ctx->ch_units[i].channels[ch].wnd_shape = &ctx->ch_units[i].channels[ch].wnd_shape_hist[0][0];
  153. ctx->ch_units[i].channels[ch].wnd_shape_prev = &ctx->ch_units[i].channels[ch].wnd_shape_hist[1][0];
  154. ctx->ch_units[i].channels[ch].gain_data = &ctx->ch_units[i].channels[ch].gain_data_hist[0][0];
  155. ctx->ch_units[i].channels[ch].gain_data_prev = &ctx->ch_units[i].channels[ch].gain_data_hist[1][0];
  156. ctx->ch_units[i].channels[ch].tones_info = &ctx->ch_units[i].channels[ch].tones_info_hist[0][0];
  157. ctx->ch_units[i].channels[ch].tones_info_prev = &ctx->ch_units[i].channels[ch].tones_info_hist[1][0];
  158. }
  159. ctx->ch_units[i].waves_info = &ctx->ch_units[i].wave_synth_hist[0];
  160. ctx->ch_units[i].waves_info_prev = &ctx->ch_units[i].wave_synth_hist[1];
  161. }
  162. avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
  163. return 0;
  164. }
/**
 * Reconstruct the residual MDCT spectrum of a channel unit:
 * inverse-quantize the decoded mantissas, apply power compensation per
 * subband, and perform the stereo swap/negate signalled in the bitstream.
 *
 * @param ctx          channel unit to process
 * @param out          filled with the reconstructed spectrum per channel
 * @param num_channels 1 (mono unit) or 2 (stereo unit)
 * @param avctx        codec context (unused here, kept for symmetry)
 */
static void decode_residual_spectrum(Atrac3pChanUnitCtx *ctx,
                                     float out[2][ATRAC3P_FRAME_SAMPLES],
                                     int num_channels,
                                     AVCodecContext *avctx)
{
    int i, sb, ch, qu, nspeclines, RNG_index;
    float *dst, q;
    int16_t *src;
    /* calculate RNG table index for each subband */
    int sb_RNG_index[ATRAC3P_SUBBANDS] = { 0 };

    /* a muted unit outputs silence */
    if (ctx->mute_flag) {
        for (ch = 0; ch < num_channels; ch++)
            memset(out[ch], 0, ATRAC3P_FRAME_SAMPLES * sizeof(*out[ch]));
        return;
    }

    /* seed the RNG index from the sum of both channels' scale factors,
     * then derive a per-subband table offset (masked to a multiple of 4) */
    for (qu = 0, RNG_index = 0; qu < ctx->used_quant_units; qu++)
        RNG_index += ctx->channels[0].qu_sf_idx[qu] +
                     ctx->channels[1].qu_sf_idx[qu];

    for (sb = 0; sb < ctx->num_coded_subbands; sb++, RNG_index += 128)
        sb_RNG_index[sb] = RNG_index & 0x3FC;

    /* inverse quant and power compensation */
    for (ch = 0; ch < num_channels; ch++) {
        /* clear channel's residual spectrum */
        memset(out[ch], 0, ATRAC3P_FRAME_SAMPLES * sizeof(*out[ch]));

        for (qu = 0; qu < ctx->used_quant_units; qu++) {
            src        = &ctx->channels[ch].spectrum[ff_atrac3p_qu_to_spec_pos[qu]];
            dst        = &out[ch][ff_atrac3p_qu_to_spec_pos[qu]];
            nspeclines = ff_atrac3p_qu_to_spec_pos[qu + 1] -
                         ff_atrac3p_qu_to_spec_pos[qu];

            /* wordlen 0 means the quant unit carries no coefficients */
            if (ctx->channels[ch].qu_wordlen[qu] > 0) {
                /* dequantization step = scale factor * mantissa scale */
                q = ff_atrac3p_sf_tab[ctx->channels[ch].qu_sf_idx[qu]] *
                    ff_atrac3p_mant_tab[ctx->channels[ch].qu_wordlen[qu]];
                for (i = 0; i < nspeclines; i++)
                    dst[i] = src[i] * q;
            }
        }

        for (sb = 0; sb < ctx->num_coded_subbands; sb++)
            ff_atrac3p_power_compensation(ctx, ch, &out[ch][0],
                                          sb_RNG_index[sb], sb);
    }

    /* joint-stereo post-processing, controlled per subband by the stream */
    if (ctx->unit_type == CH_UNIT_STEREO) {
        for (sb = 0; sb < ctx->num_coded_subbands; sb++) {
            if (ctx->swap_channels[sb]) {
                for (i = 0; i < ATRAC3P_SUBBAND_SAMPLES; i++)
                    FFSWAP(float, out[0][sb * ATRAC3P_SUBBAND_SAMPLES + i],
                                  out[1][sb * ATRAC3P_SUBBAND_SAMPLES + i]);
            }

            /* flip coefficients' sign if requested */
            if (ctx->negate_coeffs[sb])
                for (i = 0; i < ATRAC3P_SUBBAND_SAMPLES; i++)
                    out[1][sb * ATRAC3P_SUBBAND_SAMPLES + i] = -(out[1][sb * ATRAC3P_SUBBAND_SAMPLES + i]);
        }
    }
}
/**
 * Turn the reconstructed spectrum of one channel unit into time-domain
 * samples: per-subband IMDCT + windowing, gain compensation with overlap,
 * optional tone resynthesis, and final IPQF subband synthesis.
 * Afterwards the per-channel "current"/"previous" history pointers are
 * swapped so this frame's data becomes the next frame's history.
 *
 * @param ctx          global decoder context (work buffers, transforms)
 * @param ch_unit      channel unit whose spectrum is in ctx->samples
 * @param num_channels 1 (mono unit) or 2 (stereo unit)
 * @param avctx        codec context (unused here, kept for symmetry)
 */
static void reconstruct_frame(ATRAC3PContext *ctx, Atrac3pChanUnitCtx *ch_unit,
                              int num_channels, AVCodecContext *avctx)
{
    int ch, sb;

    for (ch = 0; ch < num_channels; ch++) {
        for (sb = 0; sb < ch_unit->num_subbands; sb++) {
            /* inverse transform and windowing; the window shape index
             * combines the previous frame's shape (high bit) with the
             * current one (low bit) */
            ff_atrac3p_imdct(&ctx->fdsp, &ctx->mdct_ctx,
                             &ctx->samples[ch][sb * ATRAC3P_SUBBAND_SAMPLES],
                             &ctx->mdct_buf[ch][sb * ATRAC3P_SUBBAND_SAMPLES],
                             (ch_unit->channels[ch].wnd_shape_prev[sb] << 1) +
                             ch_unit->channels[ch].wnd_shape[sb], sb);

            /* gain compensation and overlapping */
            ff_atrac_gain_compensation(&ctx->gainc_ctx,
                                       &ctx->mdct_buf[ch][sb * ATRAC3P_SUBBAND_SAMPLES],
                                       &ch_unit->prev_buf[ch][sb * ATRAC3P_SUBBAND_SAMPLES],
                                       &ch_unit->channels[ch].gain_data_prev[sb],
                                       &ch_unit->channels[ch].gain_data[sb],
                                       ATRAC3P_SUBBAND_SAMPLES,
                                       &ctx->time_buf[ch][sb * ATRAC3P_SUBBAND_SAMPLES]);
        }

        /* zero unused subbands in both output and overlapping buffers */
        memset(&ch_unit->prev_buf[ch][ch_unit->num_subbands * ATRAC3P_SUBBAND_SAMPLES],
               0,
               (ATRAC3P_SUBBANDS - ch_unit->num_subbands) *
               ATRAC3P_SUBBAND_SAMPLES *
               sizeof(ch_unit->prev_buf[ch][ch_unit->num_subbands * ATRAC3P_SUBBAND_SAMPLES]));
        memset(&ctx->time_buf[ch][ch_unit->num_subbands * ATRAC3P_SUBBAND_SAMPLES],
               0,
               (ATRAC3P_SUBBANDS - ch_unit->num_subbands) *
               ATRAC3P_SUBBAND_SAMPLES *
               sizeof(ctx->time_buf[ch][ch_unit->num_subbands * ATRAC3P_SUBBAND_SAMPLES]));

        /* resynthesize and add tonal signal */
        if (ch_unit->waves_info->tones_present ||
            ch_unit->waves_info_prev->tones_present) {
            for (sb = 0; sb < ch_unit->num_subbands; sb++)
                if (ch_unit->channels[ch].tones_info[sb].num_wavs ||
                    ch_unit->channels[ch].tones_info_prev[sb].num_wavs) {
                    ff_atrac3p_generate_tones(ch_unit, &ctx->fdsp, ch, sb,
                                              &ctx->time_buf[ch][sb * 128]);
                }
        }

        /* subband synthesis and acoustic signal output */
        ff_atrac3p_ipqf(&ctx->ipqf_dct_ctx, &ch_unit->ipqf_ctx[ch],
                        &ctx->time_buf[ch][0], &ctx->outp_buf[ch][0]);
    }

    /* swap window shape and gain control buffers. */
    for (ch = 0; ch < num_channels; ch++) {
        FFSWAP(uint8_t *, ch_unit->channels[ch].wnd_shape,
               ch_unit->channels[ch].wnd_shape_prev);
        FFSWAP(AtracGainInfo *, ch_unit->channels[ch].gain_data,
               ch_unit->channels[ch].gain_data_prev);
        FFSWAP(Atrac3pWavesData *, ch_unit->channels[ch].tones_info,
               ch_unit->channels[ch].tones_info_prev);
    }

    FFSWAP(Atrac3pWaveSynthParams *, ch_unit->waves_info, ch_unit->waves_info_prev);
}
  276. static int atrac3p_decode_frame(AVCodecContext *avctx, void *data,
  277. int *got_frame_ptr, AVPacket *avpkt)
  278. {
  279. ATRAC3PContext *ctx = avctx->priv_data;
  280. AVFrame *frame = data;
  281. int i, ret, ch_unit_id, ch_block = 0, out_ch_index = 0, channels_to_process;
  282. float **samples_p = (float **)frame->extended_data;
  283. frame->nb_samples = ATRAC3P_FRAME_SAMPLES;
  284. if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
  285. av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
  286. return ret;
  287. }
  288. if ((ret = init_get_bits8(&ctx->gb, avpkt->data, avpkt->size)) < 0)
  289. return ret;
  290. if (get_bits1(&ctx->gb)) {
  291. av_log(avctx, AV_LOG_ERROR, "Invalid start bit!\n");
  292. return AVERROR_INVALIDDATA;
  293. }
  294. while (get_bits_left(&ctx->gb) >= 2 &&
  295. (ch_unit_id = get_bits(&ctx->gb, 2)) != CH_UNIT_TERMINATOR) {
  296. if (ch_unit_id == CH_UNIT_EXTENSION) {
  297. avpriv_report_missing_feature(avctx, "Channel unit extension");
  298. return AVERROR_PATCHWELCOME;
  299. }
  300. if (ch_block >= ctx->num_channel_blocks ||
  301. ctx->channel_blocks[ch_block] != ch_unit_id) {
  302. av_log(avctx, AV_LOG_ERROR,
  303. "Frame data doesn't match channel configuration!\n");
  304. return AVERROR_INVALIDDATA;
  305. }
  306. ctx->ch_units[ch_block].unit_type = ch_unit_id;
  307. channels_to_process = ch_unit_id + 1;
  308. if ((ret = ff_atrac3p_decode_channel_unit(&ctx->gb,
  309. &ctx->ch_units[ch_block],
  310. channels_to_process,
  311. avctx)) < 0)
  312. return ret;
  313. decode_residual_spectrum(&ctx->ch_units[ch_block], ctx->samples,
  314. channels_to_process, avctx);
  315. reconstruct_frame(ctx, &ctx->ch_units[ch_block],
  316. channels_to_process, avctx);
  317. for (i = 0; i < channels_to_process; i++)
  318. memcpy(samples_p[out_ch_index + i], ctx->outp_buf[i],
  319. ATRAC3P_FRAME_SAMPLES * sizeof(**samples_p));
  320. ch_block++;
  321. out_ch_index += channels_to_process;
  322. }
  323. *got_frame_ptr = 1;
  324. return avctx->block_align;
  325. }
/* Codec registration entry for the ATRAC3+ decoder. */
AVCodec ff_atrac3p_decoder = {
    .name             = "atrac3plus",
    .long_name        = NULL_IF_CONFIG_SMALL("ATRAC3+ (Adaptive TRansform Acoustic Coding 3+)"),
    .type             = AVMEDIA_TYPE_AUDIO,
    .id               = AV_CODEC_ID_ATRAC3P,
    .capabilities     = AV_CODEC_CAP_DR1,
    .priv_data_size   = sizeof(ATRAC3PContext),
    .init             = atrac3p_decode_init,
    .init_static_data = ff_atrac3p_init_vlcs, /* VLC tables built once, globally */
    .close            = atrac3p_decode_close,
    .decode           = atrac3p_decode_frame,
};