/*
 * TwinVQ decoder
 * Copyright (c) 2009 Vitor Sessak
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/channel_layout.h"
#include "libavutil/float_dsp.h"
#include "avcodec.h"
#include "get_bits.h"
#include "dsputil.h"
#include "fft.h"
#include "internal.h"
#include "lsp.h"
#include "sinewin.h"

#include <math.h>
#include <stdint.h>

#include "twinvq_data.h"
enum FrameType {
    FT_SHORT = 0,  ///< Short frame (divided in n sub-blocks)
    FT_MEDIUM,     ///< Medium frame (divided in m<n sub-blocks)
    FT_LONG,       ///< Long frame (single sub-block + PPC)
    FT_PPC,        ///< Periodic Peak Component (part of the long frame)
};

/**
 * Parameters and tables that are different for each frame type
 */
struct FrameMode {
    uint8_t         sub;         ///< number of sub-blocks in each frame
    const uint16_t *bark_tab;

    /** number of distinct bark scale envelope values */
    uint8_t         bark_env_size;

    const int16_t  *bark_cb;     ///< codebook for the bark scale envelope (BSE)
    uint8_t         bark_n_coef; ///< number of BSE CB coefficients to read
    uint8_t         bark_n_bit;  ///< number of bits of the BSE coefs

    //@{
    /** main codebooks for spectrum data */
    const int16_t  *cb0;
    const int16_t  *cb1;
    //@}

    uint8_t         cb_len_read; ///< number of spectrum coefficients to read
};

/**
 * Parameters and tables that are different for every combination of
 * bitrate/sample rate
 */
typedef struct {
    struct FrameMode fmode[3];   ///< frame-type-dependent parameters

    uint16_t       size;         ///< frame size in samples
    uint8_t        n_lsp;        ///< number of lsp coefficients
    const float   *lspcodebook;

    /* number of bits of the different LSP CB coefficients */
    uint8_t        lsp_bit0;
    uint8_t        lsp_bit1;
    uint8_t        lsp_bit2;

    uint8_t        lsp_split;      ///< number of CB entries for the LSP decoding
    const int16_t *ppc_shape_cb;   ///< PPC shape CB

    /** number of bits for the PPC period value */
    uint8_t        ppc_period_bit;

    uint8_t        ppc_shape_bit;  ///< number of bits of the PPC shape CB coeffs
    uint8_t        ppc_shape_len;  ///< size of PPC shape CB
    uint8_t        pgain_bit;      ///< bits for PPC gain

    /** constant for peak period to peak width conversion */
    uint16_t       peak_per2wid;
} ModeTab;
static const ModeTab mode_08_08 = {
    {
        { 8, bark_tab_s08_64,  10, tab.fcb08s, 1, 5, tab.cb0808s0, tab.cb0808s1, 18 },
        { 2, bark_tab_m08_256, 20, tab.fcb08m, 2, 5, tab.cb0808m0, tab.cb0808m1, 16 },
        { 1, bark_tab_l08_512, 30, tab.fcb08l, 3, 6, tab.cb0808l0, tab.cb0808l1, 17 }
    },
    512, 12, tab.lsp08, 1, 5, 3, 3, tab.shape08, 8, 28, 20, 6, 40
};

static const ModeTab mode_11_08 = {
    {
        { 8, bark_tab_s11_64,  10, tab.fcb11s, 1, 5, tab.cb1108s0, tab.cb1108s1, 29 },
        { 2, bark_tab_m11_256, 20, tab.fcb11m, 2, 5, tab.cb1108m0, tab.cb1108m1, 24 },
        { 1, bark_tab_l11_512, 30, tab.fcb11l, 3, 6, tab.cb1108l0, tab.cb1108l1, 27 }
    },
    512, 16, tab.lsp11, 1, 6, 4, 3, tab.shape11, 9, 36, 30, 7, 90
};

static const ModeTab mode_11_10 = {
    {
        { 8, bark_tab_s11_64,  10, tab.fcb11s, 1, 5, tab.cb1110s0, tab.cb1110s1, 21 },
        { 2, bark_tab_m11_256, 20, tab.fcb11m, 2, 5, tab.cb1110m0, tab.cb1110m1, 18 },
        { 1, bark_tab_l11_512, 30, tab.fcb11l, 3, 6, tab.cb1110l0, tab.cb1110l1, 20 }
    },
    512, 16, tab.lsp11, 1, 6, 4, 3, tab.shape11, 9, 36, 30, 7, 90
};

static const ModeTab mode_16_16 = {
    {
        { 8, bark_tab_s16_128,  10, tab.fcb16s, 1, 5, tab.cb1616s0, tab.cb1616s1, 16 },
        { 2, bark_tab_m16_512,  20, tab.fcb16m, 2, 5, tab.cb1616m0, tab.cb1616m1, 15 },
        { 1, bark_tab_l16_1024, 30, tab.fcb16l, 3, 6, tab.cb1616l0, tab.cb1616l1, 16 }
    },
    1024, 16, tab.lsp16, 1, 6, 4, 3, tab.shape16, 9, 56, 60, 7, 180
};

static const ModeTab mode_22_20 = {
    {
        { 8, bark_tab_s22_128,  10, tab.fcb22s_1, 1, 6, tab.cb2220s0, tab.cb2220s1, 18 },
        { 2, bark_tab_m22_512,  20, tab.fcb22m_1, 2, 6, tab.cb2220m0, tab.cb2220m1, 17 },
        { 1, bark_tab_l22_1024, 32, tab.fcb22l_1, 4, 6, tab.cb2220l0, tab.cb2220l1, 18 }
    },
    1024, 16, tab.lsp22_1, 1, 6, 4, 3, tab.shape22_1, 9, 56, 36, 7, 144
};

static const ModeTab mode_22_24 = {
    {
        { 8, bark_tab_s22_128,  10, tab.fcb22s_1, 1, 6, tab.cb2224s0, tab.cb2224s1, 15 },
        { 2, bark_tab_m22_512,  20, tab.fcb22m_1, 2, 6, tab.cb2224m0, tab.cb2224m1, 14 },
        { 1, bark_tab_l22_1024, 32, tab.fcb22l_1, 4, 6, tab.cb2224l0, tab.cb2224l1, 15 }
    },
    1024, 16, tab.lsp22_1, 1, 6, 4, 3, tab.shape22_1, 9, 56, 36, 7, 144
};

static const ModeTab mode_22_32 = {
    {
        { 4, bark_tab_s22_128, 10, tab.fcb22s_2, 1, 6, tab.cb2232s0, tab.cb2232s1, 11 },
        { 2, bark_tab_m22_256, 20, tab.fcb22m_2, 2, 6, tab.cb2232m0, tab.cb2232m1, 11 },
        { 1, bark_tab_l22_512, 32, tab.fcb22l_2, 4, 6, tab.cb2232l0, tab.cb2232l1, 12 }
    },
    512, 16, tab.lsp22_2, 1, 6, 4, 4, tab.shape22_2, 9, 56, 36, 7, 72
};

static const ModeTab mode_44_40 = {
    {
        { 16, bark_tab_s44_128,  10, tab.fcb44s, 1, 6, tab.cb4440s0, tab.cb4440s1, 18 },
        {  4, bark_tab_m44_512,  20, tab.fcb44m, 2, 6, tab.cb4440m0, tab.cb4440m1, 17 },
        {  1, bark_tab_l44_2048, 40, tab.fcb44l, 4, 6, tab.cb4440l0, tab.cb4440l1, 17 }
    },
    2048, 20, tab.lsp44, 1, 6, 4, 4, tab.shape44, 9, 84, 54, 7, 432
};

static const ModeTab mode_44_48 = {
    {
        { 16, bark_tab_s44_128,  10, tab.fcb44s, 1, 6, tab.cb4448s0, tab.cb4448s1, 15 },
        {  4, bark_tab_m44_512,  20, tab.fcb44m, 2, 6, tab.cb4448m0, tab.cb4448m1, 14 },
        {  1, bark_tab_l44_2048, 40, tab.fcb44l, 4, 6, tab.cb4448l0, tab.cb4448l1, 14 }
    },
    2048, 20, tab.lsp44, 1, 6, 4, 4, tab.shape44, 9, 84, 54, 7, 432
};
typedef struct TwinContext {
    AVCodecContext *avctx;
    AVFrame frame;
    AVFloatDSPContext fdsp;
    FFTContext mdct_ctx[3];

    const ModeTab *mtab;

    // history
    float lsp_hist[2][20];           ///< LSP coefficients of the last frame
    float bark_hist[3][2][40];       ///< BSE coefficients of last frame

    // bitstream parameters
    int16_t permut[4][4096];
    uint8_t length[4][2];            ///< main codebook stride
    uint8_t length_change[4];
    uint8_t bits_main_spec[2][4][2]; ///< bits for the main codebook
    int bits_main_spec_change[4];
    int n_div[4];

    float *spectrum;
    float *curr_frame;               ///< non-interleaved output
    float *prev_frame;               ///< non-interleaved previous frame
    int last_block_pos[2];
    int discarded_packets;

    float *cos_tabs[3];

    // scratch buffers
    float *tmp_buf;
} TwinContext;

#define PPC_SHAPE_CB_SIZE 64
#define PPC_SHAPE_LEN_MAX 60
#define SUB_AMP_MAX       4500.0
#define MULAW_MU          100.0
#define GAIN_BITS         8
#define AMP_MAX           13000.0
#define SUB_GAIN_BITS     5
#define WINDOW_TYPE_BITS  4
#define PGAIN_MU          200
#define LSP_COEFS_MAX     20
#define LSP_SPLIT_MAX     4
#define CHANNELS_MAX      2
#define SUBBLOCKS_MAX     16
#define BARK_N_COEF_MAX   4

/** @note not speed critical, hence not optimized */
static void memset_float(float *buf, float val, int size)
{
    while (size--)
        *buf++ = val;
}
/**
 * Evaluate a single LPC amplitude spectrum envelope coefficient from the line
 * spectrum pairs.
 *
 * @param lsp a vector of the cosine of the LSP values
 * @param cos_val cos(PI*i/N) where i is the index of the LPC amplitude
 * @param order the order of the LSP (and the size of the *lsp buffer). Must
 *        be a multiple of four.
 * @return the LPC value
 *
 * @todo reuse code from Vorbis decoder: vorbis_floor0_decode
 */
static float eval_lpc_spectrum(const float *lsp, float cos_val, int order)
{
    int j;
    float p = 0.5f;
    float q = 0.5f;
    float two_cos_w = 2.0f * cos_val;

    for (j = 0; j + 1 < order; j += 2 * 2) {
        // Unroll the loop once since order is a multiple of four
        q *= lsp[j]     - two_cos_w;
        p *= lsp[j + 1] - two_cos_w;

        q *= lsp[j + 2] - two_cos_w;
        p *= lsp[j + 3] - two_cos_w;
    }

    p *= p * (2.0f - two_cos_w);
    q *= q * (2.0f + two_cos_w);

    return 0.5 / (p + q);
}
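
/* A note on eval_lpc_spectrum() above (an interpretation of the code, not text
 * taken from any specification): with lsp[] holding 2*cos() of the line
 * spectral frequencies (see dec_lpc_spectrum_inv()), the two running products
 * evaluate the symmetric/antisymmetric LSP polynomials at cos(w) = cos_val,
 * and the final (2 -/+ 2*cos(w)) weighting makes the returned value
 * proportional to the all-pole envelope 1 / |A(e^jw)|^2, much like the Vorbis
 * floor 0 curve mentioned in the @todo. */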

/**
 * Evaluate the LPC amplitude spectrum envelope from the line spectrum pairs.
 */
static void eval_lpcenv(TwinContext *tctx, const float *cos_vals, float *lpc)
{
    int i;
    const ModeTab *mtab = tctx->mtab;
    int size_s = mtab->size / mtab->fmode[FT_SHORT].sub;

    for (i = 0; i < size_s / 2; i++) {
        float cos_i = tctx->cos_tabs[0][i];
        lpc[i]              = eval_lpc_spectrum(cos_vals,  cos_i, mtab->n_lsp);
        lpc[size_s - i - 1] = eval_lpc_spectrum(cos_vals, -cos_i, mtab->n_lsp);
    }
}

static void interpolate(float *out, float v1, float v2, int size)
{
    int i;
    float step = (v1 - v2) / (size + 1);

    for (i = 0; i < size; i++) {
        v2 += step;
        out[i] = v2;
    }
}

static inline float get_cos(int idx, int part, const float *cos_tab, int size)
{
    return part ? -cos_tab[size - idx - 1]
                :  cos_tab[idx];
}
/**
 * Evaluate the LPC amplitude spectrum envelope from the line spectrum pairs.
 * Probably for speed reasons, the coefficients are evaluated as
 * siiiibiiiisiiiibiiiisiiiibiiiisiiiibiiiis ...
 * where s is an evaluated value, i is a value interpolated from the others
 * and b might be either calculated or interpolated, depending on an
 * unexplained condition.
 *
 * @param step the size of a block "siiiibiiii"
 * @param in the cosine of the LSP data
 * @param part is 0 for 0...PI (positive cosine values) and 1 for PI...2PI
 *        (negative cosine values)
 * @param size the size of the whole output
 */
static inline void eval_lpcenv_or_interp(TwinContext *tctx,
                                         enum FrameType ftype,
                                         float *out, const float *in,
                                         int size, int step, int part)
{
    int i;
    const ModeTab *mtab = tctx->mtab;
    const float *cos_tab = tctx->cos_tabs[ftype];

    // Fill the 's'
    for (i = 0; i < size; i += step)
        out[i] = eval_lpc_spectrum(in, get_cos(i, part, cos_tab, size),
                                   mtab->n_lsp);

    // Fill the 'iiiibiiii'
    for (i = step; i <= size - 2 * step; i += step) {
        if (out[i + step] + out[i - step] > 1.95 * out[i] ||
            out[i + step]                >= out[i - step]) {
            interpolate(out + i - step + 1, out[i], out[i - step], step - 1);
        } else {
            out[i - step / 2] =
                eval_lpc_spectrum(in, get_cos(i - step / 2, part, cos_tab, size),
                                  mtab->n_lsp);
            interpolate(out + i - step + 1, out[i - step / 2],
                        out[i - step], step / 2 - 1);
            interpolate(out + i - step / 2 + 1, out[i],
                        out[i - step / 2], step / 2 - 1);
        }
    }

    interpolate(out + size - 2 * step + 1, out[size - step],
                out[size - 2 * step], step - 1);
}
static void eval_lpcenv_2parts(TwinContext *tctx, enum FrameType ftype,
                               const float *buf, float *lpc,
                               int size, int step)
{
    eval_lpcenv_or_interp(tctx, ftype, lpc,            buf, size / 2,     step, 0);
    eval_lpcenv_or_interp(tctx, ftype, lpc + size / 2, buf, size / 2, 2 * step, 1);

    interpolate(lpc + size / 2 - step + 1, lpc[size / 2], lpc[size / 2 - step], step);

    memset_float(lpc + size - 2 * step + 1, lpc[size - 2 * step], 2 * step - 1);
}
/**
 * Inverse quantization. Read CB coefficients for cb0 and cb1 from the
 * bitstream, sum the corresponding vectors and write the result to *out
 * after permutation.
 */
static void dequant(TwinContext *tctx, GetBitContext *gb, float *out,
                    enum FrameType ftype,
                    const int16_t *cb0, const int16_t *cb1, int cb_len)
{
    int pos = 0;
    int i, j;

    for (i = 0; i < tctx->n_div[ftype]; i++) {
        int tmp0, tmp1;
        int sign0 = 1;
        int sign1 = 1;
        const int16_t *tab0, *tab1;
        int length = tctx->length[ftype][i >= tctx->length_change[ftype]];
        int bitstream_second_part = (i >= tctx->bits_main_spec_change[ftype]);

        int bits = tctx->bits_main_spec[0][ftype][bitstream_second_part];
        if (bits == 7) {
            if (get_bits1(gb))
                sign0 = -1;
            bits = 6;
        }
        tmp0 = get_bits(gb, bits);

        bits = tctx->bits_main_spec[1][ftype][bitstream_second_part];
        if (bits == 7) {
            if (get_bits1(gb))
                sign1 = -1;
            bits = 6;
        }
        tmp1 = get_bits(gb, bits);

        tab0 = cb0 + tmp0 * cb_len;
        tab1 = cb1 + tmp1 * cb_len;

        for (j = 0; j < length; j++)
            out[tctx->permut[ftype][pos + j]] = sign0 * tab0[j] + sign1 * tab1[j];

        pos += length;
    }
}
static inline float mulawinv(float y, float clip, float mu)
{
    y = av_clipf(y / clip, -1, 1);
    return clip * FFSIGN(y) * (exp(log(1 + mu) * fabs(y)) - 1) / mu;
}
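
/* mulawinv() above is the usual inverse mu-law expansion
 * x = clip * sign(y) * ((1 + mu)^(|y|/clip) - 1) / mu, written with exp/log.
 * Worked example (plain arithmetic, not from any reference): with
 * clip = AMP_MAX = 13000 and mu = MULAW_MU = 100, a half-scale input of 6500
 * expands to about 13000 * (sqrt(101) - 1) / 100 ~= 1176. */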

/**
 * Evaluate a*b/400 rounded to the nearest integer. When, for example,
 * a*b == 200 and the nearest integer is ill-defined, use a table to emulate
 * the following broken float-based implementation used by the binary decoder:
 *
 * @code
 * static int very_broken_op(int a, int b)
 * {
 *     static float test; // Ugh, force gcc to do the division first...
 *
 *     test = a / 400.;
 *     return b * test + 0.5;
 * }
 * @endcode
 *
 * @note if this function is replaced by just ROUNDED_DIV(a*b,400.), the stddev
 * between the original file (before encoding with Yamaha encoder) and the
 * decoded output increases, which leads one to believe that the encoder expects
 * exactly this broken calculation.
 */
static int very_broken_op(int a, int b)
{
    int x = a * b + 200;
    int size;
    const uint8_t *rtab;

    if (x % 400 || b % 5)
        return x / 400;

    x /= 400;

    size = tabs[b / 5].size;
    rtab = tabs[b / 5].tab;
    return x - rtab[size * av_log2(2 * (x - 1) / size) + (x - 1) % size];
}
/**
 * Sum to data a periodic peak of a given period, width and shape.
 *
 * @param period the period of the peak divided by 400.0
 */
static void add_peak(int period, int width, const float *shape,
                     float ppc_gain, float *speech, int len)
{
    int i, j;
    const float *shape_end = shape + len;
    int center;

    // First peak centered around zero
    for (i = 0; i < width / 2; i++)
        speech[i] += ppc_gain * *shape++;

    for (i = 1; i < ROUNDED_DIV(len, width); i++) {
        center = very_broken_op(period, i);
        for (j = -width / 2; j < (width + 1) / 2; j++)
            speech[j + center] += ppc_gain * *shape++;
    }

    // For the last block, be careful not to go beyond the end of the buffer
    center = very_broken_op(period, i);
    for (j = -width / 2; j < (width + 1) / 2 && shape < shape_end; j++)
        speech[j + center] += ppc_gain * *shape++;
}
static void decode_ppc(TwinContext *tctx, int period_coef, const float *shape,
                       float ppc_gain, float *speech)
{
    const ModeTab *mtab = tctx->mtab;
    int isampf = tctx->avctx->sample_rate / 1000;
    int ibps = tctx->avctx->bit_rate / (1000 * tctx->avctx->channels);
    int min_period = ROUNDED_DIV(    40 * 2 * mtab->size, isampf);
    int max_period = ROUNDED_DIV(6 * 40 * 2 * mtab->size, isampf);
    int period_range = max_period - min_period;
    // This is actually the period multiplied by 400. It is just linearly coded
    // between its maximum and minimum value.
    int period = min_period +
                 ROUNDED_DIV(period_coef * period_range,
                             (1 << mtab->ppc_period_bit) - 1);
    int width;

    if (isampf == 22 && ibps == 32) {
        // For some unknown reason, NTT decided to code this case differently...
        width = ROUNDED_DIV((period + 800) * mtab->peak_per2wid, 400 * mtab->size);
    } else
        width = period * mtab->peak_per2wid / (400 * mtab->size);

    add_peak(period, width, shape, ppc_gain, speech, mtab->ppc_shape_len);
}
static void dec_gain(TwinContext *tctx, GetBitContext *gb, enum FrameType ftype,
                     float *out)
{
    const ModeTab *mtab = tctx->mtab;
    int i, j;
    int sub = mtab->fmode[ftype].sub;
    float step     = AMP_MAX     / ((1 << GAIN_BITS)     - 1);
    float sub_step = SUB_AMP_MAX / ((1 << SUB_GAIN_BITS) - 1);

    if (ftype == FT_LONG) {
        for (i = 0; i < tctx->avctx->channels; i++)
            out[i] = (1. / (1 << 13)) *
                     mulawinv(step * 0.5 + step * get_bits(gb, GAIN_BITS),
                              AMP_MAX, MULAW_MU);
    } else {
        for (i = 0; i < tctx->avctx->channels; i++) {
            float val = (1. / (1 << 23)) *
                        mulawinv(step * 0.5 + step * get_bits(gb, GAIN_BITS),
                                 AMP_MAX, MULAW_MU);

            for (j = 0; j < sub; j++) {
                out[i * sub + j] =
                    val * mulawinv(sub_step * 0.5 +
                                   sub_step * get_bits(gb, SUB_GAIN_BITS),
                                   SUB_AMP_MAX, MULAW_MU);
            }
        }
    }
}
/**
 * Rearrange the LSP coefficients so that they have a minimum distance of
 * min_dist. This function does it exactly as described in section 3.2.4
 * of the G.729 specification (but interestingly is different from what the
 * reference decoder actually does).
 */
static void rearrange_lsp(int order, float *lsp, float min_dist)
{
    int i;
    float min_dist2 = min_dist * 0.5;

    for (i = 1; i < order; i++)
        if (lsp[i] - lsp[i - 1] < min_dist) {
            float avg = (lsp[i] + lsp[i - 1]) * 0.5;

            lsp[i - 1] = avg - min_dist2;
            lsp[i]     = avg + min_dist2;
        }
}
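
/* Worked example for rearrange_lsp() above (plain arithmetic derived from the
 * code): with min_dist = 0.0001 and the pair {0.10000, 0.10005}, the gap of
 * 0.00005 is too small, so both values are re-centered around their average
 * 0.100025, giving {0.099975, 0.100075}, i.e. exactly min_dist apart. */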

static void decode_lsp(TwinContext *tctx, int lpc_idx1, uint8_t *lpc_idx2,
                       int lpc_hist_idx, float *lsp, float *hist)
{
    const ModeTab *mtab = tctx->mtab;
    int i, j;

    const float *cb  = mtab->lspcodebook;
    const float *cb2 = cb  + (1 << mtab->lsp_bit1) * mtab->n_lsp;
    const float *cb3 = cb2 + (1 << mtab->lsp_bit2) * mtab->n_lsp;

    const int8_t funny_rounding[4] = {
        -2,
        mtab->lsp_split == 4 ? -2 : 1,
        mtab->lsp_split == 4 ? -2 : 1,
        0
    };

    j = 0;
    for (i = 0; i < mtab->lsp_split; i++) {
        int chunk_end = ((i + 1) * mtab->n_lsp + funny_rounding[i]) / mtab->lsp_split;
        for (; j < chunk_end; j++)
            lsp[j] = cb[lpc_idx1      * mtab->n_lsp + j] +
                     cb2[lpc_idx2[i]  * mtab->n_lsp + j];
    }

    rearrange_lsp(mtab->n_lsp, lsp, 0.0001);

    for (i = 0; i < mtab->n_lsp; i++) {
        float tmp1 = 1.      - cb3[lpc_hist_idx * mtab->n_lsp + i];
        float tmp2 = hist[i] * cb3[lpc_hist_idx * mtab->n_lsp + i];
        hist[i] = lsp[i];
        lsp[i]  = lsp[i] * tmp1 + tmp2;
    }

    rearrange_lsp(mtab->n_lsp, lsp, 0.0001);
    rearrange_lsp(mtab->n_lsp, lsp, 0.000095);
    ff_sort_nearly_sorted_floats(lsp, mtab->n_lsp);
}
static void dec_lpc_spectrum_inv(TwinContext *tctx, float *lsp,
                                 enum FrameType ftype, float *lpc)
{
    int i;
    int size = tctx->mtab->size / tctx->mtab->fmode[ftype].sub;

    for (i = 0; i < tctx->mtab->n_lsp; i++)
        lsp[i] = 2 * cos(lsp[i]);

    switch (ftype) {
    case FT_LONG:
        eval_lpcenv_2parts(tctx, ftype, lsp, lpc, size, 8);
        break;
    case FT_MEDIUM:
        eval_lpcenv_2parts(tctx, ftype, lsp, lpc, size, 2);
        break;
    case FT_SHORT:
        eval_lpcenv(tctx, lsp, lpc);
        break;
    }
}
static void imdct_and_window(TwinContext *tctx, enum FrameType ftype, int wtype,
                             float *in, float *prev, int ch)
{
    FFTContext *mdct = &tctx->mdct_ctx[ftype];
    const ModeTab *mtab = tctx->mtab;
    int bsize = mtab->size / mtab->fmode[ftype].sub;
    int size  = mtab->size;
    float *buf1 = tctx->tmp_buf;
    int j;
    int wsize; // Window size
    float *out  = tctx->curr_frame + 2 * ch * mtab->size;
    float *out2 = out;
    float *prev_buf;
    int first_wsize;

    static const uint8_t wtype_to_wsize[] = { 0, 0, 2, 2, 2, 1, 0, 1, 1 };
    int types_sizes[] = {
        mtab->size /      mtab->fmode[FT_LONG  ].sub,
        mtab->size /      mtab->fmode[FT_MEDIUM].sub,
        mtab->size / (2 * mtab->fmode[FT_SHORT ].sub),
    };

    wsize = types_sizes[wtype_to_wsize[wtype]];
    first_wsize = wsize;
    prev_buf = prev + (size - bsize) / 2;

    for (j = 0; j < mtab->fmode[ftype].sub; j++) {
        int sub_wtype = ftype == FT_MEDIUM ? 8 : wtype;

        if (!j && wtype == 4)
            sub_wtype = 4;
        else if (j == mtab->fmode[ftype].sub - 1 && wtype == 7)
            sub_wtype = 7;

        wsize = types_sizes[wtype_to_wsize[sub_wtype]];

        mdct->imdct_half(mdct, buf1 + bsize * j, in + bsize * j);

        tctx->fdsp.vector_fmul_window(out2, prev_buf + (bsize - wsize) / 2,
                                      buf1 + bsize * j,
                                      ff_sine_windows[av_log2(wsize)],
                                      wsize / 2);
        out2 += wsize;

        memcpy(out2, buf1 + bsize * j + wsize / 2,
               (bsize - wsize / 2) * sizeof(float));

        out2 += ftype == FT_MEDIUM ? (bsize - wsize) / 2 : bsize - wsize;

        prev_buf = buf1 + bsize * j + bsize / 2;
    }

    tctx->last_block_pos[ch] = (size + first_wsize) / 2;
}
static void imdct_output(TwinContext *tctx, enum FrameType ftype, int wtype,
                         float **out)
{
    const ModeTab *mtab = tctx->mtab;
    int size1, size2;
    float *prev_buf = tctx->prev_frame + tctx->last_block_pos[0];
    int i;

    for (i = 0; i < tctx->avctx->channels; i++) {
        imdct_and_window(tctx, ftype, wtype,
                         tctx->spectrum + i * mtab->size,
                         prev_buf + 2 * i * mtab->size,
                         i);
    }

    if (!out)
        return;

    size2 = tctx->last_block_pos[0];
    size1 = mtab->size - size2;

    memcpy(&out[0][0],     prev_buf,         size1 * sizeof(out[0][0]));
    memcpy(&out[0][size1], tctx->curr_frame, size2 * sizeof(out[0][0]));

    if (tctx->avctx->channels == 2) {
        memcpy(&out[1][0],     &prev_buf[2 * mtab->size],
               size1 * sizeof(out[1][0]));
        memcpy(&out[1][size1], &tctx->curr_frame[2 * mtab->size],
               size2 * sizeof(out[1][0]));
        tctx->fdsp.butterflies_float(out[0], out[1], mtab->size);
    }
}
static void dec_bark_env(TwinContext *tctx, const uint8_t *in, int use_hist,
                         int ch, float *out, float gain, enum FrameType ftype)
{
    const ModeTab *mtab = tctx->mtab;
    int i, j;
    float *hist = tctx->bark_hist[ftype][ch];
    float val = ((const float []) { 0.4, 0.35, 0.28 })[ftype];
    int bark_n_coef = mtab->fmode[ftype].bark_n_coef;
    int fw_cb_len = mtab->fmode[ftype].bark_env_size / bark_n_coef;
    int idx = 0;

    for (i = 0; i < fw_cb_len; i++)
        for (j = 0; j < bark_n_coef; j++, idx++) {
            float tmp2 = mtab->fmode[ftype].bark_cb[fw_cb_len * in[j] + i] *
                         (1. / 4096);
            float st = use_hist ? (1. - val) * tmp2 + val * hist[idx] + 1.
                                : tmp2 + 1.;

            hist[idx] = tmp2;
            if (st < -1.)
                st = 1.;

            memset_float(out, st * gain, mtab->fmode[ftype].bark_tab[idx]);
            out += mtab->fmode[ftype].bark_tab[idx];
        }
}
static void read_and_decode_spectrum(TwinContext *tctx, GetBitContext *gb,
                                     float *out, enum FrameType ftype)
{
    const ModeTab *mtab = tctx->mtab;
    int channels = tctx->avctx->channels;
    int sub = mtab->fmode[ftype].sub;
    int block_size = mtab->size / sub;
    float gain[CHANNELS_MAX * SUBBLOCKS_MAX];
    float ppc_shape[PPC_SHAPE_LEN_MAX * CHANNELS_MAX * 4];
    uint8_t bark1[CHANNELS_MAX][SUBBLOCKS_MAX][BARK_N_COEF_MAX];
    uint8_t bark_use_hist[CHANNELS_MAX][SUBBLOCKS_MAX];

    uint8_t lpc_idx1[CHANNELS_MAX];
    uint8_t lpc_idx2[CHANNELS_MAX][LSP_SPLIT_MAX];
    uint8_t lpc_hist_idx[CHANNELS_MAX];

    int i, j, k;

    dequant(tctx, gb, out, ftype,
            mtab->fmode[ftype].cb0, mtab->fmode[ftype].cb1,
            mtab->fmode[ftype].cb_len_read);

    for (i = 0; i < channels; i++)
        for (j = 0; j < sub; j++)
            for (k = 0; k < mtab->fmode[ftype].bark_n_coef; k++)
                bark1[i][j][k] = get_bits(gb, mtab->fmode[ftype].bark_n_bit);

    for (i = 0; i < channels; i++)
        for (j = 0; j < sub; j++)
            bark_use_hist[i][j] = get_bits1(gb);

    dec_gain(tctx, gb, ftype, gain);

    for (i = 0; i < channels; i++) {
        lpc_hist_idx[i] = get_bits(gb, tctx->mtab->lsp_bit0);
        lpc_idx1[i]     = get_bits(gb, tctx->mtab->lsp_bit1);

        for (j = 0; j < tctx->mtab->lsp_split; j++)
            lpc_idx2[i][j] = get_bits(gb, tctx->mtab->lsp_bit2);
    }

    if (ftype == FT_LONG) {
        int cb_len_p = (tctx->n_div[3] + mtab->ppc_shape_len * channels - 1) /
                       tctx->n_div[3];
        dequant(tctx, gb, ppc_shape, FT_PPC, mtab->ppc_shape_cb,
                mtab->ppc_shape_cb + cb_len_p * PPC_SHAPE_CB_SIZE, cb_len_p);
    }

    for (i = 0; i < channels; i++) {
        float *chunk = out + mtab->size * i;
        float lsp[LSP_COEFS_MAX];

        for (j = 0; j < sub; j++) {
            dec_bark_env(tctx, bark1[i][j], bark_use_hist[i][j], i,
                         tctx->tmp_buf, gain[sub * i + j], ftype);

            tctx->fdsp.vector_fmul(chunk + block_size * j,
                                   chunk + block_size * j,
                                   tctx->tmp_buf, block_size);
        }

        if (ftype == FT_LONG) {
            float pgain_step = 25000. / ((1 << mtab->pgain_bit) - 1);
            int p_coef = get_bits(gb, tctx->mtab->ppc_period_bit);
            int g_coef = get_bits(gb, tctx->mtab->pgain_bit);
            float v = 1. / 8192 *
                      mulawinv(pgain_step * g_coef + pgain_step / 2,
                               25000., PGAIN_MU);

            decode_ppc(tctx, p_coef, ppc_shape + i * mtab->ppc_shape_len, v,
                       chunk);
        }

        decode_lsp(tctx, lpc_idx1[i], lpc_idx2[i], lpc_hist_idx[i], lsp,
                   tctx->lsp_hist[i]);
        dec_lpc_spectrum_inv(tctx, lsp, ftype, tctx->tmp_buf);

        for (j = 0; j < mtab->fmode[ftype].sub; j++) {
            tctx->fdsp.vector_fmul(chunk, chunk, tctx->tmp_buf, block_size);
            chunk += block_size;
        }
    }
}
static int twin_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    TwinContext *tctx = avctx->priv_data;
    GetBitContext gb;
    const ModeTab *mtab = tctx->mtab;
    float **out = NULL;
    enum FrameType ftype;
    int window_type, ret;
    static const enum FrameType wtype_to_ftype_table[] = {
        FT_LONG,   FT_LONG, FT_SHORT, FT_LONG,
        FT_MEDIUM, FT_LONG, FT_LONG,  FT_MEDIUM, FT_MEDIUM
    };

    if (buf_size * 8 < avctx->bit_rate * mtab->size / avctx->sample_rate + 8) {
        av_log(avctx, AV_LOG_ERROR,
               "Frame too small (%d bytes). Truncated file?\n", buf_size);
        return AVERROR(EINVAL);
    }

    /* get output buffer */
    if (tctx->discarded_packets >= 2) {
        tctx->frame.nb_samples = mtab->size;
        if ((ret = ff_get_buffer(avctx, &tctx->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }
        out = (float **)tctx->frame.extended_data;
    }

    init_get_bits(&gb, buf, buf_size * 8);
    skip_bits(&gb, get_bits(&gb, 8));

    window_type = get_bits(&gb, WINDOW_TYPE_BITS);
    if (window_type > 8) {
        av_log(avctx, AV_LOG_ERROR, "Invalid window type, broken sample?\n");
        return -1;
    }

    ftype = wtype_to_ftype_table[window_type];

    read_and_decode_spectrum(tctx, &gb, tctx->spectrum, ftype);

    imdct_output(tctx, ftype, window_type, out);

    FFSWAP(float *, tctx->curr_frame, tctx->prev_frame);

    if (tctx->discarded_packets < 2) {
        tctx->discarded_packets++;
        *got_frame_ptr = 0;
        return buf_size;
    }

    *got_frame_ptr   = 1;
    *(AVFrame *)data = tctx->frame;

    return buf_size;
}
/**
 * Init IMDCT and windowing tables
 */
static av_cold int init_mdct_win(TwinContext *tctx)
{
    int i, j, ret;
    const ModeTab *mtab = tctx->mtab;
    int size_s = mtab->size / mtab->fmode[FT_SHORT].sub;
    int size_m = mtab->size / mtab->fmode[FT_MEDIUM].sub;
    int channels = tctx->avctx->channels;
    float norm = channels == 1 ? 2. : 1.;

    for (i = 0; i < 3; i++) {
        int bsize = tctx->mtab->size / tctx->mtab->fmode[i].sub;
        if ((ret = ff_mdct_init(&tctx->mdct_ctx[i], av_log2(bsize) + 1, 1,
                                -sqrt(norm / bsize) / (1 << 15))))
            return ret;
    }

    FF_ALLOC_OR_GOTO(tctx->avctx, tctx->tmp_buf,
                     mtab->size * sizeof(*tctx->tmp_buf), alloc_fail);

    FF_ALLOC_OR_GOTO(tctx->avctx, tctx->spectrum,
                     2 * mtab->size * channels * sizeof(*tctx->spectrum),
                     alloc_fail);
    FF_ALLOC_OR_GOTO(tctx->avctx, tctx->curr_frame,
                     2 * mtab->size * channels * sizeof(*tctx->curr_frame),
                     alloc_fail);
    FF_ALLOC_OR_GOTO(tctx->avctx, tctx->prev_frame,
                     2 * mtab->size * channels * sizeof(*tctx->prev_frame),
                     alloc_fail);

    for (i = 0; i < 3; i++) {
        int m = 4 * mtab->size / mtab->fmode[i].sub;
        double freq = 2 * M_PI / m;
        FF_ALLOC_OR_GOTO(tctx->avctx, tctx->cos_tabs[i],
                         (m / 4) * sizeof(*tctx->cos_tabs[i]), alloc_fail);

        for (j = 0; j <= m / 8; j++)
            tctx->cos_tabs[i][j] = cos((2 * j + 1) * freq);
        for (j = 1; j < m / 8; j++)
            tctx->cos_tabs[i][m / 4 - j] = tctx->cos_tabs[i][j];
    }

    ff_init_ff_sine_windows(av_log2(size_m));
    ff_init_ff_sine_windows(av_log2(size_s / 2));
    ff_init_ff_sine_windows(av_log2(mtab->size));

    return 0;

alloc_fail:
    return AVERROR(ENOMEM);
}
/**
 * Interpret the data as if it were a num_blocks x line_len[0] matrix and for
 * each line do a cyclic permutation, i.e.
 * abcdefghijklm -> defghijklmabc
 * where the amount to be shifted is evaluated depending on the column.
 */
static void permutate_in_line(int16_t *tab, int num_vect, int num_blocks,
                              int block_size,
                              const uint8_t line_len[2], int length_div,
                              enum FrameType ftype)
{
    int i, j;

    for (i = 0; i < line_len[0]; i++) {
        int shift;

        if (num_blocks == 1                             ||
            (ftype == FT_LONG && num_vect % num_blocks) ||
            (ftype != FT_LONG && num_vect & 1)          ||
            i == line_len[1]) {
            shift = 0;
        } else if (ftype == FT_LONG) {
            shift = i;
        } else
            shift = i * i;

        for (j = 0; j < num_vect && (j + num_vect * i < block_size * num_blocks); j++)
            tab[i * num_vect + j] = i * num_vect + (j + shift) % num_vect;
    }
}
/**
 * Interpret the input data as in the following table:
 *
 * @verbatim
 *
 * abcdefgh
 * ijklmnop
 * qrstuvw
 * x123456
 *
 * @endverbatim
 *
 * and transpose it, giving the output
 * aiqxbjr1cks2dlt3emu4fvn5gow6hp
 */
static void transpose_perm(int16_t *out, int16_t *in, int num_vect,
                           const uint8_t line_len[2], int length_div)
{
    int i, j;
    int cont = 0;

    for (i = 0; i < num_vect; i++)
        for (j = 0; j < line_len[i >= length_div]; j++)
            out[cont++] = in[j * num_vect + i];
}

static void linear_perm(int16_t *out, int16_t *in, int n_blocks, int size)
{
    int block_size = size / n_blocks;
    int i;

    for (i = 0; i < size; i++)
        out[i] = block_size * (in[i] % n_blocks) + in[i] / n_blocks;
}
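
/* How the spectrum permutation used by dequant() is put together (a summary of
 * construct_perm_table() below, not of any external documentation): first
 * permutate_in_line() writes per-line cyclically shifted indices, then
 * transpose_perm() reads that jagged matrix column by column, and finally
 * linear_perm() maps the interleaved sub-block indexing onto the
 * non-interleaved layout used for the output spectrum. */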

static av_cold void construct_perm_table(TwinContext *tctx, enum FrameType ftype)
{
    int block_size;
    const ModeTab *mtab = tctx->mtab;
    int size;
    int16_t *tmp_perm = (int16_t *)tctx->tmp_buf;

    if (ftype == FT_PPC) {
        size       = tctx->avctx->channels;
        block_size = mtab->ppc_shape_len;
    } else {
        size       = tctx->avctx->channels * mtab->fmode[ftype].sub;
        block_size = mtab->size / mtab->fmode[ftype].sub;
    }

    permutate_in_line(tmp_perm, tctx->n_div[ftype], size,
                      block_size, tctx->length[ftype],
                      tctx->length_change[ftype], ftype);

    transpose_perm(tctx->permut[ftype], tmp_perm, tctx->n_div[ftype],
                   tctx->length[ftype], tctx->length_change[ftype]);

    linear_perm(tctx->permut[ftype], tctx->permut[ftype], size,
                size * block_size);
}
static av_cold void init_bitstream_params(TwinContext *tctx)
{
    const ModeTab *mtab = tctx->mtab;
    int n_ch = tctx->avctx->channels;
    int total_fr_bits = tctx->avctx->bit_rate * mtab->size /
                        tctx->avctx->sample_rate;

    int lsp_bits_per_block = n_ch * (mtab->lsp_bit0 + mtab->lsp_bit1 +
                                     mtab->lsp_split * mtab->lsp_bit2);

    int ppc_bits = n_ch * (mtab->pgain_bit + mtab->ppc_shape_bit +
                           mtab->ppc_period_bit);

    int bsize_no_main_cb[3];
    int bse_bits[3];
    int i;
    enum FrameType frametype;

    for (i = 0; i < 3; i++)
        // +1 for history usage switch
        bse_bits[i] = n_ch *
                      (mtab->fmode[i].bark_n_coef * mtab->fmode[i].bark_n_bit + 1);

    bsize_no_main_cb[2] = bse_bits[2] + lsp_bits_per_block + ppc_bits +
                          WINDOW_TYPE_BITS + n_ch * GAIN_BITS;

    for (i = 0; i < 2; i++)
        bsize_no_main_cb[i] =
            lsp_bits_per_block + n_ch * GAIN_BITS + WINDOW_TYPE_BITS +
            mtab->fmode[i].sub * (bse_bits[i] + n_ch * SUB_GAIN_BITS);

    // The remaining bits are all used for the main spectrum coefficients
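    /* Illustration of the split below (derived from this code, with made-up
     * numbers): if a frame type has bit_size = 100 bits left for the main
     * codebooks, they are spread over n_div = (100 + 13) / 14 = 8 vector pairs;
     * rounded_up = 13 and rounded_down = 12, so the first 4 pairs get 13 bits
     * (split 7/6 between the two codebooks, a 7-bit field meaning 1 sign bit
     * plus 6 index bits in dequant()) and the remaining 4 pairs get 12 bits,
     * split 6/6. */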
    for (i = 0; i < 4; i++) {
        int bit_size;
        int vect_size;
        int rounded_up, rounded_down, num_rounded_down, num_rounded_up;
        if (i == 3) {
            bit_size  = n_ch * mtab->ppc_shape_bit;
            vect_size = n_ch * mtab->ppc_shape_len;
        } else {
            bit_size  = total_fr_bits - bsize_no_main_cb[i];
            vect_size = n_ch * mtab->size;
        }

        tctx->n_div[i] = (bit_size + 13) / 14;

        rounded_up       = (bit_size + tctx->n_div[i] - 1) / tctx->n_div[i];
        rounded_down     = bit_size / tctx->n_div[i];
        num_rounded_down = rounded_up * tctx->n_div[i] - bit_size;
        num_rounded_up   = tctx->n_div[i] - num_rounded_down;
        tctx->bits_main_spec[0][i][0]  = (rounded_up + 1)   / 2;
        tctx->bits_main_spec[1][i][0]  =  rounded_up        / 2;
        tctx->bits_main_spec[0][i][1]  = (rounded_down + 1) / 2;
        tctx->bits_main_spec[1][i][1]  =  rounded_down      / 2;
        tctx->bits_main_spec_change[i] = num_rounded_up;

        rounded_up       = (vect_size + tctx->n_div[i] - 1) / tctx->n_div[i];
        rounded_down     = vect_size / tctx->n_div[i];
        num_rounded_down = rounded_up * tctx->n_div[i] - vect_size;
        num_rounded_up   = tctx->n_div[i] - num_rounded_down;
        tctx->length[i][0]      = rounded_up;
        tctx->length[i][1]      = rounded_down;
        tctx->length_change[i]  = num_rounded_up;
    }

    for (frametype = FT_SHORT; frametype <= FT_PPC; frametype++)
        construct_perm_table(tctx, frametype);
}
static av_cold int twin_decode_close(AVCodecContext *avctx)
{
    TwinContext *tctx = avctx->priv_data;
    int i;

    for (i = 0; i < 3; i++) {
        ff_mdct_end(&tctx->mdct_ctx[i]);
        av_free(tctx->cos_tabs[i]);
    }

    av_free(tctx->curr_frame);
    av_free(tctx->spectrum);
    av_free(tctx->prev_frame);
    av_free(tctx->tmp_buf);

    return 0;
}
static av_cold int twin_decode_init(AVCodecContext *avctx)
{
    int ret;
    TwinContext *tctx = avctx->priv_data;
    int isampf, ibps;

    tctx->avctx = avctx;
    avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;

    if (!avctx->extradata || avctx->extradata_size < 12) {
        av_log(avctx, AV_LOG_ERROR, "Missing or incomplete extradata\n");
        return AVERROR_INVALIDDATA;
    }
    avctx->channels = AV_RB32(avctx->extradata)     + 1;
    avctx->bit_rate = AV_RB32(avctx->extradata + 4) * 1000;
    isampf          = AV_RB32(avctx->extradata + 8);

    if (isampf < 8 || isampf > 44) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate\n");
        return AVERROR_INVALIDDATA;
    }
    switch (isampf) {
    case 44: avctx->sample_rate = 44100;         break;
    case 22: avctx->sample_rate = 22050;         break;
    case 11: avctx->sample_rate = 11025;         break;
    default: avctx->sample_rate = isampf * 1000; break;
    }

    if (avctx->channels <= 0 || avctx->channels > CHANNELS_MAX) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported number of channels: %i\n",
               avctx->channels);
        return -1;
    }
    avctx->channel_layout = avctx->channels == 1 ? AV_CH_LAYOUT_MONO
                                                 : AV_CH_LAYOUT_STEREO;

    ibps = avctx->bit_rate / (1000 * avctx->channels);
    if (ibps > 255U) {
        av_log(avctx, AV_LOG_ERROR, "unsupported per channel bitrate %dkbps\n",
               ibps);
        return AVERROR_INVALIDDATA;
    }

    switch ((isampf << 8) + ibps) {
    case (8  << 8) +  8: tctx->mtab = &mode_08_08; break;
    case (11 << 8) +  8: tctx->mtab = &mode_11_08; break;
    case (11 << 8) + 10: tctx->mtab = &mode_11_10; break;
    case (16 << 8) + 16: tctx->mtab = &mode_16_16; break;
    case (22 << 8) + 20: tctx->mtab = &mode_22_20; break;
    case (22 << 8) + 24: tctx->mtab = &mode_22_24; break;
    case (22 << 8) + 32: tctx->mtab = &mode_22_32; break;
    case (44 << 8) + 40: tctx->mtab = &mode_44_40; break;
    case (44 << 8) + 48: tctx->mtab = &mode_44_48; break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "This version does not support %d kHz - %d kbit/s/ch mode.\n",
               isampf, ibps);
        return -1;
    }

    avpriv_float_dsp_init(&tctx->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
    if ((ret = init_mdct_win(tctx))) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing MDCT\n");
        twin_decode_close(avctx);
        return ret;
    }
    init_bitstream_params(tctx);

    memset_float(tctx->bark_hist[0][0], 0.1, FF_ARRAY_ELEMS(tctx->bark_hist));

    avcodec_get_frame_defaults(&tctx->frame);
    avctx->coded_frame = &tctx->frame;

    return 0;
}
AVCodec ff_twinvq_decoder = {
    .name           = "twinvq",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_TWINVQ,
    .priv_data_size = sizeof(TwinContext),
    .init           = twin_decode_init,
    .close          = twin_decode_close,
    .decode         = twin_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("VQF TwinVQ"),
    .sample_fmts    = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
                                                      AV_SAMPLE_FMT_NONE },
};