/*
 * G.723.1 compatible decoder
 * Copyright (c) 2006 Benjamin Larsson
 * Copyright (c) 2010 Mohamed Naufal Basheer
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * G.723.1 compatible decoder
 */

#include "libavutil/channel_layout.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"

#define BITSTREAM_READER_LE
#include "acelp_vectors.h"
#include "avcodec.h"
#include "bitstream.h"
#include "celp_filters.h"
#include "internal.h"
#include "g723_1.h"

#define CNG_RANDOM_SEED 12345

static av_cold int g723_1_decode_init(AVCodecContext *avctx)
{
    G723_1_Context *p = avctx->priv_data;

    avctx->channel_layout = AV_CH_LAYOUT_MONO;
    avctx->sample_fmt     = AV_SAMPLE_FMT_S16;
    avctx->channels       = 1;
    avctx->sample_rate    = 8000;
    p->pf_gain            = 1 << 12;

    memcpy(p->prev_lsp, dc_lsp, LPC_ORDER * sizeof(*p->prev_lsp));
    memcpy(p->sid_lsp,  dc_lsp, LPC_ORDER * sizeof(*p->sid_lsp));

    p->cng_random_seed = CNG_RANDOM_SEED;
    p->past_frame_type = SID_FRAME;

    return 0;
}

/**
 * Unpack the frame into parameters.
 *
 * @param p        the context
 * @param buf      pointer to the input buffer
 * @param buf_size size of the input buffer
 */
static int unpack_bitstream(G723_1_Context *p, const uint8_t *buf,
                            int buf_size)
{
    BitstreamContext bc;
    int ad_cb_len;
    int temp, info_bits, i;

    bitstream_init8(&bc, buf, buf_size);

    /* Extract frame type and rate info */
    info_bits = bitstream_read(&bc, 2);

    if (info_bits == 3) {
        p->cur_frame_type = UNTRANSMITTED_FRAME;
        return 0;
    }

    /* Extract 24 bit lsp indices, 8 bit for each band */
    p->lsp_index[2] = bitstream_read(&bc, 8);
    p->lsp_index[1] = bitstream_read(&bc, 8);
    p->lsp_index[0] = bitstream_read(&bc, 8);

    if (info_bits == 2) {
        p->cur_frame_type = SID_FRAME;
        p->subframe[0].amp_index = bitstream_read(&bc, 6);
        return 0;
    }

    /* Extract the info common to both rates */
    p->cur_rate       = info_bits ? RATE_5300 : RATE_6300;
    p->cur_frame_type = ACTIVE_FRAME;

    p->pitch_lag[0] = bitstream_read(&bc, 7);
    if (p->pitch_lag[0] > 123)       /* test if forbidden code */
        return -1;
    p->pitch_lag[0] += PITCH_MIN;
    p->subframe[1].ad_cb_lag = bitstream_read(&bc, 2);

    p->pitch_lag[1] = bitstream_read(&bc, 7);
    if (p->pitch_lag[1] > 123)
        return -1;
    p->pitch_lag[1] += PITCH_MIN;
    p->subframe[3].ad_cb_lag = bitstream_read(&bc, 2);

    p->subframe[0].ad_cb_lag = 1;
    p->subframe[2].ad_cb_lag = 1;

    for (i = 0; i < SUBFRAMES; i++) {
        /* Extract combined gain */
        temp = bitstream_read(&bc, 12);
        ad_cb_len = 170;
        p->subframe[i].dirac_train = 0;
        if (p->cur_rate == RATE_6300 && p->pitch_lag[i >> 1] < SUBFRAME_LEN - 2) {
            p->subframe[i].dirac_train = temp >> 11;
            temp &= 0x7FF;
            ad_cb_len = 85;
        }
        p->subframe[i].ad_cb_gain = FASTDIV(temp, GAIN_LEVELS);
        if (p->subframe[i].ad_cb_gain < ad_cb_len) {
            p->subframe[i].amp_index = temp - p->subframe[i].ad_cb_gain *
                                       GAIN_LEVELS;
        } else {
            return -1;
        }
    }

    p->subframe[0].grid_index = bitstream_read(&bc, 1);
    p->subframe[1].grid_index = bitstream_read(&bc, 1);
    p->subframe[2].grid_index = bitstream_read(&bc, 1);
    p->subframe[3].grid_index = bitstream_read(&bc, 1);

    if (p->cur_rate == RATE_6300) {
        bitstream_skip(&bc, 1);  /* skip reserved bit */

        /* Compute pulse_pos index using the 13-bit combined position index */
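        /* The combined field is a mixed-radix number: successive divisions by
         * 810, 90 and 9 yield the position MSBs of subframes 0-2, and the
         * final remainder those of subframe 3. */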
        temp = bitstream_read(&bc, 13);
        p->subframe[0].pulse_pos = temp / 810;

        temp -= p->subframe[0].pulse_pos * 810;
        p->subframe[1].pulse_pos = FASTDIV(temp, 90);

        temp -= p->subframe[1].pulse_pos * 90;
        p->subframe[2].pulse_pos = FASTDIV(temp, 9);
        p->subframe[3].pulse_pos = temp - p->subframe[2].pulse_pos * 9;

        p->subframe[0].pulse_pos = (p->subframe[0].pulse_pos << 16) +
                                   bitstream_read(&bc, 16);
        p->subframe[1].pulse_pos = (p->subframe[1].pulse_pos << 14) +
                                   bitstream_read(&bc, 14);
        p->subframe[2].pulse_pos = (p->subframe[2].pulse_pos << 16) +
                                   bitstream_read(&bc, 16);
        p->subframe[3].pulse_pos = (p->subframe[3].pulse_pos << 14) +
                                   bitstream_read(&bc, 14);

        p->subframe[0].pulse_sign = bitstream_read(&bc, 6);
        p->subframe[1].pulse_sign = bitstream_read(&bc, 5);
        p->subframe[2].pulse_sign = bitstream_read(&bc, 6);
        p->subframe[3].pulse_sign = bitstream_read(&bc, 5);
    } else { /* 5300 bps */
        p->subframe[0].pulse_pos  = bitstream_read(&bc, 12);
        p->subframe[1].pulse_pos  = bitstream_read(&bc, 12);
        p->subframe[2].pulse_pos  = bitstream_read(&bc, 12);
        p->subframe[3].pulse_pos  = bitstream_read(&bc, 12);

        p->subframe[0].pulse_sign = bitstream_read(&bc, 4);
        p->subframe[1].pulse_sign = bitstream_read(&bc, 4);
        p->subframe[2].pulse_sign = bitstream_read(&bc, 4);
        p->subframe[3].pulse_sign = bitstream_read(&bc, 4);
    }

    return 0;
}

/**
 * Bitexact implementation of sqrt(val/2).
 */
static int16_t square_root(int val)
{
    int16_t res = 0;
    int16_t exp = 0x4000;
    int i;

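    /* Successive approximation from the MSB down: a candidate bit is kept
     * only while 2 * (res + exp)^2 still fits below val. */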
    for (i = 0; i < 14; i++) {
        int res_exp = res + exp;
        if (val >= res_exp * res_exp << 1)
            res += exp;
        exp >>= 1;
    }
    return res;
}

/**
 * Bitexact implementation of 2ab scaled by 1/2^16.
 *
 * @param a 32 bit multiplicand
 * @param b 16 bit multiplier
 */
#define MULL2(a, b) \
        ((((a) >> 16) * (b) << 1) + (((a) & 0xffff) * (b) >> 15))

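/* MULL2 splits the 32-bit operand into its high and low 16-bit halves so each
 * partial product stays within 32 bits; the two terms together equal
 * (2 * a * b) >> 16. */
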
/**
 * Generate fixed codebook excitation vector.
 *
 * @param vector    decoded excitation vector
 * @param subfrm    current subframe
 * @param cur_rate  current bitrate
 * @param pitch_lag closed loop pitch lag
 * @param index     current subframe index
 */
static void gen_fcb_excitation(int16_t *vector, G723_1_Subframe *subfrm,
                               enum Rate cur_rate, int pitch_lag, int index)
{
    int temp, i, j;

    memset(vector, 0, SUBFRAME_LEN * sizeof(*vector));

    if (cur_rate == RATE_6300) {
        if (subfrm->pulse_pos >= max_pos[index])
            return;

        /* Decode amplitudes and positions */
        j    = PULSE_MAX - pulses[index];
        temp = subfrm->pulse_pos;
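        /* Walk the combinatorial table: every grid slot whose entry makes the
         * running index go negative carries a pulse, with its sign taken from
         * the corresponding bit of pulse_sign. */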
        for (i = 0; i < SUBFRAME_LEN / GRID_SIZE; i++) {
            temp -= combinatorial_table[j][i];
            if (temp >= 0)
                continue;
            temp += combinatorial_table[j++][i];
            if (subfrm->pulse_sign & (1 << (PULSE_MAX - j))) {
                vector[subfrm->grid_index + GRID_SIZE * i] =
                    -fixed_cb_gain[subfrm->amp_index];
            } else {
                vector[subfrm->grid_index + GRID_SIZE * i] =
                    fixed_cb_gain[subfrm->amp_index];
            }
            if (j == PULSE_MAX)
                break;
        }
        if (subfrm->dirac_train == 1)
            ff_g723_1_gen_dirac_train(vector, pitch_lag);
    } else { /* 5300 bps */
        int cb_gain  = fixed_cb_gain[subfrm->amp_index];
        int cb_shift = subfrm->grid_index;
        int cb_sign  = subfrm->pulse_sign;
        int cb_pos   = subfrm->pulse_pos;
        int offset, beta, lag;

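        /* Place four pulses on an interleaved grid: each 3-bit field of
         * cb_pos selects the slot within its track, each bit of cb_sign the
         * polarity, and cb_shift picks the even or odd grid. */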
        for (i = 0; i < 8; i += 2) {
            offset         = ((cb_pos & 7) << 3) + cb_shift + i;
            vector[offset] = (cb_sign & 1) ? cb_gain : -cb_gain;
            cb_pos  >>= 3;
            cb_sign >>= 1;
        }

        /* Enhance harmonic components */
        lag  = pitch_contrib[subfrm->ad_cb_gain << 1] + pitch_lag +
               subfrm->ad_cb_lag - 1;
        beta = pitch_contrib[(subfrm->ad_cb_gain << 1) + 1];

        if (lag < SUBFRAME_LEN - 2) {
            for (i = lag; i < SUBFRAME_LEN; i++)
                vector[i] += beta * vector[i - lag] >> 15;
        }
    }
}

/**
 * Estimate maximum auto-correlation around pitch lag.
 *
 * @param buf       buffer with offset applied
 * @param offset    offset of the excitation vector
 * @param ccr_max   pointer to the maximum auto-correlation
 * @param pitch_lag decoded pitch lag
 * @param length    length of autocorrelation
 * @param dir       forward lag(1) / backward lag(-1)
 */
static int autocorr_max(const int16_t *buf, int offset, int *ccr_max,
                        int pitch_lag, int length, int dir)
{
    int limit, ccr, lag = 0;
    int i;

    pitch_lag = FFMIN(PITCH_MAX - 3, pitch_lag);
    if (dir > 0)
        limit = FFMIN(FRAME_LEN + PITCH_MAX - offset - length, pitch_lag + 3);
    else
        limit = pitch_lag + 3;

    for (i = pitch_lag - 3; i <= limit; i++) {
        ccr = ff_g723_1_dot_product(buf, buf + dir * i, length);

        if (ccr > *ccr_max) {
            *ccr_max = ccr;
            lag = i;
        }
    }
    return lag;
}

/**
 * Calculate pitch postfilter optimal and scaling gains.
 *
 * @param lag      pitch postfilter forward/backward lag
 * @param ppf      pitch postfilter parameters
 * @param cur_rate current bitrate
 * @param tgt_eng  target energy
 * @param ccr      cross-correlation
 * @param res_eng  residual energy
 */
static void comp_ppf_gains(int lag, PPFParam *ppf, enum Rate cur_rate,
                           int tgt_eng, int ccr, int res_eng)
{
    int pf_residual; /* square of postfiltered residual */
    int temp1, temp2;

    ppf->index = lag;

    temp1 = tgt_eng * res_eng >> 1;
    temp2 = ccr * ccr << 1;

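    /* Enable the pitch tap only when the normalized correlation
     * ccr / sqrt(tgt_eng * res_eng) exceeds 1/2, i.e. 2*ccr^2 > tgt*res/2. */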
    if (temp2 > temp1) {
        if (ccr >= res_eng) {
            ppf->opt_gain = ppf_gain_weight[cur_rate];
        } else {
            ppf->opt_gain = (ccr << 15) / res_eng *
                            ppf_gain_weight[cur_rate] >> 15;
        }

        /* pf_res^2 = tgt_eng + 2*ccr*gain + res_eng*gain^2 */
        temp1       = (tgt_eng << 15) + (ccr * ppf->opt_gain << 1);
        temp2       = (ppf->opt_gain * ppf->opt_gain >> 15) * res_eng;
        pf_residual = av_sat_add32(temp1, temp2 + (1 << 15)) >> 16;

        if (tgt_eng >= pf_residual << 1) {
            temp1 = 0x7fff;
        } else {
            temp1 = (tgt_eng << 14) / pf_residual;
        }

        /* scaling_gain = sqrt(tgt_eng/pf_res^2) */
        ppf->sc_gain = square_root(temp1 << 16);
    } else {
        ppf->opt_gain = 0;
        ppf->sc_gain  = 0x7fff;
    }

    ppf->opt_gain = av_clip_int16(ppf->opt_gain * ppf->sc_gain >> 15);
}

/**
 * Calculate pitch postfilter parameters.
 *
 * @param p         the context
 * @param offset    offset of the excitation vector
 * @param pitch_lag decoded pitch lag
 * @param ppf       pitch postfilter parameters
 * @param cur_rate  current bitrate
 */
static void comp_ppf_coeff(G723_1_Context *p, int offset, int pitch_lag,
                           PPFParam *ppf, enum Rate cur_rate)
{
    int16_t scale;
    int i;
    int temp1, temp2;

    /*
     * 0 - target energy
     * 1 - forward cross-correlation
     * 2 - forward residual energy
     * 3 - backward cross-correlation
     * 4 - backward residual energy
     */
    int energy[5] = {0, 0, 0, 0, 0};
    int16_t *buf  = p->audio + LPC_ORDER + offset;
    int fwd_lag   = autocorr_max(buf, offset, &energy[1], pitch_lag,
                                 SUBFRAME_LEN, 1);
    int back_lag  = autocorr_max(buf, offset, &energy[3], pitch_lag,
                                 SUBFRAME_LEN, -1);

    ppf->index    = 0;
    ppf->opt_gain = 0;
    ppf->sc_gain  = 0x7fff;

    /* Case 0, Section 3.6 */
    if (!back_lag && !fwd_lag)
        return;

    /* Compute target energy */
    energy[0] = ff_g723_1_dot_product(buf, buf, SUBFRAME_LEN);

    /* Compute forward residual energy */
    if (fwd_lag)
        energy[2] = ff_g723_1_dot_product(buf + fwd_lag, buf + fwd_lag,
                                          SUBFRAME_LEN);

    /* Compute backward residual energy */
    if (back_lag)
        energy[4] = ff_g723_1_dot_product(buf - back_lag, buf - back_lag,
                                          SUBFRAME_LEN);

    /* Normalize and shorten */
    temp1 = 0;
    for (i = 0; i < 5; i++)
        temp1 = FFMAX(energy[i], temp1);

    scale = ff_g723_1_normalize_bits(temp1, 31);
    for (i = 0; i < 5; i++)
        energy[i] = (energy[i] << scale) >> 16;

    if (fwd_lag && !back_lag) {  /* Case 1 */
        comp_ppf_gains(fwd_lag, ppf, cur_rate, energy[0], energy[1],
                       energy[2]);
    } else if (!fwd_lag) {       /* Case 2 */
        comp_ppf_gains(-back_lag, ppf, cur_rate, energy[0], energy[3],
                       energy[4]);
    } else {                     /* Case 3 */
        /*
         * Select the largest of energy[1]^2/energy[2]
         * and energy[3]^2/energy[4]
         */
        temp1 = energy[4] * ((energy[1] * energy[1] + (1 << 14)) >> 15);
        temp2 = energy[2] * ((energy[3] * energy[3] + (1 << 14)) >> 15);
        if (temp1 >= temp2) {
            comp_ppf_gains(fwd_lag, ppf, cur_rate, energy[0], energy[1],
                           energy[2]);
        } else {
            comp_ppf_gains(-back_lag, ppf, cur_rate, energy[0], energy[3],
                           energy[4]);
        }
    }
}

/**
 * Classify frames as voiced/unvoiced.
 *
 * @param p         the context
 * @param pitch_lag decoded pitch_lag
 * @param exc_eng   excitation energy estimation
 * @param scale     scaling factor of exc_eng
 *
 * @return residual interpolation index if voiced, 0 otherwise
 */
static int comp_interp_index(G723_1_Context *p, int pitch_lag,
                             int *exc_eng, int *scale)
{
    int offset = PITCH_MAX + 2 * SUBFRAME_LEN;
    int16_t *buf = p->audio + LPC_ORDER;
    int index, ccr, tgt_eng, best_eng, temp;

    *scale = ff_g723_1_scale_vector(buf, p->excitation, FRAME_LEN + PITCH_MAX);
    buf   += offset;

    /* Compute maximum backward cross-correlation */
    ccr   = 0;
    index = autocorr_max(buf, offset, &ccr, pitch_lag, SUBFRAME_LEN * 2, -1);
    ccr   = av_sat_add32(ccr, 1 << 15) >> 16;

    /* Compute target energy */
    tgt_eng  = ff_g723_1_dot_product(buf, buf, SUBFRAME_LEN * 2);
    *exc_eng = av_sat_add32(tgt_eng, 1 << 15) >> 16;

    if (ccr <= 0)
        return 0;

    /* Compute best energy */
    best_eng = ff_g723_1_dot_product(buf - index, buf - index,
                                     SUBFRAME_LEN * 2);
    best_eng = av_sat_add32(best_eng, 1 << 15) >> 16;

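    /* Declare the frame voiced when ccr^2 exceeds best_eng * exc_eng / 8,
     * i.e. the correlation with the past excitation is strong enough. */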
    temp = best_eng * *exc_eng >> 3;

    if (temp < ccr * ccr)
        return index;
    else
        return 0;
}

/**
 * Perform residual interpolation based on frame classification.
 *
 * @param buf   decoded excitation vector
 * @param out   output vector
 * @param lag   decoded pitch lag
 * @param gain  interpolated gain
 * @param rseed seed for random number generator
 */
static void residual_interp(int16_t *buf, int16_t *out, int lag,
                            int gain, int *rseed)
{
    int i;

    if (lag) { /* Voiced */
        int16_t *vector_ptr = buf + PITCH_MAX;
        /* Attenuate */
        for (i = 0; i < lag; i++)
            out[i] = vector_ptr[i - lag] * 3 >> 2;
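        /* Periodic extension: replicate the attenuated last pitch cycle over
         * the rest of the frame. */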
        av_memcpy_backptr((uint8_t*)(out + lag), lag * sizeof(*out),
                          (FRAME_LEN - lag) * sizeof(*out));
    } else {   /* Unvoiced */
        for (i = 0; i < FRAME_LEN; i++) {
            *rseed = *rseed * 521 + 259;
            out[i] = gain * *rseed >> 15;
        }
        memset(buf, 0, (FRAME_LEN + PITCH_MAX) * sizeof(*buf));
    }
}

/**
 * Perform IIR filtering.
 *
 * @param fir_coef FIR coefficients
 * @param iir_coef IIR coefficients
 * @param src      source vector
 * @param dest     destination vector
 */
static void iir_filter(int16_t *fir_coef, int16_t *iir_coef,
                       int16_t *src, int *dest)
{
    int m, n;

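    /* Pole-zero section of the formant postfilter: the FIR taps act on past
     * inputs, the IIR taps on past outputs kept at 16.16 precision. */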
    for (m = 0; m < SUBFRAME_LEN; m++) {
        int64_t filter = 0;

        for (n = 1; n <= LPC_ORDER; n++) {
            filter -= fir_coef[n - 1] * src[m - n] -
                      iir_coef[n - 1] * (dest[m - n] >> 16);
        }

        dest[m] = av_clipl_int32((src[m] << 16) + (filter << 3) + (1 << 15));
    }
}

/**
 * Adjust gain of postfiltered signal.
 *
 * @param p      the context
 * @param buf    postfiltered output vector
 * @param energy input energy coefficient
 */
static void gain_scale(G723_1_Context *p, int16_t * buf, int energy)
{
    int num, denom, gain, bits1, bits2;
    int i;

    num   = energy;
    denom = 0;
    for (i = 0; i < SUBFRAME_LEN; i++) {
        int temp = buf[i] >> 2;
        temp *= temp;
        denom = av_sat_dadd32(denom, temp);
    }

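    /* The scale factor approximates sqrt(input energy / postfiltered energy);
     * below it is smoothed into pf_gain and applied sample by sample. */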
    if (num && denom) {
        bits1 = ff_g723_1_normalize_bits(num,   31);
        bits2 = ff_g723_1_normalize_bits(denom, 31);
        num   = num << bits1 >> 1;
        denom <<= bits2;

        bits2 = 5 + bits1 - bits2;
        bits2 = FFMAX(0, bits2);

        gain = (num >> 1) / (denom >> 16);
        gain = square_root(gain << 16 >> bits2);
    } else {
        gain = 1 << 12;
    }

    for (i = 0; i < SUBFRAME_LEN; i++) {
        p->pf_gain = (15 * p->pf_gain + gain + (1 << 3)) >> 4;
        buf[i]     = av_clip_int16((buf[i] * (p->pf_gain + (p->pf_gain >> 4)) +
                                   (1 << 10)) >> 11);
    }
}

/**
 * Perform formant filtering.
 *
 * @param p   the context
 * @param lpc quantized lpc coefficients
 * @param buf input buffer
 * @param dst output buffer
 */
static void formant_postfilter(G723_1_Context *p, int16_t *lpc,
                               int16_t *buf, int16_t *dst)
{
    int16_t filter_coef[2][LPC_ORDER];
    int filter_signal[LPC_ORDER + FRAME_LEN], *signal_ptr;
    int i, j, k;

    memcpy(buf, p->fir_mem, LPC_ORDER * sizeof(*buf));
    memcpy(filter_signal, p->iir_mem, LPC_ORDER * sizeof(*filter_signal));

    for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
        for (k = 0; k < LPC_ORDER; k++) {
            filter_coef[0][k] = (-lpc[k] * postfilter_tbl[0][k] +
                                 (1 << 14)) >> 15;
            filter_coef[1][k] = (-lpc[k] * postfilter_tbl[1][k] +
                                 (1 << 14)) >> 15;
        }
        iir_filter(filter_coef[0], filter_coef[1], buf + i, filter_signal + i);
        lpc += LPC_ORDER;
    }

    memcpy(p->fir_mem, buf + FRAME_LEN, LPC_ORDER * sizeof(*p->fir_mem));
    memcpy(p->iir_mem, filter_signal + FRAME_LEN,
           LPC_ORDER * sizeof(*p->iir_mem));

    buf        += LPC_ORDER;
    signal_ptr  = filter_signal + LPC_ORDER;

    for (i = 0; i < SUBFRAMES; i++) {
        int temp;
        int auto_corr[2];
        int scale, energy;

        /* Normalize */
        scale = ff_g723_1_scale_vector(dst, buf, SUBFRAME_LEN);

        /* Compute auto correlation coefficients */
        auto_corr[0] = ff_g723_1_dot_product(dst, dst + 1, SUBFRAME_LEN - 1);
        auto_corr[1] = ff_g723_1_dot_product(dst, dst, SUBFRAME_LEN);

        /* Compute reflection coefficient */
        temp = auto_corr[1] >> 16;
        if (temp) {
            temp = (auto_corr[0] >> 2) / temp;
        }
        p->reflection_coef = (3 * p->reflection_coef + temp + 2) >> 2;
        temp = -p->reflection_coef >> 1 & ~3;

        /* Compensation filter */
        for (j = 0; j < SUBFRAME_LEN; j++) {
            dst[j] = av_sat_dadd32(signal_ptr[j],
                                   (signal_ptr[j - 1] >> 16) * temp) >> 16;
        }

        /* Compute normalized signal energy */
        temp = 2 * scale + 4;
        if (temp < 0) {
            energy = av_clipl_int32((int64_t)auto_corr[1] << -temp);
        } else
            energy = auto_corr[1] >> temp;

        gain_scale(p, dst, energy);

        buf        += SUBFRAME_LEN;
        signal_ptr += SUBFRAME_LEN;
        dst        += SUBFRAME_LEN;
    }
}

static int sid_gain_to_lsp_index(int gain)
{
    if (gain < 0x10)
        return gain << 6;
    else if (gain < 0x20)
        return gain - 8 << 7;
    else
        return gain - 20 << 8;
}

static inline int cng_rand(int *state, int base)
{
    *state = (*state * 521 + 259) & 0xFFFF;
    return (*state & 0x7FFF) * base >> 15;
}

static int estimate_sid_gain(G723_1_Context *p)
{
    int i, shift, seg, seg2, t, val, val_add, x, y;

    shift = 16 - p->cur_gain * 2;
    if (shift > 0)
        t = p->sid_gain << shift;
    else
        t = p->sid_gain >> -shift;
    x = t * cng_filt[0] >> 16;

    if (x >= cng_bseg[2])
        return 0x3F;

    if (x >= cng_bseg[1]) {
        shift = 4;
        seg   = 3;
    } else {
        shift = 3;
        seg   = (x >= cng_bseg[0]);
    }
    seg2 = FFMIN(seg, 3);

    val     = 1 << shift;
    val_add = val >> 1;

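    /* Successive approximation: refine val bit by bit so that the squared
     * segment value tracks x as closely as possible. */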
    for (i = 0; i < shift; i++) {
        t = seg * 32 + (val << seg2);
        t *= t;
        if (x >= t)
            val += val_add;
        else
            val -= val_add;
        val_add >>= 1;
    }

    t = seg * 32 + (val << seg2);
    y = t * t - x;
    if (y <= 0) {
        t = seg * 32 + (val + 1 << seg2);
        t = t * t - x;
        val = (seg2 - 1 << 4) + val;
        if (t >= y)
            val++;
    } else {
        t = seg * 32 + (val - 1 << seg2);
        t = t * t - x;
        val = (seg2 - 1 << 4) + val;
        if (t >= y)
            val--;
    }

    return val;
}

static void generate_noise(G723_1_Context *p)
{
    int i, j, idx, t;
    int off[SUBFRAMES];
    int signs[SUBFRAMES / 2 * 11], pos[SUBFRAMES / 2 * 11];
    int tmp[SUBFRAME_LEN * 2];
    int16_t *vector_ptr;
    int64_t sum;
    int b0, c, delta, x, shift;

    p->pitch_lag[0] = cng_rand(&p->cng_random_seed, 21) + 123;
    p->pitch_lag[1] = cng_rand(&p->cng_random_seed, 19) + 123;

    for (i = 0; i < SUBFRAMES; i++) {
        p->subframe[i].ad_cb_gain = cng_rand(&p->cng_random_seed, 50) + 1;
        p->subframe[i].ad_cb_lag  = cng_adaptive_cb_lag[i];
    }

    for (i = 0; i < SUBFRAMES / 2; i++) {
        t = cng_rand(&p->cng_random_seed, 1 << 13);
        off[i * 2]     = t & 1;
        off[i * 2 + 1] = ((t >> 1) & 1) + SUBFRAME_LEN;
        t >>= 2;

        for (j = 0; j < 11; j++) {
            signs[i * 11 + j] = (t & 1) * 2 - 1 << 14;
            t >>= 1;
        }
    }

    idx = 0;
    for (i = 0; i < SUBFRAMES; i++) {
        for (j = 0; j < SUBFRAME_LEN / 2; j++)
            tmp[j] = j;
        t = SUBFRAME_LEN / 2;
        for (j = 0; j < pulses[i]; j++, idx++) {
            int idx2 = cng_rand(&p->cng_random_seed, t);

            pos[idx]  = tmp[idx2] * 2 + off[i];
            tmp[idx2] = tmp[--t];
        }
    }

    vector_ptr = p->audio + LPC_ORDER;
    memcpy(vector_ptr, p->prev_excitation,
           PITCH_MAX * sizeof(*p->excitation));
    for (i = 0; i < SUBFRAMES; i += 2) {
        ff_g723_1_gen_acb_excitation(vector_ptr, vector_ptr,
                                     p->pitch_lag[i >> 1], &p->subframe[i],
                                     p->cur_rate);
        ff_g723_1_gen_acb_excitation(vector_ptr + SUBFRAME_LEN,
                                     vector_ptr + SUBFRAME_LEN,
                                     p->pitch_lag[i >> 1], &p->subframe[i + 1],
                                     p->cur_rate);

        t = 0;
        for (j = 0; j < SUBFRAME_LEN * 2; j++)
            t |= FFABS(vector_ptr[j]);
        t = FFMIN(t, 0x7FFF);
        if (!t) {
            shift = 0;
        } else {
            shift = -10 + av_log2(t);
            if (shift < -2)
                shift = -2;
        }
        sum = 0;
        if (shift < 0) {
            for (j = 0; j < SUBFRAME_LEN * 2; j++) {
                t = vector_ptr[j] << -shift;
                sum += t * t;
                tmp[j] = t;
            }
        } else {
            for (j = 0; j < SUBFRAME_LEN * 2; j++) {
                t = vector_ptr[j] >> shift;
                sum += t * t;
                tmp[j] = t;
            }
        }

        b0 = 0;
        for (j = 0; j < 11; j++)
            b0 += tmp[pos[(i / 2) * 11 + j]] * signs[(i / 2) * 11 + j];
        b0 = b0 * 2 * 2979LL + (1 << 29) >> 30; // approximated division by 11

        c = p->cur_gain * (p->cur_gain * SUBFRAME_LEN >> 5);
        if (shift * 2 + 3 >= 0)
            c >>= shift * 2 + 3;
        else
            c <<= -(shift * 2 + 3);
        c = (av_clipl_int32(sum << 1) - c) * 2979LL >> 15;

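        /* Solve the quadratic in the pulse amplitude so that the energy of
         * the combined excitation matches the target comfort-noise gain. */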
        delta = b0 * b0 * 2 - c;
        if (delta <= 0) {
            x = -b0;
        } else {
            delta = square_root(delta);
            x     = delta - b0;
            t     = delta + b0;
            if (FFABS(t) < FFABS(x))
                x = -t;
        }
        shift++;
        if (shift < 0)
            x >>= -shift;
        else
            x <<= shift;
        x = av_clip(x, -10000, 10000);

        for (j = 0; j < 11; j++) {
            idx = (i / 2) * 11 + j;
            vector_ptr[pos[idx]] = av_clip_int16(vector_ptr[pos[idx]] +
                                                 (x * signs[idx] >> 15));
        }

        /* copy decoded data to serve as a history for the next decoded subframes */
        memcpy(vector_ptr + PITCH_MAX, vector_ptr,
               sizeof(*vector_ptr) * SUBFRAME_LEN * 2);
        vector_ptr += SUBFRAME_LEN * 2;
    }

    /* Save the excitation for the next frame */
    memcpy(p->prev_excitation, p->audio + LPC_ORDER + FRAME_LEN,
           PITCH_MAX * sizeof(*p->excitation));
}

static int g723_1_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
{
    G723_1_Context *p  = avctx->priv_data;
    AVFrame *frame     = data;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    int dec_mode       = buf[0] & 3;

    PPFParam ppf[SUBFRAMES];
    int16_t cur_lsp[LPC_ORDER];
    int16_t lpc[SUBFRAMES * LPC_ORDER];
    int16_t acb_vector[SUBFRAME_LEN];
    int16_t *out;
    int bad_frame = 0, i, j, ret;
    int16_t *audio = p->audio;

    if (buf_size < frame_size[dec_mode]) {
        if (buf_size)
            av_log(avctx, AV_LOG_WARNING,
                   "Expected %d bytes, got %d - skipping packet\n",
                   frame_size[dec_mode], buf_size);
        *got_frame_ptr = 0;
        return buf_size;
    }

    if (unpack_bitstream(p, buf, buf_size) < 0) {
        bad_frame = 1;
        if (p->past_frame_type == ACTIVE_FRAME)
            p->cur_frame_type = ACTIVE_FRAME;
        else
            p->cur_frame_type = UNTRANSMITTED_FRAME;
    }

    frame->nb_samples = FRAME_LEN;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    out = (int16_t *)frame->data[0];

    if (p->cur_frame_type == ACTIVE_FRAME) {
        if (!bad_frame)
            p->erased_frames = 0;
        else if (p->erased_frames != 3)
            p->erased_frames++;

        ff_g723_1_inverse_quant(cur_lsp, p->prev_lsp, p->lsp_index, bad_frame);
        ff_g723_1_lsp_interpolate(lpc, cur_lsp, p->prev_lsp);

        /* Save the lsp_vector for the next frame */
        memcpy(p->prev_lsp, cur_lsp, LPC_ORDER * sizeof(*p->prev_lsp));

        /* Generate the excitation for the frame */
        memcpy(p->excitation, p->prev_excitation,
               PITCH_MAX * sizeof(*p->excitation));
        if (!p->erased_frames) {
            int16_t *vector_ptr = p->excitation + PITCH_MAX;

            /* Update interpolation gain memory */
            p->interp_gain = fixed_cb_gain[(p->subframe[2].amp_index +
                                            p->subframe[3].amp_index) >> 1];
            for (i = 0; i < SUBFRAMES; i++) {
                gen_fcb_excitation(vector_ptr, &p->subframe[i], p->cur_rate,
                                   p->pitch_lag[i >> 1], i);
                ff_g723_1_gen_acb_excitation(acb_vector,
                                             &p->excitation[SUBFRAME_LEN * i],
                                             p->pitch_lag[i >> 1],
                                             &p->subframe[i], p->cur_rate);
                /* Get the total excitation */
                for (j = 0; j < SUBFRAME_LEN; j++) {
                    int v = av_clip_int16(vector_ptr[j] << 1);
                    vector_ptr[j] = av_clip_int16(v + acb_vector[j]);
                }
                vector_ptr += SUBFRAME_LEN;
            }

            vector_ptr = p->excitation + PITCH_MAX;

            p->interp_index = comp_interp_index(p, p->pitch_lag[1],
                                                &p->sid_gain, &p->cur_gain);

            /* Perform pitch postfiltering */
            if (p->postfilter) {
                i = PITCH_MAX;
                for (j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
                    comp_ppf_coeff(p, i, p->pitch_lag[j >> 1],
                                   ppf + j, p->cur_rate);

                for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
                    ff_acelp_weighted_vector_sum(p->audio + LPC_ORDER + i,
                                                 vector_ptr + i,
                                                 vector_ptr + i + ppf[j].index,
                                                 ppf[j].sc_gain,
                                                 ppf[j].opt_gain,
                                                 1 << 14, 15, SUBFRAME_LEN);
            } else {
                audio = vector_ptr - LPC_ORDER;
            }

            /* Save the excitation for the next frame */
            memcpy(p->prev_excitation, p->excitation + FRAME_LEN,
                   PITCH_MAX * sizeof(*p->excitation));
        } else {
            p->interp_gain = (p->interp_gain * 3 + 2) >> 2;
            if (p->erased_frames == 3) {
                /* Mute output */
                memset(p->excitation, 0,
                       (FRAME_LEN + PITCH_MAX) * sizeof(*p->excitation));
                memset(p->prev_excitation, 0,
                       PITCH_MAX * sizeof(*p->excitation));
                memset(frame->data[0], 0,
                       (FRAME_LEN + LPC_ORDER) * sizeof(int16_t));
            } else {
                int16_t *buf = p->audio + LPC_ORDER;

                /* Regenerate frame */
                residual_interp(p->excitation, buf, p->interp_index,
                                p->interp_gain, &p->random_seed);

                /* Save the excitation for the next frame */
                memcpy(p->prev_excitation, buf + (FRAME_LEN - PITCH_MAX),
                       PITCH_MAX * sizeof(*p->excitation));
            }
        }
        p->cng_random_seed = CNG_RANDOM_SEED;
    } else {
        if (p->cur_frame_type == SID_FRAME) {
            p->sid_gain = sid_gain_to_lsp_index(p->subframe[0].amp_index);
            ff_g723_1_inverse_quant(p->sid_lsp, p->prev_lsp, p->lsp_index, 0);
        } else if (p->past_frame_type == ACTIVE_FRAME) {
            p->sid_gain = estimate_sid_gain(p);
        }

        if (p->past_frame_type == ACTIVE_FRAME)
            p->cur_gain = p->sid_gain;
        else
            p->cur_gain = (p->cur_gain * 7 + p->sid_gain) >> 3;
        generate_noise(p);
        ff_g723_1_lsp_interpolate(lpc, p->sid_lsp, p->prev_lsp);
        /* Save the lsp_vector for the next frame */
        memcpy(p->prev_lsp, p->sid_lsp, LPC_ORDER * sizeof(*p->prev_lsp));
    }

    p->past_frame_type = p->cur_frame_type;

    memcpy(p->audio, p->synth_mem, LPC_ORDER * sizeof(*p->audio));
    for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
        ff_celp_lp_synthesis_filter(p->audio + i, &lpc[j * LPC_ORDER],
                                    audio + i, SUBFRAME_LEN, LPC_ORDER,
                                    0, 1, 1 << 12);
    memcpy(p->synth_mem, p->audio + FRAME_LEN, LPC_ORDER * sizeof(*p->audio));

    if (p->postfilter) {
        formant_postfilter(p, lpc, p->audio, out);
    } else { // if output is not postfiltered it should be scaled by 2
        for (i = 0; i < FRAME_LEN; i++)
            out[i] = av_clip_int16(p->audio[LPC_ORDER + i] << 1);
    }

    *got_frame_ptr = 1;

    return frame_size[dec_mode];
}

#define OFFSET(x) offsetof(G723_1_Context, x)
#define AD        AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM

static const AVOption options[] = {
    { "postfilter", "postfilter on/off", OFFSET(postfilter), AV_OPT_TYPE_INT,
      { .i64 = 1 }, 0, 1, AD },
    { NULL }
};

static const AVClass g723_1dec_class = {
    .class_name = "G.723.1 decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_g723_1_decoder = {
    .name           = "g723_1",
    .long_name      = NULL_IF_CONFIG_SMALL("G.723.1"),
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_G723_1,
    .priv_data_size = sizeof(G723_1_Context),
    .init           = g723_1_decode_init,
    .decode         = g723_1_decode_frame,
    .capabilities   = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
    .priv_class     = &g723_1dec_class,
};