You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1028 lines
33KB

  1. /*
  2. * G.723.1 compatible decoder
  3. * Copyright (c) 2006 Benjamin Larsson
  4. * Copyright (c) 2010 Mohamed Naufal Basheer
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * G.723.1 compatible decoder
  25. */
  26. #define BITSTREAM_READER_LE
  27. #include "libavutil/channel_layout.h"
  28. #include "libavutil/mem.h"
  29. #include "libavutil/opt.h"
  30. #include "avcodec.h"
  31. #include "get_bits.h"
  32. #include "acelp_vectors.h"
  33. #include "celp_filters.h"
  34. #include "celp_math.h"
  35. #include "g723_1.h"
  36. #include "internal.h"
  37. #define CNG_RANDOM_SEED 12345
  38. static av_cold int g723_1_decode_init(AVCodecContext *avctx)
  39. {
  40. G723_1_Context *p = avctx->priv_data;
  41. avctx->channel_layout = AV_CH_LAYOUT_MONO;
  42. avctx->sample_fmt = AV_SAMPLE_FMT_S16;
  43. avctx->channels = 1;
  44. p->pf_gain = 1 << 12;
  45. memcpy(p->prev_lsp, dc_lsp, LPC_ORDER * sizeof(*p->prev_lsp));
  46. memcpy(p->sid_lsp, dc_lsp, LPC_ORDER * sizeof(*p->sid_lsp));
  47. p->cng_random_seed = CNG_RANDOM_SEED;
  48. p->past_frame_type = SID_FRAME;
  49. return 0;
  50. }
  51. /**
  52. * Unpack the frame into parameters.
  53. *
  54. * @param p the context
  55. * @param buf pointer to the input buffer
  56. * @param buf_size size of the input buffer
  57. */
  58. static int unpack_bitstream(G723_1_Context *p, const uint8_t *buf,
  59. int buf_size)
  60. {
  61. GetBitContext gb;
  62. int ad_cb_len;
  63. int temp, info_bits, i;
  64. init_get_bits(&gb, buf, buf_size * 8);
  65. /* Extract frame type and rate info */
  66. info_bits = get_bits(&gb, 2);
  67. if (info_bits == 3) {
  68. p->cur_frame_type = UNTRANSMITTED_FRAME;
  69. return 0;
  70. }
  71. /* Extract 24 bit lsp indices, 8 bit for each band */
  72. p->lsp_index[2] = get_bits(&gb, 8);
  73. p->lsp_index[1] = get_bits(&gb, 8);
  74. p->lsp_index[0] = get_bits(&gb, 8);
  75. if (info_bits == 2) {
  76. p->cur_frame_type = SID_FRAME;
  77. p->subframe[0].amp_index = get_bits(&gb, 6);
  78. return 0;
  79. }
  80. /* Extract the info common to both rates */
  81. p->cur_rate = info_bits ? RATE_5300 : RATE_6300;
  82. p->cur_frame_type = ACTIVE_FRAME;
  83. p->pitch_lag[0] = get_bits(&gb, 7);
  84. if (p->pitch_lag[0] > 123) /* test if forbidden code */
  85. return -1;
  86. p->pitch_lag[0] += PITCH_MIN;
  87. p->subframe[1].ad_cb_lag = get_bits(&gb, 2);
  88. p->pitch_lag[1] = get_bits(&gb, 7);
  89. if (p->pitch_lag[1] > 123)
  90. return -1;
  91. p->pitch_lag[1] += PITCH_MIN;
  92. p->subframe[3].ad_cb_lag = get_bits(&gb, 2);
  93. p->subframe[0].ad_cb_lag = 1;
  94. p->subframe[2].ad_cb_lag = 1;
  95. for (i = 0; i < SUBFRAMES; i++) {
  96. /* Extract combined gain */
  97. temp = get_bits(&gb, 12);
  98. ad_cb_len = 170;
  99. p->subframe[i].dirac_train = 0;
  100. if (p->cur_rate == RATE_6300 && p->pitch_lag[i >> 1] < SUBFRAME_LEN - 2) {
  101. p->subframe[i].dirac_train = temp >> 11;
  102. temp &= 0x7FF;
  103. ad_cb_len = 85;
  104. }
  105. p->subframe[i].ad_cb_gain = FASTDIV(temp, GAIN_LEVELS);
  106. if (p->subframe[i].ad_cb_gain < ad_cb_len) {
  107. p->subframe[i].amp_index = temp - p->subframe[i].ad_cb_gain *
  108. GAIN_LEVELS;
  109. } else {
  110. return -1;
  111. }
  112. }
  113. p->subframe[0].grid_index = get_bits1(&gb);
  114. p->subframe[1].grid_index = get_bits1(&gb);
  115. p->subframe[2].grid_index = get_bits1(&gb);
  116. p->subframe[3].grid_index = get_bits1(&gb);
  117. if (p->cur_rate == RATE_6300) {
  118. skip_bits1(&gb); /* skip reserved bit */
  119. /* Compute pulse_pos index using the 13-bit combined position index */
  120. temp = get_bits(&gb, 13);
  121. p->subframe[0].pulse_pos = temp / 810;
  122. temp -= p->subframe[0].pulse_pos * 810;
  123. p->subframe[1].pulse_pos = FASTDIV(temp, 90);
  124. temp -= p->subframe[1].pulse_pos * 90;
  125. p->subframe[2].pulse_pos = FASTDIV(temp, 9);
  126. p->subframe[3].pulse_pos = temp - p->subframe[2].pulse_pos * 9;
  127. p->subframe[0].pulse_pos = (p->subframe[0].pulse_pos << 16) +
  128. get_bits(&gb, 16);
  129. p->subframe[1].pulse_pos = (p->subframe[1].pulse_pos << 14) +
  130. get_bits(&gb, 14);
  131. p->subframe[2].pulse_pos = (p->subframe[2].pulse_pos << 16) +
  132. get_bits(&gb, 16);
  133. p->subframe[3].pulse_pos = (p->subframe[3].pulse_pos << 14) +
  134. get_bits(&gb, 14);
  135. p->subframe[0].pulse_sign = get_bits(&gb, 6);
  136. p->subframe[1].pulse_sign = get_bits(&gb, 5);
  137. p->subframe[2].pulse_sign = get_bits(&gb, 6);
  138. p->subframe[3].pulse_sign = get_bits(&gb, 5);
  139. } else { /* 5300 bps */
  140. p->subframe[0].pulse_pos = get_bits(&gb, 12);
  141. p->subframe[1].pulse_pos = get_bits(&gb, 12);
  142. p->subframe[2].pulse_pos = get_bits(&gb, 12);
  143. p->subframe[3].pulse_pos = get_bits(&gb, 12);
  144. p->subframe[0].pulse_sign = get_bits(&gb, 4);
  145. p->subframe[1].pulse_sign = get_bits(&gb, 4);
  146. p->subframe[2].pulse_sign = get_bits(&gb, 4);
  147. p->subframe[3].pulse_sign = get_bits(&gb, 4);
  148. }
  149. return 0;
  150. }
  151. /**
  152. * Bitexact implementation of sqrt(val/2).
  153. */
  154. static int16_t square_root(unsigned val)
  155. {
  156. av_assert2(!(val & 0x80000000));
  157. return (ff_sqrt(val << 1) >> 1) & (~1);
  158. }
/**
 * Generate fixed codebook excitation vector.
 *
 * @param vector    decoded excitation vector (SUBFRAME_LEN samples, output)
 * @param subfrm    current subframe
 * @param cur_rate  current bitrate
 * @param pitch_lag closed loop pitch lag
 * @param index     current subframe index
 */
static void gen_fcb_excitation(int16_t *vector, G723_1_Subframe *subfrm,
                               enum Rate cur_rate, int pitch_lag, int index)
{
    int temp, i, j;

    memset(vector, 0, SUBFRAME_LEN * sizeof(*vector));

    if (cur_rate == RATE_6300) {
        /* Out-of-range combined position index: leave the vector zeroed */
        if (subfrm->pulse_pos >= max_pos[index])
            return;

        /* Decode amplitudes and positions: walk the combinatorial table,
         * placing one signed pulse (all pulses share one magnitude) each
         * time the running index goes negative. */
        j = PULSE_MAX - pulses[index];
        temp = subfrm->pulse_pos;
        for (i = 0; i < SUBFRAME_LEN / GRID_SIZE; i++) {
            temp -= combinatorial_table[j][i];
            if (temp >= 0)
                continue;
            temp += combinatorial_table[j++][i];
            if (subfrm->pulse_sign & (1 << (PULSE_MAX - j))) {
                vector[subfrm->grid_index + GRID_SIZE * i] =
                    -fixed_cb_gain[subfrm->amp_index];
            } else {
                vector[subfrm->grid_index + GRID_SIZE * i] =
                    fixed_cb_gain[subfrm->amp_index];
            }
            if (j == PULSE_MAX)
                break;
        }
        /* Repeat the pulse pattern at the pitch period when signalled */
        if (subfrm->dirac_train == 1)
            ff_g723_1_gen_dirac_train(vector, pitch_lag);
    } else { /* 5300 bps */
        int cb_gain  = fixed_cb_gain[subfrm->amp_index];
        int cb_shift = subfrm->grid_index;
        int cb_sign  = subfrm->pulse_sign;
        int cb_pos   = subfrm->pulse_pos;
        int offset, beta, lag;

        /* Four signed pulses, each positioned by a 3-bit field on the
         * grid selected by cb_shift. */
        for (i = 0; i < 8; i += 2) {
            offset         = ((cb_pos & 7) << 3) + cb_shift + i;
            vector[offset] = (cb_sign & 1) ? cb_gain : -cb_gain;
            cb_pos  >>= 3;
            cb_sign >>= 1;
        }

        /* Enhance harmonic components */
        lag  = pitch_contrib[subfrm->ad_cb_gain << 1] + pitch_lag +
               subfrm->ad_cb_lag - 1;
        beta = pitch_contrib[(subfrm->ad_cb_gain << 1) + 1];

        if (lag < SUBFRAME_LEN - 2) {
            for (i = lag; i < SUBFRAME_LEN; i++)
                vector[i] += beta * vector[i - lag] >> 15;
        }
    }
}
  218. /**
  219. * Estimate maximum auto-correlation around pitch lag.
  220. *
  221. * @param buf buffer with offset applied
  222. * @param offset offset of the excitation vector
  223. * @param ccr_max pointer to the maximum auto-correlation
  224. * @param pitch_lag decoded pitch lag
  225. * @param length length of autocorrelation
  226. * @param dir forward lag(1) / backward lag(-1)
  227. */
  228. static int autocorr_max(const int16_t *buf, int offset, int *ccr_max,
  229. int pitch_lag, int length, int dir)
  230. {
  231. int limit, ccr, lag = 0;
  232. int i;
  233. pitch_lag = FFMIN(PITCH_MAX - 3, pitch_lag);
  234. if (dir > 0)
  235. limit = FFMIN(FRAME_LEN + PITCH_MAX - offset - length, pitch_lag + 3);
  236. else
  237. limit = pitch_lag + 3;
  238. for (i = pitch_lag - 3; i <= limit; i++) {
  239. ccr = ff_g723_1_dot_product(buf, buf + dir * i, length);
  240. if (ccr > *ccr_max) {
  241. *ccr_max = ccr;
  242. lag = i;
  243. }
  244. }
  245. return lag;
  246. }
/**
 * Calculate pitch postfilter optimal and scaling gains.
 *
 * @param lag      pitch postfilter forward/backward lag
 * @param ppf      pitch postfilter parameters (output)
 * @param cur_rate current bitrate
 * @param tgt_eng  target energy
 * @param ccr      cross-correlation
 * @param res_eng  residual energy
 */
static void comp_ppf_gains(int lag, PPFParam *ppf, enum Rate cur_rate,
                           int tgt_eng, int ccr, int res_eng)
{
    int pf_residual; /* square of postfiltered residual */
    int temp1, temp2;

    ppf->index = lag;

    /* Apply the postfilter only when 2*ccr^2 > tgt_eng*res_eng/2 */
    temp1 = tgt_eng * res_eng >> 1;
    temp2 = ccr * ccr << 1;

    if (temp2 > temp1) {
        if (ccr >= res_eng) {
            /* correlation gain saturated: use the full rate-dependent weight */
            ppf->opt_gain = ppf_gain_weight[cur_rate];
        } else {
            /* opt_gain = weight * (ccr / res_eng), in Q15 */
            ppf->opt_gain = (ccr << 15) / res_eng *
                            ppf_gain_weight[cur_rate] >> 15;
        }
        /* pf_res^2 = tgt_eng + 2*ccr*gain + res_eng*gain^2 */
        temp1 = (tgt_eng << 15) + (ccr * ppf->opt_gain << 1);
        temp2 = (ppf->opt_gain * ppf->opt_gain >> 15) * res_eng;
        pf_residual = av_sat_add32(temp1, temp2 + (1 << 15)) >> 16;

        if (tgt_eng >= pf_residual << 1) {
            temp1 = 0x7fff;
        } else {
            temp1 = (tgt_eng << 14) / pf_residual;
        }

        /* scaling_gain = sqrt(tgt_eng/pf_res^2) */
        ppf->sc_gain = square_root(temp1 << 16);
    } else {
        /* Correlation too weak: bypass (zero optimal gain, unity scale) */
        ppf->opt_gain = 0;
        ppf->sc_gain = 0x7fff;
    }

    ppf->opt_gain = av_clip_int16(ppf->opt_gain * ppf->sc_gain >> 15);
}
/**
 * Calculate pitch postfilter parameters.
 *
 * @param p         the context
 * @param offset    offset of the excitation vector
 * @param pitch_lag decoded pitch lag
 * @param ppf       pitch postfilter parameters (output)
 * @param cur_rate  current bitrate
 */
static void comp_ppf_coeff(G723_1_Context *p, int offset, int pitch_lag,
                           PPFParam *ppf, enum Rate cur_rate)
{
    int16_t scale;
    int i;
    int temp1, temp2;
    /*
     * 0 - target energy
     * 1 - forward cross-correlation
     * 2 - forward residual energy
     * 3 - backward cross-correlation
     * 4 - backward residual energy
     */
    int energy[5] = {0, 0, 0, 0, 0};
    int16_t *buf = p->audio + LPC_ORDER + offset;

    /* Best correlating lag in each direction (0 if none improved on 0) */
    int fwd_lag = autocorr_max(buf, offset, &energy[1], pitch_lag,
                               SUBFRAME_LEN, 1);
    int back_lag = autocorr_max(buf, offset, &energy[3], pitch_lag,
                                SUBFRAME_LEN, -1);

    /* Default: postfilter disabled (zero gain, unity scaling) */
    ppf->index = 0;
    ppf->opt_gain = 0;
    ppf->sc_gain = 0x7fff;

    /* Case 0, Section 3.6 */
    if (!back_lag && !fwd_lag)
        return;

    /* Compute target energy */
    energy[0] = ff_g723_1_dot_product(buf, buf, SUBFRAME_LEN);

    /* Compute forward residual energy */
    if (fwd_lag)
        energy[2] = ff_g723_1_dot_product(buf + fwd_lag, buf + fwd_lag,
                                          SUBFRAME_LEN);

    /* Compute backward residual energy */
    if (back_lag)
        energy[4] = ff_g723_1_dot_product(buf - back_lag, buf - back_lag,
                                          SUBFRAME_LEN);

    /* Normalize and shorten: scale all energies by the headroom of the
     * largest one, then keep the top 16 bits. */
    temp1 = 0;
    for (i = 0; i < 5; i++)
        temp1 = FFMAX(energy[i], temp1);

    scale = ff_g723_1_normalize_bits(temp1, 31);
    for (i = 0; i < 5; i++)
        energy[i] = (energy[i] << scale) >> 16;

    if (fwd_lag && !back_lag) { /* Case 1 */
        comp_ppf_gains(fwd_lag, ppf, cur_rate, energy[0], energy[1],
                       energy[2]);
    } else if (!fwd_lag) { /* Case 2 */
        comp_ppf_gains(-back_lag, ppf, cur_rate, energy[0], energy[3],
                       energy[4]);
    } else { /* Case 3 */
        /*
         * Select the largest of energy[1]^2/energy[2]
         * and energy[3]^2/energy[4]
         */
        temp1 = energy[4] * ((energy[1] * energy[1] + (1 << 14)) >> 15);
        temp2 = energy[2] * ((energy[3] * energy[3] + (1 << 14)) >> 15);
        if (temp1 >= temp2) {
            comp_ppf_gains(fwd_lag, ppf, cur_rate, energy[0], energy[1],
                           energy[2]);
        } else {
            comp_ppf_gains(-back_lag, ppf, cur_rate, energy[0], energy[3],
                           energy[4]);
        }
    }
}
/**
 * Classify frames as voiced/unvoiced.
 *
 * @param p         the context
 * @param pitch_lag decoded pitch_lag
 * @param exc_eng   excitation energy estimation (output)
 * @param scale     scaling factor of exc_eng (output)
 *
 * @return residual interpolation index if voiced, 0 otherwise
 */
static int comp_interp_index(G723_1_Context *p, int pitch_lag,
                             int *exc_eng, int *scale)
{
    int offset = PITCH_MAX + 2 * SUBFRAME_LEN;
    int16_t *buf = p->audio + LPC_ORDER;
    int index, ccr, tgt_eng, best_eng, temp;

    /* Normalize the excitation history into p->audio */
    *scale = ff_g723_1_scale_vector(buf, p->excitation, FRAME_LEN + PITCH_MAX);
    buf += offset;

    /* Compute maximum backward cross-correlation */
    ccr   = 0;
    index = autocorr_max(buf, offset, &ccr, pitch_lag, SUBFRAME_LEN * 2, -1);
    ccr   = av_sat_add32(ccr, 1 << 15) >> 16;

    /* Compute target energy */
    tgt_eng  = ff_g723_1_dot_product(buf, buf, SUBFRAME_LEN * 2);
    *exc_eng = av_sat_add32(tgt_eng, 1 << 15) >> 16;

    if (ccr <= 0)
        return 0;

    /* Compute best energy */
    best_eng = ff_g723_1_dot_product(buf - index, buf - index,
                                     SUBFRAME_LEN * 2);
    best_eng = av_sat_add32(best_eng, 1 << 15) >> 16;

    /* Voiced when ccr^2 > (best_eng * exc_eng) / 8 */
    temp = best_eng * *exc_eng >> 3;

    if (temp < ccr * ccr) {
        return index;
    } else
        return 0;
}
/**
 * Perform residual interpolation based on frame classification.
 *
 * @param buf   decoded excitation vector
 * @param out   output vector (FRAME_LEN samples)
 * @param lag   decoded pitch lag (0 selects the unvoiced path)
 * @param gain  interpolated gain
 * @param rseed seed for random number generator
 */
static void residual_interp(int16_t *buf, int16_t *out, int lag,
                            int gain, int *rseed)
{
    int i;
    if (lag) { /* Voiced */
        int16_t *vector_ptr = buf + PITCH_MAX;
        /* Attenuate the last pitch cycle by 3/4 ... */
        for (i = 0; i < lag; i++)
            out[i] = vector_ptr[i - lag] * 3 >> 2;
        /* ... and repeat it to fill the frame */
        av_memcpy_backptr((uint8_t*)(out + lag), lag * sizeof(*out),
                          (FRAME_LEN - lag) * sizeof(*out));
    } else { /* Unvoiced */
        /* Gain-scaled pseudo-random noise from a multiplicative LCG */
        for (i = 0; i < FRAME_LEN; i++) {
            *rseed = *rseed * 521 + 259;
            out[i] = gain * *rseed >> 15;
        }
        memset(buf, 0, (FRAME_LEN + PITCH_MAX) * sizeof(*buf));
    }
}
/**
 * Perform IIR filtering.
 *
 * @param fir_coef FIR coefficients
 * @param iir_coef IIR coefficients
 * @param src      source vector
 * @param dest     destination vector
 * @param width    width of the output, 16 bits(0) / 32 bits(1)
 */
#define iir_filter(fir_coef, iir_coef, src, dest, width)\
{\
    int m, n;\
    /* 16-bit output: shift the result down; 32-bit output: shift the
     * feedback samples down instead (res_shift is 16 iff width == 0) */\
    int res_shift = 16 & ~-(width);\
    int in_shift  = 16 - res_shift;\
\
    for (m = 0; m < SUBFRAME_LEN; m++) {\
        int64_t filter = 0;\
        for (n = 1; n <= LPC_ORDER; n++) {\
            filter -= (fir_coef)[n - 1] * (src)[m - n] -\
                      (iir_coef)[n - 1] * ((dest)[m - n] >> in_shift);\
        }\
\
        /* round (add 1 << 15) and saturate the accumulated sum */\
        (dest)[m] = av_clipl_int32(((src)[m] << 16) + (filter << 3) +\
                                   (1 << 15)) >> res_shift;\
    }\
}
/**
 * Adjust gain of postfiltered signal.
 *
 * @param p      the context
 * @param buf    postfiltered output vector
 * @param energy input energy coefficient
 */
static void gain_scale(G723_1_Context *p, int16_t * buf, int energy)
{
    int num, denom, gain, bits1, bits2;
    int i;

    /* denom = saturated sum of (buf[i]/4)^2, the postfiltered energy */
    num   = energy;
    denom = 0;
    for (i = 0; i < SUBFRAME_LEN; i++) {
        int temp = buf[i] >> 2;
        temp *= temp;
        denom = av_sat_dadd32(denom, temp);
    }

    if (num && denom) {
        /* gain ~ sqrt(num / denom), computed on block-normalized values */
        bits1 = ff_g723_1_normalize_bits(num, 31);
        bits2 = ff_g723_1_normalize_bits(denom, 31);
        num   = num << bits1 >> 1;
        denom <<= bits2;

        bits2 = 5 + bits1 - bits2;
        bits2 = FFMAX(0, bits2);

        gain = (num >> 1) / (denom >> 16);
        gain = square_root(gain << 16 >> bits2);
    } else {
        /* degenerate energies: fall back to the initial gain value */
        gain = 1 << 12;
    }

    for (i = 0; i < SUBFRAME_LEN; i++) {
        /* Smooth the gain per sample: pf_gain = (15*pf_gain + gain)/16,
         * then apply pf_gain * 17/16 with rounding and saturation. */
        p->pf_gain = (15 * p->pf_gain + gain + (1 << 3)) >> 4;
        buf[i]     = av_clip_int16((buf[i] * (p->pf_gain + (p->pf_gain >> 4)) +
                                   (1 << 10)) >> 11);
    }
}
/**
 * Perform formant filtering.
 *
 * @param p   the context
 * @param lpc quantized lpc coefficients (SUBFRAMES * LPC_ORDER)
 * @param buf input buffer (synthesized audio, LPC_ORDER + FRAME_LEN)
 * @param dst output buffer (FRAME_LEN samples)
 */
static void formant_postfilter(G723_1_Context *p, int16_t *lpc,
                               int16_t *buf, int16_t *dst)
{
    int16_t filter_coef[2][LPC_ORDER];
    int filter_signal[LPC_ORDER + FRAME_LEN], *signal_ptr;
    int i, j, k;

    /* Prepend the FIR/IIR filter memories from the previous frame */
    memcpy(buf, p->fir_mem, LPC_ORDER * sizeof(*buf));
    memcpy(filter_signal, p->iir_mem, LPC_ORDER * sizeof(*filter_signal));

    /* Pole-zero filter each subframe with the LPC coefficients scaled by
     * the two postfilter_tbl weight sets (32-bit intermediate output). */
    for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
        for (k = 0; k < LPC_ORDER; k++) {
            filter_coef[0][k] = (-lpc[k] * postfilter_tbl[0][k] +
                                 (1 << 14)) >> 15;
            filter_coef[1][k] = (-lpc[k] * postfilter_tbl[1][k] +
                                 (1 << 14)) >> 15;
        }
        iir_filter(filter_coef[0], filter_coef[1], buf + i, filter_signal + i, 1);
        lpc += LPC_ORDER;
    }

    /* Save the filter memories for the next frame */
    memcpy(p->fir_mem, buf + FRAME_LEN, LPC_ORDER * sizeof(int16_t));
    memcpy(p->iir_mem, filter_signal + FRAME_LEN, LPC_ORDER * sizeof(int));

    buf += LPC_ORDER;
    signal_ptr = filter_signal + LPC_ORDER;
    for (i = 0; i < SUBFRAMES; i++) {
        int temp;
        int auto_corr[2];
        int scale, energy;

        /* Normalize */
        scale = ff_g723_1_scale_vector(dst, buf, SUBFRAME_LEN);

        /* Compute auto correlation coefficients */
        auto_corr[0] = ff_g723_1_dot_product(dst, dst + 1, SUBFRAME_LEN - 1);
        auto_corr[1] = ff_g723_1_dot_product(dst, dst, SUBFRAME_LEN);

        /* Compute reflection coefficient */
        temp = auto_corr[1] >> 16;
        if (temp) {
            temp = (auto_corr[0] >> 2) / temp;
        }
        /* Smooth it with the previous value (3:1 weighting) */
        p->reflection_coef = (3 * p->reflection_coef + temp + 2) >> 2;
        temp = -p->reflection_coef >> 1 & ~3;

        /* Compensation filter: first-order correction driven by the
         * smoothed reflection coefficient */
        for (j = 0; j < SUBFRAME_LEN; j++) {
            dst[j] = av_sat_dadd32(signal_ptr[j],
                                   (signal_ptr[j - 1] >> 16) * temp) >> 16;
        }

        /* Compute normalized signal energy (undo the 2*scale+4 shift) */
        temp = 2 * scale + 4;
        if (temp < 0) {
            energy = av_clipl_int32((int64_t)auto_corr[1] << -temp);
        } else
            energy = auto_corr[1] >> temp;

        gain_scale(p, dst, energy);

        buf        += SUBFRAME_LEN;
        signal_ptr += SUBFRAME_LEN;
        dst        += SUBFRAME_LEN;
    }
}
  552. static int sid_gain_to_lsp_index(int gain)
  553. {
  554. if (gain < 0x10)
  555. return gain << 6;
  556. else if (gain < 0x20)
  557. return gain - 8 << 7;
  558. else
  559. return gain - 20 << 8;
  560. }
  561. static inline int cng_rand(int *state, int base)
  562. {
  563. *state = (*state * 521 + 259) & 0xFFFF;
  564. return (*state & 0x7FFF) * base >> 15;
  565. }
/**
 * Estimate the SID gain index from the current gain state.
 *
 * Bit-exact successive approximation: picks a segment of the cng_bseg
 * table, then refines a value by comparing against squared candidates.
 *
 * @return 6-bit gain index (0x3F if the input exceeds the top segment)
 */
static int estimate_sid_gain(G723_1_Context *p)
{
    int i, shift, seg, seg2, t, val, val_add, x, y;

    /* Rescale sid_gain by the current gain before filtering */
    shift = 16 - p->cur_gain * 2;
    if (shift > 0)
        t = p->sid_gain << shift;
    else
        t = p->sid_gain >> -shift;
    x = t * cng_filt[0] >> 16;

    if (x >= cng_bseg[2])
        return 0x3F;

    /* Select the segment and the number of refinement iterations */
    if (x >= cng_bseg[1]) {
        shift = 4;
        seg = 3;
    } else {
        shift = 3;
        seg = (x >= cng_bseg[0]);
    }
    seg2 = FFMIN(seg, 3);

    /* Binary refinement of val so that (seg*32 + (val << seg2))^2 ~ x */
    val = 1 << shift;
    val_add = val >> 1;
    for (i = 0; i < shift; i++) {
        t = seg * 32 + (val << seg2);
        t *= t;
        if (x >= t)
            val += val_add;
        else
            val -= val_add;
        val_add >>= 1;
    }

    /* Final rounding: nudge val towards whichever neighbor is closer */
    t = seg * 32 + (val << seg2);
    y = t * t - x;
    if (y <= 0) {
        t = seg * 32 + (val + 1 << seg2);
        t = t * t - x;
        val = (seg2 - 1 << 4) + val;
        if (t >= y)
            val++;
    } else {
        t = seg * 32 + (val - 1 << seg2);
        t = t * t - x;
        val = (seg2 - 1 << 4) + val;
        if (t >= y)
            val--;
    }
    return val;
}
/**
 * Generate a frame of comfort noise (CNG) excitation.
 *
 * Randomizes pitch lags, adaptive codebook parameters and pulse
 * positions/signs from the CNG seed, then scales the pulse amplitudes
 * so the excitation energy matches p->cur_gain.
 *
 * @param p the context
 */
static void generate_noise(G723_1_Context *p)
{
    int i, j, idx, t;
    int off[SUBFRAMES];
    int signs[SUBFRAMES / 2 * 11], pos[SUBFRAMES / 2 * 11];
    int tmp[SUBFRAME_LEN * 2];
    int16_t *vector_ptr;
    int64_t sum;
    int b0, c, delta, x, shift;

    /* Random pitch lags and adaptive codebook parameters */
    p->pitch_lag[0] = cng_rand(&p->cng_random_seed, 21) + 123;
    p->pitch_lag[1] = cng_rand(&p->cng_random_seed, 19) + 123;
    for (i = 0; i < SUBFRAMES; i++) {
        p->subframe[i].ad_cb_gain = cng_rand(&p->cng_random_seed, 50) + 1;
        p->subframe[i].ad_cb_lag = cng_adaptive_cb_lag[i];
    }

    /* Random grid offsets and +/-1 signs (in Q14) per subframe pair */
    for (i = 0; i < SUBFRAMES / 2; i++) {
        t = cng_rand(&p->cng_random_seed, 1 << 13);
        off[i * 2] = t & 1;
        off[i * 2 + 1] = ((t >> 1) & 1) + SUBFRAME_LEN;
        t >>= 2;
        for (j = 0; j < 11; j++) {
            signs[i * 11 + j] = (t & 1) * 2 - 1 << 14;
            t >>= 1;
        }
    }

    /* Draw pulses[i] distinct random positions per subframe
     * (partial Fisher-Yates over the half-grid) */
    idx = 0;
    for (i = 0; i < SUBFRAMES; i++) {
        for (j = 0; j < SUBFRAME_LEN / 2; j++)
            tmp[j] = j;
        t = SUBFRAME_LEN / 2;
        for (j = 0; j < pulses[i]; j++, idx++) {
            int idx2 = cng_rand(&p->cng_random_seed, t);
            pos[idx] = tmp[idx2] * 2 + off[i];
            tmp[idx2] = tmp[--t];
        }
    }

    vector_ptr = p->audio + LPC_ORDER;
    memcpy(vector_ptr, p->prev_excitation,
           PITCH_MAX * sizeof(*p->excitation));

    /* Process two subframes at a time */
    for (i = 0; i < SUBFRAMES; i += 2) {
        ff_g723_1_gen_acb_excitation(vector_ptr, vector_ptr,
                                     p->pitch_lag[i >> 1], &p->subframe[i],
                                     p->cur_rate);
        ff_g723_1_gen_acb_excitation(vector_ptr + SUBFRAME_LEN,
                                     vector_ptr + SUBFRAME_LEN,
                                     p->pitch_lag[i >> 1], &p->subframe[i + 1],
                                     p->cur_rate);

        /* Pick a shift so the energy sum below stays in range */
        t = 0;
        for (j = 0; j < SUBFRAME_LEN * 2; j++)
            t |= FFABS(vector_ptr[j]);
        t = FFMIN(t, 0x7FFF);
        if (!t) {
            shift = 0;
        } else {
            shift = -10 + av_log2(t);
            if (shift < -2)
                shift = -2;
        }
        sum = 0;
        if (shift < 0) {
            for (j = 0; j < SUBFRAME_LEN * 2; j++) {
                t = vector_ptr[j] << -shift;
                sum += t * t;
                tmp[j] = t;
            }
        } else {
            for (j = 0; j < SUBFRAME_LEN * 2; j++) {
                t = vector_ptr[j] >> shift;
                sum += t * t;
                tmp[j] = t;
            }
        }

        /* Correlation between the ACB excitation and the pulse pattern */
        b0 = 0;
        for (j = 0; j < 11; j++)
            b0 += tmp[pos[(i / 2) * 11 + j]] * signs[(i / 2) * 11 + j];
        b0 = b0 * 2 * 2979LL + (1 << 29) >> 30; // approximated division by 11

        /* Energy deficit relative to the target gain */
        c = p->cur_gain * (p->cur_gain * SUBFRAME_LEN >> 5);
        if (shift * 2 + 3 >= 0)
            c >>= shift * 2 + 3;
        else
            c <<= -(shift * 2 + 3);
        c = (av_clipl_int32(sum << 1) - c) * 2979LL >> 15;

        /* Solve the quadratic for the pulse amplitude x, picking the
         * smaller-magnitude root */
        delta = b0 * b0 * 2 - c;
        if (delta <= 0) {
            x = -b0;
        } else {
            delta = square_root(delta);
            x = delta - b0;
            t = delta + b0;
            if (FFABS(t) < FFABS(x))
                x = -t;
        }
        shift++;
        if (shift < 0)
            x >>= -shift;
        else
            x <<= shift;
        x = av_clip(x, -10000, 10000);

        /* Add the signed pulses to the excitation */
        for (j = 0; j < 11; j++) {
            idx = (i / 2) * 11 + j;
            vector_ptr[pos[idx]] = av_clip_int16(vector_ptr[pos[idx]] +
                                                 (x * signs[idx] >> 15));
        }

        /* copy decoded data to serve as a history for the next decoded subframes */
        memcpy(vector_ptr + PITCH_MAX, vector_ptr,
               sizeof(*vector_ptr) * SUBFRAME_LEN * 2);
        vector_ptr += SUBFRAME_LEN * 2;
    }

    /* Save the excitation for the next frame */
    memcpy(p->prev_excitation, p->audio + LPC_ORDER + FRAME_LEN,
           PITCH_MAX * sizeof(*p->excitation));
}
/**
 * Decode one packet into FRAME_LEN output samples.
 *
 * Dispatches on the unpacked frame type: active frames run full
 * excitation synthesis (with erasure concealment), SID/untransmitted
 * frames run comfort noise generation; both paths finish with LPC
 * synthesis and optional formant postfiltering.
 *
 * @return number of bytes consumed, or a negative error code
 */
static int g723_1_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
{
    G723_1_Context *p  = avctx->priv_data;
    AVFrame *frame     = data;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    /* NOTE(review): buf[0] is read before the size check below; this
     * relies on the caller never passing an empty packet — verify. */
    int dec_mode       = buf[0] & 3;

    PPFParam ppf[SUBFRAMES];
    int16_t cur_lsp[LPC_ORDER];
    int16_t lpc[SUBFRAMES * LPC_ORDER];
    int16_t acb_vector[SUBFRAME_LEN];
    int16_t *out;
    int bad_frame = 0, i, j, ret;
    int16_t *audio = p->audio;

    if (buf_size < frame_size[dec_mode]) {
        if (buf_size)
            av_log(avctx, AV_LOG_WARNING,
                   "Expected %d bytes, got %d - skipping packet\n",
                   frame_size[dec_mode], buf_size);
        *got_frame_ptr = 0;
        return buf_size;
    }

    /* On unpack failure, conceal: repeat the previous frame type */
    if (unpack_bitstream(p, buf, buf_size) < 0) {
        bad_frame = 1;
        if (p->past_frame_type == ACTIVE_FRAME)
            p->cur_frame_type = ACTIVE_FRAME;
        else
            p->cur_frame_type = UNTRANSMITTED_FRAME;
    }

    frame->nb_samples = FRAME_LEN;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;

    out = (int16_t *)frame->data[0];

    if (p->cur_frame_type == ACTIVE_FRAME) {
        if (!bad_frame)
            p->erased_frames = 0;
        else if (p->erased_frames != 3)  /* saturate the erasure counter */
            p->erased_frames++;

        ff_g723_1_inverse_quant(cur_lsp, p->prev_lsp, p->lsp_index, bad_frame);
        ff_g723_1_lsp_interpolate(lpc, cur_lsp, p->prev_lsp);

        /* Save the lsp_vector for the next frame */
        memcpy(p->prev_lsp, cur_lsp, LPC_ORDER * sizeof(*p->prev_lsp));

        /* Generate the excitation for the frame */
        memcpy(p->excitation, p->prev_excitation,
               PITCH_MAX * sizeof(*p->excitation));
        if (!p->erased_frames) {
            int16_t *vector_ptr = p->excitation + PITCH_MAX;

            /* Update interpolation gain memory */
            p->interp_gain = fixed_cb_gain[(p->subframe[2].amp_index +
                                            p->subframe[3].amp_index) >> 1];
            for (i = 0; i < SUBFRAMES; i++) {
                gen_fcb_excitation(vector_ptr, &p->subframe[i], p->cur_rate,
                                   p->pitch_lag[i >> 1], i);
                ff_g723_1_gen_acb_excitation(acb_vector,
                                             &p->excitation[SUBFRAME_LEN * i],
                                             p->pitch_lag[i >> 1],
                                             &p->subframe[i], p->cur_rate);
                /* Get the total excitation (fixed doubled + adaptive) */
                for (j = 0; j < SUBFRAME_LEN; j++) {
                    int v = av_clip_int16(vector_ptr[j] << 1);
                    vector_ptr[j] = av_clip_int16(v + acb_vector[j]);
                }
                vector_ptr += SUBFRAME_LEN;
            }

            vector_ptr = p->excitation + PITCH_MAX;

            p->interp_index = comp_interp_index(p, p->pitch_lag[1],
                                                &p->sid_gain, &p->cur_gain);

            /* Perform pitch postfiltering */
            if (p->postfilter) {
                i = PITCH_MAX;
                for (j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
                    comp_ppf_coeff(p, i, p->pitch_lag[j >> 1],
                                   ppf + j, p->cur_rate);

                for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
                    ff_acelp_weighted_vector_sum(p->audio + LPC_ORDER + i,
                                                 vector_ptr + i,
                                                 vector_ptr + i + ppf[j].index,
                                                 ppf[j].sc_gain,
                                                 ppf[j].opt_gain,
                                                 1 << 14, 15, SUBFRAME_LEN);
            } else {
                /* Feed the raw excitation straight to LPC synthesis */
                audio = vector_ptr - LPC_ORDER;
            }

            /* Save the excitation for the next frame */
            memcpy(p->prev_excitation, p->excitation + FRAME_LEN,
                   PITCH_MAX * sizeof(*p->excitation));
        } else {
            /* Erasure concealment: decay the gain 3/4 per erased frame */
            p->interp_gain = (p->interp_gain * 3 + 2) >> 2;
            if (p->erased_frames == 3) {
                /* Mute output */
                memset(p->excitation, 0,
                       (FRAME_LEN + PITCH_MAX) * sizeof(*p->excitation));
                memset(p->prev_excitation, 0,
                       PITCH_MAX * sizeof(*p->excitation));
                memset(frame->data[0], 0,
                       (FRAME_LEN + LPC_ORDER) * sizeof(int16_t));
            } else {
                int16_t *buf = p->audio + LPC_ORDER;

                /* Regenerate frame */
                residual_interp(p->excitation, buf, p->interp_index,
                                p->interp_gain, &p->random_seed);

                /* Save the excitation for the next frame */
                memcpy(p->prev_excitation, buf + (FRAME_LEN - PITCH_MAX),
                       PITCH_MAX * sizeof(*p->excitation));
            }
        }
        p->cng_random_seed = CNG_RANDOM_SEED;
    } else {
        if (p->cur_frame_type == SID_FRAME) {
            p->sid_gain = sid_gain_to_lsp_index(p->subframe[0].amp_index);
            ff_g723_1_inverse_quant(p->sid_lsp, p->prev_lsp, p->lsp_index, 0);
        } else if (p->past_frame_type == ACTIVE_FRAME) {
            p->sid_gain = estimate_sid_gain(p);
        }

        /* Jump to the SID gain after speech, otherwise converge slowly */
        if (p->past_frame_type == ACTIVE_FRAME)
            p->cur_gain = p->sid_gain;
        else
            p->cur_gain = (p->cur_gain * 7 + p->sid_gain) >> 3;
        generate_noise(p);
        ff_g723_1_lsp_interpolate(lpc, p->sid_lsp, p->prev_lsp);
        /* Save the lsp_vector for the next frame */
        memcpy(p->prev_lsp, p->sid_lsp, LPC_ORDER * sizeof(*p->prev_lsp));
    }

    p->past_frame_type = p->cur_frame_type;

    /* LPC synthesis filtering, one subframe at a time */
    memcpy(p->audio, p->synth_mem, LPC_ORDER * sizeof(*p->audio));
    for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
        ff_celp_lp_synthesis_filter(p->audio + i, &lpc[j * LPC_ORDER],
                                    audio + i, SUBFRAME_LEN, LPC_ORDER,
                                    0, 1, 1 << 12);
    memcpy(p->synth_mem, p->audio + FRAME_LEN, LPC_ORDER * sizeof(*p->audio));

    if (p->postfilter) {
        formant_postfilter(p, lpc, p->audio, out);
    } else { // if output is not postfiltered it should be scaled by 2
        for (i = 0; i < FRAME_LEN; i++)
            out[i] = av_clip_int16(p->audio[LPC_ORDER + i] << 1);
    }

    *got_frame_ptr = 1;

    return frame_size[dec_mode];
}
#define OFFSET(x) offsetof(G723_1_Context, x)
#define AD AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM

/* Decoder private options; the formant/pitch postfilter defaults to on. */
static const AVOption options[] = {
    { "postfilter", "enable postfilter", OFFSET(postfilter), AV_OPT_TYPE_BOOL,
      { .i64 = 1 }, 0, 1, AD },
    { NULL }
};

static const AVClass g723_1dec_class = {
    .class_name = "G.723.1 decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* Codec registration entry for the G.723.1 decoder. */
AVCodec ff_g723_1_decoder = {
    .name           = "g723_1",
    .long_name      = NULL_IF_CONFIG_SMALL("G.723.1"),
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_G723_1,
    .priv_data_size = sizeof(G723_1_Context),
    .init           = g723_1_decode_init,
    .decode         = g723_1_decode_frame,
    .capabilities   = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
    .priv_class     = &g723_1dec_class,
};