/*
 * G.723.1 compatible decoder
 * Copyright (c) 2006 Benjamin Larsson
 * Copyright (c) 2010 Mohamed Naufal Basheer
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * G.723.1 compatible decoder
 */

#define BITSTREAM_READER_LE
#include "libavutil/channel_layout.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "get_bits.h"
#include "acelp_vectors.h"
#include "celp_filters.h"
#include "g723_1_data.h"
#include "internal.h"

#define CNG_RANDOM_SEED 12345

/**
 * G723.1 frame types
 */
enum FrameType {
    ACTIVE_FRAME,        ///< Active speech
    SID_FRAME,           ///< Silence Insertion Descriptor frame
    UNTRANSMITTED_FRAME
};

enum Rate {
    RATE_6300,
    RATE_5300
};
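/* RATE_6300 (6.3 kbit/s) uses the MP-MLQ fixed codebook, RATE_5300
 * (5.3 kbit/s) uses an ACELP fixed codebook; both rates share the same
 * LSP quantization and adaptive codebook structure. */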
/**
 * G723.1 unpacked data subframe
 */
typedef struct {
    int ad_cb_lag;     ///< adaptive codebook lag
    int ad_cb_gain;
    int dirac_train;
    int pulse_sign;
    int grid_index;
    int amp_index;
    int pulse_pos;
} G723_1_Subframe;

/**
 * Pitch postfilter parameters
 */
typedef struct {
    int index;           ///< postfilter backward/forward lag
    int16_t opt_gain;    ///< optimal gain
    int16_t sc_gain;     ///< scaling gain
} PPFParam;

typedef struct g723_1_context {
    AVClass *class;
    AVFrame frame;

    G723_1_Subframe subframe[4];
    enum FrameType cur_frame_type;
    enum FrameType past_frame_type;
    enum Rate cur_rate;
    uint8_t lsp_index[LSP_BANDS];
    int pitch_lag[2];
    int erased_frames;

    int16_t prev_lsp[LPC_ORDER];
    int16_t sid_lsp[LPC_ORDER];
    int16_t prev_excitation[PITCH_MAX];
    int16_t excitation[PITCH_MAX + FRAME_LEN + 4];
    int16_t synth_mem[LPC_ORDER];
    int16_t fir_mem[LPC_ORDER];
    int     iir_mem[LPC_ORDER];

    int random_seed;
    int cng_random_seed;
    int interp_index;
    int interp_gain;
    int sid_gain;
    int cur_gain;
    int reflection_coef;
    int pf_gain;
    int postfilter;

    int16_t audio[FRAME_LEN + LPC_ORDER + PITCH_MAX + 4];
} G723_1_Context;

static av_cold int g723_1_decode_init(AVCodecContext *avctx)
{
    G723_1_Context *p = avctx->priv_data;

    avctx->channel_layout = AV_CH_LAYOUT_MONO;
    avctx->sample_fmt     = AV_SAMPLE_FMT_S16;
    avctx->channels       = 1;
    avctx->sample_rate    = 8000;
    p->pf_gain            = 1 << 12;

    avcodec_get_frame_defaults(&p->frame);
    avctx->coded_frame = &p->frame;

    memcpy(p->prev_lsp, dc_lsp, LPC_ORDER * sizeof(*p->prev_lsp));
    memcpy(p->sid_lsp,  dc_lsp, LPC_ORDER * sizeof(*p->sid_lsp));

    p->cng_random_seed = CNG_RANDOM_SEED;
    p->past_frame_type = SID_FRAME;

    return 0;
}

/**
 * Unpack the frame into parameters.
 *
 * @param p        the context
 * @param buf      pointer to the input buffer
 * @param buf_size size of the input buffer
 */
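/*
 * Layout of a packed frame: 2 info bits select the coding rate or mark a
 * SID/untransmitted frame, followed by 24 bits of LSP VQ indices. SID frames
 * add a 6-bit gain index; active frames add the pitch lags, combined gain
 * indices, grid indices and the pulse position/sign fields parsed below.
 */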
static int unpack_bitstream(G723_1_Context *p, const uint8_t *buf,
                            int buf_size)
{
    GetBitContext gb;
    int ad_cb_len;
    int temp, info_bits, i;

    init_get_bits(&gb, buf, buf_size * 8);

    /* Extract frame type and rate info */
    info_bits = get_bits(&gb, 2);

    if (info_bits == 3) {
        p->cur_frame_type = UNTRANSMITTED_FRAME;
        return 0;
    }

    /* Extract 24 bit lsp indices, 8 bit for each band */
    p->lsp_index[2] = get_bits(&gb, 8);
    p->lsp_index[1] = get_bits(&gb, 8);
    p->lsp_index[0] = get_bits(&gb, 8);

    if (info_bits == 2) {
        p->cur_frame_type = SID_FRAME;
        p->subframe[0].amp_index = get_bits(&gb, 6);
        return 0;
    }

    /* Extract the info common to both rates */
    p->cur_rate       = info_bits ? RATE_5300 : RATE_6300;
    p->cur_frame_type = ACTIVE_FRAME;

    p->pitch_lag[0] = get_bits(&gb, 7);
    if (p->pitch_lag[0] > 123)       /* test if forbidden code */
        return -1;
    p->pitch_lag[0] += PITCH_MIN;
    p->subframe[1].ad_cb_lag = get_bits(&gb, 2);

    p->pitch_lag[1] = get_bits(&gb, 7);
    if (p->pitch_lag[1] > 123)
        return -1;
    p->pitch_lag[1] += PITCH_MIN;
    p->subframe[3].ad_cb_lag = get_bits(&gb, 2);

    p->subframe[0].ad_cb_lag = 1;
    p->subframe[2].ad_cb_lag = 1;

    for (i = 0; i < SUBFRAMES; i++) {
        /* Extract combined gain */
        temp      = get_bits(&gb, 12);
        ad_cb_len = 170;
        p->subframe[i].dirac_train = 0;
        if (p->cur_rate == RATE_6300 && p->pitch_lag[i >> 1] < SUBFRAME_LEN - 2) {
            p->subframe[i].dirac_train = temp >> 11;
            temp &= 0x7FF;
            ad_cb_len = 85;
        }
        p->subframe[i].ad_cb_gain = FASTDIV(temp, GAIN_LEVELS);
        if (p->subframe[i].ad_cb_gain < ad_cb_len) {
            p->subframe[i].amp_index = temp - p->subframe[i].ad_cb_gain *
                                       GAIN_LEVELS;
        } else {
            return -1;
        }
    }

    p->subframe[0].grid_index = get_bits(&gb, 1);
    p->subframe[1].grid_index = get_bits(&gb, 1);
    p->subframe[2].grid_index = get_bits(&gb, 1);
    p->subframe[3].grid_index = get_bits(&gb, 1);

    if (p->cur_rate == RATE_6300) {
        skip_bits(&gb, 1);  /* skip reserved bit */

        /* Compute pulse_pos index using the 13-bit combined position index */
        temp = get_bits(&gb, 13);
        p->subframe[0].pulse_pos = temp / 810;

        temp -= p->subframe[0].pulse_pos * 810;
        p->subframe[1].pulse_pos = FASTDIV(temp, 90);

        temp -= p->subframe[1].pulse_pos * 90;
        p->subframe[2].pulse_pos = FASTDIV(temp, 9);
        p->subframe[3].pulse_pos = temp - p->subframe[2].pulse_pos * 9;

        p->subframe[0].pulse_pos = (p->subframe[0].pulse_pos << 16) +
                                   get_bits(&gb, 16);
        p->subframe[1].pulse_pos = (p->subframe[1].pulse_pos << 14) +
                                   get_bits(&gb, 14);
        p->subframe[2].pulse_pos = (p->subframe[2].pulse_pos << 16) +
                                   get_bits(&gb, 16);
        p->subframe[3].pulse_pos = (p->subframe[3].pulse_pos << 14) +
                                   get_bits(&gb, 14);

        p->subframe[0].pulse_sign = get_bits(&gb, 6);
        p->subframe[1].pulse_sign = get_bits(&gb, 5);
        p->subframe[2].pulse_sign = get_bits(&gb, 6);
        p->subframe[3].pulse_sign = get_bits(&gb, 5);
    } else { /* 5300 bps */
        p->subframe[0].pulse_pos  = get_bits(&gb, 12);
        p->subframe[1].pulse_pos  = get_bits(&gb, 12);
        p->subframe[2].pulse_pos  = get_bits(&gb, 12);
        p->subframe[3].pulse_pos  = get_bits(&gb, 12);

        p->subframe[0].pulse_sign = get_bits(&gb, 4);
        p->subframe[1].pulse_sign = get_bits(&gb, 4);
        p->subframe[2].pulse_sign = get_bits(&gb, 4);
        p->subframe[3].pulse_sign = get_bits(&gb, 4);
    }

    return 0;
}

/**
 * Bitexact implementation of sqrt(val/2).
 */
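/* Bit-by-bit successive approximation: starting from the most significant
 * bit of a 15-bit result, each of the 14 iterations keeps a candidate bit
 * only while 2 * (res + exp)^2 still fits below val, so res converges to
 * floor(sqrt(val / 2)). */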
static int16_t square_root(int val)
{
    int16_t res = 0;
    int16_t exp = 0x4000;
    int i;

    for (i = 0; i < 14; i ++) {
        int res_exp = res + exp;
        if (val >= res_exp * res_exp << 1)
            res += exp;
        exp >>= 1;
    }
    return res;
}

/**
 * Calculate the number of left-shifts required for normalizing the input.
 *
 * @param num   input number
 * @param width width of the input, 16 bits(0) / 32 bits(1)
 */
static int normalize_bits(int num, int width)
{
    return width - av_log2(num) - 1;
}

/**
 * Scale vector contents based on the largest of their absolutes.
 */
static int scale_vector(int16_t *dst, const int16_t *vector, int length)
{
    int bits, max = 0;
    int i;

    for (i = 0; i < length; i++)
        max |= FFABS(vector[i]);

    max  = FFMIN(max, 0x7FFF);
    bits = normalize_bits(max, 15);

    for (i = 0; i < length; i++)
        dst[i] = vector[i] << bits >> 3;

    return bits - 3;
}

/**
 * Perform inverse quantization of LSP frequencies.
 *
 * @param cur_lsp   the current LSP vector
 * @param prev_lsp  the previous LSP vector
 * @param lsp_index VQ indices
 * @param bad_frame bad frame flag
 */
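/*
 * Split VQ: the three 8-bit indices address separate codebooks for LSP bands
 * of 3, 3 and 4 coefficients. The decoded entry is added to the DC vector
 * plus a predicted fraction (Q15 "pred") of the previous frame's deviation
 * from DC; erased frames reuse index 0 with a stronger predictor. A stability
 * pass then enforces a minimum distance between adjacent LSPs and falls back
 * to the previous vector if ordering cannot be restored.
 */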
static void inverse_quant(int16_t *cur_lsp, int16_t *prev_lsp,
                          uint8_t *lsp_index, int bad_frame)
{
    int min_dist, pred;
    int i, j, temp, stable;

    /* Check for frame erasure */
    if (!bad_frame) {
        min_dist     = 0x100;
        pred         = 12288;
    } else {
        min_dist     = 0x200;
        pred         = 23552;
        lsp_index[0] = lsp_index[1] = lsp_index[2] = 0;
    }

    /* Get the VQ table entry corresponding to the transmitted index */
    cur_lsp[0] = lsp_band0[lsp_index[0]][0];
    cur_lsp[1] = lsp_band0[lsp_index[0]][1];
    cur_lsp[2] = lsp_band0[lsp_index[0]][2];
    cur_lsp[3] = lsp_band1[lsp_index[1]][0];
    cur_lsp[4] = lsp_band1[lsp_index[1]][1];
    cur_lsp[5] = lsp_band1[lsp_index[1]][2];
    cur_lsp[6] = lsp_band2[lsp_index[2]][0];
    cur_lsp[7] = lsp_band2[lsp_index[2]][1];
    cur_lsp[8] = lsp_band2[lsp_index[2]][2];
    cur_lsp[9] = lsp_band2[lsp_index[2]][3];

    /* Add predicted vector & DC component to the previously quantized vector */
    for (i = 0; i < LPC_ORDER; i++) {
        temp        = ((prev_lsp[i] - dc_lsp[i]) * pred + (1 << 14)) >> 15;
        cur_lsp[i] += dc_lsp[i] + temp;
    }

    for (i = 0; i < LPC_ORDER; i++) {
        cur_lsp[0]             = FFMAX(cur_lsp[0], 0x180);
        cur_lsp[LPC_ORDER - 1] = FFMIN(cur_lsp[LPC_ORDER - 1], 0x7e00);

        /* Stability check */
        for (j = 1; j < LPC_ORDER; j++) {
            temp = min_dist + cur_lsp[j - 1] - cur_lsp[j];
            if (temp > 0) {
                temp >>= 1;
                cur_lsp[j - 1] -= temp;
                cur_lsp[j]     += temp;
            }
        }
        stable = 1;
        for (j = 1; j < LPC_ORDER; j++) {
            temp = cur_lsp[j - 1] + min_dist - cur_lsp[j] - 4;
            if (temp > 0) {
                stable = 0;
                break;
            }
        }
        if (stable)
            break;
    }
    if (!stable)
        memcpy(cur_lsp, prev_lsp, LPC_ORDER * sizeof(*cur_lsp));
}

/**
 * Bitexact implementation of 2ab scaled by 1/2^16.
 *
 * @param a 32 bit multiplicand
 * @param b 16 bit multiplier
 */
#define MULL2(a, b) \
        ((((a) >> 16) * (b) << 1) + (((a) & 0xffff) * (b) >> 15))
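/* MULL2 splits the 32-bit operand into high and low 16-bit halves so that
 * 2*a*b / 2^16 can be computed without a 64-bit intermediate:
 * (a_hi * b) << 1 covers the upper half, (a_lo * b) >> 15 the lower. */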
/**
 * Convert LSP frequencies to LPC coefficients.
 *
 * @param lpc buffer for LPC coefficients
 */
static void lsp2lpc(int16_t *lpc)
{
    int f1[LPC_ORDER / 2 + 1];
    int f2[LPC_ORDER / 2 + 1];
    int i, j;

    /* Calculate negative cosine */
    for (j = 0; j < LPC_ORDER; j++) {
        int index  = lpc[j] >> 7;
        int offset = lpc[j] & 0x7f;
        int temp1  = cos_tab[index] << 16;
        int temp2  = (cos_tab[index + 1] - cos_tab[index]) *
                     ((offset << 8) + 0x80) << 1;

        lpc[j] = -(av_sat_dadd32(1 << 15, temp1 + temp2) >> 16);
    }

    /*
     * Compute sum and difference polynomial coefficients
     * (bitexact alternative to lsp2poly() in lsp.c)
     */
    /* Initialize with values in Q28 */
    f1[0] = 1 << 28;
    f1[1] = (lpc[0] << 14) + (lpc[2] << 14);
    f1[2] = lpc[0] * lpc[2] + (2 << 28);

    f2[0] = 1 << 28;
    f2[1] = (lpc[1] << 14) + (lpc[3] << 14);
    f2[2] = lpc[1] * lpc[3] + (2 << 28);

    /*
     * Calculate and scale the coefficients by 1/2 in
     * each iteration for a final scaling factor of Q25
     */
    for (i = 2; i < LPC_ORDER / 2; i++) {
        f1[i + 1] = f1[i - 1] + MULL2(f1[i], lpc[2 * i]);
        f2[i + 1] = f2[i - 1] + MULL2(f2[i], lpc[2 * i + 1]);

        for (j = i; j >= 2; j--) {
            f1[j] = MULL2(f1[j - 1], lpc[2 * i]) +
                    (f1[j] >> 1) + (f1[j - 2] >> 1);
            f2[j] = MULL2(f2[j - 1], lpc[2 * i + 1]) +
                    (f2[j] >> 1) + (f2[j - 2] >> 1);
        }

        f1[0] >>= 1;
        f2[0] >>= 1;
        f1[1] = ((lpc[2 * i]     << 16 >> i) + f1[1]) >> 1;
        f2[1] = ((lpc[2 * i + 1] << 16 >> i) + f2[1]) >> 1;
    }

    /* Convert polynomial coefficients to LPC coefficients */
    for (i = 0; i < LPC_ORDER / 2; i++) {
        int64_t ff1 = f1[i + 1] + f1[i];
        int64_t ff2 = f2[i + 1] - f2[i];

        lpc[i] = av_clipl_int32(((ff1 + ff2) << 3) + (1 << 15)) >> 16;
        lpc[LPC_ORDER - i - 1] = av_clipl_int32(((ff1 - ff2) << 3) +
                                                (1 << 15)) >> 16;
    }
}

/**
 * Quantize LSP frequencies by interpolation and convert them to
 * the corresponding LPC coefficients.
 *
 * @param lpc      buffer for LPC coefficients
 * @param cur_lsp  the current LSP vector
 * @param prev_lsp the previous LSP vector
 */
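/* Each of the four subframes gets its own LPC set: the previous and current
 * LSP vectors are blended with weights of 0.25, 0.5 and 0.75 on the current
 * vector (Q14 values 4096/8192/12288 below), the last subframe simply copies
 * it, and lsp2lpc() is then run once per subframe. */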
static void lsp_interpolate(int16_t *lpc, int16_t *cur_lsp, int16_t *prev_lsp)
{
    int i;
    int16_t *lpc_ptr = lpc;

    /* cur_lsp * 0.25 + prev_lsp * 0.75 */
    ff_acelp_weighted_vector_sum(lpc, cur_lsp, prev_lsp,
                                 4096, 12288, 1 << 13, 14, LPC_ORDER);
    ff_acelp_weighted_vector_sum(lpc + LPC_ORDER, cur_lsp, prev_lsp,
                                 8192, 8192, 1 << 13, 14, LPC_ORDER);
    ff_acelp_weighted_vector_sum(lpc + 2 * LPC_ORDER, cur_lsp, prev_lsp,
                                 12288, 4096, 1 << 13, 14, LPC_ORDER);
    memcpy(lpc + 3 * LPC_ORDER, cur_lsp, LPC_ORDER * sizeof(*lpc));

    for (i = 0; i < SUBFRAMES; i++) {
        lsp2lpc(lpc_ptr);
        lpc_ptr += LPC_ORDER;
    }
}

/**
 * Generate a train of dirac functions with period as pitch lag.
 */
static void gen_dirac_train(int16_t *buf, int pitch_lag)
{
    int16_t vector[SUBFRAME_LEN];
    int i, j;

    memcpy(vector, buf, SUBFRAME_LEN * sizeof(*vector));
    for (i = pitch_lag; i < SUBFRAME_LEN; i += pitch_lag) {
        for (j = 0; j < SUBFRAME_LEN - i; j++)
            buf[i + j] += vector[j];
    }
}

/**
 * Generate fixed codebook excitation vector.
 *
 * @param vector    decoded excitation vector
 * @param subfrm    current subframe
 * @param cur_rate  current bitrate
 * @param pitch_lag closed loop pitch lag
 * @param index     current subframe index
 */
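/*
 * At 6.3 kbit/s the pulse positions come from a combinatorial (MP-MLQ) index
 * walked through combinatorial_table[]; when the pitch lag is short the pulse
 * pattern is additionally repeated as a dirac train. At 5.3 kbit/s four
 * signed pulses are placed on an interleaved grid and a pitch-sharpening term
 * (beta, lag) reinforces the harmonic components.
 */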
static void gen_fcb_excitation(int16_t *vector, G723_1_Subframe *subfrm,
                               enum Rate cur_rate, int pitch_lag, int index)
{
    int temp, i, j;

    memset(vector, 0, SUBFRAME_LEN * sizeof(*vector));

    if (cur_rate == RATE_6300) {
        if (subfrm->pulse_pos >= max_pos[index])
            return;

        /* Decode amplitudes and positions */
        j    = PULSE_MAX - pulses[index];
        temp = subfrm->pulse_pos;
        for (i = 0; i < SUBFRAME_LEN / GRID_SIZE; i++) {
            temp -= combinatorial_table[j][i];
            if (temp >= 0)
                continue;
            temp += combinatorial_table[j++][i];
            if (subfrm->pulse_sign & (1 << (PULSE_MAX - j))) {
                vector[subfrm->grid_index + GRID_SIZE * i] =
                    -fixed_cb_gain[subfrm->amp_index];
            } else {
                vector[subfrm->grid_index + GRID_SIZE * i] =
                    fixed_cb_gain[subfrm->amp_index];
            }
            if (j == PULSE_MAX)
                break;
        }
        if (subfrm->dirac_train == 1)
            gen_dirac_train(vector, pitch_lag);
    } else { /* 5300 bps */
        int cb_gain  = fixed_cb_gain[subfrm->amp_index];
        int cb_shift = subfrm->grid_index;
        int cb_sign  = subfrm->pulse_sign;
        int cb_pos   = subfrm->pulse_pos;
        int offset, beta, lag;

        for (i = 0; i < 8; i += 2) {
            offset         = ((cb_pos & 7) << 3) + cb_shift + i;
            vector[offset] = (cb_sign & 1) ? cb_gain : -cb_gain;
            cb_pos  >>= 3;
            cb_sign >>= 1;
        }

        /* Enhance harmonic components */
        lag  = pitch_contrib[subfrm->ad_cb_gain << 1] + pitch_lag +
               subfrm->ad_cb_lag - 1;
        beta = pitch_contrib[(subfrm->ad_cb_gain << 1) + 1];

        if (lag < SUBFRAME_LEN - 2) {
            for (i = lag; i < SUBFRAME_LEN; i++)
                vector[i] += beta * vector[i - lag] >> 15;
        }
    }
}

/**
 * Get delayed contribution from the previous excitation vector.
 */
static void get_residual(int16_t *residual, int16_t *prev_excitation, int lag)
{
    int offset = PITCH_MAX - PITCH_ORDER / 2 - lag;
    int i;

    residual[0] = prev_excitation[offset];
    residual[1] = prev_excitation[offset + 1];
    offset += 2;

    for (i = 2; i < SUBFRAME_LEN + PITCH_ORDER - 1; i++)
        residual[i] = prev_excitation[offset + (i - 2) % lag];
}
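/* Saturated dot product: av_sat_dadd32(sum, prod) adds 2 * prod with
 * saturation, so the accumulated value is twice the plain sum of products,
 * as in the reference fixed-point code's multiply-accumulate. */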
static int dot_product(const int16_t *a, const int16_t *b, int length)
{
    int i, sum = 0;

    for (i = 0; i < length; i++) {
        int prod = a[i] * b[i];
        sum = av_sat_dadd32(sum, prod);
    }
    return sum;
}

/**
 * Generate adaptive codebook excitation.
 */
static void gen_acb_excitation(int16_t *vector, int16_t *prev_excitation,
                               int pitch_lag, G723_1_Subframe *subfrm,
                               enum Rate cur_rate)
{
    int16_t residual[SUBFRAME_LEN + PITCH_ORDER - 1];
    const int16_t *cb_ptr;
    int lag = pitch_lag + subfrm->ad_cb_lag - 1;

    int i;
    int sum;

    get_residual(residual, prev_excitation, lag);

    /* Select quantization table */
    if (cur_rate == RATE_6300 && pitch_lag < SUBFRAME_LEN - 2)
        cb_ptr = adaptive_cb_gain85;
    else
        cb_ptr = adaptive_cb_gain170;

    /* Calculate adaptive vector */
    cb_ptr += subfrm->ad_cb_gain * 20;
    for (i = 0; i < SUBFRAME_LEN; i++) {
        sum = dot_product(residual + i, cb_ptr, PITCH_ORDER);
        vector[i] = av_sat_dadd32(1 << 15, sum) >> 16;
    }
}

/**
 * Estimate maximum auto-correlation around pitch lag.
 *
 * @param buf       buffer with offset applied
 * @param offset    offset of the excitation vector
 * @param ccr_max   pointer to the maximum auto-correlation
 * @param pitch_lag decoded pitch lag
 * @param length    length of autocorrelation
 * @param dir       forward lag(1) / backward lag(-1)
 */
static int autocorr_max(const int16_t *buf, int offset, int *ccr_max,
                        int pitch_lag, int length, int dir)
{
    int limit, ccr, lag = 0;
    int i;

    pitch_lag = FFMIN(PITCH_MAX - 3, pitch_lag);
    if (dir > 0)
        limit = FFMIN(FRAME_LEN + PITCH_MAX - offset - length, pitch_lag + 3);
    else
        limit = pitch_lag + 3;

    for (i = pitch_lag - 3; i <= limit; i++) {
        ccr = dot_product(buf, buf + dir * i, length);

        if (ccr > *ccr_max) {
            *ccr_max = ccr;
            lag = i;
        }
    }
    return lag;
}

/**
 * Calculate pitch postfilter optimal and scaling gains.
 *
 * @param lag      pitch postfilter forward/backward lag
 * @param ppf      pitch postfilter parameters
 * @param cur_rate current bitrate
 * @param tgt_eng  target energy
 * @param ccr      cross-correlation
 * @param res_eng  residual energy
 */
static void comp_ppf_gains(int lag, PPFParam *ppf, enum Rate cur_rate,
                           int tgt_eng, int ccr, int res_eng)
{
    int pf_residual;     /* square of postfiltered residual */
    int temp1, temp2;

    ppf->index = lag;

    temp1 = tgt_eng * res_eng >> 1;
    temp2 = ccr * ccr << 1;

    if (temp2 > temp1) {
        if (ccr >= res_eng) {
            ppf->opt_gain = ppf_gain_weight[cur_rate];
        } else {
            ppf->opt_gain = (ccr << 15) / res_eng *
                            ppf_gain_weight[cur_rate] >> 15;
        }
        /* pf_res^2 = tgt_eng + 2*ccr*gain + res_eng*gain^2 */
        temp1       = (tgt_eng << 15) + (ccr * ppf->opt_gain << 1);
        temp2       = (ppf->opt_gain * ppf->opt_gain >> 15) * res_eng;
        pf_residual = av_sat_add32(temp1, temp2 + (1 << 15)) >> 16;

        if (tgt_eng >= pf_residual << 1) {
            temp1 = 0x7fff;
        } else {
            temp1 = (tgt_eng << 14) / pf_residual;
        }

        /* scaling_gain = sqrt(tgt_eng/pf_res^2) */
        ppf->sc_gain = square_root(temp1 << 16);
    } else {
        ppf->opt_gain = 0;
        ppf->sc_gain  = 0x7fff;
    }

    ppf->opt_gain = av_clip_int16(ppf->opt_gain * ppf->sc_gain >> 15);
}

/**
 * Calculate pitch postfilter parameters.
 *
 * @param p         the context
 * @param offset    offset of the excitation vector
 * @param pitch_lag decoded pitch lag
 * @param ppf       pitch postfilter parameters
 * @param cur_rate  current bitrate
 */
static void comp_ppf_coeff(G723_1_Context *p, int offset, int pitch_lag,
                           PPFParam *ppf, enum Rate cur_rate)
{
    int16_t scale;
    int i;
    int temp1, temp2;

    /*
     * 0 - target energy
     * 1 - forward cross-correlation
     * 2 - forward residual energy
     * 3 - backward cross-correlation
     * 4 - backward residual energy
     */
    int energy[5] = {0, 0, 0, 0, 0};
    int16_t *buf  = p->audio + LPC_ORDER + offset;
    int fwd_lag   = autocorr_max(buf, offset, &energy[1], pitch_lag,
                                 SUBFRAME_LEN, 1);
    int back_lag  = autocorr_max(buf, offset, &energy[3], pitch_lag,
                                 SUBFRAME_LEN, -1);

    ppf->index    = 0;
    ppf->opt_gain = 0;
    ppf->sc_gain  = 0x7fff;

    /* Case 0, Section 3.6 */
    if (!back_lag && !fwd_lag)
        return;

    /* Compute target energy */
    energy[0] = dot_product(buf, buf, SUBFRAME_LEN);

    /* Compute forward residual energy */
    if (fwd_lag)
        energy[2] = dot_product(buf + fwd_lag, buf + fwd_lag, SUBFRAME_LEN);

    /* Compute backward residual energy */
    if (back_lag)
        energy[4] = dot_product(buf - back_lag, buf - back_lag, SUBFRAME_LEN);

    /* Normalize and shorten */
    temp1 = 0;
    for (i = 0; i < 5; i++)
        temp1 = FFMAX(energy[i], temp1);

    scale = normalize_bits(temp1, 31);
    for (i = 0; i < 5; i++)
        energy[i] = (energy[i] << scale) >> 16;

    if (fwd_lag && !back_lag) {  /* Case 1 */
        comp_ppf_gains(fwd_lag, ppf, cur_rate, energy[0], energy[1],
                       energy[2]);
    } else if (!fwd_lag) {       /* Case 2 */
        comp_ppf_gains(-back_lag, ppf, cur_rate, energy[0], energy[3],
                       energy[4]);
    } else {                     /* Case 3 */
        /*
         * Select the largest of energy[1]^2/energy[2]
         * and energy[3]^2/energy[4]
         */
        temp1 = energy[4] * ((energy[1] * energy[1] + (1 << 14)) >> 15);
        temp2 = energy[2] * ((energy[3] * energy[3] + (1 << 14)) >> 15);
        if (temp1 >= temp2) {
            comp_ppf_gains(fwd_lag, ppf, cur_rate, energy[0], energy[1],
                           energy[2]);
        } else {
            comp_ppf_gains(-back_lag, ppf, cur_rate, energy[0], energy[3],
                           energy[4]);
        }
    }
}

/**
 * Classify frames as voiced/unvoiced.
 *
 * @param p         the context
 * @param pitch_lag decoded pitch_lag
 * @param exc_eng   excitation energy estimation
 * @param scale     scaling factor of exc_eng
 *
 * @return residual interpolation index if voiced, 0 otherwise
 */
static int comp_interp_index(G723_1_Context *p, int pitch_lag,
                             int *exc_eng, int *scale)
{
    int offset = PITCH_MAX + 2 * SUBFRAME_LEN;
    int16_t *buf = p->audio + LPC_ORDER;

    int index, ccr, tgt_eng, best_eng, temp;

    *scale = scale_vector(buf, p->excitation, FRAME_LEN + PITCH_MAX);
    buf   += offset;

    /* Compute maximum backward cross-correlation */
    ccr   = 0;
    index = autocorr_max(buf, offset, &ccr, pitch_lag, SUBFRAME_LEN * 2, -1);
    ccr   = av_sat_add32(ccr, 1 << 15) >> 16;

    /* Compute target energy */
    tgt_eng  = dot_product(buf, buf, SUBFRAME_LEN * 2);
    *exc_eng = av_sat_add32(tgt_eng, 1 << 15) >> 16;

    if (ccr <= 0)
        return 0;

    /* Compute best energy */
    best_eng = dot_product(buf - index, buf - index, SUBFRAME_LEN * 2);
    best_eng = av_sat_add32(best_eng, 1 << 15) >> 16;

    temp = best_eng * *exc_eng >> 3;

    if (temp < ccr * ccr)
        return index;
    else
        return 0;
}
/**
 * Perform residual interpolation based on frame classification.
 *
 * @param buf   decoded excitation vector
 * @param out   output vector
 * @param lag   decoded pitch lag
 * @param gain  interpolated gain
 * @param rseed seed for random number generator
 */
static void residual_interp(int16_t *buf, int16_t *out, int lag,
                            int gain, int *rseed)
{
    int i;

    if (lag) { /* Voiced */
        int16_t *vector_ptr = buf + PITCH_MAX;
        /* Attenuate */
        for (i = 0; i < lag; i++)
            out[i] = vector_ptr[i - lag] * 3 >> 2;
        av_memcpy_backptr((uint8_t*)(out + lag), lag * sizeof(*out),
                          (FRAME_LEN - lag) * sizeof(*out));
    } else {  /* Unvoiced */
        for (i = 0; i < FRAME_LEN; i++) {
            *rseed = *rseed * 521 + 259;
            out[i] = gain * *rseed >> 15;
        }
        memset(buf, 0, (FRAME_LEN + PITCH_MAX) * sizeof(*buf));
    }
}

/**
 * Perform IIR filtering.
 *
 * @param fir_coef FIR coefficients
 * @param iir_coef IIR coefficients
 * @param src      source vector
 * @param dest     destination vector
 */
static inline void iir_filter(int16_t *fir_coef, int16_t *iir_coef,
                              int16_t *src, int *dest)
{
    int m, n;

    for (m = 0; m < SUBFRAME_LEN; m++) {
        int64_t filter = 0;
        for (n = 1; n <= LPC_ORDER; n++) {
            filter -= fir_coef[n - 1] * src[m - n] -
                      iir_coef[n - 1] * (dest[m - n] >> 16);
        }

        dest[m] = av_clipl_int32((src[m] << 16) + (filter << 3) + (1 << 15));
    }
}

/**
 * Adjust gain of postfiltered signal.
 *
 * @param p      the context
 * @param buf    postfiltered output vector
 * @param energy input energy coefficient
 */
static void gain_scale(G723_1_Context *p, int16_t * buf, int energy)
{
    int num, denom, gain, bits1, bits2;
    int i;

    num   = energy;
    denom = 0;
    for (i = 0; i < SUBFRAME_LEN; i++) {
        int temp = buf[i] >> 2;
        temp *= temp;
        denom = av_sat_dadd32(denom, temp);
    }

    if (num && denom) {
        bits1 = normalize_bits(num,   31);
        bits2 = normalize_bits(denom, 31);
        num   = num << bits1 >> 1;
        denom <<= bits2;

        bits2 = 5 + bits1 - bits2;
        bits2 = FFMAX(0, bits2);

        gain = (num >> 1) / (denom >> 16);
        gain = square_root(gain << 16 >> bits2);
    } else {
        gain = 1 << 12;
    }

    for (i = 0; i < SUBFRAME_LEN; i++) {
        p->pf_gain = (15 * p->pf_gain + gain + (1 << 3)) >> 4;
        buf[i]     = av_clip_int16((buf[i] * (p->pf_gain + (p->pf_gain >> 4)) +
                                   (1 << 10)) >> 11);
    }
}

/**
 * Perform formant filtering.
 *
 * @param p   the context
 * @param lpc quantized lpc coefficients
 * @param buf input buffer
 * @param dst output buffer
 */
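/*
 * Short-term pole/zero postfilter: the numerator and denominator coefficients
 * are weighted versions of the quantized LPC (postfilter_tbl), applied per
 * subframe by iir_filter(). A first-order tilt-compensation term driven by
 * the smoothed reflection coefficient and an adaptive gain correction
 * (gain_scale) then restore the spectral balance and energy of the signal.
 */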
static void formant_postfilter(G723_1_Context *p, int16_t *lpc,
                               int16_t *buf, int16_t *dst)
{
    int16_t filter_coef[2][LPC_ORDER];
    int filter_signal[LPC_ORDER + FRAME_LEN], *signal_ptr;
    int i, j, k;

    memcpy(buf, p->fir_mem, LPC_ORDER * sizeof(*buf));
    memcpy(filter_signal, p->iir_mem, LPC_ORDER * sizeof(*filter_signal));

    for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
        for (k = 0; k < LPC_ORDER; k++) {
            filter_coef[0][k] = (-lpc[k] * postfilter_tbl[0][k] +
                                 (1 << 14)) >> 15;
            filter_coef[1][k] = (-lpc[k] * postfilter_tbl[1][k] +
                                 (1 << 14)) >> 15;
        }
        iir_filter(filter_coef[0], filter_coef[1], buf + i,
                   filter_signal + i);
        lpc += LPC_ORDER;
    }

    memcpy(p->fir_mem, buf + FRAME_LEN, LPC_ORDER * sizeof(*p->fir_mem));
    memcpy(p->iir_mem, filter_signal + FRAME_LEN,
           LPC_ORDER * sizeof(*p->iir_mem));

    buf        += LPC_ORDER;
    signal_ptr  = filter_signal + LPC_ORDER;
    for (i = 0; i < SUBFRAMES; i++) {
        int temp;
        int auto_corr[2];
        int scale, energy;

        /* Normalize */
        scale = scale_vector(dst, buf, SUBFRAME_LEN);

        /* Compute auto correlation coefficients */
        auto_corr[0] = dot_product(dst, dst + 1, SUBFRAME_LEN - 1);
        auto_corr[1] = dot_product(dst, dst,     SUBFRAME_LEN);

        /* Compute reflection coefficient */
        temp = auto_corr[1] >> 16;
        if (temp) {
            temp = (auto_corr[0] >> 2) / temp;
        }
        p->reflection_coef = (3 * p->reflection_coef + temp + 2) >> 2;
        temp = -p->reflection_coef >> 1 & ~3;

        /* Compensation filter */
        for (j = 0; j < SUBFRAME_LEN; j++) {
            dst[j] = av_sat_dadd32(signal_ptr[j],
                                   (signal_ptr[j - 1] >> 16) * temp) >> 16;
        }

        /* Compute normalized signal energy */
        temp = 2 * scale + 4;
        if (temp < 0) {
            energy = av_clipl_int32((int64_t)auto_corr[1] << -temp);
        } else
            energy = auto_corr[1] >> temp;

        gain_scale(p, dst, energy);

        buf        += SUBFRAME_LEN;
        signal_ptr += SUBFRAME_LEN;
        dst        += SUBFRAME_LEN;
    }
}

static int sid_gain_to_lsp_index(int gain)
{
    if (gain < 0x10)
        return gain << 6;
    else if (gain < 0x20)
        return gain - 8 << 7;
    else
        return gain - 20 << 8;
}
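/* 16-bit linear congruential generator (state = state * 521 + 259 mod 2^16)
 * used by the comfort noise generator; the 15-bit state is scaled by 'base'
 * to give a pseudo-random value in [0, base). */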
static inline int cng_rand(int *state, int base)
{
    *state = (*state * 521 + 259) & 0xFFFF;
    return (*state & 0x7FFF) * base >> 15;
}

static int estimate_sid_gain(G723_1_Context *p)
{
    int i, shift, seg, seg2, t, val, val_add, x, y;

    shift = 16 - p->cur_gain * 2;
    if (shift > 0)
        t = p->sid_gain << shift;
    else
        t = p->sid_gain >> -shift;
    x = t * cng_filt[0] >> 16;

    if (x >= cng_bseg[2])
        return 0x3F;

    if (x >= cng_bseg[1]) {
        shift = 4;
        seg   = 3;
    } else {
        shift = 3;
        seg   = (x >= cng_bseg[0]);
    }
    seg2 = FFMIN(seg, 3);

    val     = 1 << shift;
    val_add = val >> 1;
    for (i = 0; i < shift; i++) {
        t = seg * 32 + (val << seg2);
        t *= t;
        if (x >= t)
            val += val_add;
        else
            val -= val_add;
        val_add >>= 1;
    }

    t = seg * 32 + (val << seg2);
    y = t * t - x;
    if (y <= 0) {
        t   = seg * 32 + (val + 1 << seg2);
        t   = t * t - x;
        val = (seg2 - 1 << 4) + val;
        if (t >= y)
            val++;
    } else {
        t   = seg * 32 + (val - 1 << seg2);
        t   = t * t - x;
        val = (seg2 - 1 << 4) + val;
        if (t >= y)
            val--;
    }

    return val;
}
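/*
 * Comfort noise generation for SID and untransmitted frames: pitch lags,
 * adaptive codebook gains and pulse positions/signs are drawn from the CNG
 * random generator, and the pulse amplitude x is obtained by solving a
 * quadratic (b0, c, delta below) so that the subframe energy roughly matches
 * the current CNG gain.
 */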
static void generate_noise(G723_1_Context *p)
{
    int i, j, idx, t;
    int off[SUBFRAMES];
    int signs[SUBFRAMES / 2 * 11], pos[SUBFRAMES / 2 * 11];
    int tmp[SUBFRAME_LEN * 2];
    int16_t *vector_ptr;
    int64_t sum;
    int b0, c, delta, x, shift;

    p->pitch_lag[0] = cng_rand(&p->cng_random_seed, 21) + 123;
    p->pitch_lag[1] = cng_rand(&p->cng_random_seed, 19) + 123;

    for (i = 0; i < SUBFRAMES; i++) {
        p->subframe[i].ad_cb_gain = cng_rand(&p->cng_random_seed, 50) + 1;
        p->subframe[i].ad_cb_lag  = cng_adaptive_cb_lag[i];
    }

    for (i = 0; i < SUBFRAMES / 2; i++) {
        t = cng_rand(&p->cng_random_seed, 1 << 13);
        off[i * 2]     =  t       & 1;
        off[i * 2 + 1] = ((t >> 1) & 1) + SUBFRAME_LEN;
        t >>= 2;

        for (j = 0; j < 11; j++) {
            signs[i * 11 + j] = (t & 1) * 2 - 1 << 14;
            t >>= 1;
        }
    }

    idx = 0;
    for (i = 0; i < SUBFRAMES; i++) {
        for (j = 0; j < SUBFRAME_LEN / 2; j++)
            tmp[j] = j;
        t = SUBFRAME_LEN / 2;
        for (j = 0; j < pulses[i]; j++, idx++) {
            int idx2 = cng_rand(&p->cng_random_seed, t);

            pos[idx]  = tmp[idx2] * 2 + off[i];
            tmp[idx2] = tmp[--t];
        }
    }

    vector_ptr = p->audio + LPC_ORDER;
    memcpy(vector_ptr, p->prev_excitation,
           PITCH_MAX * sizeof(*p->excitation));
    for (i = 0; i < SUBFRAMES; i += 2) {
        gen_acb_excitation(vector_ptr, vector_ptr,
                           p->pitch_lag[i >> 1], &p->subframe[i],
                           p->cur_rate);
        gen_acb_excitation(vector_ptr + SUBFRAME_LEN,
                           vector_ptr + SUBFRAME_LEN,
                           p->pitch_lag[i >> 1], &p->subframe[i + 1],
                           p->cur_rate);

        t = 0;
        for (j = 0; j < SUBFRAME_LEN * 2; j++)
            t |= FFABS(vector_ptr[j]);
        t = FFMIN(t, 0x7FFF);
        if (!t) {
            shift = 0;
        } else {
            shift = -10 + av_log2(t);
            if (shift < -2)
                shift = -2;
        }
        sum = 0;
        if (shift < 0) {
            for (j = 0; j < SUBFRAME_LEN * 2; j++) {
                t      = vector_ptr[j] << -shift;
                sum   += t * t;
                tmp[j] = t;
            }
        } else {
            for (j = 0; j < SUBFRAME_LEN * 2; j++) {
                t      = vector_ptr[j] >> shift;
                sum   += t * t;
                tmp[j] = t;
            }
        }

        b0 = 0;
        for (j = 0; j < 11; j++)
            b0 += tmp[pos[(i / 2) * 11 + j]] * signs[(i / 2) * 11 + j];
        b0 = b0 * 2 * 2979LL + (1 << 29) >> 30; // approximated division by 11

        c = p->cur_gain * (p->cur_gain * SUBFRAME_LEN >> 5);
        if (shift * 2 + 3 >= 0)
            c >>= shift * 2 + 3;
        else
            c <<= -(shift * 2 + 3);
        c = (av_clipl_int32(sum << 1) - c) * 2979LL >> 15;

        delta = b0 * b0 * 2 - c;
        if (delta <= 0) {
            x = -b0;
        } else {
            delta = square_root(delta);
            x     = delta - b0;
            t     = delta + b0;
            if (FFABS(t) < FFABS(x))
                x = -t;
        }
        shift++;
        if (shift < 0)
            x >>= -shift;
        else
            x <<= shift;
        x = av_clip(x, -10000, 10000);

        for (j = 0; j < 11; j++) {
            idx = (i / 2) * 11 + j;
            vector_ptr[pos[idx]] = av_clip_int16(vector_ptr[pos[idx]] +
                                                 (x * signs[idx] >> 15));
        }

        /* copy decoded data to serve as a history for the next decoded subframes */
        memcpy(vector_ptr + PITCH_MAX, vector_ptr,
               sizeof(*vector_ptr) * SUBFRAME_LEN * 2);
        vector_ptr += SUBFRAME_LEN * 2;
    }

    /* Save the excitation for the next frame */
    memcpy(p->prev_excitation, p->audio + LPC_ORDER + FRAME_LEN,
           PITCH_MAX * sizeof(*p->excitation));
}
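/*
 * Main decode entry point: unpack_bitstream() fills the subframe parameters,
 * active frames rebuild the excitation from the fixed and adaptive codebooks
 * (with optional pitch postfiltering), erased frames are concealed by
 * residual interpolation, and SID/untransmitted frames fall back to comfort
 * noise. The excitation is then run through the LP synthesis filter and, if
 * enabled, the formant postfilter.
 */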
static int g723_1_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
{
    G723_1_Context *p  = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    int dec_mode       = buf[0] & 3;

    PPFParam ppf[SUBFRAMES];
    int16_t cur_lsp[LPC_ORDER];
    int16_t lpc[SUBFRAMES * LPC_ORDER];
    int16_t acb_vector[SUBFRAME_LEN];
    int16_t *out;
    int bad_frame = 0, i, j, ret;
    int16_t *audio = p->audio;

    if (buf_size < frame_size[dec_mode]) {
        if (buf_size)
            av_log(avctx, AV_LOG_WARNING,
                   "Expected %d bytes, got %d - skipping packet\n",
                   frame_size[dec_mode], buf_size);
        *got_frame_ptr = 0;
        return buf_size;
    }

    if (unpack_bitstream(p, buf, buf_size) < 0) {
        bad_frame = 1;
        if (p->past_frame_type == ACTIVE_FRAME)
            p->cur_frame_type = ACTIVE_FRAME;
        else
            p->cur_frame_type = UNTRANSMITTED_FRAME;
    }

    p->frame.nb_samples = FRAME_LEN;
    if ((ret = ff_get_buffer(avctx, &p->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    out = (int16_t *)p->frame.data[0];

    if (p->cur_frame_type == ACTIVE_FRAME) {
        if (!bad_frame)
            p->erased_frames = 0;
        else if (p->erased_frames != 3)
            p->erased_frames++;

        inverse_quant(cur_lsp, p->prev_lsp, p->lsp_index, bad_frame);
        lsp_interpolate(lpc, cur_lsp, p->prev_lsp);

        /* Save the lsp_vector for the next frame */
        memcpy(p->prev_lsp, cur_lsp, LPC_ORDER * sizeof(*p->prev_lsp));

        /* Generate the excitation for the frame */
        memcpy(p->excitation, p->prev_excitation,
               PITCH_MAX * sizeof(*p->excitation));
        if (!p->erased_frames) {
            int16_t *vector_ptr = p->excitation + PITCH_MAX;

            /* Update interpolation gain memory */
            p->interp_gain = fixed_cb_gain[(p->subframe[2].amp_index +
                                            p->subframe[3].amp_index) >> 1];
            for (i = 0; i < SUBFRAMES; i++) {
                gen_fcb_excitation(vector_ptr, &p->subframe[i], p->cur_rate,
                                   p->pitch_lag[i >> 1], i);
                gen_acb_excitation(acb_vector, &p->excitation[SUBFRAME_LEN * i],
                                   p->pitch_lag[i >> 1], &p->subframe[i],
                                   p->cur_rate);
                /* Get the total excitation */
                for (j = 0; j < SUBFRAME_LEN; j++) {
                    int v = av_clip_int16(vector_ptr[j] << 1);
                    vector_ptr[j] = av_clip_int16(v + acb_vector[j]);
                }
                vector_ptr += SUBFRAME_LEN;
            }

            vector_ptr = p->excitation + PITCH_MAX;

            p->interp_index = comp_interp_index(p, p->pitch_lag[1],
                                                &p->sid_gain, &p->cur_gain);

            /* Perform pitch postfiltering */
            if (p->postfilter) {
                i = PITCH_MAX;
                for (j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
                    comp_ppf_coeff(p, i, p->pitch_lag[j >> 1],
                                   ppf + j, p->cur_rate);

                for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
                    ff_acelp_weighted_vector_sum(p->audio + LPC_ORDER + i,
                                                 vector_ptr + i,
                                                 vector_ptr + i + ppf[j].index,
                                                 ppf[j].sc_gain,
                                                 ppf[j].opt_gain,
                                                 1 << 14, 15, SUBFRAME_LEN);
            } else {
                audio = vector_ptr - LPC_ORDER;
            }

            /* Save the excitation for the next frame */
            memcpy(p->prev_excitation, p->excitation + FRAME_LEN,
                   PITCH_MAX * sizeof(*p->excitation));
        } else {
            p->interp_gain = (p->interp_gain * 3 + 2) >> 2;
            if (p->erased_frames == 3) {
                /* Mute output */
                memset(p->excitation, 0,
                       (FRAME_LEN + PITCH_MAX) * sizeof(*p->excitation));
                memset(p->prev_excitation, 0,
                       PITCH_MAX * sizeof(*p->excitation));
                memset(p->frame.data[0], 0,
                       (FRAME_LEN + LPC_ORDER) * sizeof(int16_t));
            } else {
                int16_t *buf = p->audio + LPC_ORDER;

                /* Regenerate frame */
                residual_interp(p->excitation, buf, p->interp_index,
                                p->interp_gain, &p->random_seed);

                /* Save the excitation for the next frame */
                memcpy(p->prev_excitation, buf + (FRAME_LEN - PITCH_MAX),
                       PITCH_MAX * sizeof(*p->excitation));
            }
        }
        p->cng_random_seed = CNG_RANDOM_SEED;
    } else {
        if (p->cur_frame_type == SID_FRAME) {
            p->sid_gain = sid_gain_to_lsp_index(p->subframe[0].amp_index);
            inverse_quant(p->sid_lsp, p->prev_lsp, p->lsp_index, 0);
        } else if (p->past_frame_type == ACTIVE_FRAME) {
            p->sid_gain = estimate_sid_gain(p);
        }

        if (p->past_frame_type == ACTIVE_FRAME)
            p->cur_gain = p->sid_gain;
        else
            p->cur_gain = (p->cur_gain * 7 + p->sid_gain) >> 3;
        generate_noise(p);
        lsp_interpolate(lpc, p->sid_lsp, p->prev_lsp);
        /* Save the lsp_vector for the next frame */
        memcpy(p->prev_lsp, p->sid_lsp, LPC_ORDER * sizeof(*p->prev_lsp));
    }

    p->past_frame_type = p->cur_frame_type;

    memcpy(p->audio, p->synth_mem, LPC_ORDER * sizeof(*p->audio));
    for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
        ff_celp_lp_synthesis_filter(p->audio + i, &lpc[j * LPC_ORDER],
                                    audio + i, SUBFRAME_LEN, LPC_ORDER,
                                    0, 1, 1 << 12);
    memcpy(p->synth_mem, p->audio + FRAME_LEN, LPC_ORDER * sizeof(*p->audio));

    if (p->postfilter) {
        formant_postfilter(p, lpc, p->audio, out);
    } else { // if output is not postfiltered it should be scaled by 2
        for (i = 0; i < FRAME_LEN; i++)
            out[i] = av_clip_int16(p->audio[LPC_ORDER + i] << 1);
    }

    *got_frame_ptr   = 1;
    *(AVFrame *)data = p->frame;

    return frame_size[dec_mode];
}
#define OFFSET(x) offsetof(G723_1_Context, x)
#define AD     AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM

static const AVOption options[] = {
    { "postfilter", "postfilter on/off", OFFSET(postfilter), AV_OPT_TYPE_INT,
      { .i64 = 1 }, 0, 1, AD },
    { NULL }
};

static const AVClass g723_1dec_class = {
    .class_name = "G.723.1 decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_g723_1_decoder = {
    .name           = "g723_1",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_G723_1,
    .priv_data_size = sizeof(G723_1_Context),
    .init           = g723_1_decode_init,
    .decode         = g723_1_decode_frame,
    .long_name      = NULL_IF_CONFIG_SMALL("G.723.1"),
    .capabilities   = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
    .priv_class     = &g723_1dec_class,
};