  1. /*
  2. * G.723.1 compatible decoder
  3. * Copyright (c) 2006 Benjamin Larsson
  4. * Copyright (c) 2010 Mohamed Naufal Basheer
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * G.723.1 compatible decoder
  25. */
  26. #define BITSTREAM_READER_LE
  27. #include "libavutil/channel_layout.h"
  28. #include "libavutil/mem.h"
  29. #include "libavutil/opt.h"
  30. #include "avcodec.h"
  31. #include "internal.h"
  32. #include "get_bits.h"
  33. #include "acelp_vectors.h"
  34. #include "celp_filters.h"
  35. #include "celp_math.h"
  36. #include "g723_1_data.h"
  38. #define CNG_RANDOM_SEED 12345
  39. typedef struct g723_1_context {
  40. AVClass *class;
  41. AVFrame frame;
  42. G723_1_Subframe subframe[4];
  43. enum FrameType cur_frame_type;
  44. enum FrameType past_frame_type;
  45. enum Rate cur_rate;
  46. uint8_t lsp_index[LSP_BANDS];
  47. int pitch_lag[2];
  48. int erased_frames;
  49. int16_t prev_lsp[LPC_ORDER];
  50. int16_t sid_lsp[LPC_ORDER];
  51. int16_t prev_excitation[PITCH_MAX];
  52. int16_t excitation[PITCH_MAX + FRAME_LEN + 4];
  53. int16_t synth_mem[LPC_ORDER];
  54. int16_t fir_mem[LPC_ORDER];
  55. int iir_mem[LPC_ORDER];
  56. int random_seed;
  57. int cng_random_seed;
  58. int interp_index;
  59. int interp_gain;
  60. int sid_gain;
  61. int cur_gain;
  62. int reflection_coef;
  63. int pf_gain; ///< formant postfilter
  64. ///< gain scaling unit memory
  65. int postfilter;
  66. int16_t audio[FRAME_LEN + LPC_ORDER + PITCH_MAX + 4];
  67. int16_t prev_data[HALF_FRAME_LEN];
  68. int16_t prev_weight_sig[PITCH_MAX];
  69. int16_t hpf_fir_mem; ///< highpass filter fir
  70. int hpf_iir_mem; ///< and iir memories
  71. int16_t perf_fir_mem[LPC_ORDER]; ///< perceptual filter fir
  72. int16_t perf_iir_mem[LPC_ORDER]; ///< and iir memories
  73. int16_t harmonic_mem[PITCH_MAX];
  74. } G723_1_Context;
  75. static av_cold int g723_1_decode_init(AVCodecContext *avctx)
  76. {
  77. G723_1_Context *p = avctx->priv_data;
  78. avctx->channel_layout = AV_CH_LAYOUT_MONO;
  79. avctx->sample_fmt = AV_SAMPLE_FMT_S16;
  80. avctx->channels = 1;
  81. p->pf_gain = 1 << 12;
  82. avcodec_get_frame_defaults(&p->frame);
  83. avctx->coded_frame = &p->frame;
  84. memcpy(p->prev_lsp, dc_lsp, LPC_ORDER * sizeof(*p->prev_lsp));
  85. memcpy(p->sid_lsp, dc_lsp, LPC_ORDER * sizeof(*p->sid_lsp));
  86. p->cng_random_seed = CNG_RANDOM_SEED;
  87. p->past_frame_type = SID_FRAME;
  88. return 0;
  89. }
  90. /**
  91. * Unpack the frame into parameters.
  92. *
  93. * @param p the context
  94. * @param buf pointer to the input buffer
  95. * @param buf_size size of the input buffer
  96. */
  97. static int unpack_bitstream(G723_1_Context *p, const uint8_t *buf,
  98. int buf_size)
  99. {
  100. GetBitContext gb;
  101. int ad_cb_len;
  102. int temp, info_bits, i;
  103. init_get_bits(&gb, buf, buf_size * 8);
  104. /* Extract frame type and rate info */
  105. info_bits = get_bits(&gb, 2);
  106. if (info_bits == 3) {
  107. p->cur_frame_type = UNTRANSMITTED_FRAME;
  108. return 0;
  109. }
  110. /* Extract 24 bit lsp indices, 8 bit for each band */
  111. p->lsp_index[2] = get_bits(&gb, 8);
  112. p->lsp_index[1] = get_bits(&gb, 8);
  113. p->lsp_index[0] = get_bits(&gb, 8);
  114. if (info_bits == 2) {
  115. p->cur_frame_type = SID_FRAME;
  116. p->subframe[0].amp_index = get_bits(&gb, 6);
  117. return 0;
  118. }
  119. /* Extract the info common to both rates */
  120. p->cur_rate = info_bits ? RATE_5300 : RATE_6300;
  121. p->cur_frame_type = ACTIVE_FRAME;
  122. p->pitch_lag[0] = get_bits(&gb, 7);
  123. if (p->pitch_lag[0] > 123) /* test if forbidden code */
  124. return -1;
  125. p->pitch_lag[0] += PITCH_MIN;
  126. p->subframe[1].ad_cb_lag = get_bits(&gb, 2);
  127. p->pitch_lag[1] = get_bits(&gb, 7);
  128. if (p->pitch_lag[1] > 123)
  129. return -1;
  130. p->pitch_lag[1] += PITCH_MIN;
  131. p->subframe[3].ad_cb_lag = get_bits(&gb, 2);
  132. p->subframe[0].ad_cb_lag = 1;
  133. p->subframe[2].ad_cb_lag = 1;
  134. for (i = 0; i < SUBFRAMES; i++) {
  135. /* Extract combined gain */
  136. temp = get_bits(&gb, 12);
  137. ad_cb_len = 170;
  138. p->subframe[i].dirac_train = 0;
  139. if (p->cur_rate == RATE_6300 && p->pitch_lag[i >> 1] < SUBFRAME_LEN - 2) {
  140. p->subframe[i].dirac_train = temp >> 11;
  141. temp &= 0x7FF;
  142. ad_cb_len = 85;
  143. }
  144. p->subframe[i].ad_cb_gain = FASTDIV(temp, GAIN_LEVELS);
  145. if (p->subframe[i].ad_cb_gain < ad_cb_len) {
  146. p->subframe[i].amp_index = temp - p->subframe[i].ad_cb_gain *
  147. GAIN_LEVELS;
  148. } else {
  149. return -1;
  150. }
  151. }
  152. p->subframe[0].grid_index = get_bits1(&gb);
  153. p->subframe[1].grid_index = get_bits1(&gb);
  154. p->subframe[2].grid_index = get_bits1(&gb);
  155. p->subframe[3].grid_index = get_bits1(&gb);
  156. if (p->cur_rate == RATE_6300) {
  157. skip_bits1(&gb); /* skip reserved bit */
  158. /* Compute pulse_pos index using the 13-bit combined position index */
  159. temp = get_bits(&gb, 13);
  160. p->subframe[0].pulse_pos = temp / 810;
  161. temp -= p->subframe[0].pulse_pos * 810;
  162. p->subframe[1].pulse_pos = FASTDIV(temp, 90);
  163. temp -= p->subframe[1].pulse_pos * 90;
  164. p->subframe[2].pulse_pos = FASTDIV(temp, 9);
  165. p->subframe[3].pulse_pos = temp - p->subframe[2].pulse_pos * 9;
  166. p->subframe[0].pulse_pos = (p->subframe[0].pulse_pos << 16) +
  167. get_bits(&gb, 16);
  168. p->subframe[1].pulse_pos = (p->subframe[1].pulse_pos << 14) +
  169. get_bits(&gb, 14);
  170. p->subframe[2].pulse_pos = (p->subframe[2].pulse_pos << 16) +
  171. get_bits(&gb, 16);
  172. p->subframe[3].pulse_pos = (p->subframe[3].pulse_pos << 14) +
  173. get_bits(&gb, 14);
  174. p->subframe[0].pulse_sign = get_bits(&gb, 6);
  175. p->subframe[1].pulse_sign = get_bits(&gb, 5);
  176. p->subframe[2].pulse_sign = get_bits(&gb, 6);
  177. p->subframe[3].pulse_sign = get_bits(&gb, 5);
  178. } else { /* 5300 bps */
  179. p->subframe[0].pulse_pos = get_bits(&gb, 12);
  180. p->subframe[1].pulse_pos = get_bits(&gb, 12);
  181. p->subframe[2].pulse_pos = get_bits(&gb, 12);
  182. p->subframe[3].pulse_pos = get_bits(&gb, 12);
  183. p->subframe[0].pulse_sign = get_bits(&gb, 4);
  184. p->subframe[1].pulse_sign = get_bits(&gb, 4);
  185. p->subframe[2].pulse_sign = get_bits(&gb, 4);
  186. p->subframe[3].pulse_sign = get_bits(&gb, 4);
  187. }
  188. return 0;
  189. }
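/*
 * Bit budget implied by the reads above (obtained by summing the get_bits()
 * calls, so treat it as a derived sketch rather than a quote from the spec):
 * 6.3 kbit/s active frame: 2 + 24 + 2*(7 + 2) + 4*12 + 4 + 1 + 13 +
 * (16 + 14 + 16 + 14) + (6 + 5 + 6 + 5) = 192 bits = 24 bytes;
 * 5.3 kbit/s active frame: 2 + 24 + 2*(7 + 2) + 4*12 + 4 + 4*12 + 4*4
 * = 160 bits = 20 bytes;
 * SID frame: 2 + 24 + 6 = 32 bits = 4 bytes.
 */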
  190. /**
  191. * Bitexact implementation of sqrt(val/2).
  192. */
  193. static int16_t square_root(unsigned val)
  194. {
  195. av_assert2(!(val & 0x80000000));
  196. return (ff_sqrt(val << 1) >> 1) & (~1);
  197. }
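/*
 * Worked example (assuming ff_sqrt() is the plain integer square root):
 * square_root(0x20000) -> ff_sqrt(0x40000) = 512 -> (512 >> 1) & ~1 = 256,
 * i.e. sqrt(0x20000 / 2) = sqrt(65536) = 256. The final "& ~1" merely forces
 * the result to an even value.
 */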
  198. /**
  199. * Calculate the number of left-shifts required for normalizing the input.
  200. *
  201. * @param num input number
  202. * @param width width of the input, 15 or 31 bits
  203. */
  204. static int normalize_bits(int num, int width)
  205. {
  206. return width - av_log2(num) - 1;
  207. }
  208. #define normalize_bits_int16(num) normalize_bits(num, 15)
  209. #define normalize_bits_int32(num) normalize_bits(num, 31)
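/*
 * Worked example: normalize_bits(0x2000, 15) = 15 - 13 - 1 = 1; one left
 * shift moves the most significant set bit of 0x2000 into bit 14, the
 * highest bit a positive int16_t can hold. The width argument plays the
 * same role for 31-bit (int32_t) inputs.
 */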
  210. /**
  211. * Scale vector contents based on the largest of their absolutes.
  212. */
  213. static int scale_vector(int16_t *dst, const int16_t *vector, int length)
  214. {
  215. int bits, max = 0;
  216. int i;
  217. for (i = 0; i < length; i++)
  218. max |= FFABS(vector[i]);
219. bits = 14 - av_log2_16bit(max);
220. bits = FFMAX(bits, 0);
  221. for (i = 0; i < length; i++)
  222. dst[i] = vector[i] << bits >> 3;
  223. return bits - 3;
  224. }
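/*
 * Example run: with a peak magnitude of 1000 (av_log2_16bit(1000) = 9),
 * bits = 5, so each sample is shifted by a net 5 - 3 = +2 bits and the
 * function returns 2, the scaling the caller can later compensate for.
 */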
  225. /**
  226. * Perform inverse quantization of LSP frequencies.
  227. *
  228. * @param cur_lsp the current LSP vector
  229. * @param prev_lsp the previous LSP vector
  230. * @param lsp_index VQ indices
  231. * @param bad_frame bad frame flag
  232. */
  233. static void inverse_quant(int16_t *cur_lsp, int16_t *prev_lsp,
  234. uint8_t *lsp_index, int bad_frame)
  235. {
  236. int min_dist, pred;
  237. int i, j, temp, stable;
  238. /* Check for frame erasure */
  239. if (!bad_frame) {
  240. min_dist = 0x100;
  241. pred = 12288;
  242. } else {
  243. min_dist = 0x200;
  244. pred = 23552;
  245. lsp_index[0] = lsp_index[1] = lsp_index[2] = 0;
  246. }
  247. /* Get the VQ table entry corresponding to the transmitted index */
  248. cur_lsp[0] = lsp_band0[lsp_index[0]][0];
  249. cur_lsp[1] = lsp_band0[lsp_index[0]][1];
  250. cur_lsp[2] = lsp_band0[lsp_index[0]][2];
  251. cur_lsp[3] = lsp_band1[lsp_index[1]][0];
  252. cur_lsp[4] = lsp_band1[lsp_index[1]][1];
  253. cur_lsp[5] = lsp_band1[lsp_index[1]][2];
  254. cur_lsp[6] = lsp_band2[lsp_index[2]][0];
  255. cur_lsp[7] = lsp_band2[lsp_index[2]][1];
  256. cur_lsp[8] = lsp_band2[lsp_index[2]][2];
  257. cur_lsp[9] = lsp_band2[lsp_index[2]][3];
  258. /* Add predicted vector & DC component to the previously quantized vector */
  259. for (i = 0; i < LPC_ORDER; i++) {
  260. temp = ((prev_lsp[i] - dc_lsp[i]) * pred + (1 << 14)) >> 15;
  261. cur_lsp[i] += dc_lsp[i] + temp;
  262. }
  263. for (i = 0; i < LPC_ORDER; i++) {
  264. cur_lsp[0] = FFMAX(cur_lsp[0], 0x180);
  265. cur_lsp[LPC_ORDER - 1] = FFMIN(cur_lsp[LPC_ORDER - 1], 0x7e00);
  266. /* Stability check */
  267. for (j = 1; j < LPC_ORDER; j++) {
  268. temp = min_dist + cur_lsp[j - 1] - cur_lsp[j];
  269. if (temp > 0) {
  270. temp >>= 1;
  271. cur_lsp[j - 1] -= temp;
  272. cur_lsp[j] += temp;
  273. }
  274. }
  275. stable = 1;
  276. for (j = 1; j < LPC_ORDER; j++) {
  277. temp = cur_lsp[j - 1] + min_dist - cur_lsp[j] - 4;
  278. if (temp > 0) {
  279. stable = 0;
  280. break;
  281. }
  282. }
  283. if (stable)
  284. break;
  285. }
  286. if (!stable)
  287. memcpy(cur_lsp, prev_lsp, LPC_ORDER * sizeof(*cur_lsp));
  288. }
  289. /**
  290. * Bitexact implementation of 2ab scaled by 1/2^16.
  291. *
  292. * @param a 32 bit multiplicand
  293. * @param b 16 bit multiplier
  294. */
  295. #define MULL2(a, b) \
  296. MULL(a,b,15)
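/*
 * MULL(a, b, 15) multiplies with 64-bit intermediate precision and shifts
 * right by 15, so MULL2(a, b) = a*b / 2^15 = 2ab / 2^16 as stated above.
 * Quick sanity check: a = 1 << 28 (1.0 in Q28) and b = 0x4000 (0.5 in Q15)
 * give MULL2(a, b) = 1 << 27, i.e. 0.5 in Q28.
 */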
  297. /**
  298. * Convert LSP frequencies to LPC coefficients.
  299. *
  300. * @param lpc buffer for LPC coefficients
  301. */
  302. static void lsp2lpc(int16_t *lpc)
  303. {
  304. int f1[LPC_ORDER / 2 + 1];
  305. int f2[LPC_ORDER / 2 + 1];
  306. int i, j;
  307. /* Calculate negative cosine */
  308. for (j = 0; j < LPC_ORDER; j++) {
  309. int index = (lpc[j] >> 7) & 0x1FF;
  310. int offset = lpc[j] & 0x7f;
  311. int temp1 = cos_tab[index] << 16;
  312. int temp2 = (cos_tab[index + 1] - cos_tab[index]) *
  313. ((offset << 8) + 0x80) << 1;
  314. lpc[j] = -(av_sat_dadd32(1 << 15, temp1 + temp2) >> 16);
  315. }
  316. /*
  317. * Compute sum and difference polynomial coefficients
  318. * (bitexact alternative to lsp2poly() in lsp.c)
  319. */
  320. /* Initialize with values in Q28 */
  321. f1[0] = 1 << 28;
  322. f1[1] = (lpc[0] << 14) + (lpc[2] << 14);
  323. f1[2] = lpc[0] * lpc[2] + (2 << 28);
  324. f2[0] = 1 << 28;
  325. f2[1] = (lpc[1] << 14) + (lpc[3] << 14);
  326. f2[2] = lpc[1] * lpc[3] + (2 << 28);
  327. /*
  328. * Calculate and scale the coefficients by 1/2 in
  329. * each iteration for a final scaling factor of Q25
  330. */
  331. for (i = 2; i < LPC_ORDER / 2; i++) {
  332. f1[i + 1] = f1[i - 1] + MULL2(f1[i], lpc[2 * i]);
  333. f2[i + 1] = f2[i - 1] + MULL2(f2[i], lpc[2 * i + 1]);
  334. for (j = i; j >= 2; j--) {
  335. f1[j] = MULL2(f1[j - 1], lpc[2 * i]) +
  336. (f1[j] >> 1) + (f1[j - 2] >> 1);
  337. f2[j] = MULL2(f2[j - 1], lpc[2 * i + 1]) +
  338. (f2[j] >> 1) + (f2[j - 2] >> 1);
  339. }
  340. f1[0] >>= 1;
  341. f2[0] >>= 1;
  342. f1[1] = ((lpc[2 * i] << 16 >> i) + f1[1]) >> 1;
  343. f2[1] = ((lpc[2 * i + 1] << 16 >> i) + f2[1]) >> 1;
  344. }
  345. /* Convert polynomial coefficients to LPC coefficients */
  346. for (i = 0; i < LPC_ORDER / 2; i++) {
  347. int64_t ff1 = f1[i + 1] + f1[i];
  348. int64_t ff2 = f2[i + 1] - f2[i];
  349. lpc[i] = av_clipl_int32(((ff1 + ff2) << 3) + (1 << 15)) >> 16;
  350. lpc[LPC_ORDER - i - 1] = av_clipl_int32(((ff1 - ff2) << 3) +
  351. (1 << 15)) >> 16;
  352. }
  353. }
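/*
 * In floating-point terms: after the cosine lookup the lpc[] slots hold
 * -cos(w_i), f1/f2 accumulate the coefficients of the sum and difference
 * polynomials divided by (1 + z^-1) and (1 - z^-1), and the last loop applies
 * the usual LSP-to-LPC recombination, pairing coefficient i with its mirror
 * LPC_ORDER - 1 - i via the sum and difference of ff1 and ff2.
 */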
  354. /**
  355. * Quantize LSP frequencies by interpolation and convert them to
  356. * the corresponding LPC coefficients.
  357. *
  358. * @param lpc buffer for LPC coefficients
  359. * @param cur_lsp the current LSP vector
  360. * @param prev_lsp the previous LSP vector
  361. */
  362. static void lsp_interpolate(int16_t *lpc, int16_t *cur_lsp, int16_t *prev_lsp)
  363. {
  364. int i;
  365. int16_t *lpc_ptr = lpc;
  366. /* cur_lsp * 0.25 + prev_lsp * 0.75 */
  367. ff_acelp_weighted_vector_sum(lpc, cur_lsp, prev_lsp,
  368. 4096, 12288, 1 << 13, 14, LPC_ORDER);
  369. ff_acelp_weighted_vector_sum(lpc + LPC_ORDER, cur_lsp, prev_lsp,
  370. 8192, 8192, 1 << 13, 14, LPC_ORDER);
  371. ff_acelp_weighted_vector_sum(lpc + 2 * LPC_ORDER, cur_lsp, prev_lsp,
  372. 12288, 4096, 1 << 13, 14, LPC_ORDER);
  373. memcpy(lpc + 3 * LPC_ORDER, cur_lsp, LPC_ORDER * sizeof(*lpc));
  374. for (i = 0; i < SUBFRAMES; i++) {
  375. lsp2lpc(lpc_ptr);
  376. lpc_ptr += LPC_ORDER;
  377. }
  378. }
  379. /**
  380. * Generate a train of dirac functions with period as pitch lag.
  381. */
  382. static void gen_dirac_train(int16_t *buf, int pitch_lag)
  383. {
  384. int16_t vector[SUBFRAME_LEN];
  385. int i, j;
  386. memcpy(vector, buf, SUBFRAME_LEN * sizeof(*vector));
  387. for (i = pitch_lag; i < SUBFRAME_LEN; i += pitch_lag) {
  388. for (j = 0; j < SUBFRAME_LEN - i; j++)
  389. buf[i + j] += vector[j];
  390. }
  391. }
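/*
 * Effect in brief: the subframe is overlaid with copies of itself delayed by
 * pitch_lag, 2*pitch_lag, ... so a single excitation pulse becomes a pulse
 * train at the pitch period. With pitch_lag = 20 and SUBFRAME_LEN (60 samples
 * here), samples 20..59 receive a copy of samples 0..39 and samples 40..59 an
 * additional copy of samples 0..19.
 */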
  392. /**
  393. * Generate fixed codebook excitation vector.
  394. *
  395. * @param vector decoded excitation vector
  396. * @param subfrm current subframe
  397. * @param cur_rate current bitrate
  398. * @param pitch_lag closed loop pitch lag
  399. * @param index current subframe index
  400. */
  401. static void gen_fcb_excitation(int16_t *vector, G723_1_Subframe *subfrm,
  402. enum Rate cur_rate, int pitch_lag, int index)
  403. {
  404. int temp, i, j;
  405. memset(vector, 0, SUBFRAME_LEN * sizeof(*vector));
  406. if (cur_rate == RATE_6300) {
  407. if (subfrm->pulse_pos >= max_pos[index])
  408. return;
  409. /* Decode amplitudes and positions */
  410. j = PULSE_MAX - pulses[index];
  411. temp = subfrm->pulse_pos;
  412. for (i = 0; i < SUBFRAME_LEN / GRID_SIZE; i++) {
  413. temp -= combinatorial_table[j][i];
  414. if (temp >= 0)
  415. continue;
  416. temp += combinatorial_table[j++][i];
  417. if (subfrm->pulse_sign & (1 << (PULSE_MAX - j))) {
  418. vector[subfrm->grid_index + GRID_SIZE * i] =
  419. -fixed_cb_gain[subfrm->amp_index];
  420. } else {
  421. vector[subfrm->grid_index + GRID_SIZE * i] =
  422. fixed_cb_gain[subfrm->amp_index];
  423. }
  424. if (j == PULSE_MAX)
  425. break;
  426. }
  427. if (subfrm->dirac_train == 1)
  428. gen_dirac_train(vector, pitch_lag);
  429. } else { /* 5300 bps */
  430. int cb_gain = fixed_cb_gain[subfrm->amp_index];
  431. int cb_shift = subfrm->grid_index;
  432. int cb_sign = subfrm->pulse_sign;
  433. int cb_pos = subfrm->pulse_pos;
  434. int offset, beta, lag;
  435. for (i = 0; i < 8; i += 2) {
  436. offset = ((cb_pos & 7) << 3) + cb_shift + i;
  437. vector[offset] = (cb_sign & 1) ? cb_gain : -cb_gain;
  438. cb_pos >>= 3;
  439. cb_sign >>= 1;
  440. }
  441. /* Enhance harmonic components */
  442. lag = pitch_contrib[subfrm->ad_cb_gain << 1] + pitch_lag +
  443. subfrm->ad_cb_lag - 1;
  444. beta = pitch_contrib[(subfrm->ad_cb_gain << 1) + 1];
  445. if (lag < SUBFRAME_LEN - 2) {
  446. for (i = lag; i < SUBFRAME_LEN; i++)
  447. vector[i] += beta * vector[i - lag] >> 15;
  448. }
  449. }
  450. }
  451. /**
  452. * Get delayed contribution from the previous excitation vector.
  453. */
  454. static void get_residual(int16_t *residual, int16_t *prev_excitation, int lag)
  455. {
  456. int offset = PITCH_MAX - PITCH_ORDER / 2 - lag;
  457. int i;
  458. residual[0] = prev_excitation[offset];
  459. residual[1] = prev_excitation[offset + 1];
  460. offset += 2;
  461. for (i = 2; i < SUBFRAME_LEN + PITCH_ORDER - 1; i++)
  462. residual[i] = prev_excitation[offset + (i - 2) % lag];
  463. }
  464. static int dot_product(const int16_t *a, const int16_t *b, int length)
  465. {
  466. int sum = ff_dot_product(a,b,length);
  467. return av_sat_add32(sum, sum);
  468. }
  469. /**
  470. * Generate adaptive codebook excitation.
  471. */
  472. static void gen_acb_excitation(int16_t *vector, int16_t *prev_excitation,
  473. int pitch_lag, G723_1_Subframe *subfrm,
  474. enum Rate cur_rate)
  475. {
  476. int16_t residual[SUBFRAME_LEN + PITCH_ORDER - 1];
  477. const int16_t *cb_ptr;
  478. int lag = pitch_lag + subfrm->ad_cb_lag - 1;
  479. int i;
  480. int sum;
  481. get_residual(residual, prev_excitation, lag);
  482. /* Select quantization table */
  483. if (cur_rate == RATE_6300 && pitch_lag < SUBFRAME_LEN - 2) {
  484. cb_ptr = adaptive_cb_gain85;
  485. } else
  486. cb_ptr = adaptive_cb_gain170;
  487. /* Calculate adaptive vector */
  488. cb_ptr += subfrm->ad_cb_gain * 20;
  489. for (i = 0; i < SUBFRAME_LEN; i++) {
  490. sum = ff_dot_product(residual + i, cb_ptr, PITCH_ORDER);
  491. vector[i] = av_sat_dadd32(1 << 15, av_sat_add32(sum, sum)) >> 16;
  492. }
  493. }
  494. /**
  495. * Estimate maximum auto-correlation around pitch lag.
  496. *
  497. * @param buf buffer with offset applied
  498. * @param offset offset of the excitation vector
  499. * @param ccr_max pointer to the maximum auto-correlation
  500. * @param pitch_lag decoded pitch lag
  501. * @param length length of autocorrelation
  502. * @param dir forward lag(1) / backward lag(-1)
  503. */
  504. static int autocorr_max(const int16_t *buf, int offset, int *ccr_max,
  505. int pitch_lag, int length, int dir)
  506. {
  507. int limit, ccr, lag = 0;
  508. int i;
  509. pitch_lag = FFMIN(PITCH_MAX - 3, pitch_lag);
  510. if (dir > 0)
  511. limit = FFMIN(FRAME_LEN + PITCH_MAX - offset - length, pitch_lag + 3);
  512. else
  513. limit = pitch_lag + 3;
  514. for (i = pitch_lag - 3; i <= limit; i++) {
  515. ccr = dot_product(buf, buf + dir * i, length);
  516. if (ccr > *ccr_max) {
  517. *ccr_max = ccr;
  518. lag = i;
  519. }
  520. }
  521. return lag;
  522. }
  523. /**
  524. * Calculate pitch postfilter optimal and scaling gains.
  525. *
  526. * @param lag pitch postfilter forward/backward lag
  527. * @param ppf pitch postfilter parameters
  528. * @param cur_rate current bitrate
  529. * @param tgt_eng target energy
  530. * @param ccr cross-correlation
  531. * @param res_eng residual energy
  532. */
  533. static void comp_ppf_gains(int lag, PPFParam *ppf, enum Rate cur_rate,
  534. int tgt_eng, int ccr, int res_eng)
  535. {
  536. int pf_residual; /* square of postfiltered residual */
  537. int temp1, temp2;
  538. ppf->index = lag;
  539. temp1 = tgt_eng * res_eng >> 1;
  540. temp2 = ccr * ccr << 1;
  541. if (temp2 > temp1) {
  542. if (ccr >= res_eng) {
  543. ppf->opt_gain = ppf_gain_weight[cur_rate];
  544. } else {
  545. ppf->opt_gain = (ccr << 15) / res_eng *
  546. ppf_gain_weight[cur_rate] >> 15;
  547. }
  548. /* pf_res^2 = tgt_eng + 2*ccr*gain + res_eng*gain^2 */
  549. temp1 = (tgt_eng << 15) + (ccr * ppf->opt_gain << 1);
  550. temp2 = (ppf->opt_gain * ppf->opt_gain >> 15) * res_eng;
  551. pf_residual = av_sat_add32(temp1, temp2 + (1 << 15)) >> 16;
  552. if (tgt_eng >= pf_residual << 1) {
  553. temp1 = 0x7fff;
  554. } else {
  555. temp1 = (tgt_eng << 14) / pf_residual;
  556. }
  557. /* scaling_gain = sqrt(tgt_eng/pf_res^2) */
  558. ppf->sc_gain = square_root(temp1 << 16);
  559. } else {
  560. ppf->opt_gain = 0;
  561. ppf->sc_gain = 0x7fff;
  562. }
  563. ppf->opt_gain = av_clip_int16(ppf->opt_gain * ppf->sc_gain >> 15);
  564. }
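/*
 * Same computation in floating point, ignoring the fixed-point scaling: the
 * postfilter is enabled roughly when ccr / sqrt(tgt_eng * res_eng) > 0.5;
 * opt_gain is first set to w * min(1, ccr / res_eng) with w the rate-dependent
 * weight, sc_gain = sqrt(tgt_eng / pf_residual) keeps the subframe energy
 * close to the original, and the last line folds sc_gain into opt_gain.
 */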
  565. /**
  566. * Calculate pitch postfilter parameters.
  567. *
  568. * @param p the context
  569. * @param offset offset of the excitation vector
  570. * @param pitch_lag decoded pitch lag
  571. * @param ppf pitch postfilter parameters
  572. * @param cur_rate current bitrate
  573. */
  574. static void comp_ppf_coeff(G723_1_Context *p, int offset, int pitch_lag,
  575. PPFParam *ppf, enum Rate cur_rate)
  576. {
  577. int16_t scale;
  578. int i;
  579. int temp1, temp2;
  580. /*
  581. * 0 - target energy
  582. * 1 - forward cross-correlation
  583. * 2 - forward residual energy
  584. * 3 - backward cross-correlation
  585. * 4 - backward residual energy
  586. */
  587. int energy[5] = {0, 0, 0, 0, 0};
  588. int16_t *buf = p->audio + LPC_ORDER + offset;
  589. int fwd_lag = autocorr_max(buf, offset, &energy[1], pitch_lag,
  590. SUBFRAME_LEN, 1);
  591. int back_lag = autocorr_max(buf, offset, &energy[3], pitch_lag,
  592. SUBFRAME_LEN, -1);
  593. ppf->index = 0;
  594. ppf->opt_gain = 0;
  595. ppf->sc_gain = 0x7fff;
  596. /* Case 0, Section 3.6 */
  597. if (!back_lag && !fwd_lag)
  598. return;
  599. /* Compute target energy */
  600. energy[0] = dot_product(buf, buf, SUBFRAME_LEN);
  601. /* Compute forward residual energy */
  602. if (fwd_lag)
  603. energy[2] = dot_product(buf + fwd_lag, buf + fwd_lag, SUBFRAME_LEN);
  604. /* Compute backward residual energy */
  605. if (back_lag)
  606. energy[4] = dot_product(buf - back_lag, buf - back_lag, SUBFRAME_LEN);
  607. /* Normalize and shorten */
  608. temp1 = 0;
  609. for (i = 0; i < 5; i++)
  610. temp1 = FFMAX(energy[i], temp1);
  611. scale = normalize_bits(temp1, 31);
  612. for (i = 0; i < 5; i++)
  613. energy[i] = (energy[i] << scale) >> 16;
  614. if (fwd_lag && !back_lag) { /* Case 1 */
  615. comp_ppf_gains(fwd_lag, ppf, cur_rate, energy[0], energy[1],
  616. energy[2]);
  617. } else if (!fwd_lag) { /* Case 2 */
  618. comp_ppf_gains(-back_lag, ppf, cur_rate, energy[0], energy[3],
  619. energy[4]);
  620. } else { /* Case 3 */
  621. /*
  622. * Select the largest of energy[1]^2/energy[2]
  623. * and energy[3]^2/energy[4]
  624. */
  625. temp1 = energy[4] * ((energy[1] * energy[1] + (1 << 14)) >> 15);
  626. temp2 = energy[2] * ((energy[3] * energy[3] + (1 << 14)) >> 15);
  627. if (temp1 >= temp2) {
  628. comp_ppf_gains(fwd_lag, ppf, cur_rate, energy[0], energy[1],
  629. energy[2]);
  630. } else {
  631. comp_ppf_gains(-back_lag, ppf, cur_rate, energy[0], energy[3],
  632. energy[4]);
  633. }
  634. }
  635. }
  636. /**
  637. * Classify frames as voiced/unvoiced.
  638. *
  639. * @param p the context
  640. * @param pitch_lag decoded pitch_lag
  641. * @param exc_eng excitation energy estimation
  642. * @param scale scaling factor of exc_eng
  643. *
  644. * @return residual interpolation index if voiced, 0 otherwise
  645. */
  646. static int comp_interp_index(G723_1_Context *p, int pitch_lag,
  647. int *exc_eng, int *scale)
  648. {
  649. int offset = PITCH_MAX + 2 * SUBFRAME_LEN;
  650. int16_t *buf = p->audio + LPC_ORDER;
  651. int index, ccr, tgt_eng, best_eng, temp;
  652. *scale = scale_vector(buf, p->excitation, FRAME_LEN + PITCH_MAX);
  653. buf += offset;
  654. /* Compute maximum backward cross-correlation */
  655. ccr = 0;
  656. index = autocorr_max(buf, offset, &ccr, pitch_lag, SUBFRAME_LEN * 2, -1);
  657. ccr = av_sat_add32(ccr, 1 << 15) >> 16;
  658. /* Compute target energy */
  659. tgt_eng = dot_product(buf, buf, SUBFRAME_LEN * 2);
  660. *exc_eng = av_sat_add32(tgt_eng, 1 << 15) >> 16;
  661. if (ccr <= 0)
  662. return 0;
  663. /* Compute best energy */
  664. best_eng = dot_product(buf - index, buf - index, SUBFRAME_LEN * 2);
  665. best_eng = av_sat_add32(best_eng, 1 << 15) >> 16;
  666. temp = best_eng * *exc_eng >> 3;
  667. if (temp < ccr * ccr) {
  668. return index;
  669. } else
  670. return 0;
  671. }
  672. /**
673. * Perform residual interpolation based on frame classification.
  674. *
  675. * @param buf decoded excitation vector
  676. * @param out output vector
  677. * @param lag decoded pitch lag
  678. * @param gain interpolated gain
  679. * @param rseed seed for random number generator
  680. */
  681. static void residual_interp(int16_t *buf, int16_t *out, int lag,
  682. int gain, int *rseed)
  683. {
  684. int i;
  685. if (lag) { /* Voiced */
  686. int16_t *vector_ptr = buf + PITCH_MAX;
  687. /* Attenuate */
  688. for (i = 0; i < lag; i++)
  689. out[i] = vector_ptr[i - lag] * 3 >> 2;
  690. av_memcpy_backptr((uint8_t*)(out + lag), lag * sizeof(*out),
  691. (FRAME_LEN - lag) * sizeof(*out));
  692. } else { /* Unvoiced */
  693. for (i = 0; i < FRAME_LEN; i++) {
  694. *rseed = *rseed * 521 + 259;
  695. out[i] = gain * *rseed >> 15;
  696. }
  697. memset(buf, 0, (FRAME_LEN + PITCH_MAX) * sizeof(*buf));
  698. }
  699. }
  700. /**
  701. * Perform IIR filtering.
  702. *
  703. * @param fir_coef FIR coefficients
  704. * @param iir_coef IIR coefficients
  705. * @param src source vector
  706. * @param dest destination vector
  707. * @param width width of the output, 16 bits(0) / 32 bits(1)
  708. */
  709. #define iir_filter(fir_coef, iir_coef, src, dest, width)\
  710. {\
  711. int m, n;\
  712. int res_shift = 16 & ~-(width);\
  713. int in_shift = 16 - res_shift;\
  714. \
  715. for (m = 0; m < SUBFRAME_LEN; m++) {\
  716. int64_t filter = 0;\
  717. for (n = 1; n <= LPC_ORDER; n++) {\
  718. filter -= (fir_coef)[n - 1] * (src)[m - n] -\
  719. (iir_coef)[n - 1] * ((dest)[m - n] >> in_shift);\
  720. }\
  721. \
  722. (dest)[m] = av_clipl_int32(((src)[m] << 16) + (filter << 3) +\
  723. (1 << 15)) >> res_shift;\
  724. }\
  725. }
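/*
 * The width argument only selects the precision of dest: width = 1 keeps the
 * destination in 32 bits (res_shift = 0, in_shift = 16), as used by
 * formant_postfilter() below, while width = 0 produces int16_t output
 * (res_shift = 16, in_shift = 0), as used by the encoder's
 * perceptual_filter().
 */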
  726. /**
  727. * Adjust gain of postfiltered signal.
  728. *
  729. * @param p the context
  730. * @param buf postfiltered output vector
  731. * @param energy input energy coefficient
  732. */
  733. static void gain_scale(G723_1_Context *p, int16_t * buf, int energy)
  734. {
  735. int num, denom, gain, bits1, bits2;
  736. int i;
  737. num = energy;
  738. denom = 0;
  739. for (i = 0; i < SUBFRAME_LEN; i++) {
  740. int temp = buf[i] >> 2;
  741. temp *= temp;
  742. denom = av_sat_dadd32(denom, temp);
  743. }
  744. if (num && denom) {
  745. bits1 = normalize_bits(num, 31);
  746. bits2 = normalize_bits(denom, 31);
  747. num = num << bits1 >> 1;
  748. denom <<= bits2;
  749. bits2 = 5 + bits1 - bits2;
  750. bits2 = FFMAX(0, bits2);
  751. gain = (num >> 1) / (denom >> 16);
  752. gain = square_root(gain << 16 >> bits2);
  753. } else {
  754. gain = 1 << 12;
  755. }
  756. for (i = 0; i < SUBFRAME_LEN; i++) {
  757. p->pf_gain = (15 * p->pf_gain + gain + (1 << 3)) >> 4;
  758. buf[i] = av_clip_int16((buf[i] * (p->pf_gain + (p->pf_gain >> 4)) +
  759. (1 << 10)) >> 11);
  760. }
  761. }
  762. /**
  763. * Perform formant filtering.
  764. *
  765. * @param p the context
  766. * @param lpc quantized lpc coefficients
  767. * @param buf input buffer
  768. * @param dst output buffer
  769. */
  770. static void formant_postfilter(G723_1_Context *p, int16_t *lpc,
  771. int16_t *buf, int16_t *dst)
  772. {
  773. int16_t filter_coef[2][LPC_ORDER];
  774. int filter_signal[LPC_ORDER + FRAME_LEN], *signal_ptr;
  775. int i, j, k;
  776. memcpy(buf, p->fir_mem, LPC_ORDER * sizeof(*buf));
  777. memcpy(filter_signal, p->iir_mem, LPC_ORDER * sizeof(*filter_signal));
  778. for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
  779. for (k = 0; k < LPC_ORDER; k++) {
  780. filter_coef[0][k] = (-lpc[k] * postfilter_tbl[0][k] +
  781. (1 << 14)) >> 15;
  782. filter_coef[1][k] = (-lpc[k] * postfilter_tbl[1][k] +
  783. (1 << 14)) >> 15;
  784. }
  785. iir_filter(filter_coef[0], filter_coef[1], buf + i,
  786. filter_signal + i, 1);
  787. lpc += LPC_ORDER;
  788. }
  789. memcpy(p->fir_mem, buf + FRAME_LEN, LPC_ORDER * sizeof(int16_t));
  790. memcpy(p->iir_mem, filter_signal + FRAME_LEN, LPC_ORDER * sizeof(int));
  791. buf += LPC_ORDER;
  792. signal_ptr = filter_signal + LPC_ORDER;
  793. for (i = 0; i < SUBFRAMES; i++) {
  794. int temp;
  795. int auto_corr[2];
  796. int scale, energy;
  797. /* Normalize */
  798. scale = scale_vector(dst, buf, SUBFRAME_LEN);
  799. /* Compute auto correlation coefficients */
  800. auto_corr[0] = dot_product(dst, dst + 1, SUBFRAME_LEN - 1);
  801. auto_corr[1] = dot_product(dst, dst, SUBFRAME_LEN);
  802. /* Compute reflection coefficient */
  803. temp = auto_corr[1] >> 16;
  804. if (temp) {
  805. temp = (auto_corr[0] >> 2) / temp;
  806. }
  807. p->reflection_coef = (3 * p->reflection_coef + temp + 2) >> 2;
  808. temp = -p->reflection_coef >> 1 & ~3;
  809. /* Compensation filter */
  810. for (j = 0; j < SUBFRAME_LEN; j++) {
  811. dst[j] = av_sat_dadd32(signal_ptr[j],
  812. (signal_ptr[j - 1] >> 16) * temp) >> 16;
  813. }
  814. /* Compute normalized signal energy */
  815. temp = 2 * scale + 4;
  816. if (temp < 0) {
  817. energy = av_clipl_int32((int64_t)auto_corr[1] << -temp);
  818. } else
  819. energy = auto_corr[1] >> temp;
  820. gain_scale(p, dst, energy);
  821. buf += SUBFRAME_LEN;
  822. signal_ptr += SUBFRAME_LEN;
  823. dst += SUBFRAME_LEN;
  824. }
  825. }
  826. static int sid_gain_to_lsp_index(int gain)
  827. {
  828. if (gain < 0x10)
  829. return gain << 6;
  830. else if (gain < 0x20)
  831. return gain - 8 << 7;
  832. else
  833. return gain - 20 << 8;
  834. }
  835. static inline int cng_rand(int *state, int base)
  836. {
  837. *state = (*state * 521 + 259) & 0xFFFF;
  838. return (*state & 0x7FFF) * base >> 15;
  839. }
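/*
 * cng_rand() is a small linear congruential generator (state = state * 521 +
 * 259 mod 2^16) whose 15-bit output is rescaled to the range [0, base);
 * e.g. base = 50 yields values 0..49, which generate_noise() then offsets
 * by one to pick an adaptive codebook gain index in 1..50.
 */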
  840. static int estimate_sid_gain(G723_1_Context *p)
  841. {
  842. int i, shift, seg, seg2, t, val, val_add, x, y;
  843. shift = 16 - p->cur_gain * 2;
  844. if (shift > 0)
  845. t = p->sid_gain << shift;
  846. else
  847. t = p->sid_gain >> -shift;
  848. x = t * cng_filt[0] >> 16;
  849. if (x >= cng_bseg[2])
  850. return 0x3F;
  851. if (x >= cng_bseg[1]) {
  852. shift = 4;
  853. seg = 3;
  854. } else {
  855. shift = 3;
  856. seg = (x >= cng_bseg[0]);
  857. }
  858. seg2 = FFMIN(seg, 3);
  859. val = 1 << shift;
  860. val_add = val >> 1;
  861. for (i = 0; i < shift; i++) {
  862. t = seg * 32 + (val << seg2);
  863. t *= t;
  864. if (x >= t)
  865. val += val_add;
  866. else
  867. val -= val_add;
  868. val_add >>= 1;
  869. }
  870. t = seg * 32 + (val << seg2);
  871. y = t * t - x;
  872. if (y <= 0) {
  873. t = seg * 32 + (val + 1 << seg2);
  874. t = t * t - x;
  875. val = (seg2 - 1 << 4) + val;
  876. if (t >= y)
  877. val++;
  878. } else {
  879. t = seg * 32 + (val - 1 << seg2);
  880. t = t * t - x;
  881. val = (seg2 - 1 << 4) + val;
  882. if (t >= y)
  883. val--;
  884. }
  885. return val;
  886. }
  887. static void generate_noise(G723_1_Context *p)
  888. {
  889. int i, j, idx, t;
  890. int off[SUBFRAMES];
  891. int signs[SUBFRAMES / 2 * 11], pos[SUBFRAMES / 2 * 11];
  892. int tmp[SUBFRAME_LEN * 2];
  893. int16_t *vector_ptr;
  894. int64_t sum;
  895. int b0, c, delta, x, shift;
  896. p->pitch_lag[0] = cng_rand(&p->cng_random_seed, 21) + 123;
  897. p->pitch_lag[1] = cng_rand(&p->cng_random_seed, 19) + 123;
  898. for (i = 0; i < SUBFRAMES; i++) {
  899. p->subframe[i].ad_cb_gain = cng_rand(&p->cng_random_seed, 50) + 1;
  900. p->subframe[i].ad_cb_lag = cng_adaptive_cb_lag[i];
  901. }
  902. for (i = 0; i < SUBFRAMES / 2; i++) {
  903. t = cng_rand(&p->cng_random_seed, 1 << 13);
  904. off[i * 2] = t & 1;
  905. off[i * 2 + 1] = ((t >> 1) & 1) + SUBFRAME_LEN;
  906. t >>= 2;
  907. for (j = 0; j < 11; j++) {
  908. signs[i * 11 + j] = (t & 1) * 2 - 1 << 14;
  909. t >>= 1;
  910. }
  911. }
  912. idx = 0;
  913. for (i = 0; i < SUBFRAMES; i++) {
  914. for (j = 0; j < SUBFRAME_LEN / 2; j++)
  915. tmp[j] = j;
  916. t = SUBFRAME_LEN / 2;
  917. for (j = 0; j < pulses[i]; j++, idx++) {
  918. int idx2 = cng_rand(&p->cng_random_seed, t);
  919. pos[idx] = tmp[idx2] * 2 + off[i];
  920. tmp[idx2] = tmp[--t];
  921. }
  922. }
  923. vector_ptr = p->audio + LPC_ORDER;
  924. memcpy(vector_ptr, p->prev_excitation,
  925. PITCH_MAX * sizeof(*p->excitation));
  926. for (i = 0; i < SUBFRAMES; i += 2) {
  927. gen_acb_excitation(vector_ptr, vector_ptr,
  928. p->pitch_lag[i >> 1], &p->subframe[i],
  929. p->cur_rate);
  930. gen_acb_excitation(vector_ptr + SUBFRAME_LEN,
  931. vector_ptr + SUBFRAME_LEN,
  932. p->pitch_lag[i >> 1], &p->subframe[i + 1],
  933. p->cur_rate);
  934. t = 0;
  935. for (j = 0; j < SUBFRAME_LEN * 2; j++)
  936. t |= FFABS(vector_ptr[j]);
  937. t = FFMIN(t, 0x7FFF);
  938. if (!t) {
  939. shift = 0;
  940. } else {
  941. shift = -10 + av_log2(t);
  942. if (shift < -2)
  943. shift = -2;
  944. }
  945. sum = 0;
  946. if (shift < 0) {
  947. for (j = 0; j < SUBFRAME_LEN * 2; j++) {
  948. t = vector_ptr[j] << -shift;
  949. sum += t * t;
  950. tmp[j] = t;
  951. }
  952. } else {
  953. for (j = 0; j < SUBFRAME_LEN * 2; j++) {
  954. t = vector_ptr[j] >> shift;
  955. sum += t * t;
  956. tmp[j] = t;
  957. }
  958. }
  959. b0 = 0;
  960. for (j = 0; j < 11; j++)
  961. b0 += tmp[pos[(i / 2) * 11 + j]] * signs[(i / 2) * 11 + j];
  962. b0 = b0 * 2 * 2979LL + (1 << 29) >> 30; // approximated division by 11
  963. c = p->cur_gain * (p->cur_gain * SUBFRAME_LEN >> 5);
  964. if (shift * 2 + 3 >= 0)
  965. c >>= shift * 2 + 3;
  966. else
  967. c <<= -(shift * 2 + 3);
  968. c = (av_clipl_int32(sum << 1) - c) * 2979LL >> 15;
  969. delta = b0 * b0 * 2 - c;
  970. if (delta <= 0) {
  971. x = -b0;
  972. } else {
  973. delta = square_root(delta);
  974. x = delta - b0;
  975. t = delta + b0;
  976. if (FFABS(t) < FFABS(x))
  977. x = -t;
  978. }
  979. shift++;
  980. if (shift < 0)
  981. x >>= -shift;
  982. else
  983. x <<= shift;
  984. x = av_clip(x, -10000, 10000);
  985. for (j = 0; j < 11; j++) {
  986. idx = (i / 2) * 11 + j;
  987. vector_ptr[pos[idx]] = av_clip_int16(vector_ptr[pos[idx]] +
  988. (x * signs[idx] >> 15));
  989. }
  990. /* copy decoded data to serve as a history for the next decoded subframes */
  991. memcpy(vector_ptr + PITCH_MAX, vector_ptr,
  992. sizeof(*vector_ptr) * SUBFRAME_LEN * 2);
  993. vector_ptr += SUBFRAME_LEN * 2;
  994. }
  995. /* Save the excitation for the next frame */
  996. memcpy(p->prev_excitation, p->audio + LPC_ORDER + FRAME_LEN,
  997. PITCH_MAX * sizeof(*p->excitation));
  998. }
  999. static int g723_1_decode_frame(AVCodecContext *avctx, void *data,
  1000. int *got_frame_ptr, AVPacket *avpkt)
  1001. {
  1002. G723_1_Context *p = avctx->priv_data;
  1003. const uint8_t *buf = avpkt->data;
  1004. int buf_size = avpkt->size;
  1005. int dec_mode = buf[0] & 3;
  1006. PPFParam ppf[SUBFRAMES];
  1007. int16_t cur_lsp[LPC_ORDER];
  1008. int16_t lpc[SUBFRAMES * LPC_ORDER];
  1009. int16_t acb_vector[SUBFRAME_LEN];
  1010. int16_t *out;
  1011. int bad_frame = 0, i, j, ret;
  1012. int16_t *audio = p->audio;
  1013. if (buf_size < frame_size[dec_mode]) {
  1014. if (buf_size)
  1015. av_log(avctx, AV_LOG_WARNING,
  1016. "Expected %d bytes, got %d - skipping packet\n",
  1017. frame_size[dec_mode], buf_size);
  1018. *got_frame_ptr = 0;
  1019. return buf_size;
  1020. }
  1021. if (unpack_bitstream(p, buf, buf_size) < 0) {
  1022. bad_frame = 1;
  1023. if (p->past_frame_type == ACTIVE_FRAME)
  1024. p->cur_frame_type = ACTIVE_FRAME;
  1025. else
  1026. p->cur_frame_type = UNTRANSMITTED_FRAME;
  1027. }
  1028. p->frame.nb_samples = FRAME_LEN;
  1029. if ((ret = ff_get_buffer(avctx, &p->frame)) < 0) {
  1030. av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
  1031. return ret;
  1032. }
  1033. out = (int16_t *)p->frame.data[0];
  1034. if (p->cur_frame_type == ACTIVE_FRAME) {
  1035. if (!bad_frame)
  1036. p->erased_frames = 0;
  1037. else if (p->erased_frames != 3)
  1038. p->erased_frames++;
  1039. inverse_quant(cur_lsp, p->prev_lsp, p->lsp_index, bad_frame);
  1040. lsp_interpolate(lpc, cur_lsp, p->prev_lsp);
  1041. /* Save the lsp_vector for the next frame */
  1042. memcpy(p->prev_lsp, cur_lsp, LPC_ORDER * sizeof(*p->prev_lsp));
  1043. /* Generate the excitation for the frame */
  1044. memcpy(p->excitation, p->prev_excitation,
  1045. PITCH_MAX * sizeof(*p->excitation));
  1046. if (!p->erased_frames) {
  1047. int16_t *vector_ptr = p->excitation + PITCH_MAX;
  1048. /* Update interpolation gain memory */
  1049. p->interp_gain = fixed_cb_gain[(p->subframe[2].amp_index +
  1050. p->subframe[3].amp_index) >> 1];
  1051. for (i = 0; i < SUBFRAMES; i++) {
  1052. gen_fcb_excitation(vector_ptr, &p->subframe[i], p->cur_rate,
  1053. p->pitch_lag[i >> 1], i);
  1054. gen_acb_excitation(acb_vector, &p->excitation[SUBFRAME_LEN * i],
  1055. p->pitch_lag[i >> 1], &p->subframe[i],
  1056. p->cur_rate);
  1057. /* Get the total excitation */
  1058. for (j = 0; j < SUBFRAME_LEN; j++) {
  1059. int v = av_clip_int16(vector_ptr[j] << 1);
  1060. vector_ptr[j] = av_clip_int16(v + acb_vector[j]);
  1061. }
  1062. vector_ptr += SUBFRAME_LEN;
  1063. }
  1064. vector_ptr = p->excitation + PITCH_MAX;
  1065. p->interp_index = comp_interp_index(p, p->pitch_lag[1],
  1066. &p->sid_gain, &p->cur_gain);
1067. /* Perform pitch postfiltering */
  1068. if (p->postfilter) {
  1069. i = PITCH_MAX;
  1070. for (j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
  1071. comp_ppf_coeff(p, i, p->pitch_lag[j >> 1],
  1072. ppf + j, p->cur_rate);
  1073. for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
  1074. ff_acelp_weighted_vector_sum(p->audio + LPC_ORDER + i,
  1075. vector_ptr + i,
  1076. vector_ptr + i + ppf[j].index,
  1077. ppf[j].sc_gain,
  1078. ppf[j].opt_gain,
  1079. 1 << 14, 15, SUBFRAME_LEN);
  1080. } else {
  1081. audio = vector_ptr - LPC_ORDER;
  1082. }
  1083. /* Save the excitation for the next frame */
  1084. memcpy(p->prev_excitation, p->excitation + FRAME_LEN,
  1085. PITCH_MAX * sizeof(*p->excitation));
  1086. } else {
  1087. p->interp_gain = (p->interp_gain * 3 + 2) >> 2;
  1088. if (p->erased_frames == 3) {
  1089. /* Mute output */
  1090. memset(p->excitation, 0,
  1091. (FRAME_LEN + PITCH_MAX) * sizeof(*p->excitation));
  1092. memset(p->prev_excitation, 0,
  1093. PITCH_MAX * sizeof(*p->excitation));
  1094. memset(p->frame.data[0], 0,
  1095. (FRAME_LEN + LPC_ORDER) * sizeof(int16_t));
  1096. } else {
  1097. int16_t *buf = p->audio + LPC_ORDER;
  1098. /* Regenerate frame */
  1099. residual_interp(p->excitation, buf, p->interp_index,
  1100. p->interp_gain, &p->random_seed);
  1101. /* Save the excitation for the next frame */
  1102. memcpy(p->prev_excitation, buf + (FRAME_LEN - PITCH_MAX),
  1103. PITCH_MAX * sizeof(*p->excitation));
  1104. }
  1105. }
  1106. p->cng_random_seed = CNG_RANDOM_SEED;
  1107. } else {
  1108. if (p->cur_frame_type == SID_FRAME) {
  1109. p->sid_gain = sid_gain_to_lsp_index(p->subframe[0].amp_index);
  1110. inverse_quant(p->sid_lsp, p->prev_lsp, p->lsp_index, 0);
  1111. } else if (p->past_frame_type == ACTIVE_FRAME) {
  1112. p->sid_gain = estimate_sid_gain(p);
  1113. }
  1114. if (p->past_frame_type == ACTIVE_FRAME)
  1115. p->cur_gain = p->sid_gain;
  1116. else
  1117. p->cur_gain = (p->cur_gain * 7 + p->sid_gain) >> 3;
  1118. generate_noise(p);
  1119. lsp_interpolate(lpc, p->sid_lsp, p->prev_lsp);
  1120. /* Save the lsp_vector for the next frame */
  1121. memcpy(p->prev_lsp, p->sid_lsp, LPC_ORDER * sizeof(*p->prev_lsp));
  1122. }
  1123. p->past_frame_type = p->cur_frame_type;
  1124. memcpy(p->audio, p->synth_mem, LPC_ORDER * sizeof(*p->audio));
  1125. for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
  1126. ff_celp_lp_synthesis_filter(p->audio + i, &lpc[j * LPC_ORDER],
  1127. audio + i, SUBFRAME_LEN, LPC_ORDER,
  1128. 0, 1, 1 << 12);
  1129. memcpy(p->synth_mem, p->audio + FRAME_LEN, LPC_ORDER * sizeof(*p->audio));
  1130. if (p->postfilter) {
  1131. formant_postfilter(p, lpc, p->audio, out);
  1132. } else { // if output is not postfiltered it should be scaled by 2
  1133. for (i = 0; i < FRAME_LEN; i++)
  1134. out[i] = av_clip_int16(p->audio[LPC_ORDER + i] << 1);
  1135. }
  1136. *got_frame_ptr = 1;
  1137. *(AVFrame *)data = p->frame;
  1138. return frame_size[dec_mode];
  1139. }
  1140. #define OFFSET(x) offsetof(G723_1_Context, x)
  1141. #define AD AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM
  1142. static const AVOption options[] = {
  1143. { "postfilter", "postfilter on/off", OFFSET(postfilter), AV_OPT_TYPE_INT,
  1144. { .i64 = 1 }, 0, 1, AD },
  1145. { NULL }
  1146. };
  1147. static const AVClass g723_1dec_class = {
  1148. .class_name = "G.723.1 decoder",
  1149. .item_name = av_default_item_name,
  1150. .option = options,
  1151. .version = LIBAVUTIL_VERSION_INT,
  1152. };
  1153. AVCodec ff_g723_1_decoder = {
  1154. .name = "g723_1",
  1155. .type = AVMEDIA_TYPE_AUDIO,
  1156. .id = AV_CODEC_ID_G723_1,
  1157. .priv_data_size = sizeof(G723_1_Context),
  1158. .init = g723_1_decode_init,
  1159. .decode = g723_1_decode_frame,
  1160. .long_name = NULL_IF_CONFIG_SMALL("G.723.1"),
  1161. .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
  1162. .priv_class = &g723_1dec_class,
  1163. };
  1164. #if CONFIG_G723_1_ENCODER
  1165. #define BITSTREAM_WRITER_LE
  1166. #include "put_bits.h"
  1167. static av_cold int g723_1_encode_init(AVCodecContext *avctx)
  1168. {
  1169. G723_1_Context *p = avctx->priv_data;
  1170. if (avctx->sample_rate != 8000) {
  1171. av_log(avctx, AV_LOG_ERROR, "Only 8000Hz sample rate supported\n");
  1172. return -1;
  1173. }
  1174. if (avctx->channels != 1) {
  1175. av_log(avctx, AV_LOG_ERROR, "Only mono supported\n");
  1176. return AVERROR(EINVAL);
  1177. }
  1178. if (avctx->bit_rate == 6300) {
  1179. p->cur_rate = RATE_6300;
  1180. } else if (avctx->bit_rate == 5300) {
  1181. av_log(avctx, AV_LOG_ERROR, "Bitrate not supported yet, use 6.3k\n");
  1182. return AVERROR_PATCHWELCOME;
  1183. } else {
  1184. av_log(avctx, AV_LOG_ERROR,
  1185. "Bitrate not supported, use 6.3k\n");
  1186. return AVERROR(EINVAL);
  1187. }
  1188. avctx->frame_size = 240;
  1189. memcpy(p->prev_lsp, dc_lsp, LPC_ORDER * sizeof(int16_t));
  1190. return 0;
  1191. }
  1192. /**
  1193. * Remove DC component from the input signal.
  1194. *
  1195. * @param buf input signal
  1196. * @param fir zero memory
  1197. * @param iir pole memory
  1198. */
  1199. static void highpass_filter(int16_t *buf, int16_t *fir, int *iir)
  1200. {
  1201. int i;
  1202. for (i = 0; i < FRAME_LEN; i++) {
  1203. *iir = (buf[i] << 15) + ((-*fir) << 15) + MULL2(*iir, 0x7f00);
  1204. *fir = buf[i];
  1205. buf[i] = av_clipl_int32((int64_t)*iir + (1 << 15)) >> 16;
  1206. }
  1207. }
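/*
 * Viewed as a difference equation on the 32-bit accumulator (Q15 relative to
 * the input): iir[n] = ((x[n] - x[n-1]) << 15) + iir[n-1] * 127/128, i.e. a
 * first-order DC-removal filter with its pole at 127/128; the stored sample
 * is the accumulator rounded and shifted down by 16, which also halves the
 * output relative to the input scale.
 */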
  1208. /**
  1209. * Estimate autocorrelation of the input vector.
  1210. *
  1211. * @param buf input buffer
  1212. * @param autocorr autocorrelation coefficients vector
  1213. */
  1214. static void comp_autocorr(int16_t *buf, int16_t *autocorr)
  1215. {
  1216. int i, scale, temp;
  1217. int16_t vector[LPC_FRAME];
  1218. scale_vector(vector, buf, LPC_FRAME);
  1219. /* Apply the Hamming window */
  1220. for (i = 0; i < LPC_FRAME; i++)
  1221. vector[i] = (vector[i] * hamming_window[i] + (1 << 14)) >> 15;
  1222. /* Compute the first autocorrelation coefficient */
  1223. temp = ff_dot_product(vector, vector, LPC_FRAME);
  1224. /* Apply a white noise correlation factor of (1025/1024) */
  1225. temp += temp >> 10;
  1226. /* Normalize */
  1227. scale = normalize_bits_int32(temp);
  1228. autocorr[0] = av_clipl_int32((int64_t)(temp << scale) +
  1229. (1 << 15)) >> 16;
  1230. /* Compute the remaining coefficients */
  1231. if (!autocorr[0]) {
  1232. memset(autocorr + 1, 0, LPC_ORDER * sizeof(int16_t));
  1233. } else {
  1234. for (i = 1; i <= LPC_ORDER; i++) {
  1235. temp = ff_dot_product(vector, vector + i, LPC_FRAME - i);
  1236. temp = MULL2((temp << scale), binomial_window[i - 1]);
  1237. autocorr[i] = av_clipl_int32((int64_t)temp + (1 << 15)) >> 16;
  1238. }
  1239. }
  1240. }
  1241. /**
  1242. * Use Levinson-Durbin recursion to compute LPC coefficients from
  1243. * autocorrelation values.
  1244. *
  1245. * @param lpc LPC coefficients vector
  1246. * @param autocorr autocorrelation coefficients vector
  1247. * @param error prediction error
  1248. */
  1249. static void levinson_durbin(int16_t *lpc, int16_t *autocorr, int16_t error)
  1250. {
  1251. int16_t vector[LPC_ORDER];
  1252. int16_t partial_corr;
  1253. int i, j, temp;
  1254. memset(lpc, 0, LPC_ORDER * sizeof(int16_t));
  1255. for (i = 0; i < LPC_ORDER; i++) {
  1256. /* Compute the partial correlation coefficient */
  1257. temp = 0;
  1258. for (j = 0; j < i; j++)
  1259. temp -= lpc[j] * autocorr[i - j - 1];
  1260. temp = ((autocorr[i] << 13) + temp) << 3;
  1261. if (FFABS(temp) >= (error << 16))
  1262. break;
  1263. partial_corr = temp / (error << 1);
  1264. lpc[i] = av_clipl_int32((int64_t)(partial_corr << 14) +
  1265. (1 << 15)) >> 16;
  1266. /* Update the prediction error */
  1267. temp = MULL2(temp, partial_corr);
  1268. error = av_clipl_int32((int64_t)(error << 16) - temp +
  1269. (1 << 15)) >> 16;
  1270. memcpy(vector, lpc, i * sizeof(int16_t));
  1271. for (j = 0; j < i; j++) {
  1272. temp = partial_corr * vector[i - j - 1] << 1;
  1273. lpc[j] = av_clipl_int32((int64_t)(lpc[j] << 16) - temp +
  1274. (1 << 15)) >> 16;
  1275. }
  1276. }
  1277. }
  1278. /**
  1279. * Calculate LPC coefficients for the current frame.
  1280. *
  1281. * @param buf current frame
  1283. * @param lpc LPC coefficients vector
  1284. */
  1285. static void comp_lpc_coeff(int16_t *buf, int16_t *lpc)
  1286. {
  1287. int16_t autocorr[(LPC_ORDER + 1) * SUBFRAMES];
  1288. int16_t *autocorr_ptr = autocorr;
  1289. int16_t *lpc_ptr = lpc;
  1290. int i, j;
  1291. for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
  1292. comp_autocorr(buf + i, autocorr_ptr);
  1293. levinson_durbin(lpc_ptr, autocorr_ptr + 1, autocorr_ptr[0]);
  1294. lpc_ptr += LPC_ORDER;
  1295. autocorr_ptr += LPC_ORDER + 1;
  1296. }
  1297. }
  1298. static void lpc2lsp(int16_t *lpc, int16_t *prev_lsp, int16_t *lsp)
  1299. {
  1300. int f[LPC_ORDER + 2]; ///< coefficients of the sum and difference
  1301. ///< polynomials (F1, F2) ordered as
  1302. ///< f1[0], f2[0], ...., f1[5], f2[5]
  1303. int max, shift, cur_val, prev_val, count, p;
  1304. int i, j;
  1305. int64_t temp;
  1306. /* Initialize f1[0] and f2[0] to 1 in Q25 */
  1307. for (i = 0; i < LPC_ORDER; i++)
  1308. lsp[i] = (lpc[i] * bandwidth_expand[i] + (1 << 14)) >> 15;
  1309. /* Apply bandwidth expansion on the LPC coefficients */
  1310. f[0] = f[1] = 1 << 25;
  1311. /* Compute the remaining coefficients */
  1312. for (i = 0; i < LPC_ORDER / 2; i++) {
  1313. /* f1 */
  1314. f[2 * i + 2] = -f[2 * i] - ((lsp[i] + lsp[LPC_ORDER - 1 - i]) << 12);
  1315. /* f2 */
  1316. f[2 * i + 3] = f[2 * i + 1] - ((lsp[i] - lsp[LPC_ORDER - 1 - i]) << 12);
  1317. }
  1318. /* Divide f1[5] and f2[5] by 2 for use in polynomial evaluation */
  1319. f[LPC_ORDER] >>= 1;
  1320. f[LPC_ORDER + 1] >>= 1;
  1321. /* Normalize and shorten */
  1322. max = FFABS(f[0]);
  1323. for (i = 1; i < LPC_ORDER + 2; i++)
  1324. max = FFMAX(max, FFABS(f[i]));
  1325. shift = normalize_bits_int32(max);
  1326. for (i = 0; i < LPC_ORDER + 2; i++)
  1327. f[i] = av_clipl_int32((int64_t)(f[i] << shift) + (1 << 15)) >> 16;
  1328. /**
  1329. * Evaluate F1 and F2 at uniform intervals of pi/256 along the
  1330. * unit circle and check for zero crossings.
  1331. */
  1332. p = 0;
  1333. temp = 0;
  1334. for (i = 0; i <= LPC_ORDER / 2; i++)
  1335. temp += f[2 * i] * cos_tab[0];
  1336. prev_val = av_clipl_int32(temp << 1);
  1337. count = 0;
  1338. for ( i = 1; i < COS_TBL_SIZE / 2; i++) {
  1339. /* Evaluate */
  1340. temp = 0;
  1341. for (j = 0; j <= LPC_ORDER / 2; j++)
  1342. temp += f[LPC_ORDER - 2 * j + p] * cos_tab[i * j % COS_TBL_SIZE];
  1343. cur_val = av_clipl_int32(temp << 1);
  1344. /* Check for sign change, indicating a zero crossing */
  1345. if ((cur_val ^ prev_val) < 0) {
  1346. int abs_cur = FFABS(cur_val);
  1347. int abs_prev = FFABS(prev_val);
  1348. int sum = abs_cur + abs_prev;
  1349. shift = normalize_bits_int32(sum);
  1350. sum <<= shift;
  1351. abs_prev = abs_prev << shift >> 8;
  1352. lsp[count++] = ((i - 1) << 7) + (abs_prev >> 1) / (sum >> 16);
  1353. if (count == LPC_ORDER)
  1354. break;
  1355. /* Switch between sum and difference polynomials */
  1356. p ^= 1;
  1357. /* Evaluate */
  1358. temp = 0;
  1359. for (j = 0; j <= LPC_ORDER / 2; j++){
  1360. temp += f[LPC_ORDER - 2 * j + p] *
  1361. cos_tab[i * j % COS_TBL_SIZE];
  1362. }
  1363. cur_val = av_clipl_int32(temp<<1);
  1364. }
  1365. prev_val = cur_val;
  1366. }
  1367. if (count != LPC_ORDER)
  1368. memcpy(lsp, prev_lsp, LPC_ORDER * sizeof(int16_t));
  1369. }
  1370. /**
  1371. * Quantize the current LSP subvector.
  1372. *
  1373. * @param num band number
  1374. * @param offset offset of the current subvector in an LPC_ORDER vector
  1375. * @param size size of the current subvector
  1376. */
  1377. #define get_index(num, offset, size) \
  1378. {\
  1379. int error, max = -1;\
  1380. int16_t temp[4];\
  1381. int i, j;\
  1382. for (i = 0; i < LSP_CB_SIZE; i++) {\
  1383. for (j = 0; j < size; j++){\
  1384. temp[j] = (weight[j + (offset)] * lsp_band##num[i][j] +\
  1385. (1 << 14)) >> 15;\
  1386. }\
  1387. error = dot_product(lsp + (offset), temp, size) << 1;\
  1388. error -= dot_product(lsp_band##num[i], temp, size);\
  1389. if (error > max) {\
  1390. max = error;\
  1391. lsp_index[num] = i;\
  1392. }\
  1393. }\
  1394. }
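/*
 * Selection criterion, spelled out: temp holds the weighted codevector
 * W * c_i, so the score is 2*<x, W*c_i> - <c_i, W*c_i>. Maximizing it over i
 * is equivalent to minimizing the weighted error (x - c_i)' W (x - c_i),
 * because the x' W x term is the same for every candidate.
 */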
  1395. /**
  1396. * Vector quantize the LSP frequencies.
  1397. *
  1398. * @param lsp the current lsp vector
  1399. * @param prev_lsp the previous lsp vector
  1400. */
  1401. static void lsp_quantize(uint8_t *lsp_index, int16_t *lsp, int16_t *prev_lsp)
  1402. {
  1403. int16_t weight[LPC_ORDER];
  1404. int16_t min, max;
  1405. int shift, i;
  1406. /* Calculate the VQ weighting vector */
  1407. weight[0] = (1 << 20) / (lsp[1] - lsp[0]);
  1408. weight[LPC_ORDER - 1] = (1 << 20) /
  1409. (lsp[LPC_ORDER - 1] - lsp[LPC_ORDER - 2]);
  1410. for (i = 1; i < LPC_ORDER - 1; i++) {
  1411. min = FFMIN(lsp[i] - lsp[i - 1], lsp[i + 1] - lsp[i]);
  1412. if (min > 0x20)
  1413. weight[i] = (1 << 20) / min;
  1414. else
  1415. weight[i] = INT16_MAX;
  1416. }
  1417. /* Normalize */
  1418. max = 0;
  1419. for (i = 0; i < LPC_ORDER; i++)
  1420. max = FFMAX(weight[i], max);
  1421. shift = normalize_bits_int16(max);
  1422. for (i = 0; i < LPC_ORDER; i++) {
  1423. weight[i] <<= shift;
  1424. }
  1425. /* Compute the VQ target vector */
  1426. for (i = 0; i < LPC_ORDER; i++) {
  1427. lsp[i] -= dc_lsp[i] +
  1428. (((prev_lsp[i] - dc_lsp[i]) * 12288 + (1 << 14)) >> 15);
  1429. }
  1430. get_index(0, 0, 3);
  1431. get_index(1, 3, 3);
  1432. get_index(2, 6, 4);
  1433. }
  1434. /**
  1435. * Apply the formant perceptual weighting filter.
  1436. *
  1437. * @param flt_coef filter coefficients
  1438. * @param unq_lpc unquantized lpc vector
  1439. */
  1440. static void perceptual_filter(G723_1_Context *p, int16_t *flt_coef,
  1441. int16_t *unq_lpc, int16_t *buf)
  1442. {
  1443. int16_t vector[FRAME_LEN + LPC_ORDER];
  1444. int i, j, k, l = 0;
  1445. memcpy(buf, p->iir_mem, sizeof(int16_t) * LPC_ORDER);
  1446. memcpy(vector, p->fir_mem, sizeof(int16_t) * LPC_ORDER);
  1447. memcpy(vector + LPC_ORDER, buf + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);
  1448. for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
  1449. for (k = 0; k < LPC_ORDER; k++) {
  1450. flt_coef[k + 2 * l] = (unq_lpc[k + l] * percept_flt_tbl[0][k] +
  1451. (1 << 14)) >> 15;
  1452. flt_coef[k + 2 * l + LPC_ORDER] = (unq_lpc[k + l] *
  1453. percept_flt_tbl[1][k] +
  1454. (1 << 14)) >> 15;
  1455. }
  1456. iir_filter(flt_coef + 2 * l, flt_coef + 2 * l + LPC_ORDER, vector + i,
  1457. buf + i, 0);
  1458. l += LPC_ORDER;
  1459. }
  1460. memcpy(p->iir_mem, buf + FRAME_LEN, sizeof(int16_t) * LPC_ORDER);
  1461. memcpy(p->fir_mem, vector + FRAME_LEN, sizeof(int16_t) * LPC_ORDER);
  1462. }
  1463. /**
  1464. * Estimate the open loop pitch period.
  1465. *
  1466. * @param buf perceptually weighted speech
  1467. * @param start estimation is carried out from this position
  1468. */
  1469. static int estimate_pitch(int16_t *buf, int start)
  1470. {
  1471. int max_exp = 32;
  1472. int max_ccr = 0x4000;
  1473. int max_eng = 0x7fff;
  1474. int index = PITCH_MIN;
  1475. int offset = start - PITCH_MIN + 1;
  1476. int ccr, eng, orig_eng, ccr_eng, exp;
  1477. int diff, temp;
  1478. int i;
  1479. orig_eng = ff_dot_product(buf + offset, buf + offset, HALF_FRAME_LEN);
  1480. for (i = PITCH_MIN; i <= PITCH_MAX - 3; i++) {
  1481. offset--;
  1482. /* Update energy and compute correlation */
  1483. orig_eng += buf[offset] * buf[offset] -
  1484. buf[offset + HALF_FRAME_LEN] * buf[offset + HALF_FRAME_LEN];
  1485. ccr = ff_dot_product(buf + start, buf + offset, HALF_FRAME_LEN);
  1486. if (ccr <= 0)
  1487. continue;
  1488. /* Split into mantissa and exponent to maintain precision */
  1489. exp = normalize_bits_int32(ccr);
  1490. ccr = av_clipl_int32((int64_t)(ccr << exp) + (1 << 15)) >> 16;
  1491. exp <<= 1;
  1492. ccr *= ccr;
  1493. temp = normalize_bits_int32(ccr);
  1494. ccr = ccr << temp >> 16;
  1495. exp += temp;
  1496. temp = normalize_bits_int32(orig_eng);
  1497. eng = av_clipl_int32((int64_t)(orig_eng << temp) + (1 << 15)) >> 16;
  1498. exp -= temp;
  1499. if (ccr >= eng) {
  1500. exp--;
  1501. ccr >>= 1;
  1502. }
  1503. if (exp > max_exp)
  1504. continue;
  1505. if (exp + 1 < max_exp)
  1506. goto update;
  1507. /* Equalize exponents before comparison */
  1508. if (exp + 1 == max_exp)
  1509. temp = max_ccr >> 1;
  1510. else
  1511. temp = max_ccr;
  1512. ccr_eng = ccr * max_eng;
  1513. diff = ccr_eng - eng * temp;
  1514. if (diff > 0 && (i - index < PITCH_MIN || diff > ccr_eng >> 2)) {
  1515. update:
  1516. index = i;
  1517. max_exp = exp;
  1518. max_ccr = ccr;
  1519. max_eng = eng;
  1520. }
  1521. }
  1522. return index;
  1523. }
  1524. /**
  1525. * Compute harmonic noise filter parameters.
  1526. *
  1527. * @param buf perceptually weighted speech
  1528. * @param pitch_lag open loop pitch period
  1529. * @param hf harmonic filter parameters
  1530. */
  1531. static void comp_harmonic_coeff(int16_t *buf, int16_t pitch_lag, HFParam *hf)
  1532. {
  1533. int ccr, eng, max_ccr, max_eng;
  1534. int exp, max, diff;
  1535. int energy[15];
  1536. int i, j;
  1537. for (i = 0, j = pitch_lag - 3; j <= pitch_lag + 3; i++, j++) {
  1538. /* Compute residual energy */
  1539. energy[i << 1] = ff_dot_product(buf - j, buf - j, SUBFRAME_LEN);
  1540. /* Compute correlation */
  1541. energy[(i << 1) + 1] = ff_dot_product(buf, buf - j, SUBFRAME_LEN);
  1542. }
  1543. /* Compute target energy */
  1544. energy[14] = ff_dot_product(buf, buf, SUBFRAME_LEN);
  1545. /* Normalize */
  1546. max = 0;
  1547. for (i = 0; i < 15; i++)
  1548. max = FFMAX(max, FFABS(energy[i]));
  1549. exp = normalize_bits_int32(max);
  1550. for (i = 0; i < 15; i++) {
  1551. energy[i] = av_clipl_int32((int64_t)(energy[i] << exp) +
  1552. (1 << 15)) >> 16;
  1553. }
  1554. hf->index = -1;
  1555. hf->gain = 0;
  1556. max_ccr = 1;
  1557. max_eng = 0x7fff;
  1558. for (i = 0; i <= 6; i++) {
  1559. eng = energy[i << 1];
  1560. ccr = energy[(i << 1) + 1];
  1561. if (ccr <= 0)
  1562. continue;
  1563. ccr = (ccr * ccr + (1 << 14)) >> 15;
  1564. diff = ccr * max_eng - eng * max_ccr;
  1565. if (diff > 0) {
  1566. max_ccr = ccr;
  1567. max_eng = eng;
  1568. hf->index = i;
  1569. }
  1570. }
  1571. if (hf->index == -1) {
  1572. hf->index = pitch_lag;
  1573. return;
  1574. }
  1575. eng = energy[14] * max_eng;
  1576. eng = (eng >> 2) + (eng >> 3);
  1577. ccr = energy[(hf->index << 1) + 1] * energy[(hf->index << 1) + 1];
  1578. if (eng < ccr) {
  1579. eng = energy[(hf->index << 1) + 1];
  1580. if (eng >= max_eng)
  1581. hf->gain = 0x2800;
  1582. else
  1583. hf->gain = ((eng << 15) / max_eng * 0x2800 + (1 << 14)) >> 15;
  1584. }
  1585. hf->index += pitch_lag - 3;
  1586. }
  1587. /**
  1588. * Apply the harmonic noise shaping filter.
  1589. *
  1590. * @param hf filter parameters
  1591. */
  1592. static void harmonic_filter(HFParam *hf, const int16_t *src, int16_t *dest)
  1593. {
  1594. int i;
  1595. for (i = 0; i < SUBFRAME_LEN; i++) {
  1596. int64_t temp = hf->gain * src[i - hf->index] << 1;
  1597. dest[i] = av_clipl_int32((src[i] << 16) - temp + (1 << 15)) >> 16;
  1598. }
  1599. }
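/**
 * Subtract the harmonic noise shaping contribution from dest,
 * i.e. dest[i] -= src[i] - gain * src[i - index] (gain in Q15).
 *
 * @param hf   filter parameters
 * @param src  input signal
 * @param dest signal updated in place
 */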
  1600. static void harmonic_noise_sub(HFParam *hf, const int16_t *src, int16_t *dest)
  1601. {
  1602. int i;
  1603. for (i = 0; i < SUBFRAME_LEN; i++) {
  1604. int64_t temp = hf->gain * src[i - hf->index] << 1;
  1605. dest[i] = av_clipl_int32(((dest[i] - src[i]) << 16) + temp +
  1606. (1 << 15)) >> 16;
  1607. }
  1608. }
  1609. /**
1610. * Combined synthesis and formant perceptual weighting filter.
  1611. *
  1612. * @param qnt_lpc quantized lpc coefficients
  1613. * @param perf_lpc perceptual filter coefficients
  1614. * @param perf_fir perceptual filter fir memory
  1615. * @param perf_iir perceptual filter iir memory
  1616. * @param scale the filter output will be scaled by 2^scale
  1617. */
  1618. static void synth_percept_filter(int16_t *qnt_lpc, int16_t *perf_lpc,
  1619. int16_t *perf_fir, int16_t *perf_iir,
  1620. const int16_t *src, int16_t *dest, int scale)
  1621. {
  1622. int i, j;
  1623. int16_t buf_16[SUBFRAME_LEN + LPC_ORDER];
  1624. int64_t buf[SUBFRAME_LEN];
  1625. int16_t *bptr_16 = buf_16 + LPC_ORDER;
  1626. memcpy(buf_16, perf_fir, sizeof(int16_t) * LPC_ORDER);
  1627. memcpy(dest - LPC_ORDER, perf_iir, sizeof(int16_t) * LPC_ORDER);
  1628. for (i = 0; i < SUBFRAME_LEN; i++) {
  1629. int64_t temp = 0;
  1630. for (j = 1; j <= LPC_ORDER; j++)
  1631. temp -= qnt_lpc[j - 1] * bptr_16[i - j];
  1632. buf[i] = (src[i] << 15) + (temp << 3);
  1633. bptr_16[i] = av_clipl_int32(buf[i] + (1 << 15)) >> 16;
  1634. }
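/*
 * The loop above ran the synthesis filter 1/A(z) with the quantized LPC
 * coefficients; the loop below applies the formant perceptual weighting
 * filter, whose numerator (FIR) coefficients sit in perf_lpc[0..LPC_ORDER-1]
 * and whose denominator (IIR) coefficients sit in
 * perf_lpc[LPC_ORDER..2*LPC_ORDER-1].
 */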
  1635. for (i = 0; i < SUBFRAME_LEN; i++) {
  1636. int64_t fir = 0, iir = 0;
  1637. for (j = 1; j <= LPC_ORDER; j++) {
  1638. fir -= perf_lpc[j - 1] * bptr_16[i - j];
  1639. iir += perf_lpc[j + LPC_ORDER - 1] * dest[i - j];
  1640. }
  1641. dest[i] = av_clipl_int32(((buf[i] + (fir << 3)) << scale) + (iir << 3) +
  1642. (1 << 15)) >> 16;
  1643. }
  1644. memcpy(perf_fir, buf_16 + SUBFRAME_LEN, sizeof(int16_t) * LPC_ORDER);
  1645. memcpy(perf_iir, dest + SUBFRAME_LEN - LPC_ORDER,
  1646. sizeof(int16_t) * LPC_ORDER);
  1647. }
  1648. /**
  1649. * Compute the adaptive codebook contribution.
  1650. *
  1651. * @param buf input signal
  1652. * @param index the current subframe index
  1653. */
  1654. static void acb_search(G723_1_Context *p, int16_t *residual,
  1655. int16_t *impulse_resp, const int16_t *buf,
  1656. int index)
  1657. {
  1658. int16_t flt_buf[PITCH_ORDER][SUBFRAME_LEN];
  1659. const int16_t *cb_tbl = adaptive_cb_gain85;
  1660. int ccr_buf[PITCH_ORDER * SUBFRAMES << 2];
  1661. int pitch_lag = p->pitch_lag[index >> 1];
  1662. int acb_lag = 1;
  1663. int acb_gain = 0;
  1664. int odd_frame = index & 1;
  1665. int iter = 3 + odd_frame;
  1666. int count = 0;
  1667. int tbl_size = 85;
  1668. int i, j, k, l, max;
  1669. int64_t temp;
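/*
 * Closed-loop adaptive codebook search.  Even subframes try the lags
 * pitch_lag - 1 .. pitch_lag + 1 and transmit the winning lag itself; odd
 * subframes try pitch_lag - 1 .. pitch_lag + 2 and transmit only the 2-bit
 * offset ad_cb_lag.  For each candidate lag the PITCH_ORDER filtered
 * excitation taps yield 20 values (5 cross-correlations with the target,
 * 5 energies and 10 cross terms), which are matched against an 85- or
 * 170-entry table of 5-tap gain vectors.
 */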
  1670. if (!odd_frame) {
  1671. if (pitch_lag == PITCH_MIN)
  1672. pitch_lag++;
  1673. else
  1674. pitch_lag = FFMIN(pitch_lag, PITCH_MAX - 5);
  1675. }
  1676. for (i = 0; i < iter; i++) {
  1677. get_residual(residual, p->prev_excitation, pitch_lag + i - 1);
  1678. for (j = 0; j < SUBFRAME_LEN; j++) {
  1679. temp = 0;
  1680. for (k = 0; k <= j; k++)
  1681. temp += residual[PITCH_ORDER - 1 + k] * impulse_resp[j - k];
  1682. flt_buf[PITCH_ORDER - 1][j] = av_clipl_int32((temp << 1) +
  1683. (1 << 15)) >> 16;
  1684. }
  1685. for (j = PITCH_ORDER - 2; j >= 0; j--) {
  1686. flt_buf[j][0] = ((residual[j] << 13) + (1 << 14)) >> 15;
  1687. for (k = 1; k < SUBFRAME_LEN; k++) {
  1688. temp = (flt_buf[j + 1][k - 1] << 15) +
  1689. residual[j] * impulse_resp[k];
  1690. flt_buf[j][k] = av_clipl_int32((temp << 1) + (1 << 15)) >> 16;
  1691. }
  1692. }
  1693. /* Compute crosscorrelation with the signal */
  1694. for (j = 0; j < PITCH_ORDER; j++) {
  1695. temp = ff_dot_product(buf, flt_buf[j], SUBFRAME_LEN);
  1696. ccr_buf[count++] = av_clipl_int32(temp << 1);
  1697. }
  1698. /* Compute energies */
  1699. for (j = 0; j < PITCH_ORDER; j++) {
  1700. ccr_buf[count++] = dot_product(flt_buf[j], flt_buf[j],
  1701. SUBFRAME_LEN);
  1702. }
  1703. for (j = 1; j < PITCH_ORDER; j++) {
  1704. for (k = 0; k < j; k++) {
  1705. temp = ff_dot_product(flt_buf[j], flt_buf[k], SUBFRAME_LEN);
1706. ccr_buf[count++] = av_clipl_int32(temp << 2);
  1707. }
  1708. }
  1709. }
  1710. /* Normalize and shorten */
  1711. max = 0;
  1712. for (i = 0; i < 20 * iter; i++)
  1713. max = FFMAX(max, FFABS(ccr_buf[i]));
  1714. temp = normalize_bits_int32(max);
  1715. for (i = 0; i < 20 * iter; i++){
  1716. ccr_buf[i] = av_clipl_int32((int64_t)(ccr_buf[i] << temp) +
  1717. (1 << 15)) >> 16;
  1718. }
  1719. max = 0;
  1720. for (i = 0; i < iter; i++) {
  1721. /* Select quantization table */
1722. if ((!odd_frame && pitch_lag + i - 1 >= SUBFRAME_LEN - 2) ||
1723. (odd_frame && pitch_lag >= SUBFRAME_LEN - 2)) {
  1724. cb_tbl = adaptive_cb_gain170;
  1725. tbl_size = 170;
  1726. }
  1727. for (j = 0, k = 0; j < tbl_size; j++, k += 20) {
  1728. temp = 0;
  1729. for (l = 0; l < 20; l++)
  1730. temp += ccr_buf[20 * i + l] * cb_tbl[k + l];
  1731. temp = av_clipl_int32(temp);
  1732. if (temp > max) {
  1733. max = temp;
  1734. acb_gain = j;
  1735. acb_lag = i;
  1736. }
  1737. }
  1738. }
  1739. if (!odd_frame) {
  1740. pitch_lag += acb_lag - 1;
  1741. acb_lag = 1;
  1742. }
  1743. p->pitch_lag[index >> 1] = pitch_lag;
  1744. p->subframe[index].ad_cb_lag = acb_lag;
  1745. p->subframe[index].ad_cb_gain = acb_gain;
  1746. }
  1747. /**
  1748. * Subtract the adaptive codebook contribution from the input
  1749. * to obtain the residual.
  1750. *
  1751. * @param buf target vector
  1752. */
  1753. static void sub_acb_contrib(const int16_t *residual, const int16_t *impulse_resp,
  1754. int16_t *buf)
  1755. {
  1756. int i, j;
  1757. /* Subtract adaptive CB contribution to obtain the residual */
  1758. for (i = 0; i < SUBFRAME_LEN; i++) {
  1759. int64_t temp = buf[i] << 14;
  1760. for (j = 0; j <= i; j++)
  1761. temp -= residual[j] * impulse_resp[i - j];
  1762. buf[i] = av_clipl_int32((temp << 2) + (1 << 15)) >> 16;
  1763. }
  1764. }
  1765. /**
  1766. * Quantize the residual signal using the fixed codebook (MP-MLQ).
  1767. *
  1768. * @param optim optimized fixed codebook parameters
  1769. * @param buf excitation vector
  1770. */
  1771. static void get_fcb_param(FCBParam *optim, int16_t *impulse_resp,
  1772. int16_t *buf, int pulse_cnt, int pitch_lag)
  1773. {
  1774. FCBParam param;
  1775. int16_t impulse_r[SUBFRAME_LEN];
  1776. int16_t temp_corr[SUBFRAME_LEN];
  1777. int16_t impulse_corr[SUBFRAME_LEN];
  1778. int ccr1[SUBFRAME_LEN];
  1779. int ccr2[SUBFRAME_LEN];
  1780. int amp, err, max, max_amp_index, min, scale, i, j, k, l;
  1781. int64_t temp;
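/*
 * MP-MLQ search for one impulse response: for each pulse-position grid,
 * pulses are placed one at a time at the position with the largest
 * remaining backward-filtered correlation, the common pulse amplitude is
 * restricted to four quantized gain levels around the closest match, and
 * the combination with the smallest reconstruction error so far is stored
 * in *optim.
 */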
  1782. /* Update impulse response */
  1783. memcpy(impulse_r, impulse_resp, sizeof(int16_t) * SUBFRAME_LEN);
  1784. param.dirac_train = 0;
  1785. if (pitch_lag < SUBFRAME_LEN - 2) {
  1786. param.dirac_train = 1;
  1787. gen_dirac_train(impulse_r, pitch_lag);
  1788. }
  1789. for (i = 0; i < SUBFRAME_LEN; i++)
  1790. temp_corr[i] = impulse_r[i] >> 1;
  1791. /* Compute impulse response autocorrelation */
  1792. temp = dot_product(temp_corr, temp_corr, SUBFRAME_LEN);
  1793. scale = normalize_bits_int32(temp);
  1794. impulse_corr[0] = av_clipl_int32((temp << scale) + (1 << 15)) >> 16;
  1795. for (i = 1; i < SUBFRAME_LEN; i++) {
  1796. temp = dot_product(temp_corr + i, temp_corr, SUBFRAME_LEN - i);
  1797. impulse_corr[i] = av_clipl_int32((temp << scale) + (1 << 15)) >> 16;
  1798. }
  1799. /* Compute crosscorrelation of impulse response with residual signal */
  1800. scale -= 4;
  1801. for (i = 0; i < SUBFRAME_LEN; i++){
  1802. temp = dot_product(buf + i, impulse_r, SUBFRAME_LEN - i);
  1803. if (scale < 0)
  1804. ccr1[i] = temp >> -scale;
  1805. else
  1806. ccr1[i] = av_clipl_int32(temp << scale);
  1807. }
  1808. /* Search loop */
  1809. for (i = 0; i < GRID_SIZE; i++) {
  1810. /* Maximize the crosscorrelation */
  1811. max = 0;
  1812. for (j = i; j < SUBFRAME_LEN; j += GRID_SIZE) {
  1813. temp = FFABS(ccr1[j]);
  1814. if (temp >= max) {
  1815. max = temp;
  1816. param.pulse_pos[0] = j;
  1817. }
  1818. }
  1819. /* Quantize the gain (max crosscorrelation/impulse_corr[0]) */
  1820. amp = max;
  1821. min = 1 << 30;
  1822. max_amp_index = GAIN_LEVELS - 2;
  1823. for (j = max_amp_index; j >= 2; j--) {
  1824. temp = av_clipl_int32((int64_t)fixed_cb_gain[j] *
  1825. impulse_corr[0] << 1);
  1826. temp = FFABS(temp - amp);
  1827. if (temp < min) {
  1828. min = temp;
  1829. max_amp_index = j;
  1830. }
  1831. }
  1832. max_amp_index--;
  1833. /* Select additional gain values */
  1834. for (j = 1; j < 5; j++) {
  1835. for (k = i; k < SUBFRAME_LEN; k += GRID_SIZE) {
  1836. temp_corr[k] = 0;
  1837. ccr2[k] = ccr1[k];
  1838. }
  1839. param.amp_index = max_amp_index + j - 2;
  1840. amp = fixed_cb_gain[param.amp_index];
  1841. param.pulse_sign[0] = (ccr2[param.pulse_pos[0]] < 0) ? -amp : amp;
  1842. temp_corr[param.pulse_pos[0]] = 1;
  1843. for (k = 1; k < pulse_cnt; k++) {
1844. max = -(1 << 30);
  1845. for (l = i; l < SUBFRAME_LEN; l += GRID_SIZE) {
  1846. if (temp_corr[l])
  1847. continue;
  1848. temp = impulse_corr[FFABS(l - param.pulse_pos[k - 1])];
  1849. temp = av_clipl_int32((int64_t)temp *
  1850. param.pulse_sign[k - 1] << 1);
  1851. ccr2[l] -= temp;
  1852. temp = FFABS(ccr2[l]);
  1853. if (temp > max) {
  1854. max = temp;
  1855. param.pulse_pos[k] = l;
  1856. }
  1857. }
  1858. param.pulse_sign[k] = (ccr2[param.pulse_pos[k]] < 0) ?
  1859. -amp : amp;
  1860. temp_corr[param.pulse_pos[k]] = 1;
  1861. }
  1862. /* Create the error vector */
  1863. memset(temp_corr, 0, sizeof(int16_t) * SUBFRAME_LEN);
  1864. for (k = 0; k < pulse_cnt; k++)
  1865. temp_corr[param.pulse_pos[k]] = param.pulse_sign[k];
  1866. for (k = SUBFRAME_LEN - 1; k >= 0; k--) {
  1867. temp = 0;
  1868. for (l = 0; l <= k; l++) {
  1869. int prod = av_clipl_int32((int64_t)temp_corr[l] *
  1870. impulse_r[k - l] << 1);
  1871. temp = av_clipl_int32(temp + prod);
  1872. }
  1873. temp_corr[k] = temp << 2 >> 16;
  1874. }
  1875. /* Compute square of error */
  1876. err = 0;
  1877. for (k = 0; k < SUBFRAME_LEN; k++) {
  1878. int64_t prod;
  1879. prod = av_clipl_int32((int64_t)buf[k] * temp_corr[k] << 1);
  1880. err = av_clipl_int32(err - prod);
  1881. prod = av_clipl_int32((int64_t)temp_corr[k] * temp_corr[k]);
  1882. err = av_clipl_int32(err + prod);
  1883. }
  1884. /* Minimize */
  1885. if (err < optim->min_err) {
  1886. optim->min_err = err;
  1887. optim->grid_index = i;
  1888. optim->amp_index = param.amp_index;
  1889. optim->dirac_train = param.dirac_train;
  1890. for (k = 0; k < pulse_cnt; k++) {
  1891. optim->pulse_sign[k] = param.pulse_sign[k];
  1892. optim->pulse_pos[k] = param.pulse_pos[k];
  1893. }
  1894. }
  1895. }
  1896. }
  1897. }
  1898. /**
  1899. * Encode the pulse position and gain of the current subframe.
  1900. *
  1901. * @param optim optimized fixed CB parameters
  1902. * @param buf excitation vector
  1903. */
  1904. static void pack_fcb_param(G723_1_Subframe *subfrm, FCBParam *optim,
  1905. int16_t *buf, int pulse_cnt)
  1906. {
  1907. int i, j;
  1908. j = PULSE_MAX - pulse_cnt;
  1909. subfrm->pulse_sign = 0;
  1910. subfrm->pulse_pos = 0;
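/*
 * The pulse positions inside the selected grid are coded jointly as one
 * combinatorial (enumerative) index: scanning the SUBFRAME_LEN/2 grid
 * slots, every empty slot adds a term from combinatorial_table, while each
 * occupied slot appends its sign to pulse_sign, first pulse in the most
 * significant bit.
 */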
  1911. for (i = 0; i < SUBFRAME_LEN >> 1; i++) {
  1912. int val = buf[optim->grid_index + (i << 1)];
  1913. if (!val) {
  1914. subfrm->pulse_pos += combinatorial_table[j][i];
  1915. } else {
  1916. subfrm->pulse_sign <<= 1;
  1917. if (val < 0) subfrm->pulse_sign++;
  1918. j++;
  1919. if (j == PULSE_MAX) break;
  1920. }
  1921. }
  1922. subfrm->amp_index = optim->amp_index;
  1923. subfrm->grid_index = optim->grid_index;
  1924. subfrm->dirac_train = optim->dirac_train;
  1925. }
  1926. /**
  1927. * Compute the fixed codebook excitation.
  1928. *
  1929. * @param buf target vector
  1930. * @param impulse_resp impulse response of the combined filter
  1931. */
  1932. static void fcb_search(G723_1_Context *p, int16_t *impulse_resp,
  1933. int16_t *buf, int index)
  1934. {
  1935. FCBParam optim;
  1936. int pulse_cnt = pulses[index];
  1937. int i;
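/*
 * Run the MP-MLQ search with the plain impulse response and, if the pitch
 * lag is shorter than the subframe, once more with a Dirac-train
 * (pitch-repeated) impulse response; the variant with the smaller error
 * wins and is signalled through the dirac_train flag.
 */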
  1938. optim.min_err = 1 << 30;
  1939. get_fcb_param(&optim, impulse_resp, buf, pulse_cnt, SUBFRAME_LEN);
  1940. if (p->pitch_lag[index >> 1] < SUBFRAME_LEN - 2) {
  1941. get_fcb_param(&optim, impulse_resp, buf, pulse_cnt,
  1942. p->pitch_lag[index >> 1]);
  1943. }
  1944. /* Reconstruct the excitation */
  1945. memset(buf, 0, sizeof(int16_t) * SUBFRAME_LEN);
  1946. for (i = 0; i < pulse_cnt; i++)
  1947. buf[optim.pulse_pos[i]] = optim.pulse_sign[i];
  1948. pack_fcb_param(&p->subframe[index], &optim, buf, pulse_cnt);
  1949. if (optim.dirac_train)
  1950. gen_dirac_train(buf, p->pitch_lag[index >> 1]);
  1951. }
  1952. /**
  1953. * Pack the frame parameters into output bitstream.
  1954. *
  1955. * @param frame output buffer
  1956. * @param size size of the buffer
  1957. */
  1958. static int pack_bitstream(G723_1_Context *p, unsigned char *frame, int size)
  1959. {
  1960. PutBitContext pb;
1961. int info_bits = 0, i, temp;
  1962. init_put_bits(&pb, frame, size);
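/*
 * The 2-bit info field identifies the frame type: 0 marks an active
 * 6.3 kbit/s frame, which occupies frame_size[0] = 24 bytes.
 */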
  1963. if (p->cur_rate == RATE_6300) {
  1964. info_bits = 0;
  1965. put_bits(&pb, 2, info_bits);
  1966. }
  1967. put_bits(&pb, 8, p->lsp_index[2]);
  1968. put_bits(&pb, 8, p->lsp_index[1]);
  1969. put_bits(&pb, 8, p->lsp_index[0]);
  1970. put_bits(&pb, 7, p->pitch_lag[0] - PITCH_MIN);
  1971. put_bits(&pb, 2, p->subframe[1].ad_cb_lag);
  1972. put_bits(&pb, 7, p->pitch_lag[1] - PITCH_MIN);
  1973. put_bits(&pb, 2, p->subframe[3].ad_cb_lag);
  1974. /* Write 12 bit combined gain */
  1975. for (i = 0; i < SUBFRAMES; i++) {
  1976. temp = p->subframe[i].ad_cb_gain * GAIN_LEVELS +
  1977. p->subframe[i].amp_index;
  1978. if (p->cur_rate == RATE_6300)
  1979. temp += p->subframe[i].dirac_train << 11;
  1980. put_bits(&pb, 12, temp);
  1981. }
  1982. put_bits(&pb, 1, p->subframe[0].grid_index);
  1983. put_bits(&pb, 1, p->subframe[1].grid_index);
  1984. put_bits(&pb, 1, p->subframe[2].grid_index);
  1985. put_bits(&pb, 1, p->subframe[3].grid_index);
  1986. if (p->cur_rate == RATE_6300) {
  1987. skip_put_bits(&pb, 1); /* reserved bit */
  1988. /* Write 13 bit combined position index */
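/* Even subframes carry up to 6 pulses (C(30,6) = 593775 position codes,
 * so the part above bit 16 is 0..9) and odd subframes up to 5
 * (C(30,5) = 142506, part above bit 14 is 0..8); the combined value
 * 810*h0 + 90*h1 + 9*h2 + h3 is therefore at most 8099 and fits in
 * 13 bits. */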
  1989. temp = (p->subframe[0].pulse_pos >> 16) * 810 +
  1990. (p->subframe[1].pulse_pos >> 14) * 90 +
  1991. (p->subframe[2].pulse_pos >> 16) * 9 +
  1992. (p->subframe[3].pulse_pos >> 14);
  1993. put_bits(&pb, 13, temp);
  1994. put_bits(&pb, 16, p->subframe[0].pulse_pos & 0xffff);
  1995. put_bits(&pb, 14, p->subframe[1].pulse_pos & 0x3fff);
  1996. put_bits(&pb, 16, p->subframe[2].pulse_pos & 0xffff);
  1997. put_bits(&pb, 14, p->subframe[3].pulse_pos & 0x3fff);
  1998. put_bits(&pb, 6, p->subframe[0].pulse_sign);
  1999. put_bits(&pb, 5, p->subframe[1].pulse_sign);
  2000. put_bits(&pb, 6, p->subframe[2].pulse_sign);
  2001. put_bits(&pb, 5, p->subframe[3].pulse_sign);
  2002. }
  2003. flush_put_bits(&pb);
  2004. return frame_size[info_bits];
  2005. }
  2006. static int g723_1_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
  2007. const AVFrame *frame, int *got_packet_ptr)
  2008. {
  2009. G723_1_Context *p = avctx->priv_data;
  2010. int16_t unq_lpc[LPC_ORDER * SUBFRAMES];
  2011. int16_t qnt_lpc[LPC_ORDER * SUBFRAMES];
  2012. int16_t cur_lsp[LPC_ORDER];
  2013. int16_t weighted_lpc[LPC_ORDER * SUBFRAMES << 1];
  2014. int16_t vector[FRAME_LEN + PITCH_MAX];
  2015. int offset, ret;
2016. int16_t *in = (int16_t *)frame->data[0]; /* input samples are modified in place */
  2017. HFParam hf[4];
  2018. int i, j;
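/*
 * Encoding pipeline: high-pass the input, derive and quantize the LSPs
 * from the LPC of the last subframe, build the formant-weighted speech,
 * estimate the open-loop pitch once per half frame, apply harmonic noise
 * shaping, then search the adaptive and fixed (MP-MLQ) codebooks for every
 * subframe and pack the parameters into the bitstream.
 */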
  2019. highpass_filter(in, &p->hpf_fir_mem, &p->hpf_iir_mem);
  2020. memcpy(vector, p->prev_data, HALF_FRAME_LEN * sizeof(int16_t));
  2021. memcpy(vector + HALF_FRAME_LEN, in, FRAME_LEN * sizeof(int16_t));
  2022. comp_lpc_coeff(vector, unq_lpc);
  2023. lpc2lsp(&unq_lpc[LPC_ORDER * 3], p->prev_lsp, cur_lsp);
  2024. lsp_quantize(p->lsp_index, cur_lsp, p->prev_lsp);
  2025. /* Update memory */
  2026. memcpy(vector + LPC_ORDER, p->prev_data + SUBFRAME_LEN,
  2027. sizeof(int16_t) * SUBFRAME_LEN);
  2028. memcpy(vector + LPC_ORDER + SUBFRAME_LEN, in,
  2029. sizeof(int16_t) * (HALF_FRAME_LEN + SUBFRAME_LEN));
  2030. memcpy(p->prev_data, in + HALF_FRAME_LEN,
  2031. sizeof(int16_t) * HALF_FRAME_LEN);
  2032. memcpy(in, vector + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);
  2033. perceptual_filter(p, weighted_lpc, unq_lpc, vector);
  2034. memcpy(in, vector + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);
  2035. memcpy(vector, p->prev_weight_sig, sizeof(int16_t) * PITCH_MAX);
  2036. memcpy(vector + PITCH_MAX, in, sizeof(int16_t) * FRAME_LEN);
  2037. scale_vector(vector, vector, FRAME_LEN + PITCH_MAX);
  2038. p->pitch_lag[0] = estimate_pitch(vector, PITCH_MAX);
  2039. p->pitch_lag[1] = estimate_pitch(vector, PITCH_MAX + HALF_FRAME_LEN);
  2040. for (i = PITCH_MAX, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
  2041. comp_harmonic_coeff(vector + i, p->pitch_lag[j >> 1], hf + j);
  2042. memcpy(vector, p->prev_weight_sig, sizeof(int16_t) * PITCH_MAX);
  2043. memcpy(vector + PITCH_MAX, in, sizeof(int16_t) * FRAME_LEN);
  2044. memcpy(p->prev_weight_sig, vector + FRAME_LEN, sizeof(int16_t) * PITCH_MAX);
  2045. for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
  2046. harmonic_filter(hf + j, vector + PITCH_MAX + i, in + i);
  2047. inverse_quant(cur_lsp, p->prev_lsp, p->lsp_index, 0);
  2048. lsp_interpolate(qnt_lpc, cur_lsp, p->prev_lsp);
  2049. memcpy(p->prev_lsp, cur_lsp, sizeof(int16_t) * LPC_ORDER);
  2050. offset = 0;
  2051. for (i = 0; i < SUBFRAMES; i++) {
  2052. int16_t impulse_resp[SUBFRAME_LEN];
  2053. int16_t residual[SUBFRAME_LEN + PITCH_ORDER - 1];
  2054. int16_t flt_in[SUBFRAME_LEN];
  2055. int16_t zero[LPC_ORDER], fir[LPC_ORDER], iir[LPC_ORDER];
  2056. /**
  2057. * Compute the combined impulse response of the synthesis filter,
  2058. * formant perceptual weighting filter and harmonic noise shaping filter
  2059. */
  2060. memset(zero, 0, sizeof(int16_t) * LPC_ORDER);
  2061. memset(vector, 0, sizeof(int16_t) * PITCH_MAX);
  2062. memset(flt_in, 0, sizeof(int16_t) * SUBFRAME_LEN);
  2063. flt_in[0] = 1 << 13; /* Unit impulse */
  2064. synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
  2065. zero, zero, flt_in, vector + PITCH_MAX, 1);
  2066. harmonic_filter(hf + i, vector + PITCH_MAX, impulse_resp);
  2067. /* Compute the combined zero input response */
  2068. flt_in[0] = 0;
  2069. memcpy(fir, p->perf_fir_mem, sizeof(int16_t) * LPC_ORDER);
  2070. memcpy(iir, p->perf_iir_mem, sizeof(int16_t) * LPC_ORDER);
  2071. synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
  2072. fir, iir, flt_in, vector + PITCH_MAX, 0);
  2073. memcpy(vector, p->harmonic_mem, sizeof(int16_t) * PITCH_MAX);
  2074. harmonic_noise_sub(hf + i, vector + PITCH_MAX, in);
  2075. acb_search(p, residual, impulse_resp, in, i);
  2076. gen_acb_excitation(residual, p->prev_excitation,p->pitch_lag[i >> 1],
  2077. &p->subframe[i], p->cur_rate);
  2078. sub_acb_contrib(residual, impulse_resp, in);
  2079. fcb_search(p, impulse_resp, in, i);
  2080. /* Reconstruct the excitation */
  2081. gen_acb_excitation(impulse_resp, p->prev_excitation, p->pitch_lag[i >> 1],
  2082. &p->subframe[i], RATE_6300);
  2083. memmove(p->prev_excitation, p->prev_excitation + SUBFRAME_LEN,
  2084. sizeof(int16_t) * (PITCH_MAX - SUBFRAME_LEN));
  2085. for (j = 0; j < SUBFRAME_LEN; j++)
  2086. in[j] = av_clip_int16((in[j] << 1) + impulse_resp[j]);
  2087. memcpy(p->prev_excitation + PITCH_MAX - SUBFRAME_LEN, in,
  2088. sizeof(int16_t) * SUBFRAME_LEN);
  2089. /* Update filter memories */
  2090. synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
  2091. p->perf_fir_mem, p->perf_iir_mem,
  2092. in, vector + PITCH_MAX, 0);
  2093. memmove(p->harmonic_mem, p->harmonic_mem + SUBFRAME_LEN,
  2094. sizeof(int16_t) * (PITCH_MAX - SUBFRAME_LEN));
  2095. memcpy(p->harmonic_mem + PITCH_MAX - SUBFRAME_LEN, vector + PITCH_MAX,
  2096. sizeof(int16_t) * SUBFRAME_LEN);
  2097. in += SUBFRAME_LEN;
  2098. offset += LPC_ORDER;
  2099. }
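/* An active 6.3 kbit/s frame always occupies 24 bytes (frame_size[0]). */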
2100. if ((ret = ff_alloc_packet2(avctx, avpkt, 24)) < 0)
  2101. return ret;
  2102. *got_packet_ptr = 1;
  2103. avpkt->size = pack_bitstream(p, avpkt->data, avpkt->size);
  2104. return 0;
  2105. }
  2106. AVCodec ff_g723_1_encoder = {
  2107. .name = "g723_1",
  2108. .type = AVMEDIA_TYPE_AUDIO,
  2109. .id = AV_CODEC_ID_G723_1,
  2110. .priv_data_size = sizeof(G723_1_Context),
  2111. .init = g723_1_encode_init,
  2112. .encode2 = g723_1_encode_frame,
  2113. .long_name = NULL_IF_CONFIG_SMALL("G.723.1"),
  2114. .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,
  2115. AV_SAMPLE_FMT_NONE},
  2116. };
  2117. #endif