  1. /*
  2. * G.723.1 compatible decoder
  3. * Copyright (c) 2006 Benjamin Larsson
  4. * Copyright (c) 2010 Mohamed Naufal Basheer
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * G.723.1 compatible decoder
  25. */
  26. #define BITSTREAM_READER_LE
  27. #include "libavutil/audioconvert.h"
  #include "libavutil/avassert.h"
  28. #include "libavutil/lzo.h"
  29. #include "libavutil/opt.h"
  30. #include "avcodec.h"
  31. #include "internal.h"
  32. #include "get_bits.h"
  33. #include "acelp_vectors.h"
  34. #include "celp_filters.h"
  35. #include "celp_math.h"
  36. #include "lsp.h"
  37. #include "g723_1_data.h"
  38. typedef struct g723_1_context {
  39. AVClass *class;
  40. AVFrame frame;
  41. G723_1_Subframe subframe[4];
  42. enum FrameType cur_frame_type;
  43. enum FrameType past_frame_type;
  44. enum Rate cur_rate;
  45. uint8_t lsp_index[LSP_BANDS];
  46. int pitch_lag[2];
  47. int erased_frames;
  48. int16_t prev_lsp[LPC_ORDER];
  49. int16_t prev_excitation[PITCH_MAX];
  50. int16_t excitation[PITCH_MAX + FRAME_LEN + 4];
  51. int16_t synth_mem[LPC_ORDER];
  52. int16_t fir_mem[LPC_ORDER];
  53. int iir_mem[LPC_ORDER];
  54. int random_seed;
  55. int interp_index;
  56. int interp_gain;
  57. int sid_gain;
  58. int cur_gain;
  59. int reflection_coef;
  60. int pf_gain; ///< formant postfilter
  61. ///< gain scaling unit memory
  62. int postfilter;
  63. int16_t audio[FRAME_LEN + LPC_ORDER];
  64. int16_t prev_data[HALF_FRAME_LEN];
  65. int16_t prev_weight_sig[PITCH_MAX];
  66. int16_t hpf_fir_mem; ///< highpass filter fir
  67. int hpf_iir_mem; ///< and iir memories
  68. int16_t perf_fir_mem[LPC_ORDER]; ///< perceptual filter fir
  69. int16_t perf_iir_mem[LPC_ORDER]; ///< and iir memories
  70. int16_t harmonic_mem[PITCH_MAX];
  71. } G723_1_Context;
  72. static av_cold int g723_1_decode_init(AVCodecContext *avctx)
  73. {
  74. G723_1_Context *p = avctx->priv_data;
  75. avctx->channel_layout = AV_CH_LAYOUT_MONO;
  76. avctx->sample_fmt = AV_SAMPLE_FMT_S16;
  77. avctx->channels = 1;
  78. p->pf_gain = 1 << 12;
  79. avcodec_get_frame_defaults(&p->frame);
  80. avctx->coded_frame = &p->frame;
  81. memcpy(p->prev_lsp, dc_lsp, LPC_ORDER * sizeof(*p->prev_lsp));
  82. return 0;
  83. }
  84. /**
  85. * Unpack the frame into parameters.
  86. *
  87. * @param p the context
  88. * @param buf pointer to the input buffer
  89. * @param buf_size size of the input buffer
  90. */
  91. static int unpack_bitstream(G723_1_Context *p, const uint8_t *buf,
  92. int buf_size)
  93. {
  94. GetBitContext gb;
  95. int ad_cb_len;
  96. int temp, info_bits, i;
  97. init_get_bits(&gb, buf, buf_size * 8);
  98. /* Extract frame type and rate info */
  99. info_bits = get_bits(&gb, 2);
  100. if (info_bits == 3) {
  101. p->cur_frame_type = UNTRANSMITTED_FRAME;
  102. return 0;
  103. }
  104. /* Extract 24 bit lsp indices, 8 bit for each band */
  105. p->lsp_index[2] = get_bits(&gb, 8);
  106. p->lsp_index[1] = get_bits(&gb, 8);
  107. p->lsp_index[0] = get_bits(&gb, 8);
  108. if (info_bits == 2) {
  109. p->cur_frame_type = SID_FRAME;
  110. p->subframe[0].amp_index = get_bits(&gb, 6);
  111. return 0;
  112. }
  113. /* Extract the info common to both rates */
  114. p->cur_rate = info_bits ? RATE_5300 : RATE_6300;
  115. p->cur_frame_type = ACTIVE_FRAME;
  116. p->pitch_lag[0] = get_bits(&gb, 7);
  117. if (p->pitch_lag[0] > 123) /* test if forbidden code */
  118. return -1;
  119. p->pitch_lag[0] += PITCH_MIN;
  120. p->subframe[1].ad_cb_lag = get_bits(&gb, 2);
  121. p->pitch_lag[1] = get_bits(&gb, 7);
  122. if (p->pitch_lag[1] > 123)
  123. return -1;
  124. p->pitch_lag[1] += PITCH_MIN;
  125. p->subframe[3].ad_cb_lag = get_bits(&gb, 2);
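  /* Even subframes carry no transmitted lag offset; ad_cb_lag = 1 makes the
  adaptive codebook use the decoded pitch lag unchanged
  (lag = pitch_lag + ad_cb_lag - 1 below). */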
  126. p->subframe[0].ad_cb_lag = 1;
  127. p->subframe[2].ad_cb_lag = 1;
  128. for (i = 0; i < SUBFRAMES; i++) {
  129. /* Extract combined gain */
  130. temp = get_bits(&gb, 12);
  131. ad_cb_len = 170;
  132. p->subframe[i].dirac_train = 0;
  133. if (p->cur_rate == RATE_6300 && p->pitch_lag[i >> 1] < SUBFRAME_LEN - 2) {
  134. p->subframe[i].dirac_train = temp >> 11;
  135. temp &= 0x7FF;
  136. ad_cb_len = 85;
  137. }
  138. p->subframe[i].ad_cb_gain = FASTDIV(temp, GAIN_LEVELS);
  139. if (p->subframe[i].ad_cb_gain < ad_cb_len) {
  140. p->subframe[i].amp_index = temp - p->subframe[i].ad_cb_gain *
  141. GAIN_LEVELS;
  142. } else {
  143. return -1;
  144. }
  145. }
  146. p->subframe[0].grid_index = get_bits1(&gb);
  147. p->subframe[1].grid_index = get_bits1(&gb);
  148. p->subframe[2].grid_index = get_bits1(&gb);
  149. p->subframe[3].grid_index = get_bits1(&gb);
  150. if (p->cur_rate == RATE_6300) {
  151. skip_bits1(&gb); /* skip reserved bit */
  152. /* Compute pulse_pos index using the 13-bit combined position index */
  153. temp = get_bits(&gb, 13);
  154. p->subframe[0].pulse_pos = temp / 810;
  155. temp -= p->subframe[0].pulse_pos * 810;
  156. p->subframe[1].pulse_pos = FASTDIV(temp, 90);
  157. temp -= p->subframe[1].pulse_pos * 90;
  158. p->subframe[2].pulse_pos = FASTDIV(temp, 9);
  159. p->subframe[3].pulse_pos = temp - p->subframe[2].pulse_pos * 9;
  160. p->subframe[0].pulse_pos = (p->subframe[0].pulse_pos << 16) +
  161. get_bits(&gb, 16);
  162. p->subframe[1].pulse_pos = (p->subframe[1].pulse_pos << 14) +
  163. get_bits(&gb, 14);
  164. p->subframe[2].pulse_pos = (p->subframe[2].pulse_pos << 16) +
  165. get_bits(&gb, 16);
  166. p->subframe[3].pulse_pos = (p->subframe[3].pulse_pos << 14) +
  167. get_bits(&gb, 14);
  168. p->subframe[0].pulse_sign = get_bits(&gb, 6);
  169. p->subframe[1].pulse_sign = get_bits(&gb, 5);
  170. p->subframe[2].pulse_sign = get_bits(&gb, 6);
  171. p->subframe[3].pulse_sign = get_bits(&gb, 5);
  172. } else { /* 5300 bps */
  173. p->subframe[0].pulse_pos = get_bits(&gb, 12);
  174. p->subframe[1].pulse_pos = get_bits(&gb, 12);
  175. p->subframe[2].pulse_pos = get_bits(&gb, 12);
  176. p->subframe[3].pulse_pos = get_bits(&gb, 12);
  177. p->subframe[0].pulse_sign = get_bits(&gb, 4);
  178. p->subframe[1].pulse_sign = get_bits(&gb, 4);
  179. p->subframe[2].pulse_sign = get_bits(&gb, 4);
  180. p->subframe[3].pulse_sign = get_bits(&gb, 4);
  181. }
  182. return 0;
  183. }
  184. /**
  185. * Bitexact implementation of sqrt(val/2).
  186. */
  187. static int16_t square_root(int val)
  188. {
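  /* sqrt(val / 2) == sqrt(2 * val) / 2; the final & ~1 clears the low bit,
  as required for bitexactness with the reference. */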
  189. return (ff_sqrt(val << 1) >> 1) & (~1);
  190. }
  191. /**
  192. * Calculate the number of left-shifts required for normalizing the input.
  193. *
  194. * @param num input number
  195. * @param width width of the input, 15 or 31 bits
  196. */
  197. static int normalize_bits(int num, int width)
  198. {
  199. int i = 0;
  200. if (num) {
  201. if (num == -1)
  202. return width;
  203. if (num < 0)
  204. num = ~num;
  205. i = width - av_log2(num) - 1;
  206. i = FFMAX(i, 0);
  207. }
  208. return i;
  209. }
  210. #define normalize_bits_int16(num) normalize_bits(num, 15)
  211. #define normalize_bits_int32(num) normalize_bits(num, 31)
  212. #define dot_product(a,b,c,d) (ff_dot_product(a,b,c)<<(d))
  213. /**
  214. * Scale vector contents based on the largest of their absolute values.
  215. */
  216. static int scale_vector(int16_t *vector, int length)
  217. {
  218. int bits, scale, max = 0;
  219. int i;
  220. const int16_t shift_table[16] = {
  221. 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
  222. 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x7fff
  223. };
  224. for (i = 0; i < length; i++)
  225. max = FFMAX(max, FFABS(vector[i]));
  226. max = FFMIN(max, 0x7FFF);
  227. bits = normalize_bits(max, 15);
  228. scale = shift_table[bits];
  229. for (i = 0; i < length; i++) {
  230. av_assert2(av_clipl_int32(vector[i] * (int64_t)scale << 1) == vector[i] * (int64_t)scale << 1);
  231. vector[i] = (vector[i] * scale) >> 3;
  232. }
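  /* Each element was effectively shifted left by (bits - 3), so return that
  as the applied scale. */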
  233. return bits - 3;
  234. }
  235. /**
  236. * Perform inverse quantization of LSP frequencies.
  237. *
  238. * @param cur_lsp the current LSP vector
  239. * @param prev_lsp the previous LSP vector
  240. * @param lsp_index VQ indices
  241. * @param bad_frame bad frame flag
  242. */
  243. static void inverse_quant(int16_t *cur_lsp, int16_t *prev_lsp,
  244. uint8_t *lsp_index, int bad_frame)
  245. {
  246. int min_dist, pred;
  247. int i, j, temp, stable;
  248. /* Check for frame erasure */
  249. if (!bad_frame) {
  250. min_dist = 0x100;
  251. pred = 12288;
  252. } else {
  253. min_dist = 0x200;
  254. pred = 23552;
  255. lsp_index[0] = lsp_index[1] = lsp_index[2] = 0;
  256. }
  257. /* Get the VQ table entry corresponding to the transmitted index */
  258. cur_lsp[0] = lsp_band0[lsp_index[0]][0];
  259. cur_lsp[1] = lsp_band0[lsp_index[0]][1];
  260. cur_lsp[2] = lsp_band0[lsp_index[0]][2];
  261. cur_lsp[3] = lsp_band1[lsp_index[1]][0];
  262. cur_lsp[4] = lsp_band1[lsp_index[1]][1];
  263. cur_lsp[5] = lsp_band1[lsp_index[1]][2];
  264. cur_lsp[6] = lsp_band2[lsp_index[2]][0];
  265. cur_lsp[7] = lsp_band2[lsp_index[2]][1];
  266. cur_lsp[8] = lsp_band2[lsp_index[2]][2];
  267. cur_lsp[9] = lsp_band2[lsp_index[2]][3];
  268. /* Add predicted vector & DC component to the previously quantized vector */
  269. for (i = 0; i < LPC_ORDER; i++) {
  270. temp = ((prev_lsp[i] - dc_lsp[i]) * pred + (1 << 14)) >> 15;
  271. cur_lsp[i] += dc_lsp[i] + temp;
  272. }
  273. for (i = 0; i < LPC_ORDER; i++) {
  274. cur_lsp[0] = FFMAX(cur_lsp[0], 0x180);
  275. cur_lsp[LPC_ORDER - 1] = FFMIN(cur_lsp[LPC_ORDER - 1], 0x7e00);
  276. /* Stability check */
  277. for (j = 1; j < LPC_ORDER; j++) {
  278. temp = min_dist + cur_lsp[j - 1] - cur_lsp[j];
  279. if (temp > 0) {
  280. temp >>= 1;
  281. cur_lsp[j - 1] -= temp;
  282. cur_lsp[j] += temp;
  283. }
  284. }
  285. stable = 1;
  286. for (j = 1; j < LPC_ORDER; j++) {
  287. temp = cur_lsp[j - 1] + min_dist - cur_lsp[j] - 4;
  288. if (temp > 0) {
  289. stable = 0;
  290. break;
  291. }
  292. }
  293. if (stable)
  294. break;
  295. }
  296. if (!stable)
  297. memcpy(cur_lsp, prev_lsp, LPC_ORDER * sizeof(*cur_lsp));
  298. }
  299. /**
  300. * Bitexact implementation of 2ab scaled by 1/2^16.
  301. *
  302. * @param a 32 bit multiplicand
  303. * @param b 16 bit multiplier
  304. */
  305. #define MULL2(a, b) \
  306. MULL(a,b,15)
  307. /**
  308. * Convert LSP frequencies to LPC coefficients.
  309. *
  310. * @param lpc buffer for LPC coefficients
  311. */
  312. static void lsp2lpc(int16_t *lpc)
  313. {
  314. int f1[LPC_ORDER / 2 + 1];
  315. int f2[LPC_ORDER / 2 + 1];
  316. int i, j;
  317. /* Calculate negative cosine */
  318. for (j = 0; j < LPC_ORDER; j++) {
  319. int index = lpc[j] >> 7;
  320. int offset = lpc[j] & 0x7f;
  321. int64_t temp1 = cos_tab[index] << 16;
  322. int temp2 = (cos_tab[index + 1] - cos_tab[index]) *
  323. ((offset << 8) + 0x80) << 1;
  324. lpc[j] = -(av_clipl_int32(((temp1 + temp2) << 1) + (1 << 15)) >> 16);
  325. }
  326. /*
  327. * Compute sum and difference polynomial coefficients
  328. * (bitexact alternative to lsp2poly() in lsp.c)
  329. */
  330. /* Initialize with values in Q28 */
  331. f1[0] = 1 << 28;
  332. f1[1] = (lpc[0] << 14) + (lpc[2] << 14);
  333. f1[2] = lpc[0] * lpc[2] + (2 << 28);
  334. f2[0] = 1 << 28;
  335. f2[1] = (lpc[1] << 14) + (lpc[3] << 14);
  336. f2[2] = lpc[1] * lpc[3] + (2 << 28);
  337. /*
  338. * Calculate and scale the coefficients by 1/2 in
  339. * each iteration for a final scaling factor of Q25
  340. */
  341. for (i = 2; i < LPC_ORDER / 2; i++) {
  342. f1[i + 1] = f1[i - 1] + MULL2(f1[i], lpc[2 * i]);
  343. f2[i + 1] = f2[i - 1] + MULL2(f2[i], lpc[2 * i + 1]);
  344. for (j = i; j >= 2; j--) {
  345. f1[j] = MULL2(f1[j - 1], lpc[2 * i]) +
  346. (f1[j] >> 1) + (f1[j - 2] >> 1);
  347. f2[j] = MULL2(f2[j - 1], lpc[2 * i + 1]) +
  348. (f2[j] >> 1) + (f2[j - 2] >> 1);
  349. }
  350. f1[0] >>= 1;
  351. f2[0] >>= 1;
  352. f1[1] = ((lpc[2 * i] << 16 >> i) + f1[1]) >> 1;
  353. f2[1] = ((lpc[2 * i + 1] << 16 >> i) + f2[1]) >> 1;
  354. }
  355. /* Convert polynomial coefficients to LPC coefficients */
  356. for (i = 0; i < LPC_ORDER / 2; i++) {
  357. int64_t ff1 = f1[i + 1] + f1[i];
  358. int64_t ff2 = f2[i + 1] - f2[i];
  359. lpc[i] = av_clipl_int32(((ff1 + ff2) << 3) + (1 << 15)) >> 16;
  360. lpc[LPC_ORDER - i - 1] = av_clipl_int32(((ff1 - ff2) << 3) +
  361. (1 << 15)) >> 16;
  362. }
  363. }
  364. /**
  365. * Quantize LSP frequencies by interpolation and convert them to
  366. * the corresponding LPC coefficients.
  367. *
  368. * @param lpc buffer for LPC coefficients
  369. * @param cur_lsp the current LSP vector
  370. * @param prev_lsp the previous LSP vector
  371. */
  372. static void lsp_interpolate(int16_t *lpc, int16_t *cur_lsp, int16_t *prev_lsp)
  373. {
  374. int i;
  375. int16_t *lpc_ptr = lpc;
  376. /* cur_lsp * 0.25 + prev_lsp * 0.75 */
  377. ff_acelp_weighted_vector_sum(lpc, cur_lsp, prev_lsp,
  378. 4096, 12288, 1 << 13, 14, LPC_ORDER);
  379. ff_acelp_weighted_vector_sum(lpc + LPC_ORDER, cur_lsp, prev_lsp,
  380. 8192, 8192, 1 << 13, 14, LPC_ORDER);
  381. ff_acelp_weighted_vector_sum(lpc + 2 * LPC_ORDER, cur_lsp, prev_lsp,
  382. 12288, 4096, 1 << 13, 14, LPC_ORDER);
  383. memcpy(lpc + 3 * LPC_ORDER, cur_lsp, LPC_ORDER * sizeof(*lpc));
  384. for (i = 0; i < SUBFRAMES; i++) {
  385. lsp2lpc(lpc_ptr);
  386. lpc_ptr += LPC_ORDER;
  387. }
  388. }
  389. /**
  390. * Generate a train of dirac functions with period as pitch lag.
  391. */
  392. static void gen_dirac_train(int16_t *buf, int pitch_lag)
  393. {
  394. int16_t vector[SUBFRAME_LEN];
  395. int i, j;
  396. memcpy(vector, buf, SUBFRAME_LEN * sizeof(*vector));
  397. for (i = pitch_lag; i < SUBFRAME_LEN; i += pitch_lag) {
  398. for (j = 0; j < SUBFRAME_LEN - i; j++)
  399. buf[i + j] += vector[j];
  400. }
  401. }
  402. /**
  403. * Generate fixed codebook excitation vector.
  404. *
  405. * @param vector decoded excitation vector
  406. * @param subfrm current subframe
  407. * @param cur_rate current bitrate
  408. * @param pitch_lag closed loop pitch lag
  409. * @param index current subframe index
  410. */
  411. static void gen_fcb_excitation(int16_t *vector, G723_1_Subframe subfrm,
  412. enum Rate cur_rate, int pitch_lag, int index)
  413. {
  414. int temp, i, j;
  415. memset(vector, 0, SUBFRAME_LEN * sizeof(*vector));
  416. if (cur_rate == RATE_6300) {
  417. if (subfrm.pulse_pos >= max_pos[index])
  418. return;
  419. /* Decode amplitudes and positions */
  420. j = PULSE_MAX - pulses[index];
  421. temp = subfrm.pulse_pos;
  422. for (i = 0; i < SUBFRAME_LEN / GRID_SIZE; i++) {
  423. temp -= combinatorial_table[j][i];
  424. if (temp >= 0)
  425. continue;
  426. temp += combinatorial_table[j++][i];
  427. if (subfrm.pulse_sign & (1 << (PULSE_MAX - j))) {
  428. vector[subfrm.grid_index + GRID_SIZE * i] =
  429. -fixed_cb_gain[subfrm.amp_index];
  430. } else {
  431. vector[subfrm.grid_index + GRID_SIZE * i] =
  432. fixed_cb_gain[subfrm.amp_index];
  433. }
  434. if (j == PULSE_MAX)
  435. break;
  436. }
  437. if (subfrm.dirac_train == 1)
  438. gen_dirac_train(vector, pitch_lag);
  439. } else { /* 5300 bps */
  440. int cb_gain = fixed_cb_gain[subfrm.amp_index];
  441. int cb_shift = subfrm.grid_index;
  442. int cb_sign = subfrm.pulse_sign;
  443. int cb_pos = subfrm.pulse_pos;
  444. int offset, beta, lag;
  445. for (i = 0; i < 8; i += 2) {
  446. offset = ((cb_pos & 7) << 3) + cb_shift + i;
  447. vector[offset] = (cb_sign & 1) ? cb_gain : -cb_gain;
  448. cb_pos >>= 3;
  449. cb_sign >>= 1;
  450. }
  451. /* Enhance harmonic components */
  452. lag = pitch_contrib[subfrm.ad_cb_gain << 1] + pitch_lag +
  453. subfrm.ad_cb_lag - 1;
  454. beta = pitch_contrib[(subfrm.ad_cb_gain << 1) + 1];
  455. if (lag < SUBFRAME_LEN - 2) {
  456. for (i = lag; i < SUBFRAME_LEN; i++)
  457. vector[i] += beta * vector[i - lag] >> 15;
  458. }
  459. }
  460. }
  461. /**
  462. * Get delayed contribution from the previous excitation vector.
  463. */
  464. static void get_residual(int16_t *residual, int16_t *prev_excitation, int lag)
  465. {
  466. int offset = PITCH_MAX - PITCH_ORDER / 2 - lag;
  467. int i;
  468. residual[0] = prev_excitation[offset];
  469. residual[1] = prev_excitation[offset + 1];
  470. offset += 2;
  471. for (i = 2; i < SUBFRAME_LEN + PITCH_ORDER - 1; i++)
  472. residual[i] = prev_excitation[offset + (i - 2) % lag];
  473. }
  474. /**
  475. * Generate adaptive codebook excitation.
  476. */
  477. static void gen_acb_excitation(int16_t *vector, int16_t *prev_excitation,
  478. int pitch_lag, G723_1_Subframe subfrm,
  479. enum Rate cur_rate)
  480. {
  481. int16_t residual[SUBFRAME_LEN + PITCH_ORDER - 1];
  482. const int16_t *cb_ptr;
  483. int lag = pitch_lag + subfrm.ad_cb_lag - 1;
  484. int i;
  485. int64_t sum;
  486. get_residual(residual, prev_excitation, lag);
  487. /* Select quantization table */
  488. if (cur_rate == RATE_6300 && pitch_lag < SUBFRAME_LEN - 2) {
  489. cb_ptr = adaptive_cb_gain85;
  490. } else
  491. cb_ptr = adaptive_cb_gain170;
  492. /* Calculate adaptive vector */
  493. cb_ptr += subfrm.ad_cb_gain * 20;
  494. for (i = 0; i < SUBFRAME_LEN; i++) {
  495. sum = ff_dot_product(residual + i, cb_ptr, PITCH_ORDER);
  496. vector[i] = av_clipl_int32((sum << 2) + (1 << 15)) >> 16;
  497. }
  498. }
  499. /**
  500. * Estimate maximum auto-correlation around pitch lag.
  501. *
  502. * @param p the context
  503. * @param offset offset of the excitation vector
  504. * @param ccr_max pointer to the maximum auto-correlation
  505. * @param pitch_lag decoded pitch lag
  506. * @param length length of autocorrelation
  507. * @param dir forward lag(1) / backward lag(-1)
  508. */
  509. static int autocorr_max(G723_1_Context *p, int offset, int *ccr_max,
  510. int pitch_lag, int length, int dir)
  511. {
  512. int limit, ccr, lag = 0;
  513. int16_t *buf = p->excitation + offset;
  514. int i;
  515. pitch_lag = FFMIN(PITCH_MAX - 3, pitch_lag);
  516. if (dir > 0)
  517. limit = FFMIN(FRAME_LEN + PITCH_MAX - offset - length, pitch_lag + 3);
  518. else
  519. limit = pitch_lag + 3;
  520. for (i = pitch_lag - 3; i <= limit; i++) {
  521. ccr = ff_dot_product(buf, buf + dir * i, length)<<1;
  522. if (ccr > *ccr_max) {
  523. *ccr_max = ccr;
  524. lag = i;
  525. }
  526. }
  527. return lag;
  528. }
  529. /**
  530. * Calculate pitch postfilter optimal and scaling gains.
  531. *
  532. * @param lag pitch postfilter forward/backward lag
  533. * @param ppf pitch postfilter parameters
  534. * @param cur_rate current bitrate
  535. * @param tgt_eng target energy
  536. * @param ccr cross-correlation
  537. * @param res_eng residual energy
  538. */
  539. static void comp_ppf_gains(int lag, PPFParam *ppf, enum Rate cur_rate,
  540. int tgt_eng, int ccr, int res_eng)
  541. {
  542. int pf_residual; /* square of postfiltered residual */
  543. int64_t temp1, temp2;
  544. ppf->index = lag;
  545. temp1 = tgt_eng * res_eng >> 1;
  546. temp2 = ccr * ccr << 1;
  547. if (temp2 > temp1) {
  548. if (ccr >= res_eng) {
  549. ppf->opt_gain = ppf_gain_weight[cur_rate];
  550. } else {
  551. ppf->opt_gain = (ccr << 15) / res_eng *
  552. ppf_gain_weight[cur_rate] >> 15;
  553. }
  554. /* pf_res^2 = tgt_eng + 2*ccr*gain + res_eng*gain^2 */
  555. temp1 = (tgt_eng << 15) + (ccr * ppf->opt_gain << 1);
  556. temp2 = (ppf->opt_gain * ppf->opt_gain >> 15) * res_eng;
  557. pf_residual = av_clipl_int32(temp1 + temp2 + (1 << 15)) >> 16;
  558. if (tgt_eng >= pf_residual << 1) {
  559. temp1 = 0x7fff;
  560. } else {
  561. temp1 = (tgt_eng << 14) / pf_residual;
  562. }
  563. /* scaling_gain = sqrt(tgt_eng/pf_res^2) */
  564. ppf->sc_gain = square_root(temp1 << 16);
  565. } else {
  566. ppf->opt_gain = 0;
  567. ppf->sc_gain = 0x7fff;
  568. }
  569. ppf->opt_gain = av_clip_int16(ppf->opt_gain * ppf->sc_gain >> 15);
  570. }
  571. /**
  572. * Calculate pitch postfilter parameters.
  573. *
  574. * @param p the context
  575. * @param offset offset of the excitation vector
  576. * @param pitch_lag decoded pitch lag
  577. * @param ppf pitch postfilter parameters
  578. * @param cur_rate current bitrate
  579. */
  580. static void comp_ppf_coeff(G723_1_Context *p, int offset, int pitch_lag,
  581. PPFParam *ppf, enum Rate cur_rate)
  582. {
  583. int16_t scale;
  584. int i;
  585. int64_t temp1, temp2;
  586. /*
  587. * 0 - target energy
  588. * 1 - forward cross-correlation
  589. * 2 - forward residual energy
  590. * 3 - backward cross-correlation
  591. * 4 - backward residual energy
  592. */
  593. int energy[5] = {0, 0, 0, 0, 0};
  594. int16_t *buf = p->excitation + offset;
  595. int fwd_lag = autocorr_max(p, offset, &energy[1], pitch_lag,
  596. SUBFRAME_LEN, 1);
  597. int back_lag = autocorr_max(p, offset, &energy[3], pitch_lag,
  598. SUBFRAME_LEN, -1);
  599. ppf->index = 0;
  600. ppf->opt_gain = 0;
  601. ppf->sc_gain = 0x7fff;
  602. /* Case 0, Section 3.6 */
  603. if (!back_lag && !fwd_lag)
  604. return;
  605. /* Compute target energy */
  606. energy[0] = ff_dot_product(buf, buf, SUBFRAME_LEN)<<1;
  607. /* Compute forward residual energy */
  608. if (fwd_lag)
  609. energy[2] = ff_dot_product(buf + fwd_lag, buf + fwd_lag,
  610. SUBFRAME_LEN)<<1;
  611. /* Compute backward residual energy */
  612. if (back_lag)
  613. energy[4] = ff_dot_product(buf - back_lag, buf - back_lag,
  614. SUBFRAME_LEN)<<1;
  615. /* Normalize and shorten */
  616. temp1 = 0;
  617. for (i = 0; i < 5; i++)
  618. temp1 = FFMAX(energy[i], temp1);
  619. scale = normalize_bits(temp1, 31);
  620. for (i = 0; i < 5; i++)
  621. energy[i] = av_clipl_int32(energy[i] << scale) >> 16;
  622. if (fwd_lag && !back_lag) { /* Case 1 */
  623. comp_ppf_gains(fwd_lag, ppf, cur_rate, energy[0], energy[1],
  624. energy[2]);
  625. } else if (!fwd_lag) { /* Case 2 */
  626. comp_ppf_gains(-back_lag, ppf, cur_rate, energy[0], energy[3],
  627. energy[4]);
  628. } else { /* Case 3 */
  629. /*
  630. * Select the largest of energy[1]^2/energy[2]
  631. * and energy[3]^2/energy[4]
  632. */
  633. temp1 = energy[4] * ((energy[1] * energy[1] + (1 << 14)) >> 15);
  634. temp2 = energy[2] * ((energy[3] * energy[3] + (1 << 14)) >> 15);
  635. if (temp1 >= temp2) {
  636. comp_ppf_gains(fwd_lag, ppf, cur_rate, energy[0], energy[1],
  637. energy[2]);
  638. } else {
  639. comp_ppf_gains(-back_lag, ppf, cur_rate, energy[0], energy[3],
  640. energy[4]);
  641. }
  642. }
  643. }
  644. /**
  645. * Classify frames as voiced/unvoiced.
  646. *
  647. * @param p the context
  648. * @param pitch_lag decoded pitch_lag
  649. * @param exc_eng excitation energy estimation
  650. * @param scale scaling factor of exc_eng
  651. *
  652. * @return residual interpolation index if voiced, 0 otherwise
  653. */
  654. static int comp_interp_index(G723_1_Context *p, int pitch_lag,
  655. int *exc_eng, int *scale)
  656. {
  657. int offset = PITCH_MAX + 2 * SUBFRAME_LEN;
  658. int16_t *buf = p->excitation + offset;
  659. int index, ccr, tgt_eng, best_eng, temp;
  660. *scale = scale_vector(p->excitation, FRAME_LEN + PITCH_MAX);
  661. /* Compute maximum backward cross-correlation */
  662. ccr = 0;
  663. index = autocorr_max(p, offset, &ccr, pitch_lag, SUBFRAME_LEN * 2, -1);
  664. ccr = av_clipl_int32((int64_t)ccr + (1 << 15)) >> 16;
  665. /* Compute target energy */
  666. tgt_eng = ff_dot_product(buf, buf, SUBFRAME_LEN * 2)<<1;
  667. *exc_eng = av_clipl_int32(tgt_eng + (1 << 15)) >> 16;
  668. if (ccr <= 0)
  669. return 0;
  670. /* Compute best energy */
  671. best_eng = ff_dot_product(buf - index, buf - index,
  672. SUBFRAME_LEN * 2)<<1;
  673. best_eng = av_clipl_int32((int64_t)best_eng + (1 << 15)) >> 16;
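  /* Voiced/unvoiced decision: voiced when ccr^2 > (best_eng * exc_eng) / 8,
  i.e. the normalized correlation exceeds roughly 0.35. */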
  674. temp = best_eng * *exc_eng >> 3;
  675. if (temp < ccr * ccr) {
  676. return index;
  677. } else
  678. return 0;
  679. }
  680. /**
  681. * Perform residual interpolation based on frame classification.
  682. *
  683. * @param buf decoded excitation vector
  684. * @param out output vector
  685. * @param lag decoded pitch lag
  686. * @param gain interpolated gain
  687. * @param rseed seed for random number generator
  688. */
  689. static void residual_interp(int16_t *buf, int16_t *out, int lag,
  690. int gain, int *rseed)
  691. {
  692. int i;
  693. if (lag) { /* Voiced */
  694. int16_t *vector_ptr = buf + PITCH_MAX;
  695. /* Attenuate */
  696. for (i = 0; i < lag; i++)
  697. vector_ptr[i - lag] = vector_ptr[i - lag] * 3 >> 2;
  698. av_memcpy_backptr((uint8_t*)vector_ptr, lag * sizeof(*vector_ptr),
  699. FRAME_LEN * sizeof(*vector_ptr));
  700. memcpy(out, vector_ptr, FRAME_LEN * sizeof(*vector_ptr));
  701. } else { /* Unvoiced */
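  /* Generate pseudo-random excitation with a linear congruential generator,
  scaled by the interpolated gain. */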
  702. for (i = 0; i < FRAME_LEN; i++) {
  703. *rseed = *rseed * 521 + 259;
  704. out[i] = gain * *rseed >> 15;
  705. }
  706. memset(buf, 0, (FRAME_LEN + PITCH_MAX) * sizeof(*buf));
  707. }
  708. }
  709. /**
  710. * Perform IIR filtering.
  711. *
  712. * @param fir_coef FIR coefficients
  713. * @param iir_coef IIR coefficients
  714. * @param src source vector
  715. * @param dest destination vector
  716. * @param width width of the output, 16 bits(0) / 32 bits(1)
  717. */
  718. #define iir_filter(fir_coef, iir_coef, src, dest, width)\
  719. {\
  720. int m, n;\
  721. int res_shift = 16 & ~-(width);\
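  /* width == 1: res_shift = 0, keep 32-bit output; width == 0: res_shift = 16, scale back to int16 */\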
  722. int in_shift = 16 - res_shift;\
  723. \
  724. for (m = 0; m < SUBFRAME_LEN; m++) {\
  725. int64_t filter = 0;\
  726. for (n = 1; n <= LPC_ORDER; n++) {\
  727. filter -= (fir_coef)[n - 1] * (src)[m - n] -\
  728. (iir_coef)[n - 1] * ((dest)[m - n] >> in_shift);\
  729. }\
  730. \
  731. (dest)[m] = av_clipl_int32(((src)[m] << 16) + (filter << 3) +\
  732. (1 << 15)) >> res_shift;\
  733. }\
  734. }
  735. /**
  736. * Adjust gain of postfiltered signal.
  737. *
  738. * @param p the context
  739. * @param buf postfiltered output vector
  740. * @param energy input energy coefficient
  741. */
  742. static void gain_scale(G723_1_Context *p, int16_t * buf, int energy)
  743. {
  744. int num, denom, gain, bits1, bits2;
  745. int i;
  746. num = energy;
  747. denom = 0;
  748. for (i = 0; i < SUBFRAME_LEN; i++) {
  749. int64_t temp = buf[i] >> 2;
  750. temp = av_clipl_int32(MUL64(temp, temp) << 1);
  751. denom = av_clipl_int32(denom + temp);
  752. }
  753. if (num && denom) {
  754. bits1 = normalize_bits(num, 31);
  755. bits2 = normalize_bits(denom, 31);
  756. num = num << bits1 >> 1;
  757. denom <<= bits2;
  758. bits2 = 5 + bits1 - bits2;
  759. bits2 = FFMAX(0, bits2);
  760. gain = (num >> 1) / (denom >> 16);
  761. gain = square_root(gain << 16 >> bits2);
  762. } else {
  763. gain = 1 << 12;
  764. }
  765. for (i = 0; i < SUBFRAME_LEN; i++) {
  766. p->pf_gain = ((p->pf_gain << 4) - p->pf_gain + gain + (1 << 3)) >> 4;
  767. buf[i] = av_clip_int16((buf[i] * (p->pf_gain + (p->pf_gain >> 4)) +
  768. (1 << 10)) >> 11);
  769. }
  770. }
  771. /**
  772. * Perform formant filtering.
  773. *
  774. * @param p the context
  775. * @param lpc quantized lpc coefficients
  776. * @param buf output buffer
  777. */
  778. static void formant_postfilter(G723_1_Context *p, int16_t *lpc, int16_t *buf)
  779. {
  780. int16_t filter_coef[2][LPC_ORDER], *buf_ptr;
  781. int filter_signal[LPC_ORDER + FRAME_LEN], *signal_ptr;
  782. int i, j, k;
  783. memcpy(buf, p->fir_mem, LPC_ORDER * sizeof(*buf));
  784. memcpy(filter_signal, p->iir_mem, LPC_ORDER * sizeof(*filter_signal));
  785. for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
  786. for (k = 0; k < LPC_ORDER; k++) {
  787. filter_coef[0][k] = (-lpc[k] * postfilter_tbl[0][k] +
  788. (1 << 14)) >> 15;
  789. filter_coef[1][k] = (-lpc[k] * postfilter_tbl[1][k] +
  790. (1 << 14)) >> 15;
  791. }
  792. iir_filter(filter_coef[0], filter_coef[1], buf + i,
  793. filter_signal + i, 1);
  794. lpc += LPC_ORDER;
  795. }
  796. memcpy(p->fir_mem, buf + FRAME_LEN, LPC_ORDER * sizeof(int16_t));
  797. memcpy(p->iir_mem, filter_signal + FRAME_LEN, LPC_ORDER * sizeof(int));
  798. buf_ptr = buf + LPC_ORDER;
  799. signal_ptr = filter_signal + LPC_ORDER;
  800. for (i = 0; i < SUBFRAMES; i++) {
  801. int16_t temp_vector[SUBFRAME_LEN];
  802. int16_t temp;
  803. int auto_corr[2];
  804. int scale, energy;
  805. /* Normalize */
  806. memcpy(temp_vector, buf_ptr, SUBFRAME_LEN * sizeof(*temp_vector));
  807. scale = scale_vector(temp_vector, SUBFRAME_LEN);
  808. /* Compute auto correlation coefficients */
  809. auto_corr[0] = ff_dot_product(temp_vector, temp_vector + 1,
  810. SUBFRAME_LEN - 1)<<1;
  811. auto_corr[1] = ff_dot_product(temp_vector, temp_vector,
  812. SUBFRAME_LEN)<<1;
  813. /* Compute reflection coefficient */
  814. temp = auto_corr[1] >> 16;
  815. if (temp) {
  816. temp = (auto_corr[0] >> 2) / temp;
  817. }
  818. p->reflection_coef = ((p->reflection_coef << 2) - p->reflection_coef +
  819. temp + 2) >> 2;
  820. temp = (p->reflection_coef * 0xffffc >> 3) & 0xfffc;
  821. /* Compensation filter */
  822. for (j = 0; j < SUBFRAME_LEN; j++) {
  823. buf_ptr[j] = av_clipl_int32(signal_ptr[j] +
  824. ((signal_ptr[j - 1] >> 16) *
  825. temp << 1)) >> 16;
  826. }
  827. /* Compute normalized signal energy */
  828. temp = 2 * scale + 4;
  829. if (temp < 0) {
  830. energy = av_clipl_int32((int64_t)auto_corr[1] << -temp);
  831. } else
  832. energy = auto_corr[1] >> temp;
  833. gain_scale(p, buf_ptr, energy);
  834. buf_ptr += SUBFRAME_LEN;
  835. signal_ptr += SUBFRAME_LEN;
  836. }
  837. }
  838. static int g723_1_decode_frame(AVCodecContext *avctx, void *data,
  839. int *got_frame_ptr, AVPacket *avpkt)
  840. {
  841. G723_1_Context *p = avctx->priv_data;
  842. const uint8_t *buf = avpkt->data;
  843. int buf_size = avpkt->size;
  844. int dec_mode = buf[0] & 3;
  845. PPFParam ppf[SUBFRAMES];
  846. int16_t cur_lsp[LPC_ORDER];
  847. int16_t lpc[SUBFRAMES * LPC_ORDER];
  848. int16_t acb_vector[SUBFRAME_LEN];
  849. int16_t *vector_ptr;
  850. int16_t *out;
  851. int bad_frame = 0, i, j, ret;
  852. if (buf_size < frame_size[dec_mode]) {
  853. if (buf_size)
  854. av_log(avctx, AV_LOG_WARNING,
  855. "Expected %d bytes, got %d - skipping packet\n",
  856. frame_size[dec_mode], buf_size);
  857. *got_frame_ptr = 0;
  858. return buf_size;
  859. }
  860. if (unpack_bitstream(p, buf, buf_size) < 0) {
  861. bad_frame = 1;
  862. if (p->past_frame_type == ACTIVE_FRAME)
  863. p->cur_frame_type = ACTIVE_FRAME;
  864. else
  865. p->cur_frame_type = UNTRANSMITTED_FRAME;
  866. }
  867. p->frame.nb_samples = FRAME_LEN;
  868. if ((ret = avctx->get_buffer(avctx, &p->frame)) < 0) {
  869. av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
  870. return ret;
  871. }
  872. out = (int16_t *)p->frame.data[0];
  873. if (p->cur_frame_type == ACTIVE_FRAME) {
  874. if (!bad_frame)
  875. p->erased_frames = 0;
  876. else if (p->erased_frames != 3)
  877. p->erased_frames++;
  878. inverse_quant(cur_lsp, p->prev_lsp, p->lsp_index, bad_frame);
  879. lsp_interpolate(lpc, cur_lsp, p->prev_lsp);
  880. /* Save the lsp_vector for the next frame */
  881. memcpy(p->prev_lsp, cur_lsp, LPC_ORDER * sizeof(*p->prev_lsp));
  882. /* Generate the excitation for the frame */
  883. memcpy(p->excitation, p->prev_excitation,
  884. PITCH_MAX * sizeof(*p->excitation));
  885. vector_ptr = p->excitation + PITCH_MAX;
  886. if (!p->erased_frames) {
  887. /* Update interpolation gain memory */
  888. p->interp_gain = fixed_cb_gain[(p->subframe[2].amp_index +
  889. p->subframe[3].amp_index) >> 1];
  890. for (i = 0; i < SUBFRAMES; i++) {
  891. gen_fcb_excitation(vector_ptr, p->subframe[i], p->cur_rate,
  892. p->pitch_lag[i >> 1], i);
  893. gen_acb_excitation(acb_vector, &p->excitation[SUBFRAME_LEN * i],
  894. p->pitch_lag[i >> 1], p->subframe[i],
  895. p->cur_rate);
  896. /* Get the total excitation */
  897. for (j = 0; j < SUBFRAME_LEN; j++) {
  898. vector_ptr[j] = av_clip_int16(vector_ptr[j] << 1);
  899. vector_ptr[j] = av_clip_int16(vector_ptr[j] +
  900. acb_vector[j]);
  901. }
  902. vector_ptr += SUBFRAME_LEN;
  903. }
  904. vector_ptr = p->excitation + PITCH_MAX;
  905. /* Save the excitation */
  906. memcpy(p->audio + LPC_ORDER, vector_ptr, FRAME_LEN * sizeof(*p->audio));
  907. p->interp_index = comp_interp_index(p, p->pitch_lag[1],
  908. &p->sid_gain, &p->cur_gain);
  909. if (p->postfilter) {
  910. i = PITCH_MAX;
  911. for (j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
  912. comp_ppf_coeff(p, i, p->pitch_lag[j >> 1],
  913. ppf + j, p->cur_rate);
  914. }
  915. /* Restore the original excitation */
  916. memcpy(p->excitation, p->prev_excitation,
  917. PITCH_MAX * sizeof(*p->excitation));
  918. memcpy(vector_ptr, p->audio + LPC_ORDER, FRAME_LEN * sizeof(*vector_ptr));
  919. /* Perform pitch postfiltering */
  920. if (p->postfilter)
  921. for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
  922. ff_acelp_weighted_vector_sum(p->audio + LPC_ORDER + i,
  923. vector_ptr + i,
  924. vector_ptr + i + ppf[j].index,
  925. ppf[j].sc_gain,
  926. ppf[j].opt_gain,
  927. 1 << 14, 15, SUBFRAME_LEN);
  928. } else {
  929. p->interp_gain = (p->interp_gain * 3 + 2) >> 2;
  930. if (p->erased_frames == 3) {
  931. /* Mute output */
  932. memset(p->excitation, 0,
  933. (FRAME_LEN + PITCH_MAX) * sizeof(*p->excitation));
  934. memset(p->frame.data[0], 0,
  935. (FRAME_LEN + LPC_ORDER) * sizeof(int16_t));
  936. } else {
  937. /* Regenerate frame */
  938. residual_interp(p->excitation, p->audio + LPC_ORDER, p->interp_index,
  939. p->interp_gain, &p->random_seed);
  940. }
  941. }
  942. /* Save the excitation for the next frame */
  943. memcpy(p->prev_excitation, p->excitation + FRAME_LEN,
  944. PITCH_MAX * sizeof(*p->excitation));
  945. } else {
  946. memset(out, 0, FRAME_LEN * 2);
  947. av_log(avctx, AV_LOG_WARNING,
  948. "G.723.1: Comfort noise generation not supported yet\n");
  949. *got_frame_ptr = 1;
  950. *(AVFrame *)data = p->frame;
  951. return frame_size[dec_mode];
  952. }
  953. p->past_frame_type = p->cur_frame_type;
  954. memcpy(p->audio, p->synth_mem, LPC_ORDER * sizeof(*p->audio));
  955. for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
  956. ff_celp_lp_synthesis_filter(p->audio + i, &lpc[j * LPC_ORDER],
  957. p->audio + i, SUBFRAME_LEN, LPC_ORDER,
  958. 0, 1, 1 << 12);
  959. memcpy(p->synth_mem, p->audio + FRAME_LEN, LPC_ORDER * sizeof(*p->audio));
  960. if (p->postfilter) {
  961. formant_postfilter(p, lpc, p->audio);
  962. memcpy(p->frame.data[0], p->audio + LPC_ORDER, FRAME_LEN * 2);
  963. } else { // if output is not postfiltered it should be scaled by 2
  964. for (i = 0; i < FRAME_LEN; i++)
  965. out[i] = av_clip_int16(p->audio[LPC_ORDER + i] << 1);
  966. }
  967. *got_frame_ptr = 1;
  968. *(AVFrame *)data = p->frame;
  969. return frame_size[dec_mode];
  970. }
  971. #define OFFSET(x) offsetof(G723_1_Context, x)
  972. #define AD AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM
  973. static const AVOption options[] = {
  974. { "postfilter", "postfilter on/off", OFFSET(postfilter), AV_OPT_TYPE_INT,
  975. { 1 }, 0, 1, AD },
  976. { NULL }
  977. };
  978. static const AVClass g723_1dec_class = {
  979. .class_name = "G.723.1 decoder",
  980. .item_name = av_default_item_name,
  981. .option = options,
  982. .version = LIBAVUTIL_VERSION_INT,
  983. };
  984. AVCodec ff_g723_1_decoder = {
  985. .name = "g723_1",
  986. .type = AVMEDIA_TYPE_AUDIO,
  987. .id = AV_CODEC_ID_G723_1,
  988. .priv_data_size = sizeof(G723_1_Context),
  989. .init = g723_1_decode_init,
  990. .decode = g723_1_decode_frame,
  991. .long_name = NULL_IF_CONFIG_SMALL("G.723.1"),
  992. .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
  993. .priv_class = &g723_1dec_class,
  994. };
  995. #if CONFIG_G723_1_ENCODER
  996. #define BITSTREAM_WRITER_LE
  997. #include "put_bits.h"
  998. static av_cold int g723_1_encode_init(AVCodecContext *avctx)
  999. {
  1000. G723_1_Context *p = avctx->priv_data;
  1001. if (avctx->sample_rate != 8000) {
  1002. av_log(avctx, AV_LOG_ERROR, "Only 8000Hz sample rate supported\n");
  1003. return -1;
  1004. }
  1005. if (avctx->channels != 1) {
  1006. av_log(avctx, AV_LOG_ERROR, "Only mono supported\n");
  1007. return AVERROR(EINVAL);
  1008. }
  1009. if (avctx->bit_rate == 6300) {
  1010. p->cur_rate = RATE_6300;
  1011. } else if (avctx->bit_rate == 5300) {
  1012. av_log(avctx, AV_LOG_ERROR, "Bitrate not supported yet, use 6.3k\n");
  1013. return AVERROR_PATCHWELCOME;
  1014. } else {
  1015. av_log(avctx, AV_LOG_ERROR,
  1016. "Bitrate not supported, use 6.3k\n");
  1017. return AVERROR(EINVAL);
  1018. }
  1019. avctx->frame_size = 240;
  1020. memcpy(p->prev_lsp, dc_lsp, LPC_ORDER * sizeof(int16_t));
  1021. return 0;
  1022. }
  1023. /**
  1024. * Remove DC component from the input signal.
  1025. *
  1026. * @param buf input signal
  1027. * @param fir zero memory
  1028. * @param iir pole memory
  1029. */
  1030. static void highpass_filter(int16_t *buf, int16_t *fir, int *iir)
  1031. {
  1032. int i;
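  /* First-order DC removal: y[n] = x[n] - x[n-1] + (127/128) * y[n-1];
  the filter state in *iir is kept in Q15 (0x7f00 / 2^15 = 127/128). */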
  1033. for (i = 0; i < FRAME_LEN; i++) {
  1034. *iir = (buf[i] << 15) + ((-*fir) << 15) + MULL2(*iir, 0x7f00);
  1035. *fir = buf[i];
  1036. buf[i] = av_clipl_int32((int64_t)*iir + (1 << 15)) >> 16;
  1037. }
  1038. }
  1039. /**
  1040. * Estimate autocorrelation of the input vector.
  1041. *
  1042. * @param buf input buffer
  1043. * @param autocorr autocorrelation coefficients vector
  1044. */
  1045. static void comp_autocorr(int16_t *buf, int16_t *autocorr)
  1046. {
  1047. int i, scale, temp;
  1048. int16_t vector[LPC_FRAME];
  1049. memcpy(vector, buf, LPC_FRAME * sizeof(int16_t));
  1050. scale_vector(vector, LPC_FRAME);
  1051. /* Apply the Hamming window */
  1052. for (i = 0; i < LPC_FRAME; i++)
  1053. vector[i] = (vector[i] * hamming_window[i] + (1 << 14)) >> 15;
  1054. /* Compute the first autocorrelation coefficient */
  1055. temp = dot_product(vector, vector, LPC_FRAME, 0);
  1056. /* Apply a white noise correlation factor of (1025/1024) */
  1057. temp += temp >> 10;
  1058. /* Normalize */
  1059. scale = normalize_bits_int32(temp);
  1060. autocorr[0] = av_clipl_int32((int64_t)(temp << scale) +
  1061. (1 << 15)) >> 16;
  1062. /* Compute the remaining coefficients */
  1063. if (!autocorr[0]) {
  1064. memset(autocorr + 1, 0, LPC_ORDER * sizeof(int16_t));
  1065. } else {
  1066. for (i = 1; i <= LPC_ORDER; i++) {
  1067. temp = dot_product(vector, vector + i, LPC_FRAME - i, 0);
  1068. temp = MULL2((temp << scale), binomial_window[i - 1]);
  1069. autocorr[i] = av_clipl_int32((int64_t)temp + (1 << 15)) >> 16;
  1070. }
  1071. }
  1072. }
  1073. /**
  1074. * Use Levinson-Durbin recursion to compute LPC coefficients from
  1075. * autocorrelation values.
  1076. *
  1077. * @param lpc LPC coefficients vector
  1078. * @param autocorr autocorrelation coefficients vector
  1079. * @param error prediction error
  1080. */
  1081. static void levinson_durbin(int16_t *lpc, int16_t *autocorr, int16_t error)
  1082. {
  1083. int16_t vector[LPC_ORDER];
  1084. int16_t partial_corr;
  1085. int i, j, temp;
  1086. memset(lpc, 0, LPC_ORDER * sizeof(int16_t));
  1087. for (i = 0; i < LPC_ORDER; i++) {
  1088. /* Compute the partial correlation coefficient */
  1089. temp = 0;
  1090. for (j = 0; j < i; j++)
  1091. temp -= lpc[j] * autocorr[i - j - 1];
  1092. temp = ((autocorr[i] << 13) + temp) << 3;
  1093. if (FFABS(temp) >= (error << 16))
  1094. break;
  1095. partial_corr = temp / (error << 1);
  1096. lpc[i] = av_clipl_int32((int64_t)(partial_corr << 14) +
  1097. (1 << 15)) >> 16;
  1098. /* Update the prediction error */
  1099. temp = MULL2(temp, partial_corr);
  1100. error = av_clipl_int32((int64_t)(error << 16) - temp +
  1101. (1 << 15)) >> 16;
  1102. memcpy(vector, lpc, i * sizeof(int16_t));
  1103. for (j = 0; j < i; j++) {
  1104. temp = partial_corr * vector[i - j - 1] << 1;
  1105. lpc[j] = av_clipl_int32((int64_t)(lpc[j] << 16) - temp +
  1106. (1 << 15)) >> 16;
  1107. }
  1108. }
  1109. }
  1110. /**
  1111. * Calculate LPC coefficients for the current frame.
  1112. *
  1113. * @param buf current frame
  1115. * @param lpc LPC coefficients vector
  1116. */
  1117. static void comp_lpc_coeff(int16_t *buf, int16_t *lpc)
  1118. {
  1119. int16_t autocorr[(LPC_ORDER + 1) * SUBFRAMES];
  1120. int16_t *autocorr_ptr = autocorr;
  1121. int16_t *lpc_ptr = lpc;
  1122. int i, j;
  1123. for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
  1124. comp_autocorr(buf + i, autocorr_ptr);
  1125. levinson_durbin(lpc_ptr, autocorr_ptr + 1, autocorr_ptr[0]);
  1126. lpc_ptr += LPC_ORDER;
  1127. autocorr_ptr += LPC_ORDER + 1;
  1128. }
  1129. }
  1130. static void lpc2lsp(int16_t *lpc, int16_t *prev_lsp, int16_t *lsp)
  1131. {
  1132. int f[LPC_ORDER + 2]; ///< coefficients of the sum and difference
  1133. ///< polynomials (F1, F2) ordered as
  1134. ///< f1[0], f2[0], ...., f1[5], f2[5]
  1135. int max, shift, cur_val, prev_val, count, p;
  1136. int i, j;
  1137. int64_t temp;
  1138. /* Apply bandwidth expansion on the LPC coefficients */
  1139. for (i = 0; i < LPC_ORDER; i++)
  1140. lsp[i] = (lpc[i] * bandwidth_expand[i] + (1 << 14)) >> 15;
  1141. /* Initialize f1[0] and f2[0] to 1 in Q25 */
  1142. f[0] = f[1] = 1 << 25;
  1143. /* Compute the remaining coefficients */
  1144. for (i = 0; i < LPC_ORDER / 2; i++) {
  1145. /* f1 */
  1146. f[2 * i + 2] = -f[2 * i] - ((lsp[i] + lsp[LPC_ORDER - 1 - i]) << 12);
  1147. /* f2 */
  1148. f[2 * i + 3] = f[2 * i + 1] - ((lsp[i] - lsp[LPC_ORDER - 1 - i]) << 12);
  1149. }
  1150. /* Divide f1[5] and f2[5] by 2 for use in polynomial evaluation */
  1151. f[LPC_ORDER] >>= 1;
  1152. f[LPC_ORDER + 1] >>= 1;
  1153. /* Normalize and shorten */
  1154. max = FFABS(f[0]);
  1155. for (i = 1; i < LPC_ORDER + 2; i++)
  1156. max = FFMAX(max, FFABS(f[i]));
  1157. shift = normalize_bits_int32(max);
  1158. for (i = 0; i < LPC_ORDER + 2; i++)
  1159. f[i] = av_clipl_int32((int64_t)(f[i] << shift) + (1 << 15)) >> 16;
  1160. /**
  1161. * Evaluate F1 and F2 at uniform intervals of pi/256 along the
  1162. * unit circle and check for zero crossings.
  1163. */
  1164. p = 0;
  1165. temp = 0;
  1166. for (i = 0; i <= LPC_ORDER / 2; i++)
  1167. temp += f[2 * i] * cos_tab[0];
  1168. prev_val = av_clipl_int32(temp << 1);
  1169. count = 0;
  1170. for ( i = 1; i < COS_TBL_SIZE / 2; i++) {
  1171. /* Evaluate */
  1172. temp = 0;
  1173. for (j = 0; j <= LPC_ORDER / 2; j++)
  1174. temp += f[LPC_ORDER - 2 * j + p] * cos_tab[i * j % COS_TBL_SIZE];
  1175. cur_val = av_clipl_int32(temp << 1);
  1176. /* Check for sign change, indicating a zero crossing */
  1177. if ((cur_val ^ prev_val) < 0) {
  1178. int abs_cur = FFABS(cur_val);
  1179. int abs_prev = FFABS(prev_val);
  1180. int sum = abs_cur + abs_prev;
  1181. shift = normalize_bits_int32(sum);
  1182. sum <<= shift;
  1183. abs_prev = abs_prev << shift >> 8;
  1184. lsp[count++] = ((i - 1) << 7) + (abs_prev >> 1) / (sum >> 16);
  1185. if (count == LPC_ORDER)
  1186. break;
  1187. /* Switch between sum and difference polynomials */
  1188. p ^= 1;
  1189. /* Evaluate */
  1190. temp = 0;
  1191. for (j = 0; j <= LPC_ORDER / 2; j++){
  1192. temp += f[LPC_ORDER - 2 * j + p] *
  1193. cos_tab[i * j % COS_TBL_SIZE];
  1194. }
  1195. cur_val = av_clipl_int32(temp<<1);
  1196. }
  1197. prev_val = cur_val;
  1198. }
  1199. if (count != LPC_ORDER)
  1200. memcpy(lsp, prev_lsp, LPC_ORDER * sizeof(int16_t));
  1201. }
  1202. /**
  1203. * Quantize the current LSP subvector.
  1204. *
  1205. * @param num band number
  1206. * @param offset offset of the current subvector in an LPC_ORDER vector
  1207. * @param size size of the current subvector
  1208. */
  1209. #define get_index(num, offset, size) \
  1210. {\
  1211. int error, max = -1;\
  1212. int16_t temp[4];\
  1213. int i, j;\
  1214. for (i = 0; i < LSP_CB_SIZE; i++) {\
  1215. for (j = 0; j < size; j++){\
  1216. temp[j] = (weight[j + (offset)] * lsp_band##num[i][j] +\
  1217. (1 << 14)) >> 15;\
  1218. }\
  1219. error = dot_product(lsp + (offset), temp, size, 1) << 1;\
  1220. error -= dot_product(lsp_band##num[i], temp, size, 1);\
  1221. if (error > max) {\
  1222. max = error;\
  1223. lsp_index[num] = i;\
  1224. }\
  1225. }\
  1226. }
  1227. /**
  1228. * Vector quantize the LSP frequencies.
  1229. *
  1230. * @param lsp the current lsp vector
  1231. * @param prev_lsp the previous lsp vector
  1232. */
  1233. static void lsp_quantize(uint8_t *lsp_index, int16_t *lsp, int16_t *prev_lsp)
  1234. {
  1235. int16_t weight[LPC_ORDER];
  1236. int16_t min, max;
  1237. int shift, i;
  1238. /* Calculate the VQ weighting vector */
  1239. weight[0] = (1 << 20) / (lsp[1] - lsp[0]);
  1240. weight[LPC_ORDER - 1] = (1 << 20) /
  1241. (lsp[LPC_ORDER - 1] - lsp[LPC_ORDER - 2]);
  1242. for (i = 1; i < LPC_ORDER - 1; i++) {
  1243. min = FFMIN(lsp[i] - lsp[i - 1], lsp[i + 1] - lsp[i]);
  1244. if (min > 0x20)
  1245. weight[i] = (1 << 20) / min;
  1246. else
  1247. weight[i] = INT16_MAX;
  1248. }
  1249. /* Normalize */
  1250. max = 0;
  1251. for (i = 0; i < LPC_ORDER; i++)
  1252. max = FFMAX(weight[i], max);
  1253. shift = normalize_bits_int16(max);
  1254. for (i = 0; i < LPC_ORDER; i++) {
  1255. weight[i] <<= shift;
  1256. }
  1257. /* Compute the VQ target vector */
  1258. for (i = 0; i < LPC_ORDER; i++) {
  1259. lsp[i] -= dc_lsp[i] +
  1260. (((prev_lsp[i] - dc_lsp[i]) * 12288 + (1 << 14)) >> 15);
  1261. }
  1262. get_index(0, 0, 3);
  1263. get_index(1, 3, 3);
  1264. get_index(2, 6, 4);
  1265. }
  1266. /**
  1267. * Apply the formant perceptual weighting filter.
  1268. *
  1269. * @param flt_coef filter coefficients
  1270. * @param unq_lpc unquantized lpc vector
  1271. */
  1272. static void perceptual_filter(G723_1_Context *p, int16_t *flt_coef,
  1273. int16_t *unq_lpc, int16_t *buf)
  1274. {
  1275. int16_t vector[FRAME_LEN + LPC_ORDER];
  1276. int i, j, k, l = 0;
  1277. memcpy(buf, p->iir_mem, sizeof(int16_t) * LPC_ORDER);
  1278. memcpy(vector, p->fir_mem, sizeof(int16_t) * LPC_ORDER);
  1279. memcpy(vector + LPC_ORDER, buf + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);
  1280. for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
  1281. for (k = 0; k < LPC_ORDER; k++) {
  1282. flt_coef[k + 2 * l] = (unq_lpc[k + l] * percept_flt_tbl[0][k] +
  1283. (1 << 14)) >> 15;
  1284. flt_coef[k + 2 * l + LPC_ORDER] = (unq_lpc[k + l] *
  1285. percept_flt_tbl[1][k] +
  1286. (1 << 14)) >> 15;
  1287. }
  1288. iir_filter(flt_coef + 2 * l, flt_coef + 2 * l + LPC_ORDER, vector + i,
  1289. buf + i, 0);
  1290. l += LPC_ORDER;
  1291. }
  1292. memcpy(p->iir_mem, buf + FRAME_LEN, sizeof(int16_t) * LPC_ORDER);
  1293. memcpy(p->fir_mem, vector + FRAME_LEN, sizeof(int16_t) * LPC_ORDER);
  1294. }
  1295. /**
  1296. * Estimate the open loop pitch period.
  1297. *
  1298. * @param buf perceptually weighted speech
  1299. * @param start estimation is carried out from this position
  1300. */
  1301. static int estimate_pitch(int16_t *buf, int start)
  1302. {
  1303. int max_exp = 32;
  1304. int max_ccr = 0x4000;
  1305. int max_eng = 0x7fff;
  1306. int index = PITCH_MIN;
  1307. int offset = start - PITCH_MIN + 1;
  1308. int ccr, eng, orig_eng, ccr_eng, exp;
  1309. int diff, temp;
  1310. int i;
  1311. orig_eng = dot_product(buf + offset, buf + offset, HALF_FRAME_LEN, 0);
  1312. for (i = PITCH_MIN; i <= PITCH_MAX - 3; i++) {
  1313. offset--;
  1314. /* Update energy and compute correlation */
  1315. orig_eng += buf[offset] * buf[offset] -
  1316. buf[offset + HALF_FRAME_LEN] * buf[offset + HALF_FRAME_LEN];
  1317. ccr = dot_product(buf + start, buf + offset, HALF_FRAME_LEN, 0);
  1318. if (ccr <= 0)
  1319. continue;
  1320. /* Split into mantissa and exponent to maintain precision */
  1321. exp = normalize_bits_int32(ccr);
  1322. ccr = av_clipl_int32((int64_t)(ccr << exp) + (1 << 15)) >> 16;
  1323. exp <<= 1;
  1324. ccr *= ccr;
  1325. temp = normalize_bits_int32(ccr);
  1326. ccr = ccr << temp >> 16;
  1327. exp += temp;
  1328. temp = normalize_bits_int32(orig_eng);
  1329. eng = av_clipl_int32((int64_t)(orig_eng << temp) + (1 << 15)) >> 16;
  1330. exp -= temp;
  1331. if (ccr >= eng) {
  1332. exp--;
  1333. ccr >>= 1;
  1334. }
  1335. if (exp > max_exp)
  1336. continue;
  1337. if (exp + 1 < max_exp)
  1338. goto update;
  1339. /* Equalize exponents before comparison */
  1340. if (exp + 1 == max_exp)
  1341. temp = max_ccr >> 1;
  1342. else
  1343. temp = max_ccr;
  1344. ccr_eng = ccr * max_eng;
  1345. diff = ccr_eng - eng * temp;
  1346. if (diff > 0 && (i - index < PITCH_MIN || diff > ccr_eng >> 2)) {
  1347. update:
  1348. index = i;
  1349. max_exp = exp;
  1350. max_ccr = ccr;
  1351. max_eng = eng;
  1352. }
  1353. }
  1354. return index;
  1355. }
  1356. /**
  1357. * Compute harmonic noise filter parameters.
  1358. *
  1359. * @param buf perceptually weighted speech
  1360. * @param pitch_lag open loop pitch period
  1361. * @param hf harmonic filter parameters
  1362. */
  1363. static void comp_harmonic_coeff(int16_t *buf, int16_t pitch_lag, HFParam *hf)
  1364. {
  1365. int ccr, eng, max_ccr, max_eng;
  1366. int exp, max, diff;
  1367. int energy[15];
  1368. int i, j;
  1369. for (i = 0, j = pitch_lag - 3; j <= pitch_lag + 3; i++, j++) {
  1370. /* Compute residual energy */
  1371. energy[i << 1] = dot_product(buf - j, buf - j, SUBFRAME_LEN, 0);
  1372. /* Compute correlation */
  1373. energy[(i << 1) + 1] = dot_product(buf, buf - j, SUBFRAME_LEN, 0);
  1374. }
  1375. /* Compute target energy */
  1376. energy[14] = dot_product(buf, buf, SUBFRAME_LEN, 0);
  1377. /* Normalize */
  1378. max = 0;
  1379. for (i = 0; i < 15; i++)
  1380. max = FFMAX(max, FFABS(energy[i]));
  1381. exp = normalize_bits_int32(max);
  1382. for (i = 0; i < 15; i++) {
  1383. energy[i] = av_clipl_int32((int64_t)(energy[i] << exp) +
  1384. (1 << 15)) >> 16;
  1385. }
  1386. hf->index = -1;
  1387. hf->gain = 0;
  1388. max_ccr = 1;
  1389. max_eng = 0x7fff;
  1390. for (i = 0; i <= 6; i++) {
  1391. eng = energy[i << 1];
  1392. ccr = energy[(i << 1) + 1];
  1393. if (ccr <= 0)
  1394. continue;
  1395. ccr = (ccr * ccr + (1 << 14)) >> 15;
  1396. diff = ccr * max_eng - eng * max_ccr;
  1397. if (diff > 0) {
  1398. max_ccr = ccr;
  1399. max_eng = eng;
  1400. hf->index = i;
  1401. }
  1402. }
  1403. if (hf->index == -1) {
  1404. hf->index = pitch_lag;
  1405. return;
  1406. }
  1407. eng = energy[14] * max_eng;
  1408. eng = (eng >> 2) + (eng >> 3);
  1409. ccr = energy[(hf->index << 1) + 1] * energy[(hf->index << 1) + 1];
  1410. if (eng < ccr) {
  1411. eng = energy[(hf->index << 1) + 1];
  1412. if (eng >= max_eng)
  1413. hf->gain = 0x2800;
  1414. else
  1415. hf->gain = ((eng << 15) / max_eng * 0x2800 + (1 << 14)) >> 15;
  1416. }
  1417. hf->index += pitch_lag - 3;
  1418. }
  1419. /**
  1420. * Apply the harmonic noise shaping filter.
  1421. *
  1422. * @param hf filter parameters
  1423. */
  1424. static void harmonic_filter(HFParam *hf, int16_t *src, int16_t *dest)
  1425. {
  1426. int i;
  1427. for (i = 0; i < SUBFRAME_LEN; i++) {
  1428. int64_t temp = hf->gain * src[i - hf->index] << 1;
  1429. dest[i] = av_clipl_int32((src[i] << 16) - temp + (1 << 15)) >> 16;
  1430. }
  1431. }
  1432. static void harmonic_noise_sub(HFParam *hf, int16_t *src, int16_t *dest)
  1433. {
  1434. int i;
  1435. for (i = 0; i < SUBFRAME_LEN; i++) {
  1436. int64_t temp = hf->gain * src[i - hf->index] << 1;
  1437. dest[i] = av_clipl_int32(((dest[i] - src[i]) << 16) + temp +
  1438. (1 << 15)) >> 16;
  1439. }
  1440. }
  1441. /**
  1442. * Combined synthesis and formant perceptual weighting filter.
  1443. *
  1444. * @param qnt_lpc quantized lpc coefficients
  1445. * @param perf_lpc perceptual filter coefficients
  1446. * @param perf_fir perceptual filter fir memory
  1447. * @param perf_iir perceptual filter iir memory
  1448. * @param scale the filter output will be scaled by 2^scale
  1449. */
  1450. static void synth_percept_filter(int16_t *qnt_lpc, int16_t *perf_lpc,
  1451. int16_t *perf_fir, int16_t *perf_iir,
  1452. int16_t *src, int16_t *dest, int scale)
  1453. {
  1454. int i, j;
  1455. int16_t buf_16[SUBFRAME_LEN + LPC_ORDER];
  1456. int64_t buf[SUBFRAME_LEN];
  1457. int16_t *bptr_16 = buf_16 + LPC_ORDER;
  1458. memcpy(buf_16, perf_fir, sizeof(int16_t) * LPC_ORDER);
  1459. memcpy(dest - LPC_ORDER, perf_iir, sizeof(int16_t) * LPC_ORDER);
  1460. for (i = 0; i < SUBFRAME_LEN; i++) {
  1461. int64_t temp = 0;
  1462. for (j = 1; j <= LPC_ORDER; j++)
  1463. temp -= qnt_lpc[j - 1] * bptr_16[i - j];
  1464. buf[i] = (src[i] << 15) + (temp << 3);
  1465. bptr_16[i] = av_clipl_int32(buf[i] + (1 << 15)) >> 16;
  1466. }
  1467. for (i = 0; i < SUBFRAME_LEN; i++) {
  1468. int64_t fir = 0, iir = 0;
  1469. for (j = 1; j <= LPC_ORDER; j++) {
  1470. fir -= perf_lpc[j - 1] * bptr_16[i - j];
  1471. iir += perf_lpc[j + LPC_ORDER - 1] * dest[i - j];
  1472. }
  1473. dest[i] = av_clipl_int32(((buf[i] + (fir << 3)) << scale) + (iir << 3) +
  1474. (1 << 15)) >> 16;
  1475. }
  1476. memcpy(perf_fir, buf_16 + SUBFRAME_LEN, sizeof(int16_t) * LPC_ORDER);
  1477. memcpy(perf_iir, dest + SUBFRAME_LEN - LPC_ORDER,
  1478. sizeof(int16_t) * LPC_ORDER);
  1479. }
  1480. /**
  1481. * Compute the adaptive codebook contribution.
  1482. *
  1483. * @param buf input signal
  1484. * @param index the current subframe index
  1485. */
  1486. static void acb_search(G723_1_Context *p, int16_t *residual,
  1487. int16_t *impulse_resp, int16_t *buf,
  1488. int index)
  1489. {
  1490. int16_t flt_buf[PITCH_ORDER][SUBFRAME_LEN];
  1491. const int16_t *cb_tbl = adaptive_cb_gain85;
  1492. int ccr_buf[PITCH_ORDER * SUBFRAMES << 2];
  1493. int pitch_lag = p->pitch_lag[index >> 1];
  1494. int acb_lag = 1;
  1495. int acb_gain = 0;
  1496. int odd_frame = index & 1;
  1497. int iter = 3 + odd_frame;
  1498. int count = 0;
  1499. int tbl_size = 85;
  1500. int i, j, k, l, max;
  1501. int64_t temp;
  1502. if (!odd_frame) {
  1503. if (pitch_lag == PITCH_MIN)
  1504. pitch_lag++;
  1505. else
  1506. pitch_lag = FFMIN(pitch_lag, PITCH_MAX - 5);
  1507. }
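  /* Candidate lags pitch_lag - 1 .. pitch_lag + 1 are searched; odd subframes
  also try pitch_lag + 2. */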
  1508. for (i = 0; i < iter; i++) {
  1509. get_residual(residual, p->prev_excitation, pitch_lag + i - 1);
  1510. for (j = 0; j < SUBFRAME_LEN; j++) {
  1511. temp = 0;
  1512. for (k = 0; k <= j; k++)
  1513. temp += residual[PITCH_ORDER - 1 + k] * impulse_resp[j - k];
  1514. flt_buf[PITCH_ORDER - 1][j] = av_clipl_int32((temp << 1) +
  1515. (1 << 15)) >> 16;
  1516. }
  1517. for (j = PITCH_ORDER - 2; j >= 0; j--) {
  1518. flt_buf[j][0] = ((residual[j] << 13) + (1 << 14)) >> 15;
  1519. for (k = 1; k < SUBFRAME_LEN; k++) {
  1520. temp = (flt_buf[j + 1][k - 1] << 15) +
  1521. residual[j] * impulse_resp[k];
  1522. flt_buf[j][k] = av_clipl_int32((temp << 1) + (1 << 15)) >> 16;
  1523. }
  1524. }
  1525. /* Compute crosscorrelation with the signal */
  1526. for (j = 0; j < PITCH_ORDER; j++) {
  1527. temp = dot_product(buf, flt_buf[j], SUBFRAME_LEN, 0);
  1528. ccr_buf[count++] = av_clipl_int32(temp << 1);
  1529. }
  1530. /* Compute energies */
  1531. for (j = 0; j < PITCH_ORDER; j++) {
  1532. ccr_buf[count++] = dot_product(flt_buf[j], flt_buf[j],
  1533. SUBFRAME_LEN, 1);
  1534. }
  1535. for (j = 1; j < PITCH_ORDER; j++) {
  1536. for (k = 0; k < j; k++) {
  1537. temp = dot_product(flt_buf[j], flt_buf[k], SUBFRAME_LEN, 0);
  1538. ccr_buf[count++] = av_clipl_int32(temp<<2);
  1539. }
  1540. }
  1541. }
    /* Normalize and shorten */
    max = 0;
    for (i = 0; i < 20 * iter; i++)
        max = FFMAX(max, FFABS(ccr_buf[i]));

    temp = normalize_bits_int32(max);

    for (i = 0; i < 20 * iter; i++) {
        ccr_buf[i] = av_clipl_int32((int64_t)(ccr_buf[i] << temp) +
                                    (1 << 15)) >> 16;
    }
    max = 0;
    for (i = 0; i < iter; i++) {
        /* Select quantization table */
        if ((!odd_frame && pitch_lag + i - 1 >= SUBFRAME_LEN - 2) ||
            (odd_frame && pitch_lag >= SUBFRAME_LEN - 2)) {
            cb_tbl   = adaptive_cb_gain170;
            tbl_size = 170;
        }

        for (j = 0, k = 0; j < tbl_size; j++, k += 20) {
            temp = 0;
            for (l = 0; l < 20; l++)
                temp += ccr_buf[20 * i + l] * cb_tbl[k + l];
            temp = av_clipl_int32(temp);

            if (temp > max) {
                max      = temp;
                acb_gain = j;
                acb_lag  = i;
            }
        }
    }
    if (!odd_frame) {
        pitch_lag += acb_lag - 1;
        acb_lag    = 1;
    }

    p->pitch_lag[index >> 1]      = pitch_lag;
    p->subframe[index].ad_cb_lag  = acb_lag;
    p->subframe[index].ad_cb_gain = acb_gain;
}

/**
 * Subtract the adaptive codebook contribution from the input
 * to obtain the residual.
 *
 * @param residual     adaptive codebook excitation
 * @param impulse_resp impulse response of the combined filter
 * @param buf          target vector
 */
static void sub_acb_contrib(int16_t *residual, int16_t *impulse_resp,
                            int16_t *buf)
{
    int i, j;

    /* Subtract adaptive CB contribution to obtain the residual */
    for (i = 0; i < SUBFRAME_LEN; i++) {
        int64_t temp = buf[i] << 14;
        for (j = 0; j <= i; j++)
            temp -= residual[j] * impulse_resp[i - j];
        buf[i] = av_clipl_int32((temp << 2) + (1 << 15)) >> 16;
    }
}

/**
 * Quantize the residual signal using the fixed codebook (MP-MLQ).
 *
 * @param optim        optimized fixed codebook parameters
 * @param impulse_resp impulse response of the combined filter
 * @param buf          target vector
 * @param pulse_cnt    number of fixed codebook pulses in the subframe
 * @param pitch_lag    pitch lag, or SUBFRAME_LEN to disable the Dirac train
 */
static void get_fcb_param(FCBParam *optim, int16_t *impulse_resp,
                          int16_t *buf, int pulse_cnt, int pitch_lag)
{
    FCBParam param;
    int16_t impulse_r[SUBFRAME_LEN];
    int16_t temp_corr[SUBFRAME_LEN];
    int16_t impulse_corr[SUBFRAME_LEN];

    int ccr1[SUBFRAME_LEN];
    int ccr2[SUBFRAME_LEN];
    int amp, err, max, max_amp_index, min, scale, i, j, k, l;
    int64_t temp;

    /* Update impulse response */
    memcpy(impulse_r, impulse_resp, sizeof(int16_t) * SUBFRAME_LEN);
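
    /* When the pitch period is shorter than the subframe, the fixed-codebook
       pulses repeat at the pitch period; fold that repetition (Dirac train)
       into the impulse response before searching. */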
    param.dirac_train = 0;
    if (pitch_lag < SUBFRAME_LEN - 2) {
        param.dirac_train = 1;
        gen_dirac_train(impulse_r, pitch_lag);
    }

    for (i = 0; i < SUBFRAME_LEN; i++)
        temp_corr[i] = impulse_r[i] >> 1;

    /* Compute impulse response autocorrelation */
    temp = dot_product(temp_corr, temp_corr, SUBFRAME_LEN, 1);

    scale = normalize_bits_int32(temp);
    impulse_corr[0] = av_clipl_int32((temp << scale) + (1 << 15)) >> 16;

    for (i = 1; i < SUBFRAME_LEN; i++) {
        temp = dot_product(temp_corr + i, temp_corr, SUBFRAME_LEN - i, 1);
        impulse_corr[i] = av_clipl_int32((temp << scale) + (1 << 15)) >> 16;
    }

    /* Compute crosscorrelation of impulse response with residual signal */
    scale -= 4;
    for (i = 0; i < SUBFRAME_LEN; i++) {
        temp = dot_product(buf + i, impulse_r, SUBFRAME_LEN - i, 1);
        if (scale < 0)
            ccr1[i] = temp >> -scale;
        else
            ccr1[i] = av_clipl_int32(temp << scale);
    }
    /* Search loop */
    for (i = 0; i < GRID_SIZE; i++) {
        /* Maximize the crosscorrelation */
        max = 0;
        for (j = i; j < SUBFRAME_LEN; j += GRID_SIZE) {
            temp = FFABS(ccr1[j]);
            if (temp >= max) {
                max = temp;
                param.pulse_pos[0] = j;
            }
        }

        /* Quantize the gain (max crosscorrelation/impulse_corr[0]) */
        amp = max;
        min = 1 << 30;
        max_amp_index = GAIN_LEVELS - 2;
        for (j = max_amp_index; j >= 2; j--) {
            temp = av_clipl_int32((int64_t)fixed_cb_gain[j] *
                                  impulse_corr[0] << 1);
            temp = FFABS(temp - amp);

            if (temp < min) {
                min = temp;
                max_amp_index = j;
            }
        }

        max_amp_index--;
        /* Select additional gain values */
        for (j = 1; j < 5; j++) {
            for (k = i; k < SUBFRAME_LEN; k += GRID_SIZE) {
                temp_corr[k] = 0;
                ccr2[k] = ccr1[k];
            }
            param.amp_index = max_amp_index + j - 2;
            amp = fixed_cb_gain[param.amp_index];

            param.pulse_sign[0] = (ccr2[param.pulse_pos[0]] < 0) ? -amp : amp;
            temp_corr[param.pulse_pos[0]] = 1;

            for (k = 1; k < pulse_cnt; k++) {
                max = -(1 << 30); /* avoid left-shifting a negative value */
                for (l = i; l < SUBFRAME_LEN; l += GRID_SIZE) {
                    if (temp_corr[l])
                        continue;
                    temp = impulse_corr[FFABS(l - param.pulse_pos[k - 1])];
                    temp = av_clipl_int32((int64_t)temp *
                                          param.pulse_sign[k - 1] << 1);
                    ccr2[l] -= temp;
                    temp = FFABS(ccr2[l]);
                    if (temp > max) {
                        max = temp;
                        param.pulse_pos[k] = l;
                    }
                }
                param.pulse_sign[k] = (ccr2[param.pulse_pos[k]] < 0) ?
                                      -amp : amp;
                temp_corr[param.pulse_pos[k]] = 1;
            }

            /* Create the error vector */
            memset(temp_corr, 0, sizeof(int16_t) * SUBFRAME_LEN);

            for (k = 0; k < pulse_cnt; k++)
                temp_corr[param.pulse_pos[k]] = param.pulse_sign[k];

            for (k = SUBFRAME_LEN - 1; k >= 0; k--) {
                temp = 0;
                for (l = 0; l <= k; l++) {
                    int prod = av_clipl_int32((int64_t)temp_corr[l] *
                                              impulse_r[k - l] << 1);
                    temp = av_clipl_int32(temp + prod);
                }
                temp_corr[k] = temp << 2 >> 16;
            }

            /* Compute square of error */
            err = 0;
            for (k = 0; k < SUBFRAME_LEN; k++) {
                int64_t prod;
                prod = av_clipl_int32((int64_t)buf[k] * temp_corr[k] << 1);
                err  = av_clipl_int32(err - prod);
                prod = av_clipl_int32((int64_t)temp_corr[k] * temp_corr[k]);
                err  = av_clipl_int32(err + prod);
            }

            /* Minimize */
            if (err < optim->min_err) {
                optim->min_err     = err;
                optim->grid_index  = i;
                optim->amp_index   = param.amp_index;
                optim->dirac_train = param.dirac_train;
                for (k = 0; k < pulse_cnt; k++) {
                    optim->pulse_sign[k] = param.pulse_sign[k];
                    optim->pulse_pos[k]  = param.pulse_pos[k];
                }
            }
        }
    }
}

/**
 * Encode the pulse position and gain of the current subframe.
 *
 * @param subfrm    the current subframe
 * @param optim     optimized fixed CB parameters
 * @param buf       excitation vector
 * @param pulse_cnt number of fixed codebook pulses in the subframe
 */
static void pack_fcb_param(G723_1_Subframe *subfrm, FCBParam *optim,
                           int16_t *buf, int pulse_cnt)
{
    int i, j;

    j = PULSE_MAX - pulse_cnt;

    subfrm->pulse_sign = 0;
    subfrm->pulse_pos  = 0;
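
    /* Enumerative coding of the pulse positions on the selected grid:
       empty positions advance the combinatorial index, occupied positions
       append their sign to the pulse_sign bit field. */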
    for (i = 0; i < SUBFRAME_LEN >> 1; i++) {
        int val = buf[optim->grid_index + (i << 1)];
        if (!val) {
            subfrm->pulse_pos += combinatorial_table[j][i];
        } else {
            subfrm->pulse_sign <<= 1;
            if (val < 0)
                subfrm->pulse_sign++;
            j++;

            if (j == PULSE_MAX)
                break;
        }
    }
    subfrm->amp_index   = optim->amp_index;
    subfrm->grid_index  = optim->grid_index;
    subfrm->dirac_train = optim->dirac_train;
}

/**
 * Compute the fixed codebook excitation.
 *
 * @param p            the context
 * @param impulse_resp impulse response of the combined filter
 * @param buf          target vector, overwritten with the fixed CB excitation
 * @param index        the current subframe index
 */
static void fcb_search(G723_1_Context *p, int16_t *impulse_resp,
                       int16_t *buf, int index)
{
    FCBParam optim;
    int pulse_cnt = pulses[index];
    int i;

    optim.min_err = 1 << 30;
    get_fcb_param(&optim, impulse_resp, buf, pulse_cnt, SUBFRAME_LEN);
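
    /* If the pitch period fits inside the subframe, redo the search with the
       pulses repeated at the pitch period (Dirac train) and keep whichever
       candidate yields the smaller error. */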
    if (p->pitch_lag[index >> 1] < SUBFRAME_LEN - 2) {
        get_fcb_param(&optim, impulse_resp, buf, pulse_cnt,
                      p->pitch_lag[index >> 1]);
    }

    /* Reconstruct the excitation */
    memset(buf, 0, sizeof(int16_t) * SUBFRAME_LEN);
    for (i = 0; i < pulse_cnt; i++)
        buf[optim.pulse_pos[i]] = optim.pulse_sign[i];

    pack_fcb_param(&p->subframe[index], &optim, buf, pulse_cnt);

    if (optim.dirac_train)
        gen_dirac_train(buf, p->pitch_lag[index >> 1]);
}

/**
 * Pack the frame parameters into output bitstream.
 *
 * @param p     the context
 * @param frame output buffer
 * @param size  size of the buffer
 */
static int pack_bitstream(G723_1_Context *p, unsigned char *frame, int size)
{
    PutBitContext pb;
    int info_bits, i, temp;

    init_put_bits(&pb, frame, size);
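
    /* Only the 6.3 kbit/s frame layout is handled here; info_bits == 0
       marks an active 6.3 kbit/s frame. For any other rate, info_bits would
       be left unset before the frame_size lookup at the end of this function. */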
    if (p->cur_rate == RATE_6300) {
        info_bits = 0;
        put_bits(&pb, 2, info_bits);
    }

    put_bits(&pb, 8, p->lsp_index[2]);
    put_bits(&pb, 8, p->lsp_index[1]);
    put_bits(&pb, 8, p->lsp_index[0]);

    put_bits(&pb, 7, p->pitch_lag[0] - PITCH_MIN);
    put_bits(&pb, 2, p->subframe[1].ad_cb_lag);
    put_bits(&pb, 7, p->pitch_lag[1] - PITCH_MIN);
    put_bits(&pb, 2, p->subframe[3].ad_cb_lag);

    /* Write 12 bit combined gain */
    for (i = 0; i < SUBFRAMES; i++) {
        temp = p->subframe[i].ad_cb_gain * GAIN_LEVELS +
               p->subframe[i].amp_index;
        if (p->cur_rate == RATE_6300)
            temp += p->subframe[i].dirac_train << 11;
        put_bits(&pb, 12, temp);
    }

    put_bits(&pb, 1, p->subframe[0].grid_index);
    put_bits(&pb, 1, p->subframe[1].grid_index);
    put_bits(&pb, 1, p->subframe[2].grid_index);
    put_bits(&pb, 1, p->subframe[3].grid_index);

    if (p->cur_rate == RATE_6300) {
        skip_put_bits(&pb, 1); /* reserved bit */

        /* Write 13 bit combined position index */
        temp = (p->subframe[0].pulse_pos >> 16) * 810 +
               (p->subframe[1].pulse_pos >> 14) *  90 +
               (p->subframe[2].pulse_pos >> 16) *   9 +
               (p->subframe[3].pulse_pos >> 14);
        put_bits(&pb, 13, temp);

        put_bits(&pb, 16, p->subframe[0].pulse_pos & 0xffff);
        put_bits(&pb, 14, p->subframe[1].pulse_pos & 0x3fff);
        put_bits(&pb, 16, p->subframe[2].pulse_pos & 0xffff);
        put_bits(&pb, 14, p->subframe[3].pulse_pos & 0x3fff);

        put_bits(&pb, 6, p->subframe[0].pulse_sign);
        put_bits(&pb, 5, p->subframe[1].pulse_sign);
        put_bits(&pb, 6, p->subframe[2].pulse_sign);
        put_bits(&pb, 5, p->subframe[3].pulse_sign);
    }

    flush_put_bits(&pb);
    return frame_size[info_bits];
}

static int g723_1_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                               const AVFrame *frame, int *got_packet_ptr)
{
    G723_1_Context *p = avctx->priv_data;
    int16_t unq_lpc[LPC_ORDER * SUBFRAMES];
    int16_t qnt_lpc[LPC_ORDER * SUBFRAMES];
    int16_t cur_lsp[LPC_ORDER];
    int16_t weighted_lpc[LPC_ORDER * SUBFRAMES << 1];
    int16_t vector[FRAME_LEN + PITCH_MAX];
    int offset, ret;
    /* the analysis below modifies the samples in place, so drop the const
       qualifier instead of casting to a mismatched const pointer */
    int16_t *in = (int16_t *)frame->data[0];
    HFParam hf[4];
    int i, j;
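
    /* High-pass preprocessing: remove the DC component of the input
       before analysis. */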
    highpass_filter(in, &p->hpf_fir_mem, &p->hpf_iir_mem);

    memcpy(vector, p->prev_data, HALF_FRAME_LEN * sizeof(int16_t));
    memcpy(vector + HALF_FRAME_LEN, in, FRAME_LEN * sizeof(int16_t));
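
    /* LPC analysis: one 10th-order coefficient set per subframe; the set of
       the last subframe is converted to LSPs and quantized for transmission. */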
    comp_lpc_coeff(vector, unq_lpc);
    lpc2lsp(&unq_lpc[LPC_ORDER * 3], p->prev_lsp, cur_lsp);
    lsp_quantize(p->lsp_index, cur_lsp, p->prev_lsp);

    /* Update memory */
    memcpy(vector + LPC_ORDER, p->prev_data + SUBFRAME_LEN,
           sizeof(int16_t) * SUBFRAME_LEN);
    memcpy(vector + LPC_ORDER + SUBFRAME_LEN, in,
           sizeof(int16_t) * (HALF_FRAME_LEN + SUBFRAME_LEN));
    memcpy(p->prev_data, in + HALF_FRAME_LEN,
           sizeof(int16_t) * HALF_FRAME_LEN);
    memcpy(in, vector + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);

    perceptual_filter(p, weighted_lpc, unq_lpc, vector);

    memcpy(in, vector + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);
    memcpy(vector, p->prev_weight_sig, sizeof(int16_t) * PITCH_MAX);
    memcpy(vector + PITCH_MAX, in, sizeof(int16_t) * FRAME_LEN);

    scale_vector(vector, FRAME_LEN + PITCH_MAX);
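
    /* Open-loop pitch estimation on the weighted signal,
       one lag per half-frame. */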
    p->pitch_lag[0] = estimate_pitch(vector, PITCH_MAX);
    p->pitch_lag[1] = estimate_pitch(vector, PITCH_MAX + HALF_FRAME_LEN);
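
    /* Harmonic noise shaping: derive one harmonic filter per subframe from
       the weighted signal around the open-loop lag. */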
    for (i = PITCH_MAX, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
        comp_harmonic_coeff(vector + i, p->pitch_lag[j >> 1], hf + j);

    memcpy(vector, p->prev_weight_sig, sizeof(int16_t) * PITCH_MAX);
    memcpy(vector + PITCH_MAX, in, sizeof(int16_t) * FRAME_LEN);
    memcpy(p->prev_weight_sig, vector + FRAME_LEN, sizeof(int16_t) * PITCH_MAX);

    for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
        harmonic_filter(hf + j, vector + PITCH_MAX + i, in + i);

    inverse_quant(cur_lsp, p->prev_lsp, p->lsp_index, 0);
    lsp_interpolate(qnt_lpc, cur_lsp, p->prev_lsp);

    memcpy(p->prev_lsp, cur_lsp, sizeof(int16_t) * LPC_ORDER);

    offset = 0;
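
    /* Analysis-by-synthesis loop: for each subframe, search the adaptive
       codebook (pitch predictor) and then the MP-MLQ fixed codebook, and
       update the filter memories with the reconstructed excitation. */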
    for (i = 0; i < SUBFRAMES; i++) {
        int16_t impulse_resp[SUBFRAME_LEN];
        int16_t residual[SUBFRAME_LEN + PITCH_ORDER - 1];
        int16_t flt_in[SUBFRAME_LEN];
        int16_t zero[LPC_ORDER], fir[LPC_ORDER], iir[LPC_ORDER];

        /**
         * Compute the combined impulse response of the synthesis filter,
         * formant perceptual weighting filter and harmonic noise shaping filter
         */
        memset(zero, 0, sizeof(int16_t) * LPC_ORDER);
        memset(vector, 0, sizeof(int16_t) * PITCH_MAX);
        memset(flt_in, 0, sizeof(int16_t) * SUBFRAME_LEN);

        flt_in[0] = 1 << 13; /* Unit impulse */
        synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
                             zero, zero, flt_in, vector + PITCH_MAX, 1);
        harmonic_filter(hf + i, vector + PITCH_MAX, impulse_resp);

        /* Compute the combined zero input response */
        flt_in[0] = 0;
        memcpy(fir, p->perf_fir_mem, sizeof(int16_t) * LPC_ORDER);
        memcpy(iir, p->perf_iir_mem, sizeof(int16_t) * LPC_ORDER);

        synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
                             fir, iir, flt_in, vector + PITCH_MAX, 0);
        memcpy(vector, p->harmonic_mem, sizeof(int16_t) * PITCH_MAX);
        harmonic_noise_sub(hf + i, vector + PITCH_MAX, in);

        acb_search(p, residual, impulse_resp, in, i);
        gen_acb_excitation(residual, p->prev_excitation, p->pitch_lag[i >> 1],
                           p->subframe[i], p->cur_rate);
        sub_acb_contrib(residual, impulse_resp, in);

        fcb_search(p, impulse_resp, in, i);

        /* Reconstruct the excitation */
        gen_acb_excitation(impulse_resp, p->prev_excitation, p->pitch_lag[i >> 1],
                           p->subframe[i], RATE_6300);
        memmove(p->prev_excitation, p->prev_excitation + SUBFRAME_LEN,
                sizeof(int16_t) * (PITCH_MAX - SUBFRAME_LEN));
        for (j = 0; j < SUBFRAME_LEN; j++)
            in[j] = av_clip_int16((in[j] << 1) + impulse_resp[j]);
        memcpy(p->prev_excitation + PITCH_MAX - SUBFRAME_LEN, in,
               sizeof(int16_t) * SUBFRAME_LEN);

        /* Update filter memories */
        synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
                             p->perf_fir_mem, p->perf_iir_mem,
                             in, vector + PITCH_MAX, 0);
        memmove(p->harmonic_mem, p->harmonic_mem + SUBFRAME_LEN,
                sizeof(int16_t) * (PITCH_MAX - SUBFRAME_LEN));
        memcpy(p->harmonic_mem + PITCH_MAX - SUBFRAME_LEN, vector + PITCH_MAX,
               sizeof(int16_t) * SUBFRAME_LEN);

        in += SUBFRAME_LEN;
        offset += LPC_ORDER;
    }
    if ((ret = ff_alloc_packet2(avctx, avpkt, 24)))
        return ret;

    *got_packet_ptr = 1;
    avpkt->size = pack_bitstream(p, avpkt->data, avpkt->size);
    return 0;
}

AVCodec ff_g723_1_encoder = {
    .name           = "g723_1",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_G723_1,
    .priv_data_size = sizeof(G723_1_Context),
    .init           = g723_1_encode_init,
    .encode2        = g723_1_encode_frame,
    .long_name      = NULL_IF_CONFIG_SMALL("G.723.1"),
    .sample_fmts    = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
                                                     AV_SAMPLE_FMT_NONE },
};
#endif