/*
 * G.723.1 compatible decoder
 * Copyright (c) 2006 Benjamin Larsson
 * Copyright (c) 2010 Mohamed Naufal Basheer
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * G.723.1 compatible decoder
 */

#define BITSTREAM_READER_LE
#include "libavutil/audioconvert.h"
#include "libavutil/lzo.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "get_bits.h"
#include "acelp_vectors.h"
#include "celp_filters.h"
#include "celp_math.h"
#include "lsp.h"
#include "g723_1_data.h"

/**
 * G723.1 frame types
 */
enum FrameType {
    ACTIVE_FRAME,        ///< Active speech
    SID_FRAME,           ///< Silence Insertion Descriptor frame
    UNTRANSMITTED_FRAME
};

enum Rate {
    RATE_6300,
    RATE_5300
};

/**
 * G723.1 unpacked data subframe
 */
typedef struct {
    int ad_cb_lag;     ///< adaptive codebook lag
    int ad_cb_gain;
    int dirac_train;
    int pulse_sign;
    int grid_index;
    int amp_index;
    int pulse_pos;
} G723_1_Subframe;

/**
 * Pitch postfilter parameters
 */
typedef struct {
    int     index;    ///< postfilter backward/forward lag
    int16_t opt_gain; ///< optimal gain
    int16_t sc_gain;  ///< scaling gain
} PPFParam;

typedef struct g723_1_context {
    AVClass *class;
    AVFrame frame;

    G723_1_Subframe subframe[4];
    enum FrameType cur_frame_type;
    enum FrameType past_frame_type;
    enum Rate cur_rate;
    uint8_t lsp_index[LSP_BANDS];
    int pitch_lag[2];
    int erased_frames;

    int16_t prev_lsp[LPC_ORDER];
    int16_t prev_excitation[PITCH_MAX];
    int16_t excitation[PITCH_MAX + FRAME_LEN + 4];
    int16_t synth_mem[LPC_ORDER];
    int16_t fir_mem[LPC_ORDER];
    int     iir_mem[LPC_ORDER];

    int random_seed;
    int interp_index;
    int interp_gain;
    int sid_gain;
    int cur_gain;
    int reflection_coef;
    int pf_gain;
    int postfilter;

    int16_t audio[FRAME_LEN + LPC_ORDER];
} G723_1_Context;
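
/*
 * Layout note (added comment): the first PITCH_MAX entries of excitation[]
 * hold the previous frame's excitation (copied from prev_excitation[] in
 * g723_1_decode_frame()), and the FRAME_LEN entries that follow receive the
 * excitation generated for the current frame.
 */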
static av_cold int g723_1_decode_init(AVCodecContext *avctx)
{
    G723_1_Context *p = avctx->priv_data;

    avctx->channel_layout = AV_CH_LAYOUT_MONO;
    avctx->sample_fmt     = AV_SAMPLE_FMT_S16;
    avctx->channels       = 1;
    avctx->sample_rate    = 8000;
    p->pf_gain            = 1 << 12;

    avcodec_get_frame_defaults(&p->frame);
    avctx->coded_frame = &p->frame;

    memcpy(p->prev_lsp, dc_lsp, LPC_ORDER * sizeof(*p->prev_lsp));

    return 0;
}

/**
 * Unpack the frame into parameters.
 *
 * @param p        the context
 * @param buf      pointer to the input buffer
 * @param buf_size size of the input buffer
 */
static int unpack_bitstream(G723_1_Context *p, const uint8_t *buf,
                            int buf_size)
{
    GetBitContext gb;
    int ad_cb_len;
    int temp, info_bits, i;

    init_get_bits(&gb, buf, buf_size * 8);

    /* Extract frame type and rate info */
    info_bits = get_bits(&gb, 2);

    if (info_bits == 3) {
        p->cur_frame_type = UNTRANSMITTED_FRAME;
        return 0;
    }

    /* Extract 24 bit lsp indices, 8 bit for each band */
    p->lsp_index[2] = get_bits(&gb, 8);
    p->lsp_index[1] = get_bits(&gb, 8);
    p->lsp_index[0] = get_bits(&gb, 8);

    if (info_bits == 2) {
        p->cur_frame_type = SID_FRAME;
        p->subframe[0].amp_index = get_bits(&gb, 6);
        return 0;
    }

    /* Extract the info common to both rates */
    p->cur_rate       = info_bits ? RATE_5300 : RATE_6300;
    p->cur_frame_type = ACTIVE_FRAME;

    p->pitch_lag[0] = get_bits(&gb, 7);
    if (p->pitch_lag[0] > 123)       /* test if forbidden code */
        return -1;
    p->pitch_lag[0] += PITCH_MIN;
    p->subframe[1].ad_cb_lag = get_bits(&gb, 2);

    p->pitch_lag[1] = get_bits(&gb, 7);
    if (p->pitch_lag[1] > 123)
        return -1;
    p->pitch_lag[1] += PITCH_MIN;
    p->subframe[3].ad_cb_lag = get_bits(&gb, 2);

    p->subframe[0].ad_cb_lag = 1;
    p->subframe[2].ad_cb_lag = 1;

    for (i = 0; i < SUBFRAMES; i++) {
        /* Extract combined gain */
        temp = get_bits(&gb, 12);
        ad_cb_len = 170;
        p->subframe[i].dirac_train = 0;
        if (p->cur_rate == RATE_6300 && p->pitch_lag[i >> 1] < SUBFRAME_LEN - 2) {
            p->subframe[i].dirac_train = temp >> 11;
            temp &= 0x7FF;
            ad_cb_len = 85;
        }
        p->subframe[i].ad_cb_gain = FASTDIV(temp, GAIN_LEVELS);
        if (p->subframe[i].ad_cb_gain < ad_cb_len) {
            p->subframe[i].amp_index = temp - p->subframe[i].ad_cb_gain *
                                       GAIN_LEVELS;
        } else {
            return -1;
        }
    }

    p->subframe[0].grid_index = get_bits(&gb, 1);
    p->subframe[1].grid_index = get_bits(&gb, 1);
    p->subframe[2].grid_index = get_bits(&gb, 1);
    p->subframe[3].grid_index = get_bits(&gb, 1);

    if (p->cur_rate == RATE_6300) {
        skip_bits(&gb, 1);  /* skip reserved bit */

        /* Compute pulse_pos index using the 13-bit combined position index */
        temp = get_bits(&gb, 13);
        p->subframe[0].pulse_pos = temp / 810;

        temp -= p->subframe[0].pulse_pos * 810;
        p->subframe[1].pulse_pos = FASTDIV(temp, 90);

        temp -= p->subframe[1].pulse_pos * 90;
        p->subframe[2].pulse_pos = FASTDIV(temp, 9);
        p->subframe[3].pulse_pos = temp - p->subframe[2].pulse_pos * 9;

        p->subframe[0].pulse_pos = (p->subframe[0].pulse_pos << 16) +
                                   get_bits(&gb, 16);
        p->subframe[1].pulse_pos = (p->subframe[1].pulse_pos << 14) +
                                   get_bits(&gb, 14);
        p->subframe[2].pulse_pos = (p->subframe[2].pulse_pos << 16) +
                                   get_bits(&gb, 16);
        p->subframe[3].pulse_pos = (p->subframe[3].pulse_pos << 14) +
                                   get_bits(&gb, 14);

        p->subframe[0].pulse_sign = get_bits(&gb, 6);
        p->subframe[1].pulse_sign = get_bits(&gb, 5);
        p->subframe[2].pulse_sign = get_bits(&gb, 6);
        p->subframe[3].pulse_sign = get_bits(&gb, 5);
    } else { /* 5300 bps */
        p->subframe[0].pulse_pos  = get_bits(&gb, 12);
        p->subframe[1].pulse_pos  = get_bits(&gb, 12);
        p->subframe[2].pulse_pos  = get_bits(&gb, 12);
        p->subframe[3].pulse_pos  = get_bits(&gb, 12);

        p->subframe[0].pulse_sign = get_bits(&gb, 4);
        p->subframe[1].pulse_sign = get_bits(&gb, 4);
        p->subframe[2].pulse_sign = get_bits(&gb, 4);
        p->subframe[3].pulse_sign = get_bits(&gb, 4);
    }

    return 0;
}
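
/*
 * Informational note (added comment, not part of the original source): adding
 * up the fields read above, an active 6.3 kbit/s frame consumes 192 bits
 * (24 bytes), an active 5.3 kbit/s frame 160 bits (20 bytes), a SID frame
 * 32 bits (4 bytes) and an untransmitted frame only the 2-bit info field
 * (1 byte), consistent with the frame_size[] table checked against buf_size
 * in g723_1_decode_frame() below.
 */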
/**
 * Bitexact implementation of sqrt(val/2).
 */
static int16_t square_root(int val)
{
    int16_t res = 0;
    int16_t exp = 0x4000;
    int i;

    for (i = 0; i < 14; i ++) {
        int res_exp = res + exp;
        if (val >= res_exp * res_exp << 1)
            res += exp;
        exp >>= 1;
    }
    return res;
}
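
/*
 * Illustration (added comment): the loop above is a bitwise binary search for
 * the largest res (built from bits 0x4000 down to 0x2) with
 * 2 * res * res <= val, e.g. square_root(1 << 17) == 256 because
 * 2 * 256 * 256 == 1 << 17.
 */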
/**
 * Calculate the number of left-shifts required for normalizing the input.
 *
 * @param num   input number
 * @param width width of the input, 16 bits(0) / 32 bits(1)
 */
static int normalize_bits(int num, int width)
{
    if (!num)
        return 0;
    if (num == -1)
        return width;
    if (num < 0)
        num = ~num;

    return width - av_log2(num) - 1;
}
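
/*
 * Example (added comment): with width == 15, normalize_bits(0x2000, 15)
 * returns 1, since 0x2000 << 1 == 0x4000 places the leading bit at position
 * 14, the top magnitude bit of a signed 16-bit value.
 */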
/**
 * Scale vector contents based on the largest of their absolutes.
 */
static int scale_vector(int16_t *vector, int length)
{
    int bits, max = 0;
    int64_t scale;
    int i;

    for (i = 0; i < length; i++)
        max = FFMAX(max, FFABS(vector[i]));

    max   = FFMIN(max, 0x7FFF);
    bits  = normalize_bits(max, 15);
    scale = (bits == 15) ? 0x7FFF : (1 << bits);

    for (i = 0; i < length; i++)
        vector[i] = av_clipl_int32(vector[i] * scale << 1) >> 4;

    return bits - 3;
}

/**
 * Perform inverse quantization of LSP frequencies.
 *
 * @param cur_lsp   the current LSP vector
 * @param prev_lsp  the previous LSP vector
 * @param lsp_index VQ indices
 * @param bad_frame bad frame flag
 */
static void inverse_quant(int16_t *cur_lsp, int16_t *prev_lsp,
                          uint8_t *lsp_index, int bad_frame)
{
    int min_dist, pred;
    int i, j, temp, stable;

    /* Check for frame erasure */
    if (!bad_frame) {
        min_dist = 0x100;
        pred     = 12288;
    } else {
        min_dist = 0x200;
        pred     = 23552;
        lsp_index[0] = lsp_index[1] = lsp_index[2] = 0;
    }

    /* Get the VQ table entry corresponding to the transmitted index */
    cur_lsp[0] = lsp_band0[lsp_index[0]][0];
    cur_lsp[1] = lsp_band0[lsp_index[0]][1];
    cur_lsp[2] = lsp_band0[lsp_index[0]][2];
    cur_lsp[3] = lsp_band1[lsp_index[1]][0];
    cur_lsp[4] = lsp_band1[lsp_index[1]][1];
    cur_lsp[5] = lsp_band1[lsp_index[1]][2];
    cur_lsp[6] = lsp_band2[lsp_index[2]][0];
    cur_lsp[7] = lsp_band2[lsp_index[2]][1];
    cur_lsp[8] = lsp_band2[lsp_index[2]][2];
    cur_lsp[9] = lsp_band2[lsp_index[2]][3];

    /* Add predicted vector & DC component to the previously quantized vector */
    for (i = 0; i < LPC_ORDER; i++) {
        temp        = ((prev_lsp[i] - dc_lsp[i]) * pred + (1 << 14)) >> 15;
        cur_lsp[i] += dc_lsp[i] + temp;
    }

    for (i = 0; i < LPC_ORDER; i++) {
        cur_lsp[0]             = FFMAX(cur_lsp[0], 0x180);
        cur_lsp[LPC_ORDER - 1] = FFMIN(cur_lsp[LPC_ORDER - 1], 0x7e00);

        /* Stability check */
        for (j = 1; j < LPC_ORDER; j++) {
            temp = min_dist + cur_lsp[j - 1] - cur_lsp[j];
            if (temp > 0) {
                temp >>= 1;
                cur_lsp[j - 1] -= temp;
                cur_lsp[j]     += temp;
            }
        }

        stable = 1;
        for (j = 1; j < LPC_ORDER; j++) {
            temp = cur_lsp[j - 1] + min_dist - cur_lsp[j] - 4;
            if (temp > 0) {
                stable = 0;
                break;
            }
        }

        if (stable)
            break;
    }
    if (!stable)
        memcpy(cur_lsp, prev_lsp, LPC_ORDER * sizeof(*cur_lsp));
}

/**
 * Bitexact implementation of 2ab scaled by 1/2^16.
 *
 * @param a 32 bit multiplicand
 * @param b 16 bit multiplier
 */
#define MULL2(a, b) \
        ((((a) >> 16) * (b) << 1) + (((a) & 0xffff) * (b) >> 15))
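
/*
 * Worked example (added comment): splitting a into its high and low 16-bit
 * halves lets MULL2(a, b) approximate (2 * a * b) >> 16 without a 64-bit
 * multiply. For a = 1 << 16 and b = 0x4000 (0.5 in Q15) the high-half term
 * gives (1 * 0x4000) << 1 == 0x8000 and the low-half term is 0, i.e. exactly
 * 2 * a * b / 65536.
 */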
/**
 * Convert LSP frequencies to LPC coefficients.
 *
 * @param lpc buffer for LPC coefficients
 */
static void lsp2lpc(int16_t *lpc)
{
    int f1[LPC_ORDER / 2 + 1];
    int f2[LPC_ORDER / 2 + 1];
    int i, j;

    /* Calculate negative cosine */
    for (j = 0; j < LPC_ORDER; j++) {
        int index     = lpc[j] >> 7;
        int offset    = lpc[j] & 0x7f;
        int64_t temp1 = cos_tab[index] << 16;
        int temp2     = (cos_tab[index + 1] - cos_tab[index]) *
                        ((offset << 8) + 0x80) << 1;

        lpc[j] = -(av_clipl_int32(((temp1 + temp2) << 1) + (1 << 15)) >> 16);
    }

    /*
     * Compute sum and difference polynomial coefficients
     * (bitexact alternative to lsp2poly() in lsp.c)
     */
    /* Initialize with values in Q28 */
    f1[0] = 1 << 28;
    f1[1] = (lpc[0] << 14) + (lpc[2] << 14);
    f1[2] = lpc[0] * lpc[2] + (2 << 28);

    f2[0] = 1 << 28;
    f2[1] = (lpc[1] << 14) + (lpc[3] << 14);
    f2[2] = lpc[1] * lpc[3] + (2 << 28);

    /*
     * Calculate and scale the coefficients by 1/2 in
     * each iteration for a final scaling factor of Q25
     */
    for (i = 2; i < LPC_ORDER / 2; i++) {
        f1[i + 1] = f1[i - 1] + MULL2(f1[i], lpc[2 * i]);
        f2[i + 1] = f2[i - 1] + MULL2(f2[i], lpc[2 * i + 1]);

        for (j = i; j >= 2; j--) {
            f1[j] = MULL2(f1[j - 1], lpc[2 * i]) +
                    (f1[j] >> 1) + (f1[j - 2] >> 1);
            f2[j] = MULL2(f2[j - 1], lpc[2 * i + 1]) +
                    (f2[j] >> 1) + (f2[j - 2] >> 1);
        }

        f1[0] >>= 1;
        f2[0] >>= 1;
        f1[1] = ((lpc[2 * i]     << 16 >> i) + f1[1]) >> 1;
        f2[1] = ((lpc[2 * i + 1] << 16 >> i) + f2[1]) >> 1;
    }

    /* Convert polynomial coefficients to LPC coefficients */
    for (i = 0; i < LPC_ORDER / 2; i++) {
        int64_t ff1 = f1[i + 1] + f1[i];
        int64_t ff2 = f2[i + 1] - f2[i];

        lpc[i] = av_clipl_int32(((ff1 + ff2) << 3) + (1 << 15)) >> 16;
        lpc[LPC_ORDER - i - 1] = av_clipl_int32(((ff1 - ff2) << 3) +
                                                (1 << 15)) >> 16;
    }
}

/**
 * Quantize LSP frequencies by interpolation and convert them to
 * the corresponding LPC coefficients.
 *
 * @param lpc      buffer for LPC coefficients
 * @param cur_lsp  the current LSP vector
 * @param prev_lsp the previous LSP vector
 */
static void lsp_interpolate(int16_t *lpc, int16_t *cur_lsp, int16_t *prev_lsp)
{
    int i;
    int16_t *lpc_ptr = lpc;

    /* cur_lsp * 0.25 + prev_lsp * 0.75 */
    ff_acelp_weighted_vector_sum(lpc, cur_lsp, prev_lsp,
                                 4096, 12288, 1 << 13, 14, LPC_ORDER);
    ff_acelp_weighted_vector_sum(lpc + LPC_ORDER, cur_lsp, prev_lsp,
                                 8192, 8192, 1 << 13, 14, LPC_ORDER);
    ff_acelp_weighted_vector_sum(lpc + 2 * LPC_ORDER, cur_lsp, prev_lsp,
                                 12288, 4096, 1 << 13, 14, LPC_ORDER);
    memcpy(lpc + 3 * LPC_ORDER, cur_lsp, LPC_ORDER * sizeof(*lpc));

    for (i = 0; i < SUBFRAMES; i++) {
        lsp2lpc(lpc_ptr);
        lpc_ptr += LPC_ORDER;
    }
}

/**
 * Generate a train of dirac functions with period as pitch lag.
 */
static void gen_dirac_train(int16_t *buf, int pitch_lag)
{
    int16_t vector[SUBFRAME_LEN];
    int i, j;

    memcpy(vector, buf, SUBFRAME_LEN * sizeof(*vector));
    for (i = pitch_lag; i < SUBFRAME_LEN; i += pitch_lag) {
        for (j = 0; j < SUBFRAME_LEN - i; j++)
            buf[i + j] += vector[j];
    }
}

/**
 * Generate fixed codebook excitation vector.
 *
 * @param vector    decoded excitation vector
 * @param subfrm    current subframe
 * @param cur_rate  current bitrate
 * @param pitch_lag closed loop pitch lag
 * @param index     current subframe index
 */
static void gen_fcb_excitation(int16_t *vector, G723_1_Subframe subfrm,
                               enum Rate cur_rate, int pitch_lag, int index)
{
    int temp, i, j;

    memset(vector, 0, SUBFRAME_LEN * sizeof(*vector));

    if (cur_rate == RATE_6300) {
        if (subfrm.pulse_pos >= max_pos[index])
            return;

        /* Decode amplitudes and positions */
        j = PULSE_MAX - pulses[index];
        temp = subfrm.pulse_pos;
        for (i = 0; i < SUBFRAME_LEN / GRID_SIZE; i++) {
            temp -= combinatorial_table[j][i];
            if (temp >= 0)
                continue;
            temp += combinatorial_table[j++][i];
            if (subfrm.pulse_sign & (1 << (PULSE_MAX - j))) {
                vector[subfrm.grid_index + GRID_SIZE * i] =
                                        -fixed_cb_gain[subfrm.amp_index];
            } else {
                vector[subfrm.grid_index + GRID_SIZE * i] =
                                         fixed_cb_gain[subfrm.amp_index];
            }
            if (j == PULSE_MAX)
                break;
        }
        if (subfrm.dirac_train == 1)
            gen_dirac_train(vector, pitch_lag);
    } else { /* 5300 bps */
        int cb_gain  = fixed_cb_gain[subfrm.amp_index];
        int cb_shift = subfrm.grid_index;
        int cb_sign  = subfrm.pulse_sign;
        int cb_pos   = subfrm.pulse_pos;
        int offset, beta, lag;

        for (i = 0; i < 8; i += 2) {
            offset         = ((cb_pos & 7) << 3) + cb_shift + i;
            vector[offset] = (cb_sign & 1) ? cb_gain : -cb_gain;
            cb_pos  >>= 3;
            cb_sign >>= 1;
        }

        /* Enhance harmonic components */
        lag  = pitch_contrib[subfrm.ad_cb_gain << 1] + pitch_lag +
               subfrm.ad_cb_lag - 1;
        beta = pitch_contrib[(subfrm.ad_cb_gain << 1) + 1];

        if (lag < SUBFRAME_LEN - 2) {
            for (i = lag; i < SUBFRAME_LEN; i++)
                vector[i] += beta * vector[i - lag] >> 15;
        }
    }
}

/**
 * Get delayed contribution from the previous excitation vector.
 */
static void get_residual(int16_t *residual, int16_t *prev_excitation, int lag)
{
    int offset = PITCH_MAX - PITCH_ORDER / 2 - lag;
    int i;

    residual[0] = prev_excitation[offset];
    residual[1] = prev_excitation[offset + 1];

    offset += 2;
    for (i = 2; i < SUBFRAME_LEN + PITCH_ORDER - 1; i++)
        residual[i] = prev_excitation[offset + (i - 2) % lag];
}

static int dot_product(const int16_t *a, const int16_t *b, int length,
                       int shift)
{
    int i, sum = 0;

    for (i = 0; i < length; i++) {
        int64_t prod = av_clipl_int32(MUL64(a[i], b[i]) << shift);
        sum = av_clipl_int32(sum + prod);
    }
    return sum;
}

/**
 * Generate adaptive codebook excitation.
 */
static void gen_acb_excitation(int16_t *vector, int16_t *prev_excitation,
                               int pitch_lag, G723_1_Subframe subfrm,
                               enum Rate cur_rate)
{
    int16_t residual[SUBFRAME_LEN + PITCH_ORDER - 1];
    const int16_t *cb_ptr;
    int lag = pitch_lag + subfrm.ad_cb_lag - 1;

    int i;
    int64_t sum;

    get_residual(residual, prev_excitation, lag);

    /* Select quantization table */
    if (cur_rate == RATE_6300 && pitch_lag < SUBFRAME_LEN - 2)
        cb_ptr = adaptive_cb_gain85;
    else
        cb_ptr = adaptive_cb_gain170;

    /* Calculate adaptive vector */
    cb_ptr += subfrm.ad_cb_gain * 20;
    for (i = 0; i < SUBFRAME_LEN; i++) {
        sum = dot_product(residual + i, cb_ptr, PITCH_ORDER, 1);
        vector[i] = av_clipl_int32((sum << 1) + (1 << 15)) >> 16;
    }
}

/**
 * Estimate maximum auto-correlation around pitch lag.
 *
 * @param p         the context
 * @param offset    offset of the excitation vector
 * @param ccr_max   pointer to the maximum auto-correlation
 * @param pitch_lag decoded pitch lag
 * @param length    length of autocorrelation
 * @param dir       forward lag(1) / backward lag(-1)
 */
static int autocorr_max(G723_1_Context *p, int offset, int *ccr_max,
                        int pitch_lag, int length, int dir)
{
    int limit, ccr, lag = 0;
    int16_t *buf = p->excitation + offset;
    int i;

    pitch_lag = FFMIN(PITCH_MAX - 3, pitch_lag);
    if (dir > 0)
        limit = FFMIN(FRAME_LEN + PITCH_MAX - offset - length, pitch_lag + 3);
    else
        limit = pitch_lag + 3;

    for (i = pitch_lag - 3; i <= limit; i++) {
        ccr = dot_product(buf, buf + dir * i, length, 1);

        if (ccr > *ccr_max) {
            *ccr_max = ccr;
            lag = i;
        }
    }
    return lag;
}

/**
 * Calculate pitch postfilter optimal and scaling gains.
 *
 * @param lag      pitch postfilter forward/backward lag
 * @param ppf      pitch postfilter parameters
 * @param cur_rate current bitrate
 * @param tgt_eng  target energy
 * @param ccr      cross-correlation
 * @param res_eng  residual energy
 */
static void comp_ppf_gains(int lag, PPFParam *ppf, enum Rate cur_rate,
                           int tgt_eng, int ccr, int res_eng)
{
    int pf_residual;     /* square of postfiltered residual */
    int64_t temp1, temp2;

    ppf->index = lag;

    temp1 = tgt_eng * res_eng >> 1;
    temp2 = ccr * ccr << 1;

    if (temp2 > temp1) {
        if (ccr >= res_eng) {
            ppf->opt_gain = ppf_gain_weight[cur_rate];
        } else {
            ppf->opt_gain = (ccr << 15) / res_eng *
                            ppf_gain_weight[cur_rate] >> 15;
        }
        /* pf_res^2 = tgt_eng + 2*ccr*gain + res_eng*gain^2 */
        temp1       = (tgt_eng << 15) + (ccr * ppf->opt_gain << 1);
        temp2       = (ppf->opt_gain * ppf->opt_gain >> 15) * res_eng;
        pf_residual = av_clipl_int32(temp1 + temp2 + (1 << 15)) >> 16;

        if (tgt_eng >= pf_residual << 1) {
            temp1 = 0x7fff;
        } else {
            temp1 = (tgt_eng << 14) / pf_residual;
        }

        /* scaling_gain = sqrt(tgt_eng/pf_res^2) */
        ppf->sc_gain = square_root(temp1 << 16);
    } else {
        ppf->opt_gain = 0;
        ppf->sc_gain  = 0x7fff;
    }

    ppf->opt_gain = av_clip_int16(ppf->opt_gain * ppf->sc_gain >> 15);
}

/**
 * Calculate pitch postfilter parameters.
 *
 * @param p         the context
 * @param offset    offset of the excitation vector
 * @param pitch_lag decoded pitch lag
 * @param ppf       pitch postfilter parameters
 * @param cur_rate  current bitrate
 */
static void comp_ppf_coeff(G723_1_Context *p, int offset, int pitch_lag,
                           PPFParam *ppf, enum Rate cur_rate)
{
    int16_t scale;
    int i;
    int64_t temp1, temp2;

    /*
     * 0 - target energy
     * 1 - forward cross-correlation
     * 2 - forward residual energy
     * 3 - backward cross-correlation
     * 4 - backward residual energy
     */
    int energy[5] = {0, 0, 0, 0, 0};
    int16_t *buf  = p->excitation + offset;
    int fwd_lag   = autocorr_max(p, offset, &energy[1], pitch_lag,
                                 SUBFRAME_LEN, 1);
    int back_lag  = autocorr_max(p, offset, &energy[3], pitch_lag,
                                 SUBFRAME_LEN, -1);

    ppf->index    = 0;
    ppf->opt_gain = 0;
    ppf->sc_gain  = 0x7fff;

    /* Case 0, Section 3.6 */
    if (!back_lag && !fwd_lag)
        return;

    /* Compute target energy */
    energy[0] = dot_product(buf, buf, SUBFRAME_LEN, 1);

    /* Compute forward residual energy */
    if (fwd_lag)
        energy[2] = dot_product(buf + fwd_lag, buf + fwd_lag,
                                SUBFRAME_LEN, 1);

    /* Compute backward residual energy */
    if (back_lag)
        energy[4] = dot_product(buf - back_lag, buf - back_lag,
                                SUBFRAME_LEN, 1);

    /* Normalize and shorten */
    temp1 = 0;
    for (i = 0; i < 5; i++)
        temp1 = FFMAX(energy[i], temp1);

    scale = normalize_bits(temp1, 31);
    for (i = 0; i < 5; i++)
        energy[i] = (energy[i] << scale) >> 16;

    if (fwd_lag && !back_lag) {  /* Case 1 */
        comp_ppf_gains(fwd_lag, ppf, cur_rate, energy[0], energy[1],
                       energy[2]);
    } else if (!fwd_lag) {       /* Case 2 */
        comp_ppf_gains(-back_lag, ppf, cur_rate, energy[0], energy[3],
                       energy[4]);
    } else {                     /* Case 3 */
        /*
         * Select the largest of energy[1]^2/energy[2]
         * and energy[3]^2/energy[4]
         */
        temp1 = energy[4] * ((energy[1] * energy[1] + (1 << 14)) >> 15);
        temp2 = energy[2] * ((energy[3] * energy[3] + (1 << 14)) >> 15);
        if (temp1 >= temp2) {
            comp_ppf_gains(fwd_lag, ppf, cur_rate, energy[0], energy[1],
                           energy[2]);
        } else {
            comp_ppf_gains(-back_lag, ppf, cur_rate, energy[0], energy[3],
                           energy[4]);
        }
    }
}

/**
 * Classify frames as voiced/unvoiced.
 *
 * @param p         the context
 * @param pitch_lag decoded pitch_lag
 * @param exc_eng   excitation energy estimation
 * @param scale     scaling factor of exc_eng
 *
 * @return residual interpolation index if voiced, 0 otherwise
 */
static int comp_interp_index(G723_1_Context *p, int pitch_lag,
                             int *exc_eng, int *scale)
{
    int offset = PITCH_MAX + 2 * SUBFRAME_LEN;
    int16_t *buf = p->excitation + offset;

    int index, ccr, tgt_eng, best_eng, temp;

    *scale = scale_vector(p->excitation, FRAME_LEN + PITCH_MAX);

    /* Compute maximum backward cross-correlation */
    ccr   = 0;
    index = autocorr_max(p, offset, &ccr, pitch_lag, SUBFRAME_LEN * 2, -1);
    ccr   = av_clipl_int32((int64_t)ccr + (1 << 15)) >> 16;

    /* Compute target energy */
    tgt_eng  = dot_product(buf, buf, SUBFRAME_LEN * 2, 1);
    *exc_eng = av_clipl_int32((int64_t)tgt_eng + (1 << 15)) >> 16;

    if (ccr <= 0)
        return 0;

    /* Compute best energy */
    best_eng = dot_product(buf - index, buf - index,
                           SUBFRAME_LEN * 2, 1);
    best_eng = av_clipl_int32((int64_t)best_eng + (1 << 15)) >> 16;

    temp = best_eng * *exc_eng >> 3;

    if (temp < ccr * ccr)
        return index;
    else
        return 0;
}
/**
 * Perform residual interpolation based on frame classification.
 *
 * @param buf   decoded excitation vector
 * @param out   output vector
 * @param lag   decoded pitch lag
 * @param gain  interpolated gain
 * @param rseed seed for random number generator
 */
static void residual_interp(int16_t *buf, int16_t *out, int lag,
                            int gain, int *rseed)
{
    int i;
    if (lag) { /* Voiced */
        int16_t *vector_ptr = buf + PITCH_MAX;
        /* Attenuate */
        for (i = 0; i < lag; i++)
            vector_ptr[i - lag] = vector_ptr[i - lag] * 3 >> 2;
        av_memcpy_backptr((uint8_t*)vector_ptr, lag * sizeof(*vector_ptr),
                          FRAME_LEN * sizeof(*vector_ptr));
        memcpy(out, vector_ptr, FRAME_LEN * sizeof(*vector_ptr));
    } else {  /* Unvoiced */
        for (i = 0; i < FRAME_LEN; i++) {
            *rseed = *rseed * 521 + 259;
            out[i] = gain * *rseed >> 15;
        }
        memset(buf, 0, (FRAME_LEN + PITCH_MAX) * sizeof(*buf));
    }
}

/**
 * Perform IIR filtering.
 *
 * @param fir_coef FIR coefficients
 * @param iir_coef IIR coefficients
 * @param src      source vector
 * @param dest     destination vector
 */
static inline void iir_filter(int16_t *fir_coef, int16_t *iir_coef,
                              int16_t *src, int *dest)
{
    int m, n;

    for (m = 0; m < SUBFRAME_LEN; m++) {
        int64_t filter = 0;
        for (n = 1; n <= LPC_ORDER; n++) {
            filter -= fir_coef[n - 1] * src[m - n] -
                      iir_coef[n - 1] * (dest[m - n] >> 16);
        }

        dest[m] = av_clipl_int32((src[m] << 16) + (filter << 3) + (1 << 15));
    }
}

/**
 * Adjust gain of postfiltered signal.
 *
 * @param p      the context
 * @param buf    postfiltered output vector
 * @param energy input energy coefficient
 */
static void gain_scale(G723_1_Context *p, int16_t * buf, int energy)
{
    int num, denom, gain, bits1, bits2;
    int i;

    num   = energy;
    denom = 0;
    for (i = 0; i < SUBFRAME_LEN; i++) {
        int64_t temp = buf[i] >> 2;
        temp  = av_clipl_int32(MUL64(temp, temp) << 1);
        denom = av_clipl_int32(denom + temp);
    }

    if (num && denom) {
        bits1 = normalize_bits(num,   31);
        bits2 = normalize_bits(denom, 31);
        num   = num << bits1 >> 1;
        denom <<= bits2;

        bits2 = 5 + bits1 - bits2;
        bits2 = FFMAX(0, bits2);

        gain = (num >> 1) / (denom >> 16);
        gain = square_root(gain << 16 >> bits2);
    } else {
        gain = 1 << 12;
    }

    for (i = 0; i < SUBFRAME_LEN; i++) {
        p->pf_gain = ((p->pf_gain << 4) - p->pf_gain + gain + (1 << 3)) >> 4;
        buf[i]     = av_clip_int16((buf[i] * (p->pf_gain + (p->pf_gain >> 4)) +
                                   (1 << 10)) >> 11);
    }
}

/**
 * Perform formant filtering.
 *
 * @param p   the context
 * @param lpc quantized lpc coefficients
 * @param buf output buffer
 */
static void formant_postfilter(G723_1_Context *p, int16_t *lpc, int16_t *buf)
{
    int16_t filter_coef[2][LPC_ORDER], *buf_ptr;
    int filter_signal[LPC_ORDER + FRAME_LEN], *signal_ptr;
    int i, j, k;

    memcpy(buf, p->fir_mem, LPC_ORDER * sizeof(*buf));
    memcpy(filter_signal, p->iir_mem, LPC_ORDER * sizeof(*filter_signal));

    for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
        for (k = 0; k < LPC_ORDER; k++) {
            filter_coef[0][k] = (-lpc[k] * postfilter_tbl[0][k] +
                                 (1 << 14)) >> 15;
            filter_coef[1][k] = (-lpc[k] * postfilter_tbl[1][k] +
                                 (1 << 14)) >> 15;
        }
        iir_filter(filter_coef[0], filter_coef[1], buf + i,
                   filter_signal + i);
        lpc += LPC_ORDER;
    }

    memcpy(p->fir_mem, buf + FRAME_LEN, LPC_ORDER * sizeof(*p->fir_mem));
    memcpy(p->iir_mem, filter_signal + FRAME_LEN,
           LPC_ORDER * sizeof(*p->iir_mem));

    buf_ptr    = buf + LPC_ORDER;
    signal_ptr = filter_signal + LPC_ORDER;
    for (i = 0; i < SUBFRAMES; i++) {
        int16_t temp_vector[SUBFRAME_LEN];
        int16_t temp;
        int auto_corr[2];
        int scale, energy;

        /* Normalize */
        memcpy(temp_vector, buf_ptr, SUBFRAME_LEN * sizeof(*temp_vector));
        scale = scale_vector(temp_vector, SUBFRAME_LEN);

        /* Compute auto correlation coefficients */
        auto_corr[0] = dot_product(temp_vector, temp_vector + 1,
                                   SUBFRAME_LEN - 1, 1);
        auto_corr[1] = dot_product(temp_vector, temp_vector, SUBFRAME_LEN, 1);

        /* Compute reflection coefficient */
        temp = auto_corr[1] >> 16;
        if (temp) {
            temp = (auto_corr[0] >> 2) / temp;
        }
        p->reflection_coef = ((p->reflection_coef << 2) - p->reflection_coef +
                              temp + 2) >> 2;
        temp = (p->reflection_coef * 0xffffc >> 3) & 0xfffc;

        /* Compensation filter */
        for (j = 0; j < SUBFRAME_LEN; j++) {
            buf_ptr[j] = av_clipl_int32(signal_ptr[j] +
                                        ((signal_ptr[j - 1] >> 16) *
                                         temp << 1)) >> 16;
        }

        /* Compute normalized signal energy */
        temp = 2 * scale + 4;
        if (temp < 0) {
            energy = av_clipl_int32((int64_t)auto_corr[1] << -temp);
        } else
            energy = auto_corr[1] >> temp;

        gain_scale(p, buf_ptr, energy);

        buf_ptr    += SUBFRAME_LEN;
        signal_ptr += SUBFRAME_LEN;
    }
}

static int g723_1_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
{
    G723_1_Context *p  = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    int dec_mode       = buf[0] & 3;

    PPFParam ppf[SUBFRAMES];
    int16_t cur_lsp[LPC_ORDER];
    int16_t lpc[SUBFRAMES * LPC_ORDER];
    int16_t acb_vector[SUBFRAME_LEN];
    int16_t *vector_ptr;
    int16_t *out;
    int bad_frame = 0, i, j, ret;

    if (buf_size < frame_size[dec_mode]) {
        if (buf_size)
            av_log(avctx, AV_LOG_WARNING,
                   "Expected %d bytes, got %d - skipping packet\n",
                   frame_size[dec_mode], buf_size);
        *got_frame_ptr = 0;
        return buf_size;
    }

    if (unpack_bitstream(p, buf, buf_size) < 0) {
        bad_frame = 1;
        if (p->past_frame_type == ACTIVE_FRAME)
            p->cur_frame_type = ACTIVE_FRAME;
        else
            p->cur_frame_type = UNTRANSMITTED_FRAME;
    }

    p->frame.nb_samples = FRAME_LEN;
    if ((ret = avctx->get_buffer(avctx, &p->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    out = (int16_t *)p->frame.data[0];

    if (p->cur_frame_type == ACTIVE_FRAME) {
        if (!bad_frame)
            p->erased_frames = 0;
        else if (p->erased_frames != 3)
            p->erased_frames++;

        inverse_quant(cur_lsp, p->prev_lsp, p->lsp_index, bad_frame);
        lsp_interpolate(lpc, cur_lsp, p->prev_lsp);

        /* Save the lsp_vector for the next frame */
        memcpy(p->prev_lsp, cur_lsp, LPC_ORDER * sizeof(*p->prev_lsp));

        /* Generate the excitation for the frame */
        memcpy(p->excitation, p->prev_excitation,
               PITCH_MAX * sizeof(*p->excitation));
        vector_ptr = p->excitation + PITCH_MAX;

        if (!p->erased_frames) {
            /* Update interpolation gain memory */
            p->interp_gain = fixed_cb_gain[(p->subframe[2].amp_index +
                                            p->subframe[3].amp_index) >> 1];
            for (i = 0; i < SUBFRAMES; i++) {
                gen_fcb_excitation(vector_ptr, p->subframe[i], p->cur_rate,
                                   p->pitch_lag[i >> 1], i);
                gen_acb_excitation(acb_vector, &p->excitation[SUBFRAME_LEN * i],
                                   p->pitch_lag[i >> 1], p->subframe[i],
                                   p->cur_rate);
                /* Get the total excitation */
                for (j = 0; j < SUBFRAME_LEN; j++) {
                    vector_ptr[j] = av_clip_int16(vector_ptr[j] << 1);
                    vector_ptr[j] = av_clip_int16(vector_ptr[j] +
                                                  acb_vector[j]);
                }
                vector_ptr += SUBFRAME_LEN;
            }

            vector_ptr = p->excitation + PITCH_MAX;

            /* Save the excitation */
            memcpy(p->audio + LPC_ORDER, vector_ptr, FRAME_LEN * sizeof(*p->audio));

            p->interp_index = comp_interp_index(p, p->pitch_lag[1],
                                                &p->sid_gain, &p->cur_gain);

            if (p->postfilter) {
                i = PITCH_MAX;
                for (j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
                    comp_ppf_coeff(p, i, p->pitch_lag[j >> 1],
                                   ppf + j, p->cur_rate);
            }

            /* Restore the original excitation */
            memcpy(p->excitation, p->prev_excitation,
                   PITCH_MAX * sizeof(*p->excitation));
            memcpy(vector_ptr, p->audio + LPC_ORDER, FRAME_LEN * sizeof(*vector_ptr));
            /* Perform pitch postfiltering */
            if (p->postfilter)
                for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
                    ff_acelp_weighted_vector_sum(p->audio + LPC_ORDER + i,
                                                 vector_ptr + i,
                                                 vector_ptr + i + ppf[j].index,
                                                 ppf[j].sc_gain,
                                                 ppf[j].opt_gain,
                                                 1 << 14, 15, SUBFRAME_LEN);
        } else {
            p->interp_gain = (p->interp_gain * 3 + 2) >> 2;
            if (p->erased_frames == 3) {
                /* Mute output */
                memset(p->excitation, 0,
                       (FRAME_LEN + PITCH_MAX) * sizeof(*p->excitation));
                memset(p->frame.data[0], 0,
                       (FRAME_LEN + LPC_ORDER) * sizeof(int16_t));
            } else {
                /* Regenerate frame */
                residual_interp(p->excitation, p->audio + LPC_ORDER, p->interp_index,
                                p->interp_gain, &p->random_seed);
            }
        }
        /* Save the excitation for the next frame */
        memcpy(p->prev_excitation, p->excitation + FRAME_LEN,
               PITCH_MAX * sizeof(*p->excitation));
    } else {
        memset(out, 0, FRAME_LEN * 2);
        av_log(avctx, AV_LOG_WARNING,
               "G.723.1: Comfort noise generation not supported yet\n");
        *got_frame_ptr = 1;
        *(AVFrame *)data = p->frame;
        return frame_size[dec_mode];
    }

    p->past_frame_type = p->cur_frame_type;

    memcpy(p->audio, p->synth_mem, LPC_ORDER * sizeof(*p->audio));
    for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
        ff_celp_lp_synthesis_filter(p->audio + i, &lpc[j * LPC_ORDER],
                                    p->audio + i, SUBFRAME_LEN, LPC_ORDER,
                                    0, 1, 1 << 12);
    memcpy(p->synth_mem, p->audio + FRAME_LEN, LPC_ORDER * sizeof(*p->audio));

    if (p->postfilter) {
        formant_postfilter(p, lpc, p->audio);
        memcpy(p->frame.data[0], p->audio + LPC_ORDER, FRAME_LEN * 2);
    } else { // if output is not postfiltered it should be scaled by 2
        for (i = 0; i < FRAME_LEN; i++)
            out[i] = av_clip_int16(p->audio[LPC_ORDER + i] << 1);
    }

    *got_frame_ptr   = 1;
    *(AVFrame *)data = p->frame;

    return frame_size[dec_mode];
}

#define OFFSET(x) offsetof(G723_1_Context, x)
#define AD        AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM

static const AVOption options[] = {
    { "postfilter", "postfilter on/off", OFFSET(postfilter), AV_OPT_TYPE_INT,
      { 1 }, 0, 1, AD },
    { NULL }
};
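
/*
 * Usage sketch (added comment, assuming the AVOptions/AVDictionary API of
 * this era): the private "postfilter" option above defaults to 1 and can be
 * disabled when opening the decoder, e.g.
 *
 * @code
 * AVDictionary *opts = NULL;
 * av_dict_set(&opts, "postfilter", "0", 0);
 * avcodec_open2(avctx, avcodec_find_decoder(AV_CODEC_ID_G723_1), &opts);
 * @endcode
 */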
static const AVClass g723_1dec_class = {
    .class_name = "G.723.1 decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_g723_1_decoder = {
    .name           = "g723_1",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_G723_1,
    .priv_data_size = sizeof(G723_1_Context),
    .init           = g723_1_decode_init,
    .decode         = g723_1_decode_frame,
    .long_name      = NULL_IF_CONFIG_SMALL("G.723.1"),
    .capabilities   = CODEC_CAP_SUBFRAMES,
    .priv_class     = &g723_1dec_class,
};
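
/*
 * Decoding sketch (added comment, not part of the original source; assumes
 * the avcodec_decode_audio4() API matching the .decode callback above): one
 * packet per 30 ms frame is fed to the decoder and yields FRAME_LEN samples
 * of signed 16-bit mono at 8 kHz.
 *
 * @code
 * AVFrame *frame = avcodec_alloc_frame();
 * int got_frame  = 0;
 * int used       = avcodec_decode_audio4(avctx, frame, &got_frame, &pkt);
 * if (used >= 0 && got_frame) {
 *     // frame->data[0] now holds frame->nb_samples int16_t samples
 * }
 * @endcode
 */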