/*
 * G.723.1 compatible encoder
 * Copyright (c) Mohamed Naufal <naufal22@gmail.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * G.723.1 compatible encoder
 */

#include <stdint.h>
#include <string.h>

#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#include "celp_math.h"
#include "g723_1.h"
#include "internal.h"

#define BITSTREAM_WRITER_LE
#include "put_bits.h"

static av_cold int g723_1_encode_init(AVCodecContext *avctx)
{
    G723_1_Context *p = avctx->priv_data;

    if (avctx->sample_rate != 8000) {
        av_log(avctx, AV_LOG_ERROR, "Only 8000Hz sample rate supported\n");
        return AVERROR(EINVAL);
    }

    if (avctx->channels != 1) {
        av_log(avctx, AV_LOG_ERROR, "Only mono supported\n");
        return AVERROR(EINVAL);
    }

    if (avctx->bit_rate == 6300) {
        p->cur_rate = RATE_6300;
    } else if (avctx->bit_rate == 5300) {
        av_log(avctx, AV_LOG_ERROR, "Use bitrate 6300 instead of 5300.\n");
        avpriv_report_missing_feature(avctx, "Bitrate 5300");
        return AVERROR_PATCHWELCOME;
    } else {
        av_log(avctx, AV_LOG_ERROR, "Bitrate not supported, use 6300\n");
        return AVERROR(EINVAL);
    }
    avctx->frame_size = 240;
    memcpy(p->prev_lsp, dc_lsp, LPC_ORDER * sizeof(int16_t));

    return 0;
}

/**
 * Remove DC component from the input signal.
 *
 * @param buf input signal
 * @param fir zero memory
 * @param iir pole memory
 */
static void highpass_filter(int16_t *buf, int16_t *fir, int *iir)
{
    int i;

    for (i = 0; i < FRAME_LEN; i++) {
        *iir   = (buf[i] << 15) + ((-*fir) << 15) + MULL2(*iir, 0x7f00);
        *fir   = buf[i];
        buf[i] = av_clipl_int32((int64_t) *iir + (1 << 15)) >> 16;
    }
}

/**
 * Estimate autocorrelation of the input vector.
 *
 * @param buf      input buffer
 * @param autocorr autocorrelation coefficients vector
 */
static void comp_autocorr(int16_t *buf, int16_t *autocorr)
{
    int i, scale, temp;
    int16_t vector[LPC_FRAME];

    ff_g723_1_scale_vector(vector, buf, LPC_FRAME);

    /* Apply the Hamming window */
    for (i = 0; i < LPC_FRAME; i++)
        vector[i] = (vector[i] * hamming_window[i] + (1 << 14)) >> 15;

    /* Compute the first autocorrelation coefficient */
    temp = ff_dot_product(vector, vector, LPC_FRAME);

    /* Apply a white noise correlation factor of (1025/1024) */
    temp += temp >> 10;

    /* Normalize */
    scale = ff_g723_1_normalize_bits(temp, 31);
    autocorr[0] = av_clipl_int32((int64_t) (temp << scale) +
                                 (1 << 15)) >> 16;

    /* Compute the remaining coefficients */
    if (!autocorr[0]) {
        memset(autocorr + 1, 0, LPC_ORDER * sizeof(int16_t));
    } else {
        for (i = 1; i <= LPC_ORDER; i++) {
            temp = ff_dot_product(vector, vector + i, LPC_FRAME - i);
            temp = MULL2((temp << scale), binomial_window[i - 1]);
            autocorr[i] = av_clipl_int32((int64_t) temp + (1 << 15)) >> 16;
        }
    }
}

/**
 * Use Levinson-Durbin recursion to compute LPC coefficients from
 * autocorrelation values.
 *
 * @param lpc      LPC coefficients vector
 * @param autocorr autocorrelation coefficients vector
 * @param error    prediction error
 */
static void levinson_durbin(int16_t *lpc, int16_t *autocorr, int16_t error)
{
    int16_t vector[LPC_ORDER];
    int16_t partial_corr;
    int i, j, temp;

    memset(lpc, 0, LPC_ORDER * sizeof(int16_t));

    for (i = 0; i < LPC_ORDER; i++) {
        /* Compute the partial correlation coefficient */
        temp = 0;
        for (j = 0; j < i; j++)
            temp -= lpc[j] * autocorr[i - j - 1];
        temp = ((autocorr[i] << 13) + temp) << 3;

        if (FFABS(temp) >= (error << 16))
            break;

        partial_corr = temp / (error << 1);

        lpc[i] = av_clipl_int32((int64_t) (partial_corr << 14) +
                                (1 << 15)) >> 16;

        /* Update the prediction error */
        temp  = MULL2(temp, partial_corr);
        error = av_clipl_int32((int64_t) (error << 16) - temp +
                               (1 << 15)) >> 16;

        memcpy(vector, lpc, i * sizeof(int16_t));
        for (j = 0; j < i; j++) {
            temp   = partial_corr * vector[i - j - 1] << 1;
            lpc[j] = av_clipl_int32((int64_t) (lpc[j] << 16) - temp +
                                    (1 << 15)) >> 16;
        }
    }
}

/**
 * Calculate LPC coefficients for the current frame.
 *
 * @param buf current frame, preceded by the 2 trailing subframes of the
 *            previous frame
 * @param lpc LPC coefficients vector
 */
static void comp_lpc_coeff(int16_t *buf, int16_t *lpc)
{
    int16_t autocorr[(LPC_ORDER + 1) * SUBFRAMES];
    int16_t *autocorr_ptr = autocorr;
    int16_t *lpc_ptr      = lpc;
    int i, j;

    for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
        comp_autocorr(buf + i, autocorr_ptr);
        levinson_durbin(lpc_ptr, autocorr_ptr + 1, autocorr_ptr[0]);

        lpc_ptr      += LPC_ORDER;
        autocorr_ptr += LPC_ORDER + 1;
    }
}
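
/**
 * Convert LPC coefficients to LSP frequencies.
 * Falls back to the previous frame's LSP vector if the root search
 * does not find all LPC_ORDER zero crossings.
 *
 * @param lpc      LPC coefficients
 * @param prev_lsp previous frame's LSP vector
 * @param lsp      LSP frequencies of the current frame
 */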
static void lpc2lsp(int16_t *lpc, int16_t *prev_lsp, int16_t *lsp)
{
    int f[LPC_ORDER + 2]; ///< coefficients of the sum and difference
                          ///< polynomials (F1, F2) ordered as
                          ///< f1[0], f2[0], ...., f1[5], f2[5]

    int max, shift, cur_val, prev_val, count, p;
    int i, j;
    int64_t temp;

    /* Apply bandwidth expansion on the LPC coefficients */
    for (i = 0; i < LPC_ORDER; i++)
        lsp[i] = (lpc[i] * bandwidth_expand[i] + (1 << 14)) >> 15;

    /* Initialize f1[0] and f2[0] to 1 in Q25 */
    f[0] = f[1] = 1 << 25;

    /* Compute the remaining coefficients */
    for (i = 0; i < LPC_ORDER / 2; i++) {
        /* f1 */
        f[2 * i + 2] = -f[2 * i] - ((lsp[i] + lsp[LPC_ORDER - 1 - i]) << 12);
        /* f2 */
        f[2 * i + 3] = f[2 * i + 1] - ((lsp[i] - lsp[LPC_ORDER - 1 - i]) << 12);
    }

    /* Divide f1[5] and f2[5] by 2 for use in polynomial evaluation */
    f[LPC_ORDER]     >>= 1;
    f[LPC_ORDER + 1] >>= 1;

    /* Normalize and shorten */
    max = FFABS(f[0]);
    for (i = 1; i < LPC_ORDER + 2; i++)
        max = FFMAX(max, FFABS(f[i]));

    shift = ff_g723_1_normalize_bits(max, 31);

    for (i = 0; i < LPC_ORDER + 2; i++)
        f[i] = av_clipl_int32((int64_t) (f[i] << shift) + (1 << 15)) >> 16;

    /**
     * Evaluate F1 and F2 at uniform intervals of pi/256 along the
     * unit circle and check for zero crossings.
     */
    p    = 0;
    temp = 0;
    for (i = 0; i <= LPC_ORDER / 2; i++)
        temp += f[2 * i] * cos_tab[0];
    prev_val = av_clipl_int32(temp << 1);
    count    = 0;
    for (i = 1; i < COS_TBL_SIZE / 2; i++) {
        /* Evaluate */
        temp = 0;
        for (j = 0; j <= LPC_ORDER / 2; j++)
            temp += f[LPC_ORDER - 2 * j + p] * cos_tab[i * j % COS_TBL_SIZE];
        cur_val = av_clipl_int32(temp << 1);

        /* Check for sign change, indicating a zero crossing */
        if ((cur_val ^ prev_val) < 0) {
            int abs_cur  = FFABS(cur_val);
            int abs_prev = FFABS(prev_val);
            int sum      = abs_cur + abs_prev;

            shift        = ff_g723_1_normalize_bits(sum, 31);
            sum        <<= shift;
            abs_prev     = abs_prev << shift >> 8;
            lsp[count++] = ((i - 1) << 7) + (abs_prev >> 1) / (sum >> 16);

            if (count == LPC_ORDER)
                break;

            /* Switch between sum and difference polynomials */
            p ^= 1;

            /* Evaluate */
            temp = 0;
            for (j = 0; j <= LPC_ORDER / 2; j++)
                temp += f[LPC_ORDER - 2 * j + p] *
                        cos_tab[i * j % COS_TBL_SIZE];
            cur_val = av_clipl_int32(temp << 1);
        }
        prev_val = cur_val;
    }

    if (count != LPC_ORDER)
        memcpy(lsp, prev_lsp, LPC_ORDER * sizeof(int16_t));
}

/**
 * Quantize the current LSP subvector.
 *
 * @param num    band number
 * @param offset offset of the current subvector in an LPC_ORDER vector
 * @param size   size of the current subvector
 */
#define get_index(num, offset, size)                                    \
{                                                                       \
    int error, max = -1;                                                \
    int16_t temp[4];                                                    \
    int i, j;                                                           \
                                                                        \
    for (i = 0; i < LSP_CB_SIZE; i++) {                                 \
        for (j = 0; j < size; j++) {                                    \
            temp[j] = (weight[j + (offset)] * lsp_band##num[i][j] +     \
                       (1 << 14)) >> 15;                                \
        }                                                               \
        error  = ff_g723_1_dot_product(lsp + (offset), temp, size) << 1;\
        error -= ff_g723_1_dot_product(lsp_band##num[i], temp, size);   \
        if (error > max) {                                              \
            max            = error;                                     \
            lsp_index[num] = i;                                         \
        }                                                               \
    }                                                                   \
}

/**
 * Vector quantize the LSP frequencies.
 *
 * @param lsp      the current lsp vector
 * @param prev_lsp the previous lsp vector
 */
static void lsp_quantize(uint8_t *lsp_index, int16_t *lsp, int16_t *prev_lsp)
{
    int16_t weight[LPC_ORDER];
    int16_t min, max;
    int shift, i;

    /* Calculate the VQ weighting vector */
    weight[0]             = (1 << 20) / (lsp[1] - lsp[0]);
    weight[LPC_ORDER - 1] = (1 << 20) /
                            (lsp[LPC_ORDER - 1] - lsp[LPC_ORDER - 2]);

    for (i = 1; i < LPC_ORDER - 1; i++) {
        min = FFMIN(lsp[i] - lsp[i - 1], lsp[i + 1] - lsp[i]);
        if (min > 0x20)
            weight[i] = (1 << 20) / min;
        else
            weight[i] = INT16_MAX;
    }

    /* Normalize */
    max = 0;
    for (i = 0; i < LPC_ORDER; i++)
        max = FFMAX(weight[i], max);

    shift = ff_g723_1_normalize_bits(max, 15);
    for (i = 0; i < LPC_ORDER; i++) {
        weight[i] <<= shift;
    }

    /* Compute the VQ target vector */
    for (i = 0; i < LPC_ORDER; i++) {
        lsp[i] -= dc_lsp[i] +
                  (((prev_lsp[i] - dc_lsp[i]) * 12288 + (1 << 14)) >> 15);
    }

    get_index(0, 0, 3);
    get_index(1, 3, 3);
    get_index(2, 6, 4);
}

/**
 * Perform IIR filtering.
 *
 * @param fir_coef FIR coefficients
 * @param iir_coef IIR coefficients
 * @param src      source vector
 * @param dest     destination vector
 */
static void iir_filter(int16_t *fir_coef, int16_t *iir_coef,
                       int16_t *src, int16_t *dest)
{
    int m, n;

    for (m = 0; m < SUBFRAME_LEN; m++) {
        int64_t filter = 0;
        for (n = 1; n <= LPC_ORDER; n++) {
            filter -= fir_coef[n - 1] * src[m - n] -
                      iir_coef[n - 1] * dest[m - n];
        }

        dest[m] = av_clipl_int32((src[m] << 16) + (filter << 3) +
                                 (1 << 15)) >> 16;
    }
}

/**
 * Apply the formant perceptual weighting filter.
 *
 * @param flt_coef filter coefficients
 * @param unq_lpc  unquantized lpc vector
 */
static void perceptual_filter(G723_1_Context *p, int16_t *flt_coef,
                              int16_t *unq_lpc, int16_t *buf)
{
    int16_t vector[FRAME_LEN + LPC_ORDER];
    int i, j, k, l = 0;

    memcpy(buf, p->iir_mem, sizeof(int16_t) * LPC_ORDER);
    memcpy(vector, p->fir_mem, sizeof(int16_t) * LPC_ORDER);
    memcpy(vector + LPC_ORDER, buf + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);

    for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
        for (k = 0; k < LPC_ORDER; k++) {
            flt_coef[k + 2 * l]             = (unq_lpc[k + l] *
                                               percept_flt_tbl[0][k] +
                                               (1 << 14)) >> 15;
            flt_coef[k + 2 * l + LPC_ORDER] = (unq_lpc[k + l] *
                                               percept_flt_tbl[1][k] +
                                               (1 << 14)) >> 15;
        }
        iir_filter(flt_coef + 2 * l, flt_coef + 2 * l + LPC_ORDER,
                   vector + i, buf + i);
        l += LPC_ORDER;
    }

    memcpy(p->iir_mem, buf + FRAME_LEN, sizeof(int16_t) * LPC_ORDER);
    memcpy(p->fir_mem, vector + FRAME_LEN, sizeof(int16_t) * LPC_ORDER);
}

/**
 * Estimate the open loop pitch period.
 *
 * @param buf   perceptually weighted speech
 * @param start estimation is carried out from this position
 */
static int estimate_pitch(int16_t *buf, int start)
{
    int max_exp = 32;
    int max_ccr = 0x4000;
    int max_eng = 0x7fff;
    int index   = PITCH_MIN;
    int offset  = start - PITCH_MIN + 1;

    int ccr, eng, orig_eng, ccr_eng, exp;
    int diff, temp;

    int i;

    orig_eng = ff_dot_product(buf + offset, buf + offset, HALF_FRAME_LEN);

    for (i = PITCH_MIN; i <= PITCH_MAX - 3; i++) {
        offset--;

        /* Update energy and compute correlation */
        orig_eng += buf[offset] * buf[offset] -
                    buf[offset + HALF_FRAME_LEN] * buf[offset + HALF_FRAME_LEN];
        ccr = ff_dot_product(buf + start, buf + offset, HALF_FRAME_LEN);
        if (ccr <= 0)
            continue;

        /* Split into mantissa and exponent to maintain precision */
        exp  = ff_g723_1_normalize_bits(ccr, 31);
        ccr  = av_clipl_int32((int64_t) (ccr << exp) + (1 << 15)) >> 16;
        exp <<= 1;
        ccr *= ccr;
        temp = ff_g723_1_normalize_bits(ccr, 31);
        ccr  = ccr << temp >> 16;
        exp += temp;

        temp = ff_g723_1_normalize_bits(orig_eng, 31);
        eng  = av_clipl_int32((int64_t) (orig_eng << temp) + (1 << 15)) >> 16;
        exp -= temp;

        if (ccr >= eng) {
            exp--;
            ccr >>= 1;
        }
        if (exp > max_exp)
            continue;

        if (exp + 1 < max_exp)
            goto update;

        /* Equalize exponents before comparison */
        if (exp + 1 == max_exp)
            temp = max_ccr >> 1;
        else
            temp = max_ccr;
        ccr_eng = ccr * max_eng;
        diff    = ccr_eng - eng * temp;
        if (diff > 0 && (i - index < PITCH_MIN || diff > ccr_eng >> 2)) {
update:
            index   = i;
            max_exp = exp;
            max_ccr = ccr;
            max_eng = eng;
        }
    }
    return index;
}

/**
 * Compute harmonic noise filter parameters.
 *
 * @param buf       perceptually weighted speech
 * @param pitch_lag open loop pitch period
 * @param hf        harmonic filter parameters
 */
static void comp_harmonic_coeff(int16_t *buf, int16_t pitch_lag, HFParam *hf)
{
    int ccr, eng, max_ccr, max_eng;
    int exp, max, diff;
    int energy[15];
    int i, j;

    for (i = 0, j = pitch_lag - 3; j <= pitch_lag + 3; i++, j++) {
        /* Compute residual energy */
        energy[i << 1] = ff_dot_product(buf - j, buf - j, SUBFRAME_LEN);
        /* Compute correlation */
        energy[(i << 1) + 1] = ff_dot_product(buf, buf - j, SUBFRAME_LEN);
    }

    /* Compute target energy */
    energy[14] = ff_dot_product(buf, buf, SUBFRAME_LEN);

    /* Normalize */
    max = 0;
    for (i = 0; i < 15; i++)
        max = FFMAX(max, FFABS(energy[i]));

    exp = ff_g723_1_normalize_bits(max, 31);
    for (i = 0; i < 15; i++) {
        energy[i] = av_clipl_int32((int64_t)(energy[i] << exp) +
                                   (1 << 15)) >> 16;
    }

    hf->index = -1;
    hf->gain  = 0;
    max_ccr   = 1;
    max_eng   = 0x7fff;

    for (i = 0; i <= 6; i++) {
        eng = energy[i << 1];
        ccr = energy[(i << 1) + 1];

        if (ccr <= 0)
            continue;

        ccr  = (ccr * ccr + (1 << 14)) >> 15;
        diff = ccr * max_eng - eng * max_ccr;
        if (diff > 0) {
            max_ccr   = ccr;
            max_eng   = eng;
            hf->index = i;
        }
    }

    if (hf->index == -1) {
        hf->index = pitch_lag;
        return;
    }

    eng = energy[14] * max_eng;
    eng = (eng >> 2) + (eng >> 3);
    ccr = energy[(hf->index << 1) + 1] * energy[(hf->index << 1) + 1];
    if (eng < ccr) {
        eng = energy[(hf->index << 1) + 1];

        if (eng >= max_eng)
            hf->gain = 0x2800;
        else
            hf->gain = ((eng << 15) / max_eng * 0x2800 + (1 << 14)) >> 15;
    }
    hf->index += pitch_lag - 3;
}

/**
 * Apply the harmonic noise shaping filter.
 *
 * @param hf filter parameters
 */
static void harmonic_filter(HFParam *hf, const int16_t *src, int16_t *dest)
{
    int i;

    for (i = 0; i < SUBFRAME_LEN; i++) {
        int64_t temp = hf->gain * src[i - hf->index] << 1;
        dest[i] = av_clipl_int32((src[i] << 16) - temp + (1 << 15)) >> 16;
    }
}
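
/**
 * Subtract the harmonic noise shaping filter response of src from dest.
 *
 * @param hf   filter parameters
 * @param src  source vector the filter is applied to
 * @param dest target vector, updated in place
 */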
static void harmonic_noise_sub(HFParam *hf, const int16_t *src, int16_t *dest)
{
    int i;

    for (i = 0; i < SUBFRAME_LEN; i++) {
        int64_t temp = hf->gain * src[i - hf->index] << 1;
        dest[i] = av_clipl_int32(((dest[i] - src[i]) << 16) + temp +
                                 (1 << 15)) >> 16;
    }
}

/**
 * Combined synthesis and formant perceptual weighting filter.
 *
 * @param qnt_lpc  quantized lpc coefficients
 * @param perf_lpc perceptual filter coefficients
 * @param perf_fir perceptual filter fir memory
 * @param perf_iir perceptual filter iir memory
 * @param scale    the filter output will be scaled by 2^scale
 */
static void synth_percept_filter(int16_t *qnt_lpc, int16_t *perf_lpc,
                                 int16_t *perf_fir, int16_t *perf_iir,
                                 const int16_t *src, int16_t *dest, int scale)
{
    int i, j;
    int16_t buf_16[SUBFRAME_LEN + LPC_ORDER];
    int64_t buf[SUBFRAME_LEN];

    int16_t *bptr_16 = buf_16 + LPC_ORDER;

    memcpy(buf_16, perf_fir, sizeof(int16_t) * LPC_ORDER);
    memcpy(dest - LPC_ORDER, perf_iir, sizeof(int16_t) * LPC_ORDER);

    for (i = 0; i < SUBFRAME_LEN; i++) {
        int64_t temp = 0;
        for (j = 1; j <= LPC_ORDER; j++)
            temp -= qnt_lpc[j - 1] * bptr_16[i - j];

        buf[i]     = (src[i] << 15) + (temp << 3);
        bptr_16[i] = av_clipl_int32(buf[i] + (1 << 15)) >> 16;
    }

    for (i = 0; i < SUBFRAME_LEN; i++) {
        int64_t fir = 0, iir = 0;
        for (j = 1; j <= LPC_ORDER; j++) {
            fir -= perf_lpc[j - 1] * bptr_16[i - j];
            iir += perf_lpc[j + LPC_ORDER - 1] * dest[i - j];
        }
        dest[i] = av_clipl_int32(((buf[i] + (fir << 3)) << scale) + (iir << 3) +
                                 (1 << 15)) >> 16;
    }
    memcpy(perf_fir, buf_16 + SUBFRAME_LEN, sizeof(int16_t) * LPC_ORDER);
    memcpy(perf_iir, dest + SUBFRAME_LEN - LPC_ORDER,
           sizeof(int16_t) * LPC_ORDER);
}

/**
 * Compute the adaptive codebook contribution.
 *
 * @param buf   input signal
 * @param index the current subframe index
 */
static void acb_search(G723_1_Context *p, int16_t *residual,
                       int16_t *impulse_resp, const int16_t *buf,
                       int index)
{
    int16_t flt_buf[PITCH_ORDER][SUBFRAME_LEN];

    const int16_t *cb_tbl = adaptive_cb_gain85;

    int ccr_buf[PITCH_ORDER * SUBFRAMES << 2];

    int pitch_lag = p->pitch_lag[index >> 1];
    int acb_lag   = 1;
    int acb_gain  = 0;
    int odd_frame = index & 1;
    int iter      = 3 + odd_frame;
    int count     = 0;
    int tbl_size  = 85;

    int i, j, k, l, max;
    int64_t temp;

    if (!odd_frame) {
        if (pitch_lag == PITCH_MIN)
            pitch_lag++;
        else
            pitch_lag = FFMIN(pitch_lag, PITCH_MAX - 5);
    }

    for (i = 0; i < iter; i++) {
        ff_g723_1_get_residual(residual, p->prev_excitation, pitch_lag + i - 1);

        for (j = 0; j < SUBFRAME_LEN; j++) {
            temp = 0;
            for (k = 0; k <= j; k++)
                temp += residual[PITCH_ORDER - 1 + k] * impulse_resp[j - k];
            flt_buf[PITCH_ORDER - 1][j] = av_clipl_int32((temp << 1) +
                                                         (1 << 15)) >> 16;
        }

        for (j = PITCH_ORDER - 2; j >= 0; j--) {
            flt_buf[j][0] = ((residual[j] << 13) + (1 << 14)) >> 15;
            for (k = 1; k < SUBFRAME_LEN; k++) {
                temp = (flt_buf[j + 1][k - 1] << 15) +
                       residual[j] * impulse_resp[k];
                flt_buf[j][k] = av_clipl_int32((temp << 1) + (1 << 15)) >> 16;
            }
        }

        /* Compute crosscorrelation with the signal */
        for (j = 0; j < PITCH_ORDER; j++) {
            temp = ff_dot_product(buf, flt_buf[j], SUBFRAME_LEN);
            ccr_buf[count++] = av_clipl_int32(temp << 1);
        }

        /* Compute energies */
        for (j = 0; j < PITCH_ORDER; j++) {
            ccr_buf[count++] = ff_g723_1_dot_product(flt_buf[j], flt_buf[j],
                                                     SUBFRAME_LEN);
        }

        for (j = 1; j < PITCH_ORDER; j++) {
            for (k = 0; k < j; k++) {
                temp = ff_dot_product(flt_buf[j], flt_buf[k], SUBFRAME_LEN);
                ccr_buf[count++] = av_clipl_int32(temp << 2);
            }
        }
    }

    /* Normalize and shorten */
    max = 0;
    for (i = 0; i < 20 * iter; i++)
        max = FFMAX(max, FFABS(ccr_buf[i]));

    temp = ff_g723_1_normalize_bits(max, 31);

    for (i = 0; i < 20 * iter; i++)
        ccr_buf[i] = av_clipl_int32((int64_t) (ccr_buf[i] << temp) +
                                    (1 << 15)) >> 16;

    max = 0;
    for (i = 0; i < iter; i++) {
        /* Select quantization table */
        if ((!odd_frame && pitch_lag + i - 1 >= SUBFRAME_LEN - 2) ||
            (odd_frame && pitch_lag >= SUBFRAME_LEN - 2)) {
            cb_tbl   = adaptive_cb_gain170;
            tbl_size = 170;
        }

        for (j = 0, k = 0; j < tbl_size; j++, k += 20) {
            temp = 0;
            for (l = 0; l < 20; l++)
                temp += ccr_buf[20 * i + l] * cb_tbl[k + l];
            temp = av_clipl_int32(temp);

            if (temp > max) {
                max      = temp;
                acb_gain = j;
                acb_lag  = i;
            }
        }
    }

    if (!odd_frame) {
        pitch_lag += acb_lag - 1;
        acb_lag    = 1;
    }

    p->pitch_lag[index >> 1]      = pitch_lag;
    p->subframe[index].ad_cb_lag  = acb_lag;
    p->subframe[index].ad_cb_gain = acb_gain;
}

/**
 * Subtract the adaptive codebook contribution from the input
 * to obtain the residual.
 *
 * @param buf target vector
 */
static void sub_acb_contrib(const int16_t *residual, const int16_t *impulse_resp,
                            int16_t *buf)
{
    int i, j;

    /* Subtract adaptive CB contribution to obtain the residual */
    for (i = 0; i < SUBFRAME_LEN; i++) {
        int64_t temp = buf[i] << 14;
        for (j = 0; j <= i; j++)
            temp -= residual[j] * impulse_resp[i - j];

        buf[i] = av_clipl_int32((temp << 2) + (1 << 15)) >> 16;
    }
}

/**
 * Quantize the residual signal using the fixed codebook (MP-MLQ).
 *
 * @param optim optimized fixed codebook parameters
 * @param buf   excitation vector
 */
static void get_fcb_param(FCBParam *optim, int16_t *impulse_resp,
                          int16_t *buf, int pulse_cnt, int pitch_lag)
{
    FCBParam param;
    int16_t impulse_r[SUBFRAME_LEN];
    int16_t temp_corr[SUBFRAME_LEN];
    int16_t impulse_corr[SUBFRAME_LEN];

    int ccr1[SUBFRAME_LEN];
    int ccr2[SUBFRAME_LEN];
    int amp, err, max, max_amp_index, min, scale, i, j, k, l;

    int64_t temp;

    /* Update impulse response */
    memcpy(impulse_r, impulse_resp, sizeof(int16_t) * SUBFRAME_LEN);
    param.dirac_train = 0;
    if (pitch_lag < SUBFRAME_LEN - 2) {
        param.dirac_train = 1;
        ff_g723_1_gen_dirac_train(impulse_r, pitch_lag);
    }

    for (i = 0; i < SUBFRAME_LEN; i++)
        temp_corr[i] = impulse_r[i] >> 1;

    /* Compute impulse response autocorrelation */
    temp = ff_g723_1_dot_product(temp_corr, temp_corr, SUBFRAME_LEN);

    scale           = ff_g723_1_normalize_bits(temp, 31);
    impulse_corr[0] = av_clipl_int32((temp << scale) + (1 << 15)) >> 16;

    for (i = 1; i < SUBFRAME_LEN; i++) {
        temp = ff_g723_1_dot_product(temp_corr + i, temp_corr,
                                     SUBFRAME_LEN - i);
        impulse_corr[i] = av_clipl_int32((temp << scale) + (1 << 15)) >> 16;
    }

    /* Compute crosscorrelation of impulse response with residual signal */
    scale -= 4;
    for (i = 0; i < SUBFRAME_LEN; i++) {
        temp = ff_g723_1_dot_product(buf + i, impulse_r, SUBFRAME_LEN - i);
        if (scale < 0)
            ccr1[i] = temp >> -scale;
        else
            ccr1[i] = av_clipl_int32(temp << scale);
    }

    /* Search loop */
    for (i = 0; i < GRID_SIZE; i++) {
        /* Maximize the crosscorrelation */
        max = 0;
        for (j = i; j < SUBFRAME_LEN; j += GRID_SIZE) {
            temp = FFABS(ccr1[j]);
            if (temp >= max) {
                max                = temp;
                param.pulse_pos[0] = j;
            }
        }

        /* Quantize the gain (max crosscorrelation/impulse_corr[0]) */
        amp           = max;
        min           = 1 << 30;
        max_amp_index = GAIN_LEVELS - 2;
        for (j = max_amp_index; j >= 2; j--) {
            temp = av_clipl_int32((int64_t) fixed_cb_gain[j] *
                                  impulse_corr[0] << 1);
            temp = FFABS(temp - amp);
            if (temp < min) {
                min           = temp;
                max_amp_index = j;
            }
        }

        max_amp_index--;
        /* Select additional gain values */
        for (j = 1; j < 5; j++) {
            for (k = i; k < SUBFRAME_LEN; k += GRID_SIZE) {
                temp_corr[k] = 0;
                ccr2[k]      = ccr1[k];
            }
            param.amp_index = max_amp_index + j - 2;
            amp             = fixed_cb_gain[param.amp_index];

            param.pulse_sign[0] = (ccr2[param.pulse_pos[0]] < 0) ? -amp : amp;
            temp_corr[param.pulse_pos[0]] = 1;

            for (k = 1; k < pulse_cnt; k++) {
                max = INT_MIN;
                for (l = i; l < SUBFRAME_LEN; l += GRID_SIZE) {
                    if (temp_corr[l])
                        continue;
                    temp = impulse_corr[FFABS(l - param.pulse_pos[k - 1])];
                    temp = av_clipl_int32((int64_t) temp *
                                          param.pulse_sign[k - 1] << 1);
                    ccr2[l] -= temp;
                    temp = FFABS(ccr2[l]);
                    if (temp > max) {
                        max                = temp;
                        param.pulse_pos[k] = l;
                    }
                }

                param.pulse_sign[k] = (ccr2[param.pulse_pos[k]] < 0) ?
                                      -amp : amp;
                temp_corr[param.pulse_pos[k]] = 1;
            }

            /* Create the error vector */
            memset(temp_corr, 0, sizeof(int16_t) * SUBFRAME_LEN);

            for (k = 0; k < pulse_cnt; k++)
                temp_corr[param.pulse_pos[k]] = param.pulse_sign[k];

            for (k = SUBFRAME_LEN - 1; k >= 0; k--) {
                temp = 0;
                for (l = 0; l <= k; l++) {
                    int prod = av_clipl_int32((int64_t) temp_corr[l] *
                                              impulse_r[k - l] << 1);
                    temp = av_clipl_int32(temp + prod);
                }
                temp_corr[k] = temp << 2 >> 16;
            }

            /* Compute square of error */
            err = 0;
            for (k = 0; k < SUBFRAME_LEN; k++) {
                int64_t prod;
                prod = av_clipl_int32((int64_t) buf[k] * temp_corr[k] << 1);
                err  = av_clipl_int32(err - prod);
                prod = av_clipl_int32((int64_t) temp_corr[k] * temp_corr[k]);
                err  = av_clipl_int32(err + prod);
            }

            /* Minimize */
            if (err < optim->min_err) {
                optim->min_err     = err;
                optim->grid_index  = i;
                optim->amp_index   = param.amp_index;
                optim->dirac_train = param.dirac_train;

                for (k = 0; k < pulse_cnt; k++) {
                    optim->pulse_sign[k] = param.pulse_sign[k];
                    optim->pulse_pos[k]  = param.pulse_pos[k];
                }
            }
        }
    }
}

/**
 * Encode the pulse position and gain of the current subframe.
 *
 * @param optim optimized fixed CB parameters
 * @param buf   excitation vector
 */
static void pack_fcb_param(G723_1_Subframe *subfrm, FCBParam *optim,
                           int16_t *buf, int pulse_cnt)
{
    int i, j;

    j = PULSE_MAX - pulse_cnt;

    subfrm->pulse_sign = 0;
    subfrm->pulse_pos  = 0;

    for (i = 0; i < SUBFRAME_LEN >> 1; i++) {
        int val = buf[optim->grid_index + (i << 1)];
        if (!val) {
            subfrm->pulse_pos += combinatorial_table[j][i];
        } else {
            subfrm->pulse_sign <<= 1;
            if (val < 0)
                subfrm->pulse_sign++;
            j++;

            if (j == PULSE_MAX)
                break;
        }
    }
    subfrm->amp_index   = optim->amp_index;
    subfrm->grid_index  = optim->grid_index;
    subfrm->dirac_train = optim->dirac_train;
}

/**
 * Compute the fixed codebook excitation.
 *
 * @param buf          target vector
 * @param impulse_resp impulse response of the combined filter
 */
static void fcb_search(G723_1_Context *p, int16_t *impulse_resp,
                       int16_t *buf, int index)
{
    FCBParam optim;
    int pulse_cnt = pulses[index];
    int i;

    optim.min_err = 1 << 30;
    get_fcb_param(&optim, impulse_resp, buf, pulse_cnt, SUBFRAME_LEN);

    if (p->pitch_lag[index >> 1] < SUBFRAME_LEN - 2) {
        get_fcb_param(&optim, impulse_resp, buf, pulse_cnt,
                      p->pitch_lag[index >> 1]);
    }

    /* Reconstruct the excitation */
    memset(buf, 0, sizeof(int16_t) * SUBFRAME_LEN);
    for (i = 0; i < pulse_cnt; i++)
        buf[optim.pulse_pos[i]] = optim.pulse_sign[i];

    pack_fcb_param(&p->subframe[index], &optim, buf, pulse_cnt);

    if (optim.dirac_train)
        ff_g723_1_gen_dirac_train(buf, p->pitch_lag[index >> 1]);
}

/**
 * Pack the frame parameters into the output bitstream.
 *
 * @param p     the encoder context
 * @param avpkt output packet
 *
 * @return the size of the encoded frame in bytes
 */
static int pack_bitstream(G723_1_Context *p, AVPacket *avpkt)
{
    PutBitContext pb;
    int info_bits = 0;
    int i, temp;

    init_put_bits(&pb, avpkt->data, avpkt->size);

    put_bits(&pb, 2, info_bits);

    put_bits(&pb, 8, p->lsp_index[2]);
    put_bits(&pb, 8, p->lsp_index[1]);
    put_bits(&pb, 8, p->lsp_index[0]);

    put_bits(&pb, 7, p->pitch_lag[0] - PITCH_MIN);
    put_bits(&pb, 2, p->subframe[1].ad_cb_lag);
    put_bits(&pb, 7, p->pitch_lag[1] - PITCH_MIN);
    put_bits(&pb, 2, p->subframe[3].ad_cb_lag);

    /* Write 12 bit combined gain */
    for (i = 0; i < SUBFRAMES; i++) {
        temp = p->subframe[i].ad_cb_gain * GAIN_LEVELS +
               p->subframe[i].amp_index;
        if (p->cur_rate == RATE_6300)
            temp += p->subframe[i].dirac_train << 11;
        put_bits(&pb, 12, temp);
    }

    put_bits(&pb, 1, p->subframe[0].grid_index);
    put_bits(&pb, 1, p->subframe[1].grid_index);
    put_bits(&pb, 1, p->subframe[2].grid_index);
    put_bits(&pb, 1, p->subframe[3].grid_index);

    if (p->cur_rate == RATE_6300) {
        skip_put_bits(&pb, 1); /* reserved bit */

        /* Write 13 bit combined position index */
        temp = (p->subframe[0].pulse_pos >> 16) * 810 +
               (p->subframe[1].pulse_pos >> 14) *  90 +
               (p->subframe[2].pulse_pos >> 16) *   9 +
               (p->subframe[3].pulse_pos >> 14);
        put_bits(&pb, 13, temp);

        put_bits(&pb, 16, p->subframe[0].pulse_pos & 0xffff);
        put_bits(&pb, 14, p->subframe[1].pulse_pos & 0x3fff);
        put_bits(&pb, 16, p->subframe[2].pulse_pos & 0xffff);
        put_bits(&pb, 14, p->subframe[3].pulse_pos & 0x3fff);

        put_bits(&pb, 6, p->subframe[0].pulse_sign);
        put_bits(&pb, 5, p->subframe[1].pulse_sign);
        put_bits(&pb, 6, p->subframe[2].pulse_sign);
        put_bits(&pb, 5, p->subframe[3].pulse_sign);
    }

    flush_put_bits(&pb);
    return frame_size[info_bits];
}

static int g723_1_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                               const AVFrame *frame, int *got_packet_ptr)
{
    G723_1_Context *p = avctx->priv_data;
    int16_t unq_lpc[LPC_ORDER * SUBFRAMES];
    int16_t qnt_lpc[LPC_ORDER * SUBFRAMES];
    int16_t cur_lsp[LPC_ORDER];
    int16_t weighted_lpc[LPC_ORDER * SUBFRAMES << 1];
    int16_t vector[FRAME_LEN + PITCH_MAX];
    int offset, ret, i, j;
    int16_t *in, *start;
    HFParam hf[4];

    /* duplicate input */
    start = in = av_malloc(frame->nb_samples * sizeof(int16_t));
    if (!in)
        return AVERROR(ENOMEM);

    memcpy(in, frame->data[0], frame->nb_samples * sizeof(int16_t));

    highpass_filter(in, &p->hpf_fir_mem, &p->hpf_iir_mem);

    memcpy(vector, p->prev_data, HALF_FRAME_LEN * sizeof(int16_t));
    memcpy(vector + HALF_FRAME_LEN, in, FRAME_LEN * sizeof(int16_t));

    comp_lpc_coeff(vector, unq_lpc);
    lpc2lsp(&unq_lpc[LPC_ORDER * 3], p->prev_lsp, cur_lsp);
    lsp_quantize(p->lsp_index, cur_lsp, p->prev_lsp);

    /* Update memory */
    memcpy(vector + LPC_ORDER, p->prev_data + SUBFRAME_LEN,
           sizeof(int16_t) * SUBFRAME_LEN);
    memcpy(vector + LPC_ORDER + SUBFRAME_LEN, in,
           sizeof(int16_t) * (HALF_FRAME_LEN + SUBFRAME_LEN));
    memcpy(p->prev_data, in + HALF_FRAME_LEN,
           sizeof(int16_t) * HALF_FRAME_LEN);
    memcpy(in, vector + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);

    perceptual_filter(p, weighted_lpc, unq_lpc, vector);

    memcpy(in, vector + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);
    memcpy(vector, p->prev_weight_sig, sizeof(int16_t) * PITCH_MAX);
    memcpy(vector + PITCH_MAX, in, sizeof(int16_t) * FRAME_LEN);

    ff_g723_1_scale_vector(vector, vector, FRAME_LEN + PITCH_MAX);

    p->pitch_lag[0] = estimate_pitch(vector, PITCH_MAX);
    p->pitch_lag[1] = estimate_pitch(vector, PITCH_MAX + HALF_FRAME_LEN);

    for (i = PITCH_MAX, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
        comp_harmonic_coeff(vector + i, p->pitch_lag[j >> 1], hf + j);

    memcpy(vector, p->prev_weight_sig, sizeof(int16_t) * PITCH_MAX);
    memcpy(vector + PITCH_MAX, in, sizeof(int16_t) * FRAME_LEN);
    memcpy(p->prev_weight_sig, vector + FRAME_LEN, sizeof(int16_t) * PITCH_MAX);

    for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
        harmonic_filter(hf + j, vector + PITCH_MAX + i, in + i);

    ff_g723_1_inverse_quant(cur_lsp, p->prev_lsp, p->lsp_index, 0);
    ff_g723_1_lsp_interpolate(qnt_lpc, cur_lsp, p->prev_lsp);

    memcpy(p->prev_lsp, cur_lsp, sizeof(int16_t) * LPC_ORDER);

    offset = 0;
    for (i = 0; i < SUBFRAMES; i++) {
        int16_t impulse_resp[SUBFRAME_LEN];
        int16_t residual[SUBFRAME_LEN + PITCH_ORDER - 1];
        int16_t flt_in[SUBFRAME_LEN];
        int16_t zero[LPC_ORDER], fir[LPC_ORDER], iir[LPC_ORDER];

        /**
         * Compute the combined impulse response of the synthesis filter,
         * formant perceptual weighting filter and harmonic noise shaping filter
         */
        memset(zero, 0, sizeof(int16_t) * LPC_ORDER);
        memset(vector, 0, sizeof(int16_t) * PITCH_MAX);
        memset(flt_in, 0, sizeof(int16_t) * SUBFRAME_LEN);

        flt_in[0] = 1 << 13; /* Unit impulse */
        synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
                             zero, zero, flt_in, vector + PITCH_MAX, 1);
        harmonic_filter(hf + i, vector + PITCH_MAX, impulse_resp);

        /* Compute the combined zero input response */
        flt_in[0] = 0;
        memcpy(fir, p->perf_fir_mem, sizeof(int16_t) * LPC_ORDER);
        memcpy(iir, p->perf_iir_mem, sizeof(int16_t) * LPC_ORDER);

        synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
                             fir, iir, flt_in, vector + PITCH_MAX, 0);
        memcpy(vector, p->harmonic_mem, sizeof(int16_t) * PITCH_MAX);
        harmonic_noise_sub(hf + i, vector + PITCH_MAX, in);

        acb_search(p, residual, impulse_resp, in, i);
        ff_g723_1_gen_acb_excitation(residual, p->prev_excitation,
                                     p->pitch_lag[i >> 1], &p->subframe[i],
                                     RATE_6300);
        sub_acb_contrib(residual, impulse_resp, in);

        fcb_search(p, impulse_resp, in, i);

        /* Reconstruct the excitation */
        ff_g723_1_gen_acb_excitation(impulse_resp, p->prev_excitation,
                                     p->pitch_lag[i >> 1], &p->subframe[i],
                                     RATE_6300);
        memmove(p->prev_excitation, p->prev_excitation + SUBFRAME_LEN,
                sizeof(int16_t) * (PITCH_MAX - SUBFRAME_LEN));
        for (j = 0; j < SUBFRAME_LEN; j++)
            in[j] = av_clip_int16((in[j] << 1) + impulse_resp[j]);
        memcpy(p->prev_excitation + PITCH_MAX - SUBFRAME_LEN, in,
               sizeof(int16_t) * SUBFRAME_LEN);

        /* Update filter memories */
        synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
                             p->perf_fir_mem, p->perf_iir_mem,
                             in, vector + PITCH_MAX, 0);
        memmove(p->harmonic_mem, p->harmonic_mem + SUBFRAME_LEN,
                sizeof(int16_t) * (PITCH_MAX - SUBFRAME_LEN));
        memcpy(p->harmonic_mem + PITCH_MAX - SUBFRAME_LEN, vector + PITCH_MAX,
               sizeof(int16_t) * SUBFRAME_LEN);

        in     += SUBFRAME_LEN;
        offset += LPC_ORDER;
    }

    av_free(start);

    ret = ff_alloc_packet(avpkt, 24);
    if (ret < 0)
        return ret;

    *got_packet_ptr = 1;
    return pack_bitstream(p, avpkt);
}

AVCodec ff_g723_1_encoder = {
    .name           = "g723_1",
    .long_name      = NULL_IF_CONFIG_SMALL("G.723.1"),
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_G723_1,
    .priv_data_size = sizeof(G723_1_Context),
    .init           = g723_1_encode_init,
    .encode2        = g723_1_encode_frame,
    .sample_fmts    = (const enum AVSampleFormat[]) {
        AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
    },
};