/*
 * G.729, G729 Annex D decoders
 * Copyright (c) 2008 Vladimir Voroshilov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>

#include "avcodec.h"
#include "libavutil/avutil.h"
#include "get_bits.h"
#include "dsputil.h"

#include "g729.h"
#include "lsp.h"
#include "celp_math.h"
#include "celp_filters.h"
#include "acelp_filters.h"
#include "acelp_pitch_delay.h"
#include "acelp_vectors.h"
#include "g729data.h"
#include "g729postfilter.h"
/**
 * minimum quantized LSF value (3.2.4)
 * 0.005 in Q13
 */
#define LSFQ_MIN 40

/**
 * maximum quantized LSF value (3.2.4)
 * 3.135 in Q13
 */
#define LSFQ_MAX 25681

/**
 * minimum LSF distance (3.2.4)
 * 0.0391 in Q13
 */
#define LSFQ_DIFF_MIN 321

/// interpolation filter length
#define INTERPOL_LEN 11

/**
 * minimum gain pitch value (3.8, Equation 47)
 * 0.2 in (1.14)
 */
#define SHARP_MIN 3277
/**
 * maximum gain pitch value (3.8, Equation 47)
 * Note: this does not comply with the specification, which gives 0.8
 * (13107 in (1.14)); the reference C code uses 13017 (i.e. 0.7945) instead.
 */
#define SHARP_MAX 13017
/**
 * MR_ENERGY (mean removed energy) = mean_energy + 10 * log10(2^26 * subframe_size) in (7.13)
 */
#define MR_ENERGY 1018156

#define DECISION_NOISE        0
#define DECISION_INTERMEDIATE 1
#define DECISION_VOICE        2
typedef enum {
    FORMAT_G729_8K = 0,
    FORMAT_G729D_6K4,
    FORMAT_COUNT,
} G729Formats;

typedef struct {
    uint8_t ac_index_bits[2];   ///< adaptive codebook index for second subframe (size in bits)
    uint8_t parity_bit;         ///< parity bit for pitch delay
    uint8_t gc_1st_index_bits;  ///< gain codebook (first stage) index (size in bits)
    uint8_t gc_2nd_index_bits;  ///< gain codebook (second stage) index (size in bits)
    uint8_t fc_signs_bits;      ///< number of pulses in fixed-codebook vector
    uint8_t fc_indexes_bits;    ///< size (in bits) of fixed-codebook index entry
} G729FormatDescription;
typedef struct {
    DSPContext dsp;

    /// past excitation signal buffer
    int16_t exc_base[2*SUBFRAME_SIZE+PITCH_DELAY_MAX+INTERPOL_LEN];

    int16_t* exc;               ///< start of past excitation data in buffer
    int pitch_delay_int_prev;   ///< integer part of previous subframe's pitch delay (4.1.3)

    /// (2.13) LSP quantizer outputs
    int16_t  past_quantizer_output_buf[MA_NP + 1][10];
    int16_t* past_quantizer_outputs[MA_NP + 1];

    int16_t lsfq[10];           ///< (2.13) quantized LSF coefficients from previous frame
    int16_t lsp_buf[2][10];     ///< (0.15) LSP coefficients (previous and current frames) (3.2.5)
    int16_t *lsp[2];            ///< pointers to lsp_buf

    int16_t quant_energy[4];    ///< (5.10) past quantized energy

    /// previous speech data for LP synthesis filter
    int16_t syn_filter_data[10];

    /// residual signal buffer (used in long-term postfilter)
    int16_t residual[SUBFRAME_SIZE + RES_PREV_DATA_SIZE];

    /// previous speech data for residual calculation filter
    int16_t res_filter_data[SUBFRAME_SIZE+10];

    /// previous speech data for short-term postfilter
    int16_t pos_filter_data[SUBFRAME_SIZE+10];

    /// (1.14) pitch gain of current and five previous subframes
    int16_t past_gain_pitch[6];

    /// (14.1) gain code from current and previous subframe
    int16_t past_gain_code[2];

    /// voice decision on previous subframe (0-noise, 1-intermediate, 2-voice), G.729D
    int16_t voice_decision;

    int16_t onset;              ///< detected onset level (0-2)
    int16_t was_periodic;       ///< whether previous frame was declared as periodic or not (4.4)
    int16_t ht_prev_data;       ///< previous data for 4.2.3, equation 86
    int gain_coeff;             ///< (1.14) gain coefficient (4.2.4)
    uint16_t rand_value;        ///< random number generator value (4.4.4)
    int ma_predictor_prev;      ///< switched MA predictor of LSP quantizer from last good frame
    /// (14.14) high-pass filter data (past filtered samples)
    int hpf_f[2];

    /// high-pass filter data (past input samples)
    int16_t hpf_z[2];
} G729Context;
static const G729FormatDescription format_g729_8k = {
    .ac_index_bits     = {8,5},
    .parity_bit        = 1,
    .gc_1st_index_bits = GC_1ST_IDX_BITS_8K,
    .gc_2nd_index_bits = GC_2ND_IDX_BITS_8K,
    .fc_signs_bits     = 4,
    .fc_indexes_bits   = 13,
};

static const G729FormatDescription format_g729d_6k4 = {
    .ac_index_bits     = {8,4},
    .parity_bit        = 0,
    .gc_1st_index_bits = GC_1ST_IDX_BITS_6K4,
    .gc_2nd_index_bits = GC_2ND_IDX_BITS_6K4,
    .fc_signs_bits     = 2,
    .fc_indexes_bits   = 9,
};
/**
 * @brief pseudo random number generator
 */
static inline uint16_t g729_prng(uint16_t value)
{
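    /* 16-bit linear congruential generator; the uint16_t return type makes
       the multiply-add wrap modulo 2^16 (used for erasure concealment, 4.4.4). */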
    return 31821 * value + 13849;
}
/**
 * Gets the parity bit of bits 2..7
 */
static inline int get_parity(uint8_t value)
{
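    /* 0x6996966996696996 is a 64-bit parity lookup table: bit n holds the
       parity of n. value >> 2 keeps bits 2..7 of the 8-bit value. */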
    return (0x6996966996696996ULL >> (value >> 2)) & 1;
}
/**
 * Decodes LSF (Line Spectral Frequencies) from L0-L3 (3.2.4).
 * @param lsfq [out] (2.13) quantized LSF coefficients
 * @param past_quantizer_outputs [in/out] (2.13) quantizer outputs from previous frames
 * @param ma_predictor switched MA predictor of LSP quantizer
 * @param vq_1st first stage vector of quantizer
 * @param vq_2nd_low second stage lower vector of LSP quantizer
 * @param vq_2nd_high second stage higher vector of LSP quantizer
 */
static void lsf_decode(int16_t* lsfq, int16_t* past_quantizer_outputs[MA_NP + 1],
                       int16_t ma_predictor,
                       int16_t vq_1st, int16_t vq_2nd_low, int16_t vq_2nd_high)
{
    int i,j;
    static const uint8_t min_distance[2]={10, 5}; //(2.13)
    int16_t* quantizer_output = past_quantizer_outputs[MA_NP];

    for (i = 0; i < 5; i++) {
        quantizer_output[i]     = cb_lsp_1st[vq_1st][i    ] + cb_lsp_2nd[vq_2nd_low ][i    ];
        quantizer_output[i + 5] = cb_lsp_1st[vq_1st][i + 5] + cb_lsp_2nd[vq_2nd_high][i + 5];
    }
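    /* Stability rearrangement: pull apart adjacent coefficients that are closer
       than min_distance, in two passes with decreasing thresholds (3.2.4). */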
    for (j = 0; j < 2; j++) {
        for (i = 1; i < 10; i++) {
            int diff = (quantizer_output[i - 1] - quantizer_output[i] + min_distance[j]) >> 1;
            if (diff > 0) {
                quantizer_output[i - 1] -= diff;
                quantizer_output[i    ] += diff;
            }
        }
    }
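    /* Switched MA prediction: the quantized LSFs are a weighted sum of the
       current quantizer output and the outputs of the last MA_NP frames. */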
    for (i = 0; i < 10; i++) {
        int sum = quantizer_output[i] * cb_ma_predictor_sum[ma_predictor][i];
        for (j = 0; j < MA_NP; j++)
            sum += past_quantizer_outputs[j][i] * cb_ma_predictor[ma_predictor][j][i];

        lsfq[i] = sum >> 15;
    }

    ff_acelp_reorder_lsf(lsfq, LSFQ_DIFF_MIN, LSFQ_MIN, LSFQ_MAX, 10);
}
/**
 * Restores past LSP quantizer output using LSF from previous frame
 * @param lsfq [in] (2.13) quantized LSF coefficients from previous frame
 * @param past_quantizer_outputs [in/out] (2.13) quantizer outputs from previous frames
 * @param ma_predictor_prev MA predictor from previous frame
 */
static void lsf_restore_from_previous(int16_t* lsfq,
                                      int16_t* past_quantizer_outputs[MA_NP + 1],
                                      int ma_predictor_prev)
{
    int16_t* quantizer_output = past_quantizer_outputs[MA_NP];
    int i,k;
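    /* Invert the MA prediction: remove the contribution of the past quantizer
       outputs from the previous LSFs and rescale by the inverse predictor weight. */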
    for (i = 0; i < 10; i++) {
        int tmp = lsfq[i] << 15;

        for (k = 0; k < MA_NP; k++)
            tmp -= past_quantizer_outputs[k][i] * cb_ma_predictor[ma_predictor_prev][k][i];

        quantizer_output[i] = ((tmp >> 15) * cb_ma_predictor_sum_inv[ma_predictor_prev][i]) >> 12;
    }
}
/**
 * Constructs new excitation signal and applies phase filter to it
 * @param out [out] constructed speech signal
 * @param in original excitation signal
 * @param fc_cur (2.13) original fixed-codebook vector
 * @param dstate index into the phase filter table (the voice decision)
 * @param gain_code (14.1) gain code
 * @param subframe_size length of the subframe
 */
static void g729d_get_new_exc(
        int16_t* out,
        const int16_t* in,
        const int16_t* fc_cur,
        int dstate,
        int gain_code,
        int subframe_size)
{
    int i;
    int16_t fc_new[SUBFRAME_SIZE];

    ff_celp_convolve_circ(fc_new, fc_cur, phase_filter[dstate], subframe_size);
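    /* Replace the fixed-codebook contribution in the excitation with the
       phase-dispersed one: out = in - gain_code*fc_cur + gain_code*fc_new. */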
    for(i=0; i<subframe_size; i++)
    {
        out[i]  = in[i];
        out[i] -= (gain_code * fc_cur[i] + 0x2000) >> 14;
        out[i] += (gain_code * fc_new[i] + 0x2000) >> 14;
    }
}
/**
 * Makes decision about onset in current subframe
 * @param past_onset decision result of previous subframe
 * @param past_gain_code gain code of current and previous subframe
 *
 * @return onset decision result for current subframe
 */
static int g729d_onset_decision(int past_onset, const int16_t* past_gain_code)
{
    if((past_gain_code[0] >> 1) > past_gain_code[1])
        return 2;
    else
        return FFMAX(past_onset-1, 0);
}
/**
 * Makes decision about voice presence in current subframe
 * @param onset onset level
 * @param prev_voice_decision voice decision result from previous subframe
 * @param past_gain_pitch pitch gain of current and previous subframes
 *
 * @return voice decision result for current subframe
 */
static int16_t g729d_voice_decision(int onset, int prev_voice_decision, const int16_t* past_gain_pitch)
{
    int i, low_gain_pitch_cnt, voice_decision;

    if(past_gain_pitch[0] >= 14745)       // 0.9
        voice_decision = DECISION_VOICE;
    else if (past_gain_pitch[0] <= 9830)  // 0.6
        voice_decision = DECISION_NOISE;
    else
        voice_decision = DECISION_INTERMEDIATE;
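    /* Override and hysteresis: more than two of the last six subframes with a
       pitch gain below 0.6 and no onset forces the noise decision; without an
       onset the decision may rise by at most one step per subframe, while an
       onset pushes it one step toward DECISION_VOICE. */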
    for(i=0, low_gain_pitch_cnt=0; i<6; i++)
        if(past_gain_pitch[i] < 9830)
            low_gain_pitch_cnt++;

    if(low_gain_pitch_cnt > 2 && !onset)
        voice_decision = DECISION_NOISE;

    if(!onset && voice_decision > prev_voice_decision + 1)
        voice_decision--;

    if(onset && voice_decision < DECISION_VOICE)
        voice_decision++;

    return voice_decision;
}
static av_cold int decoder_init(AVCodecContext * avctx)
{
    G729Context* ctx = avctx->priv_data;
    int i,k;

    if (avctx->channels != 1) {
        av_log(avctx, AV_LOG_ERROR, "Only mono sound is supported (requested channels: %d).\n", avctx->channels);
        return AVERROR(EINVAL);
    }

    /* Both the 8 kbit/s and the 6.4 kbit/s modes use two subframes per frame. */
    avctx->frame_size = SUBFRAME_SIZE << 1;

    ctx->gain_coeff = 16384; // 1.0 in (1.14)
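    /* Initialize the past quantizer outputs to equally spaced values,
       approximately i*pi/11 in (2.13) for i = 1..10. */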
    for (k = 0; k < MA_NP + 1; k++) {
        ctx->past_quantizer_outputs[k] = ctx->past_quantizer_output_buf[k];
        for (i = 1; i < 11; i++)
            ctx->past_quantizer_outputs[k][i - 1] = (18717 * i) >> 3;
    }

    ctx->lsp[0] = ctx->lsp_buf[0];
    ctx->lsp[1] = ctx->lsp_buf[1];
    memcpy(ctx->lsp[0], lsp_init, 10 * sizeof(int16_t));

    ctx->exc = &ctx->exc_base[PITCH_DELAY_MAX+INTERPOL_LEN];

    /* random seed initialization */
    ctx->rand_value = 21845;

    /* quantized prediction error */
    for(i=0; i<4; i++)
        ctx->quant_energy[i] = -14336; // -14 in (5.10)

    dsputil_init(&ctx->dsp, avctx);

    return 0;
}
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int16_t *out_frame = data;
    GetBitContext gb;
    G729FormatDescription format;
    int frame_erasure = 0;    ///< frame erasure detected during decoding
    int bad_pitch = 0;        ///< parity check failed
    int i;
    int16_t *tmp;
    G729Formats packet_type;
    G729Context *ctx = avctx->priv_data;
    int16_t lp[2][11];        // (3.12)
    uint8_t ma_predictor;     ///< switched MA predictor of LSP quantizer
    uint8_t quantizer_1st;    ///< first stage vector of quantizer
    uint8_t quantizer_2nd_lo; ///< second stage lower vector of quantizer
    uint8_t quantizer_2nd_hi; ///< second stage higher vector of quantizer

    int pitch_delay_int[2];      // pitch delay, integer part
    int pitch_delay_3x;          // pitch delay, multiplied by 3
    int16_t fc[SUBFRAME_SIZE];   // fixed-codebook vector
    int16_t synth[SUBFRAME_SIZE+10]; // synthesis buffer, with 10 previous samples for the LP filter
    int j;
    int gain_before, gain_after;
    int is_periodic = 0;         // whether one of the subframes is declared as periodic or not

    if (*data_size < SUBFRAME_SIZE << 2) {
        av_log(avctx, AV_LOG_ERROR, "Error processing packet: output buffer too small\n");
        return AVERROR(EIO);
    }
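    /* The packet type is determined by its size: a 10-byte (80-bit) packet is a
       regular G.729 frame at 8 kbit/s, an 8-byte (64-bit) packet is a G.729
       Annex D frame at 6.4 kbit/s. */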
    if (buf_size == 10) {
        packet_type = FORMAT_G729_8K;
        format = format_g729_8k;
        //Reset voice decision
        ctx->onset = 0;
        ctx->voice_decision = DECISION_VOICE;
        av_log(avctx, AV_LOG_DEBUG, "Packet type: %s\n", "G.729 @ 8kbit/s");
    } else if (buf_size == 8) {
        packet_type = FORMAT_G729D_6K4;
        format = format_g729d_6k4;
        av_log(avctx, AV_LOG_DEBUG, "Packet type: %s\n", "G.729D @ 6.4kbit/s");
    } else {
        av_log(avctx, AV_LOG_ERROR, "Packet size %d is unknown.\n", buf_size);
        return AVERROR_INVALIDDATA;
    }
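    /* An all-zero payload marks a frame erasure. */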
    for (i=0; i < buf_size; i++)
        frame_erasure |= buf[i];
    frame_erasure = !frame_erasure;

    init_get_bits(&gb, buf, buf_size);
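    /* L0..L3: switched MA predictor (1 bit) and the two-stage LSP VQ indices
       (first stage, second stage low and high halves). */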
    ma_predictor     = get_bits(&gb, 1);
    quantizer_1st    = get_bits(&gb, VQ_1ST_BITS);
    quantizer_2nd_lo = get_bits(&gb, VQ_2ND_BITS);
    quantizer_2nd_hi = get_bits(&gb, VQ_2ND_BITS);

    if(frame_erasure)
        lsf_restore_from_previous(ctx->lsfq, ctx->past_quantizer_outputs,
                                  ctx->ma_predictor_prev);
    else {
        lsf_decode(ctx->lsfq, ctx->past_quantizer_outputs,
                   ma_predictor,
                   quantizer_1st, quantizer_2nd_lo, quantizer_2nd_hi);
        ctx->ma_predictor_prev = ma_predictor;
    }
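    /* Rotate the quantizer output history: the entry just written at index
       MA_NP becomes index 0 (most recent). */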
    tmp = ctx->past_quantizer_outputs[MA_NP];
    memmove(ctx->past_quantizer_outputs + 1, ctx->past_quantizer_outputs,
            MA_NP * sizeof(int16_t*));
    ctx->past_quantizer_outputs[0] = tmp;

    ff_acelp_lsf2lsp(ctx->lsp[1], ctx->lsfq, 10);

    ff_acelp_lp_decode(&lp[0][0], &lp[1][0], ctx->lsp[1], ctx->lsp[0], 10);

    FFSWAP(int16_t*, ctx->lsp[1], ctx->lsp[0]);
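    /* lp[0] and lp[1] now hold the LP filter coefficients for the first and
       second subframe, interpolated from the previous and current LSPs. */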
    for (i = 0; i < 2; i++) {
        int gain_corr_factor;

        uint8_t ac_index;      ///< adaptive codebook index
        uint8_t pulses_signs;  ///< fixed-codebook vector pulse signs
        int fc_indexes;        ///< fixed-codebook indexes
        uint8_t gc_1st_index;  ///< gain codebook (first stage) index
        uint8_t gc_2nd_index;  ///< gain codebook (second stage) index

        ac_index      = get_bits(&gb, format.ac_index_bits[i]);
        if(!i && format.parity_bit)
            bad_pitch = get_parity(ac_index) == get_bits1(&gb);
        fc_indexes    = get_bits(&gb, format.fc_indexes_bits);
        pulses_signs  = get_bits(&gb, format.fc_signs_bits);
        gc_1st_index  = get_bits(&gb, format.gc_1st_index_bits);
        gc_2nd_index  = get_bits(&gb, format.gc_2nd_index_bits);
        if (frame_erasure)
            pitch_delay_3x = 3 * ctx->pitch_delay_int_prev;
        else if(!i) {
            if (bad_pitch)
                pitch_delay_3x = 3 * ctx->pitch_delay_int_prev;
            else
                pitch_delay_3x = ff_acelp_decode_8bit_to_1st_delay3(ac_index);
        } else {
            int pitch_delay_min = av_clip(ctx->pitch_delay_int_prev - 5,
                                          PITCH_DELAY_MIN, PITCH_DELAY_MAX - 9);

            if(packet_type == FORMAT_G729D_6K4)
                pitch_delay_3x = ff_acelp_decode_4bit_to_2nd_delay3(ac_index, pitch_delay_min);
            else
                pitch_delay_3x = ff_acelp_decode_5_6_bit_to_2nd_delay3(ac_index, pitch_delay_min);
        }

        /* Round pitch delay to nearest (used everywhere except ff_acelp_interpolate). */
        pitch_delay_int[i] = (pitch_delay_3x + 1) / 3;
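        /* On erasure, replace the fixed-codebook parameters with pseudo-random
           values (4.4.4). */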
        if (frame_erasure) {
            ctx->rand_value = g729_prng(ctx->rand_value);
            fc_indexes   = ctx->rand_value & ((1 << format.fc_indexes_bits) - 1);

            ctx->rand_value = g729_prng(ctx->rand_value);
            pulses_signs = ctx->rand_value;
        }

        memset(fc, 0, sizeof(int16_t) * SUBFRAME_SIZE);
        switch (packet_type) {
            case FORMAT_G729_8K:
                ff_acelp_fc_pulse_per_track(fc, ff_fc_4pulses_8bits_tracks_13,
                                            ff_fc_4pulses_8bits_track_4,
                                            fc_indexes, pulses_signs, 3, 3);
                break;
            case FORMAT_G729D_6K4:
                ff_acelp_fc_pulse_per_track(fc, ff_fc_2pulses_9bits_track1_gray,
                                            ff_fc_2pulses_9bits_track2_gray,
                                            fc_indexes, pulses_signs, 1, 4);
                break;
        }
        /*
          This filter enhances harmonic components of the fixed-codebook vector to
          improve the quality of the reconstructed speech.

                     / fc_v[i],                                    i <  pitch_delay
          fc_v[i] = <
                     \ fc_v[i] + gain_pitch * fc_v[i-pitch_delay], i >= pitch_delay
        */
        ff_acelp_weighted_vector_sum(fc + pitch_delay_int[i],
                                     fc + pitch_delay_int[i],
                                     fc, 1 << 14,
                                     av_clip(ctx->past_gain_pitch[0], SHARP_MIN, SHARP_MAX),
                                     0, 14,
                                     SUBFRAME_SIZE - pitch_delay_int[i]);

        memmove(ctx->past_gain_pitch+1, ctx->past_gain_pitch, 5 * sizeof(int16_t));
        ctx->past_gain_code[1] = ctx->past_gain_code[0];
        if (frame_erasure) {
            ctx->past_gain_pitch[0] = (29491 * ctx->past_gain_pitch[0]) >> 15; // 0.90 (0.15)
            ctx->past_gain_code[0]  = ( 2007 * ctx->past_gain_code[0] ) >> 11; // 0.98 (0.11)

            gain_corr_factor = 0;
        } else {
            if (packet_type == FORMAT_G729D_6K4) {
                ctx->past_gain_pitch[0] = cb_gain_1st_6k4[gc_1st_index][0] +
                                          cb_gain_2nd_6k4[gc_2nd_index][0];
                gain_corr_factor = cb_gain_1st_6k4[gc_1st_index][1] +
                                   cb_gain_2nd_6k4[gc_2nd_index][1];

                /* Without the check below an overflow can occur in
                   ff_acelp_update_past_gain. This is not an issue for G.729,
                   where gain_corr_factor is always greater than 1024, while in
                   G.729D it can even be zero. */
                gain_corr_factor = FFMAX(gain_corr_factor, 1024);
#ifndef G729_BITEXACT
                gain_corr_factor >>= 1;
#endif
            } else {
                ctx->past_gain_pitch[0] = cb_gain_1st_8k[gc_1st_index][0] +
                                          cb_gain_2nd_8k[gc_2nd_index][0];
                gain_corr_factor = cb_gain_1st_8k[gc_1st_index][1] +
                                   cb_gain_2nd_8k[gc_2nd_index][1];
            }

            /* Decode the fixed-codebook gain. */
            ctx->past_gain_code[0] = ff_acelp_decode_gain_code(&ctx->dsp, gain_corr_factor,
                                                               fc, MR_ENERGY,
                                                               ctx->quant_energy,
                                                               ma_prediction_coeff,
                                                               SUBFRAME_SIZE, 4);
#ifdef G729_BITEXACT
            /*
              This correction is required to get a bit-exact result with the
              reference code, because gain_corr_factor in G.729D is two times
              larger than in the original G.729.
              If a bit-exact result is not required, gain_corr_factor can simply
              be divided by 2 before the gain-code decoding call instead of
              applying the correction below.
            */
            if (packet_type == FORMAT_G729D_6K4) {
                gain_corr_factor >>= 1;
                ctx->past_gain_code[0] >>= 1;
            }
#endif
        }
        ff_acelp_update_past_gain(ctx->quant_energy, gain_corr_factor, 2, frame_erasure);

        /* Routine requires rounding to lowest. */
        ff_acelp_interpolate(ctx->exc + i * SUBFRAME_SIZE,
                             ctx->exc + i * SUBFRAME_SIZE - pitch_delay_3x / 3,
                             ff_acelp_interp_filter, 6,
                             (pitch_delay_3x % 3) << 1,
                             10, SUBFRAME_SIZE);
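        /* Total excitation: adaptive-codebook part scaled by the pitch gain plus
           fixed-codebook part scaled by the code gain. During an erasure one of
           the two gains is zeroed, depending on whether the last good frame was
           declared periodic (4.4). */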
        ff_acelp_weighted_vector_sum(ctx->exc + i * SUBFRAME_SIZE,
                                     ctx->exc + i * SUBFRAME_SIZE, fc,
                                     (!ctx->was_periodic && frame_erasure) ? 0 : ctx->past_gain_pitch[0],
                                     ( ctx->was_periodic && frame_erasure) ? 0 : ctx->past_gain_code[0],
                                     1 << 13, 14, SUBFRAME_SIZE);

        memcpy(synth, ctx->syn_filter_data, 10 * sizeof(int16_t));

        if (ff_celp_lp_synthesis_filter(
            synth+10,
            &lp[i][1],
            ctx->exc + i * SUBFRAME_SIZE,
            SUBFRAME_SIZE,
            10,
            1,
            0x800))
            /* Overflow occurred, downscale excitation signal... */
            for (j = 0; j < 2 * SUBFRAME_SIZE + PITCH_DELAY_MAX + INTERPOL_LEN; j++)
                ctx->exc_base[j] >>= 2;

        /* ... and run the synthesis again. */
        if (packet_type == FORMAT_G729D_6K4) {
            int16_t exc_new[SUBFRAME_SIZE];

            ctx->onset = g729d_onset_decision(ctx->onset, ctx->past_gain_code);
            ctx->voice_decision = g729d_voice_decision(ctx->onset, ctx->voice_decision, ctx->past_gain_pitch);
            g729d_get_new_exc(exc_new, ctx->exc + i * SUBFRAME_SIZE, fc, ctx->voice_decision, ctx->past_gain_code[0], SUBFRAME_SIZE);

            ff_celp_lp_synthesis_filter(
                    synth+10,
                    &lp[i][1],
                    exc_new,
                    SUBFRAME_SIZE,
                    10,
                    0,
                    0x800);
        } else {
            ff_celp_lp_synthesis_filter(
                    synth+10,
                    &lp[i][1],
                    ctx->exc + i * SUBFRAME_SIZE,
                    SUBFRAME_SIZE,
                    10,
                    0,
                    0x800);
        }

        /* Save data (without postfilter) for use in next subframe. */
        memcpy(ctx->syn_filter_data, synth+SUBFRAME_SIZE, 10 * sizeof(int16_t));

        /* Calculate gain of unfiltered signal for use in AGC. */
        gain_before = 0;
        for (j = 0; j < SUBFRAME_SIZE; j++)
            gain_before += FFABS(synth[j+10]);
        /* Call postfilter and also update voicing decision for use in next frame. */
        g729_postfilter(
                &ctx->dsp,
                &ctx->ht_prev_data,
                &is_periodic,
                &lp[i][0],
                pitch_delay_int[0],
                ctx->residual,
                ctx->res_filter_data,
                ctx->pos_filter_data,
                synth+10,
                SUBFRAME_SIZE);

        /* Calculate gain of filtered signal for use in AGC. */
        gain_after = 0;
        for(j=0; j<SUBFRAME_SIZE; j++)
            gain_after += FFABS(synth[j+10]);

        ctx->gain_coeff = g729_adaptive_gain_control(
                gain_before,
                gain_after,
                synth+10,
                SUBFRAME_SIZE,
                ctx->gain_coeff);

        if (frame_erasure)
            ctx->pitch_delay_int_prev = FFMIN(ctx->pitch_delay_int_prev + 1, PITCH_DELAY_MAX);
        else
            ctx->pitch_delay_int_prev = pitch_delay_int[i];
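        /* synth[8] and synth[9] hold the two input samples preceding this
           subframe for the high-pass filter; afterwards the last two samples of
           the current subframe are saved for the next one. */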
        memcpy(synth+8, ctx->hpf_z, 2*sizeof(int16_t));
        ff_acelp_high_pass_filter(
                out_frame + i*SUBFRAME_SIZE,
                ctx->hpf_f,
                synth+10,
                SUBFRAME_SIZE);
        memcpy(ctx->hpf_z, synth+8+SUBFRAME_SIZE, 2*sizeof(int16_t));
    }

    ctx->was_periodic = is_periodic;

    /* Save signal for use in next frame. */
    memmove(ctx->exc_base, ctx->exc_base + 2 * SUBFRAME_SIZE, (PITCH_DELAY_MAX+INTERPOL_LEN)*sizeof(int16_t));

    *data_size = SUBFRAME_SIZE << 2;
    return buf_size;
}
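/* Positional initializers: in this FFmpeg version the leading AVCodec fields
   are name, type, id, priv_data_size, init, encode, close and decode, so the
   two NULLs stand for the unused encode and close callbacks. */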
AVCodec ff_g729_decoder =
{
    "g729",
    AVMEDIA_TYPE_AUDIO,
    CODEC_ID_G729,
    sizeof(G729Context),
    decoder_init,
    NULL,
    NULL,
    decode_frame,
    .long_name = NULL_IF_CONFIG_SMALL("G.729"),
};