/*
 * G.729, G729 Annex D decoders
 * Copyright (c) 2008 Vladimir Voroshilov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <inttypes.h>
#include <string.h>

#include "avcodec.h"
#include "libavutil/avutil.h"
#include "get_bits.h"
#include "dsputil.h"

#include "g729.h"
#include "lsp.h"
#include "celp_math.h"
#include "celp_filters.h"
#include "acelp_filters.h"
#include "acelp_pitch_delay.h"
#include "acelp_vectors.h"
#include "g729data.h"
#include "g729postfilter.h"
/**
 * minimum quantized LSF value (3.2.4)
 * 0.005 in Q13
 */
#define LSFQ_MIN 40

/**
 * maximum quantized LSF value (3.2.4)
 * 3.135 in Q13
 */
#define LSFQ_MAX 25681

/**
 * minimum LSF distance (3.2.4)
 * 0.0391 in Q13
 */
#define LSFQ_DIFF_MIN 321

/// interpolation filter length
#define INTERPOL_LEN 11

/**
 * minimum gain pitch value (3.8, Equation 47)
 * 0.2 in (1.14)
 */
#define SHARP_MIN 3277
/**
 * maximum gain pitch value (3.8, Equation 47)
 * (EE) This does not comply with the specification.
 * The specification calls for 0.8, which would be
 * 13107 in (1.14), but the reference C code uses
 * 13017 (equal to 0.7945) instead.
 */
#define SHARP_MAX 13017
/**
 * MR_ENERGY (mean removed energy) = mean_energy + 10 * log10(2^26 * subframe_size) in (7.13)
 */
#define MR_ENERGY 1018156

#define DECISION_NOISE        0
#define DECISION_INTERMEDIATE 1
#define DECISION_VOICE        2
typedef enum {
    FORMAT_G729_8K = 0,
    FORMAT_G729D_6K4,
    FORMAT_COUNT,
} G729Formats;

typedef struct {
    uint8_t ac_index_bits[2];   ///< adaptive codebook index for second subframe (size in bits)
    uint8_t parity_bit;         ///< parity bit for pitch delay
    uint8_t gc_1st_index_bits;  ///< gain codebook (first stage) index (size in bits)
    uint8_t gc_2nd_index_bits;  ///< gain codebook (second stage) index (size in bits)
    uint8_t fc_signs_bits;      ///< number of pulses in fixed-codebook vector
    uint8_t fc_indexes_bits;    ///< size (in bits) of fixed-codebook index entry
} G729FormatDescription;
typedef struct {
    DSPContext dsp;

    /// past excitation signal buffer
    int16_t exc_base[2*SUBFRAME_SIZE+PITCH_DELAY_MAX+INTERPOL_LEN];

    int16_t* exc;               ///< start of past excitation data in buffer
    int pitch_delay_int_prev;   ///< integer part of previous subframe's pitch delay (4.1.3)

    /// (2.13) LSP quantizer outputs
    int16_t  past_quantizer_output_buf[MA_NP + 1][10];
    int16_t* past_quantizer_outputs[MA_NP + 1];

    int16_t lsfq[10];           ///< (2.13) quantized LSF coefficients from previous frame
    int16_t lsp_buf[2][10];     ///< (0.15) LSP coefficients (previous and current frames) (3.2.5)
    int16_t *lsp[2];            ///< pointers to lsp_buf

    int16_t quant_energy[4];    ///< (5.10) past quantized energy

    /// previous speech data for LP synthesis filter
    int16_t syn_filter_data[10];

    /// residual signal buffer (used in long-term postfilter)
    int16_t residual[SUBFRAME_SIZE + RES_PREV_DATA_SIZE];

    /// previous speech data for residual calculation filter
    int16_t res_filter_data[SUBFRAME_SIZE+10];

    /// previous speech data for short-term postfilter
    int16_t pos_filter_data[SUBFRAME_SIZE+10];

    /// (1.14) pitch gain of current and five previous subframes
    int16_t past_gain_pitch[6];

    /// (14.1) gain code from current and previous subframe
    int16_t past_gain_code[2];

    /// voice decision on previous subframe (0-noise, 1-intermediate, 2-voice), G.729D
    int16_t voice_decision;

    int16_t onset;              ///< detected onset level (0-2)
    int16_t was_periodic;       ///< whether previous frame was declared as periodic or not (4.4)
    int16_t ht_prev_data;       ///< previous data for 4.2.3, equation 86
    int gain_coeff;             ///< (1.14) gain coefficient (4.2.4)
    uint16_t rand_value;        ///< random number generator value (4.4.4)
    int ma_predictor_prev;      ///< switched MA predictor of LSP quantizer from last good frame
    /// (14.14) high-pass filter data (past output)
    int hpf_f[2];

    /// high-pass filter data (past input)
    int16_t hpf_z[2];
} G729Context;
static const G729FormatDescription format_g729_8k = {
    .ac_index_bits     = {8,5},
    .parity_bit        = 1,
    .gc_1st_index_bits = GC_1ST_IDX_BITS_8K,
    .gc_2nd_index_bits = GC_2ND_IDX_BITS_8K,
    .fc_signs_bits     = 4,
    .fc_indexes_bits   = 13,
};

static const G729FormatDescription format_g729d_6k4 = {
    .ac_index_bits     = {8,4},
    .parity_bit        = 0,
    .gc_1st_index_bits = GC_1ST_IDX_BITS_6K4,
    .gc_2nd_index_bits = GC_2ND_IDX_BITS_6K4,
    .fc_signs_bits     = 2,
    .fc_indexes_bits   = 9,
};
/**
 * @brief pseudo random number generator
 */
static inline uint16_t g729_prng(uint16_t value)
{
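    /* 16-bit linear congruential generator; the product wraps modulo 2^16
       through the uint16_t return type. */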
    return 31821 * value + 13849;
}
/**
 * Get the parity bit of bits 2..7
 */
static inline int get_parity(uint8_t value)
{
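    /* The 64-bit constant is a parity lookup table: bit n of the constant is
       the parity of n, so shifting by the six index bits 2..7 and masking
       yields their XOR directly. */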
    return (0x6996966996696996ULL >> (value >> 2)) & 1;
}
/**
 * Decodes LSF (Line Spectral Frequencies) from L0-L3 (3.2.4).
 * @param lsfq [out] (2.13) quantized LSF coefficients
 * @param past_quantizer_outputs [in/out] (2.13) quantizer outputs from previous frames
 * @param ma_predictor switched MA predictor of LSP quantizer
 * @param vq_1st first stage vector of quantizer
 * @param vq_2nd_low second stage lower vector of LSP quantizer
 * @param vq_2nd_high second stage higher vector of LSP quantizer
 */
static void lsf_decode(int16_t* lsfq, int16_t* past_quantizer_outputs[MA_NP + 1],
                       int16_t ma_predictor,
                       int16_t vq_1st, int16_t vq_2nd_low, int16_t vq_2nd_high)
{
    int i,j;
    static const uint8_t min_distance[2]={10, 5}; //(2.13)
    int16_t* quantizer_output = past_quantizer_outputs[MA_NP];

    for (i = 0; i < 5; i++) {
        quantizer_output[i]     = cb_lsp_1st[vq_1st][i    ] + cb_lsp_2nd[vq_2nd_low ][i    ];
        quantizer_output[i + 5] = cb_lsp_1st[vq_1st][i + 5] + cb_lsp_2nd[vq_2nd_high][i + 5];
    }
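    /* Rearrangement procedure (3.2.4): two passes push apart adjacent
       coefficients that lie closer together than the minimum distance. */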
    for (j = 0; j < 2; j++) {
        for (i = 1; i < 10; i++) {
            int diff = (quantizer_output[i - 1] - quantizer_output[i] + min_distance[j]) >> 1;
            if (diff > 0) {
                quantizer_output[i - 1] -= diff;
                quantizer_output[i    ] += diff;
            }
        }
    }
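    /* Switched MA prediction: combine the current quantizer output with the
       outputs of the MA_NP previous frames to obtain the quantized LSFs. */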
    for (i = 0; i < 10; i++) {
        int sum = quantizer_output[i] * cb_ma_predictor_sum[ma_predictor][i];
        for (j = 0; j < MA_NP; j++)
            sum += past_quantizer_outputs[j][i] * cb_ma_predictor[ma_predictor][j][i];
        lsfq[i] = sum >> 15;
    }

    ff_acelp_reorder_lsf(lsfq, LSFQ_DIFF_MIN, LSFQ_MIN, LSFQ_MAX, 10);
}
/**
 * Restores past LSP quantizer output using LSF from previous frame
 * @param lsfq [in/out] (2.13) quantized LSF coefficients
 * @param past_quantizer_outputs [in/out] (2.13) quantizer outputs from previous frames
 * @param ma_predictor_prev MA predictor from previous frame
 */
static void lsf_restore_from_previous(int16_t* lsfq,
                                      int16_t* past_quantizer_outputs[MA_NP + 1],
                                      int ma_predictor_prev)
{
    int16_t* quantizer_output = past_quantizer_outputs[MA_NP];
    int i,k;

    for (i = 0; i < 10; i++) {
        int tmp = lsfq[i] << 15;

        for (k = 0; k < MA_NP; k++)
            tmp -= past_quantizer_outputs[k][i] * cb_ma_predictor[ma_predictor_prev][k][i];

        quantizer_output[i] = ((tmp >> 15) * cb_ma_predictor_sum_inv[ma_predictor_prev][i]) >> 12;
    }
}
/**
 * Constructs new excitation signal and applies phase filter to it
 * @param out[out] constructed speech signal
 * @param in original excitation signal
 * @param fc_cur (2.13) original fixed-codebook vector
 * @param dstate phase filter state, selected by the voice decision
 * @param gain_code (14.1) gain code
 * @param subframe_size length of the subframe
 */
static void g729d_get_new_exc(
        int16_t* out,
        const int16_t* in,
        const int16_t* fc_cur,
        int dstate,
        int gain_code,
        int subframe_size)
{
    int i;
    int16_t fc_new[SUBFRAME_SIZE];

    ff_celp_convolve_circ(fc_new, fc_cur, phase_filter[dstate], subframe_size);
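    /* Replace the original fixed-codebook contribution in the excitation
       with the phase-dispersed one. */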
    for(i=0; i<subframe_size; i++)
    {
        out[i]  = in[i];
        out[i] -= (gain_code * fc_cur[i] + 0x2000) >> 14;
        out[i] += (gain_code * fc_new[i] + 0x2000) >> 14;
    }
}
/**
 * Makes decision about onset in current subframe
 * @param past_onset decision result of previous subframe
 * @param past_gain_code gain code of current and previous subframe
 *
 * @return onset decision result for current subframe
 */
static int g729d_onset_decision(int past_onset, const int16_t* past_gain_code)
{
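    /* An onset is declared when the fixed-codebook gain more than doubles
       from one subframe to the next; otherwise the onset level decays. */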
    if((past_gain_code[0] >> 1) > past_gain_code[1])
        return 2;
    else
        return FFMAX(past_onset-1, 0);
}
/**
 * Makes decision about voice presence in current subframe
 * @param onset onset level
 * @param prev_voice_decision voice decision result from previous subframe
 * @param past_gain_pitch pitch gain of current and previous subframes
 *
 * @return voice decision result for current subframe
 */
static int16_t g729d_voice_decision(int onset, int prev_voice_decision, const int16_t* past_gain_pitch)
{
    int i, low_gain_pitch_cnt, voice_decision;

    if(past_gain_pitch[0] >= 14745)       // 0.9
        voice_decision = DECISION_VOICE;
    else if (past_gain_pitch[0] <= 9830)  // 0.6
        voice_decision = DECISION_NOISE;
    else
        voice_decision = DECISION_INTERMEDIATE;

    for(i=0, low_gain_pitch_cnt=0; i<6; i++)
        if(past_gain_pitch[i] < 9830)
            low_gain_pitch_cnt++;

    if(low_gain_pitch_cnt > 2 && !onset)
        voice_decision = DECISION_NOISE;

    if(!onset && voice_decision > prev_voice_decision + 1)
        voice_decision--;

    if(onset && voice_decision < DECISION_VOICE)
        voice_decision++;

    return voice_decision;
}
static av_cold int decoder_init(AVCodecContext * avctx)
{
    G729Context* ctx = avctx->priv_data;
    int i,k;

    if (avctx->channels != 1) {
        av_log(avctx, AV_LOG_ERROR, "Only mono sound is supported (requested channels: %d).\n", avctx->channels);
        return AVERROR(EINVAL);
    }
    avctx->sample_fmt = AV_SAMPLE_FMT_S16;

    /* Both the 8 kbit/s and 6.4 kbit/s modes use two subframes per frame. */
    avctx->frame_size = SUBFRAME_SIZE << 1;

    ctx->gain_coeff = 16384; // 1.0 in (1.14)
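    /* Initialize the quantizer-output history with uniformly spaced LSFs,
       i * pi / 11 in (2.13). */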
    for (k = 0; k < MA_NP + 1; k++) {
        ctx->past_quantizer_outputs[k] = ctx->past_quantizer_output_buf[k];
        for (i = 1; i < 11; i++)
            ctx->past_quantizer_outputs[k][i - 1] = (18717 * i) >> 3;
    }

    ctx->lsp[0] = ctx->lsp_buf[0];
    ctx->lsp[1] = ctx->lsp_buf[1];
    memcpy(ctx->lsp[0], lsp_init, 10 * sizeof(int16_t));

    ctx->exc = &ctx->exc_base[PITCH_DELAY_MAX+INTERPOL_LEN];

    /* random seed initialization */
    ctx->rand_value = 21845;

    /* quantized prediction error */
    for(i=0; i<4; i++)
        ctx->quant_energy[i] = -14336; // -14 in (5.10)

    avctx->dsp_mask = ~AV_CPU_FLAG_FORCE;
    dsputil_init(&ctx->dsp, avctx);

    return 0;
}
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    int16_t *out_frame = data;
    GetBitContext gb;
    const G729FormatDescription *format;
    int frame_erasure = 0;    ///< frame erasure detected during decoding
    int bad_pitch = 0;        ///< parity check failed
    int i;
    int16_t *tmp;
    G729Formats packet_type;
    G729Context *ctx = avctx->priv_data;
    int16_t lp[2][11];           // (3.12)
    uint8_t ma_predictor;        ///< switched MA predictor of LSP quantizer
    uint8_t quantizer_1st;       ///< first stage vector of quantizer
    uint8_t quantizer_2nd_lo;    ///< second stage lower vector of quantizer
    uint8_t quantizer_2nd_hi;    ///< second stage higher vector of quantizer
    int pitch_delay_int[2];      // pitch delay, integer part
    int pitch_delay_3x;          // pitch delay, multiplied by 3
    int16_t fc[SUBFRAME_SIZE];   // fixed-codebook vector
    int16_t synth[SUBFRAME_SIZE+10]; // synthesis buffer, with 10 samples of LP filter memory in front
    int j;
    int gain_before, gain_after;
    int is_periodic = 0;         // whether one of the subframes is declared as periodic or not

    if (*data_size < SUBFRAME_SIZE << 2) {
        av_log(avctx, AV_LOG_ERROR, "Error processing packet: output buffer too small\n");
        return AVERROR(EIO);
    }
    if (buf_size == 10) {
        packet_type = FORMAT_G729_8K;
        format = &format_g729_8k;
        //Reset voice decision
        ctx->onset = 0;
        ctx->voice_decision = DECISION_VOICE;
        av_log(avctx, AV_LOG_DEBUG, "Packet type: %s\n", "G.729 @ 8kbit/s");
    } else if (buf_size == 8) {
        packet_type = FORMAT_G729D_6K4;
        format = &format_g729d_6k4;
        av_log(avctx, AV_LOG_DEBUG, "Packet type: %s\n", "G.729D @ 6.4kbit/s");
    } else {
        av_log(avctx, AV_LOG_ERROR, "Packet size %d is unknown.\n", buf_size);
        return AVERROR_INVALIDDATA;
    }
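    /* A payload consisting entirely of zero bytes is treated as a frame
       erasure, and the lost parameters are reconstructed from past frames. */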
    for (i=0; i < buf_size; i++)
        frame_erasure |= buf[i];
    frame_erasure = !frame_erasure;

    init_get_bits(&gb, buf, buf_size);

    ma_predictor     = get_bits(&gb, 1);
    quantizer_1st    = get_bits(&gb, VQ_1ST_BITS);
    quantizer_2nd_lo = get_bits(&gb, VQ_2ND_BITS);
    quantizer_2nd_hi = get_bits(&gb, VQ_2ND_BITS);

    if(frame_erasure)
        lsf_restore_from_previous(ctx->lsfq, ctx->past_quantizer_outputs,
                                  ctx->ma_predictor_prev);
    else {
        lsf_decode(ctx->lsfq, ctx->past_quantizer_outputs,
                   ma_predictor,
                   quantizer_1st, quantizer_2nd_lo, quantizer_2nd_hi);
        ctx->ma_predictor_prev = ma_predictor;
    }
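    /* Rotate the history of quantizer outputs: the current output becomes the
       most recent entry and the oldest buffer is reused for the next frame. */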
    tmp = ctx->past_quantizer_outputs[MA_NP];
    memmove(ctx->past_quantizer_outputs + 1, ctx->past_quantizer_outputs,
            MA_NP * sizeof(int16_t*));
    ctx->past_quantizer_outputs[0] = tmp;

    ff_acelp_lsf2lsp(ctx->lsp[1], ctx->lsfq, 10);

    ff_acelp_lp_decode(&lp[0][0], &lp[1][0], ctx->lsp[1], ctx->lsp[0], 10);

    FFSWAP(int16_t*, ctx->lsp[1], ctx->lsp[0]);
    for (i = 0; i < 2; i++) {
        int gain_corr_factor;

        uint8_t ac_index;      ///< adaptive codebook index
        uint8_t pulses_signs;  ///< fixed-codebook vector pulse signs
        int fc_indexes;        ///< fixed-codebook indexes
        uint8_t gc_1st_index;  ///< gain codebook (first stage) index
        uint8_t gc_2nd_index;  ///< gain codebook (second stage) index

        ac_index      = get_bits(&gb, format->ac_index_bits[i]);
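        /* For the first subframe of the 8 kbit/s format, verify the parity
           bit of the pitch-delay index; a failed check marks the pitch as bad. */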
        if(!i && format->parity_bit)
            bad_pitch = get_parity(ac_index) == get_bits1(&gb);
        fc_indexes   = get_bits(&gb, format->fc_indexes_bits);
        pulses_signs = get_bits(&gb, format->fc_signs_bits);
        gc_1st_index = get_bits(&gb, format->gc_1st_index_bits);
        gc_2nd_index = get_bits(&gb, format->gc_2nd_index_bits);

        if (frame_erasure)
            pitch_delay_3x = 3 * ctx->pitch_delay_int_prev;
        else if(!i) {
            if (bad_pitch)
                pitch_delay_3x = 3 * ctx->pitch_delay_int_prev;
            else
                pitch_delay_3x = ff_acelp_decode_8bit_to_1st_delay3(ac_index);
        } else {
            int pitch_delay_min = av_clip(ctx->pitch_delay_int_prev - 5,
                                          PITCH_DELAY_MIN, PITCH_DELAY_MAX - 9);

            if(packet_type == FORMAT_G729D_6K4)
                pitch_delay_3x = ff_acelp_decode_4bit_to_2nd_delay3(ac_index, pitch_delay_min);
            else
                pitch_delay_3x = ff_acelp_decode_5_6_bit_to_2nd_delay3(ac_index, pitch_delay_min);
        }

        /* Round pitch delay to nearest (used everywhere except ff_acelp_interpolate). */
        pitch_delay_int[i] = (pitch_delay_3x + 1) / 3;

        if (frame_erasure) {
            ctx->rand_value = g729_prng(ctx->rand_value);
            fc_indexes = ctx->rand_value & ((1 << format->fc_indexes_bits) - 1);

            ctx->rand_value = g729_prng(ctx->rand_value);
            pulses_signs = ctx->rand_value;
        }
        memset(fc, 0, sizeof(int16_t) * SUBFRAME_SIZE);
        switch (packet_type) {
        case FORMAT_G729_8K:
            ff_acelp_fc_pulse_per_track(fc, ff_fc_4pulses_8bits_tracks_13,
                                        ff_fc_4pulses_8bits_track_4,
                                        fc_indexes, pulses_signs, 3, 3);
            break;
        case FORMAT_G729D_6K4:
            ff_acelp_fc_pulse_per_track(fc, ff_fc_2pulses_9bits_track1_gray,
                                        ff_fc_2pulses_9bits_track2_gray,
                                        fc_indexes, pulses_signs, 1, 4);
            break;
        }
        /*
        This filter enhances harmonic components of the fixed-codebook vector to
        improve the quality of the reconstructed speech.

                     / fc_v[i],                                    i <  pitch_delay
        fc_v[i] = <
                     \ fc_v[i] + gain_pitch * fc_v[i-pitch_delay], i >= pitch_delay
        */
        ff_acelp_weighted_vector_sum(fc + pitch_delay_int[i],
                                     fc + pitch_delay_int[i],
                                     fc, 1 << 14,
                                     av_clip(ctx->past_gain_pitch[0], SHARP_MIN, SHARP_MAX),
                                     0, 14,
                                     SUBFRAME_SIZE - pitch_delay_int[i]);
        memmove(ctx->past_gain_pitch+1, ctx->past_gain_pitch, 5 * sizeof(int16_t));
        ctx->past_gain_code[1] = ctx->past_gain_code[0];

        if (frame_erasure) {
            ctx->past_gain_pitch[0] = (29491 * ctx->past_gain_pitch[0]) >> 15; // 0.90 (0.15)
            ctx->past_gain_code[0]  = ( 2007 * ctx->past_gain_code[0] ) >> 11; // 0.98 (0.11)

            gain_corr_factor = 0;
        } else {
            if (packet_type == FORMAT_G729D_6K4) {
                ctx->past_gain_pitch[0]  = cb_gain_1st_6k4[gc_1st_index][0] +
                                           cb_gain_2nd_6k4[gc_2nd_index][0];
                gain_corr_factor = cb_gain_1st_6k4[gc_1st_index][1] +
                                   cb_gain_2nd_6k4[gc_2nd_index][1];
                /* Without the check below an overflow can occur in
                   ff_acelp_update_past_gain. It is not an issue for G.729,
                   because gain_corr_factor in that case is always greater
                   than 1024, while in G.729D it can even be zero. */
                gain_corr_factor = FFMAX(gain_corr_factor, 1024);
#ifndef G729_BITEXACT
                gain_corr_factor >>= 1;
#endif
            } else {
                ctx->past_gain_pitch[0]  = cb_gain_1st_8k[gc_1st_index][0] +
                                           cb_gain_2nd_8k[gc_2nd_index][0];
                gain_corr_factor = cb_gain_1st_8k[gc_1st_index][1] +
                                   cb_gain_2nd_8k[gc_2nd_index][1];
            }

            /* Decode the fixed-codebook gain. */
            ctx->past_gain_code[0] = ff_acelp_decode_gain_code(&ctx->dsp, gain_corr_factor,
                                                               fc, MR_ENERGY,
                                                               ctx->quant_energy,
                                                               ma_prediction_coeff,
                                                               SUBFRAME_SIZE, 4);
#ifdef G729_BITEXACT
            /*
              This correction is required to get a bit-exact result with the
              reference code, because gain_corr_factor in G.729D is two times
              larger than in the original G.729.
              If a bit-exact result is not an issue, then gain_corr_factor
              can simply be divided by 2 before the call to g729_get_gain_code
              instead of using the correction below.
            */
            if (packet_type == FORMAT_G729D_6K4) {
                gain_corr_factor >>= 1;
                ctx->past_gain_code[0] >>= 1;
            }
#endif
        }

        ff_acelp_update_past_gain(ctx->quant_energy, gain_corr_factor, 2, frame_erasure);
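        /* Adaptive-codebook contribution: interpolate the past excitation at
           the fractional pitch delay (pitch_delay_3x is in 1/3 sample resolution). */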
        /* Routine requires rounding to lowest. */
        ff_acelp_interpolate(ctx->exc + i * SUBFRAME_SIZE,
                             ctx->exc + i * SUBFRAME_SIZE - pitch_delay_3x / 3,
                             ff_acelp_interp_filter, 6,
                             (pitch_delay_3x % 3) << 1,
                             10, SUBFRAME_SIZE);
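        /* Total excitation: gain_pitch * adaptive-codebook vector plus
           gain_code * fixed-codebook vector; on frame erasure one of the gains
           is zeroed depending on whether the previous frame was periodic (4.4). */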
        ff_acelp_weighted_vector_sum(ctx->exc + i * SUBFRAME_SIZE,
                                     ctx->exc + i * SUBFRAME_SIZE, fc,
                                     (!ctx->was_periodic && frame_erasure) ? 0 : ctx->past_gain_pitch[0],
                                     ( ctx->was_periodic && frame_erasure) ? 0 : ctx->past_gain_code[0],
                                     1 << 13, 14, SUBFRAME_SIZE);
        memcpy(synth, ctx->syn_filter_data, 10 * sizeof(int16_t));

        if (ff_celp_lp_synthesis_filter(
            synth+10,
            &lp[i][1],
            ctx->exc + i * SUBFRAME_SIZE,
            SUBFRAME_SIZE,
            10,
            1,
            0x800))
            /* Overflow occurred, downscale excitation signal... */
            for (j = 0; j < 2 * SUBFRAME_SIZE + PITCH_DELAY_MAX + INTERPOL_LEN; j++)
                ctx->exc_base[j] >>= 2;

        /* ... and make synthesis again. */
        if (packet_type == FORMAT_G729D_6K4) {
            int16_t exc_new[SUBFRAME_SIZE];

            ctx->onset = g729d_onset_decision(ctx->onset, ctx->past_gain_code);
            ctx->voice_decision = g729d_voice_decision(ctx->onset, ctx->voice_decision, ctx->past_gain_pitch);

            g729d_get_new_exc(exc_new, ctx->exc + i * SUBFRAME_SIZE, fc, ctx->voice_decision, ctx->past_gain_code[0], SUBFRAME_SIZE);

            ff_celp_lp_synthesis_filter(
                    synth+10,
                    &lp[i][1],
                    exc_new,
                    SUBFRAME_SIZE,
                    10,
                    0,
                    0x800);
        } else {
            ff_celp_lp_synthesis_filter(
                    synth+10,
                    &lp[i][1],
                    ctx->exc + i * SUBFRAME_SIZE,
                    SUBFRAME_SIZE,
                    10,
                    0,
                    0x800);
        }
        /* Save data (without postfilter) for use in next subframe. */
        memcpy(ctx->syn_filter_data, synth+SUBFRAME_SIZE, 10 * sizeof(int16_t));

        /* Calculate gain of unfiltered signal for use in AGC. */
        gain_before = 0;
        for (j = 0; j < SUBFRAME_SIZE; j++)
            gain_before += FFABS(synth[j+10]);

        /* Call postfilter and also update voicing decision for use in next frame. */
        ff_g729_postfilter(
                &ctx->dsp,
                &ctx->ht_prev_data,
                &is_periodic,
                &lp[i][0],
                pitch_delay_int[0],
                ctx->residual,
                ctx->res_filter_data,
                ctx->pos_filter_data,
                synth+10,
                SUBFRAME_SIZE);

        /* Calculate gain of filtered signal for use in AGC. */
        gain_after = 0;
        for(j=0; j<SUBFRAME_SIZE; j++)
            gain_after += FFABS(synth[j+10]);

        ctx->gain_coeff = ff_g729_adaptive_gain_control(
                gain_before,
                gain_after,
                synth+10,
                SUBFRAME_SIZE,
                ctx->gain_coeff);
        if (frame_erasure)
            ctx->pitch_delay_int_prev = FFMIN(ctx->pitch_delay_int_prev + 1, PITCH_DELAY_MAX);
        else
            ctx->pitch_delay_int_prev = pitch_delay_int[i];
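        /* Restore the last two postfiltered samples of the previous subframe
           in front of the current data so the high-pass filter has its input
           history available. */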
        memcpy(synth+8, ctx->hpf_z, 2*sizeof(int16_t));
        ff_acelp_high_pass_filter(
                out_frame + i*SUBFRAME_SIZE,
                ctx->hpf_f,
                synth+10,
                SUBFRAME_SIZE);
        memcpy(ctx->hpf_z, synth+8+SUBFRAME_SIZE, 2*sizeof(int16_t));
    }

    ctx->was_periodic = is_periodic;

    /* Save signal for use in next frame. */
    memmove(ctx->exc_base, ctx->exc_base + 2 * SUBFRAME_SIZE, (PITCH_DELAY_MAX+INTERPOL_LEN)*sizeof(int16_t));

    *data_size = SUBFRAME_SIZE << 2;
    return buf_size;
}
AVCodec ff_g729_decoder =
{
    "g729",
    AVMEDIA_TYPE_AUDIO,
    CODEC_ID_G729,
    sizeof(G729Context),
    decoder_init,
    NULL,
    NULL,
    decode_frame,
    .long_name = NULL_IF_CONFIG_SMALL("G.729"),
};