/*
 * Copyright (c) 2012 Andrew D'Addesio
 * Copyright (c) 2013-2014 Mozilla Corporation
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Opus SILK decoder
 */

#include <stdint.h>

#include "opus.h"
#include "opustab.h"

typedef struct SilkFrame {
    int coded;
    int log_gain;
    int16_t nlsf[16];
    float lpc[16];

    float output     [2 * SILK_HISTORY];
    float lpc_history[2 * SILK_HISTORY];
    int primarylag;

    int prev_voiced;
} SilkFrame;

struct SilkContext {
    AVCodecContext *avctx;
    int output_channels;

    int midonly;
    int subframes;
    int sflength;
    int flength;
    int nlsf_interp_factor;

    enum OpusBandwidth bandwidth;
    int wb;

    SilkFrame frame[2];
    float prev_stereo_weights[2];
    float stereo_weights[2];

    int prev_coded_channels;
};
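/**
 * Enforce the minimum spacing between consecutive normalized LSF coefficients.
 * Up to 20 gentle correction passes are attempted; if the spacing constraints
 * still cannot be met, the values are sorted and clamped against the
 * min_delta table as a fall-back.
 */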
static inline void silk_stabilize_lsf(int16_t nlsf[16], int order, const uint16_t min_delta[17])
{
    int pass, i;
    for (pass = 0; pass < 20; pass++) {
        int k, min_diff = 0;
        for (i = 0; i < order+1; i++) {
            int low  = i != 0     ? nlsf[i-1] : 0;
            int high = i != order ? nlsf[i]   : 32768;
            int diff = (high - low) - (min_delta[i]);

            if (diff < min_diff) {
                min_diff = diff;
                k = i;

                if (pass == 20)
                    break;
            }
        }

        if (min_diff == 0) /* no issues; stabilized */
            return;

        /* wiggle one or two LSFs */
        if (k == 0) {
            /* repel away from lower bound */
            nlsf[0] = min_delta[0];
        } else if (k == order) {
            /* repel away from higher bound */
            nlsf[order-1] = 32768 - min_delta[order];
        } else {
            /* repel away from current position */
            int min_center = 0, max_center = 32768, center_val;

            /* lower extent */
            for (i = 0; i < k; i++)
                min_center += min_delta[i];
            min_center += min_delta[k] >> 1;

            /* upper extent */
            for (i = order; i > k; i--)
                max_center -= min_delta[i];
            max_center -= min_delta[k] >> 1;

            /* move apart */
            center_val = nlsf[k - 1] + nlsf[k];
            center_val = (center_val >> 1) + (center_val & 1); // rounded divide by 2
            center_val = FFMIN(max_center, FFMAX(min_center, center_val));

            nlsf[k - 1] = center_val - (min_delta[k] >> 1);
            nlsf[k]     = nlsf[k - 1] + min_delta[k];
        }
    }

    /* resort to the fall-back method, the standard method for LSF stabilization */

    /* sort; as the LSFs should be nearly sorted, use insertion sort */
    for (i = 1; i < order; i++) {
        int j, value = nlsf[i];
        for (j = i - 1; j >= 0 && nlsf[j] > value; j--)
            nlsf[j + 1] = nlsf[j];
        nlsf[j + 1] = value;
    }

    /* push forwards to increase distance */
    if (nlsf[0] < min_delta[0])
        nlsf[0] = min_delta[0];
    for (i = 1; i < order; i++)
        nlsf[i] = FFMAX(nlsf[i], FFMIN(nlsf[i - 1] + min_delta[i], 32767));

    /* push backwards to increase distance */
    if (nlsf[order-1] > 32768 - min_delta[order])
        nlsf[order-1] = 32768 - min_delta[order];
    for (i = order-2; i >= 0; i--)
        if (nlsf[i] > nlsf[i + 1] - min_delta[i+1])
            nlsf[i] = nlsf[i + 1] - min_delta[i+1];

    return;
}
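/**
 * Check the stability of a candidate Q12 LPC filter by running the inverse
 * Levinson recursion in fixed point and bounding the inverse prediction gain.
 * Returns 1 if the filter is considered stable, 0 otherwise.
 */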
static inline int silk_is_lpc_stable(const int16_t lpc[16], int order)
{
    int k, j, DC_resp = 0;
    int32_t lpc32[2][16];       // Q24
    int totalinvgain = 1 << 30; // 1.0 in Q30
    int32_t *row = lpc32[0], *prevrow;

    /* initialize the first row for the Levinson recursion */
    for (k = 0; k < order; k++) {
        DC_resp += lpc[k];
        row[k] = lpc[k] * 4096;
    }

    if (DC_resp >= 4096)
        return 0;

    /* check if prediction gain pushes any coefficients too far */
    for (k = order - 1; 1; k--) {
        int rc;      // Q31; reflection coefficient
        int gaindiv; // Q30; inverse of the gain (the divisor)
        int gain;    // gain for this reflection coefficient
        int fbits;   // fractional bits used for the gain
        int error;   // Q29; estimate of the error of our partial estimate of 1/gaindiv

        if (FFABS(row[k]) > 16773022)
            return 0;

        rc      = -(row[k] * 128);
        gaindiv = (1 << 30) - MULH(rc, rc);

        totalinvgain = MULH(totalinvgain, gaindiv) << 2;
        if (k == 0)
            return (totalinvgain >= 107374);

        /* approximate 1.0/gaindiv */
        fbits = opus_ilog(gaindiv);
        gain  = ((1 << 29) - 1) / (gaindiv >> (fbits + 1 - 16)); // Q<fbits-16>
        error = (1 << 29) - MULL(gaindiv << (15 + 16 - fbits), gain, 16);
        gain  = ((gain << 16) + (error * gain >> 13));

        /* switch to the next row of the LPC coefficients */
        prevrow = row;
        row = lpc32[k & 1];

        for (j = 0; j < k; j++) {
            int x = prevrow[j] - ROUND_MULL(prevrow[k - j - 1], rc, 31);
            row[j] = ROUND_MULL(x, gain, fbits);
        }
    }
}
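/**
 * Expand a set of LSP values (Q17, 2*cos(LSF)) into the coefficients of one
 * of the two symmetric polynomials (Q16) used to reconstruct the LPC filter.
 */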
static void silk_lsp2poly(const int32_t lsp[16], int32_t pol[16], int half_order)
{
    int i, j;

    pol[0] = 65536; // 1.0 in Q16
    pol[1] = -lsp[0];

    for (i = 1; i < half_order; i++) {
        pol[i + 1] = pol[i - 1] * 2 - ROUND_MULL(lsp[2 * i], pol[i], 16);
        for (j = i; j > 1; j--)
            pol[j] += pol[j - 2] - ROUND_MULL(lsp[2 * i], pol[j - 1], 16);

        pol[1] -= lsp[2 * i];
    }
}
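/**
 * Convert normalized LSF coefficients to floating-point LPC coefficients:
 * look up and interpolate the cosine table, rebuild A(z) from the P/Q
 * polynomials, then apply bandwidth expansion until the coefficients fit
 * into 16 bits and the resulting filter is stable.
 */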
static void silk_lsf2lpc(const int16_t nlsf[16], float lpcf[16], int order)
{
    int i, k;
    int32_t lsp[16];     // Q17; 2*cos(LSF)
    int32_t p[9], q[9];  // Q16
    int32_t lpc32[16];   // Q17
    int16_t lpc[16];     // Q12

    /* convert the LSFs to LSPs, i.e. 2*cos(LSF) */
    for (k = 0; k < order; k++) {
        int index = nlsf[k] >> 8;
        int offset = nlsf[k] & 255;
        int k2 = (order == 10) ? ff_silk_lsf_ordering_nbmb[k] : ff_silk_lsf_ordering_wb[k];

        /* interpolate and round */
        lsp[k2]  = ff_silk_cosine[index] * 256;
        lsp[k2] += (ff_silk_cosine[index + 1] - ff_silk_cosine[index]) * offset;
        lsp[k2]  = (lsp[k2] + 4) >> 3;
    }

    silk_lsp2poly(lsp    , p, order >> 1);
    silk_lsp2poly(lsp + 1, q, order >> 1);

    /* reconstruct A(z) */
    for (k = 0; k < order>>1; k++) {
        int32_t p_tmp = p[k + 1] + p[k];
        int32_t q_tmp = q[k + 1] - q[k];
        lpc32[k]         = -q_tmp - p_tmp;
        lpc32[order-k-1] =  q_tmp - p_tmp;
    }

    /* limit the range of the LPC coefficients to each fit within an int16_t */
    for (i = 0; i < 10; i++) {
        int j;
        unsigned int maxabs = 0;
        for (j = 0, k = 0; j < order; j++) {
            unsigned int x = FFABS(lpc32[k]);
            if (x > maxabs) {
                maxabs = x; // Q17
                k      = j;
            }
        }

        maxabs = (maxabs + 16) >> 5; // convert to Q12

        if (maxabs > 32767) {
            /* perform bandwidth expansion */
            unsigned int chirp, chirp_base; // Q16
            maxabs = FFMIN(maxabs, 163838); // anything above this overflows chirp's numerator
            chirp_base = chirp = 65470 - ((maxabs - 32767) << 14) / ((maxabs * (k+1)) >> 2);

            for (k = 0; k < order; k++) {
                lpc32[k] = ROUND_MULL(lpc32[k], chirp, 16);
                chirp    = (chirp_base * chirp + 32768) >> 16;
            }
        } else break;
    }

    if (i == 10) {
        /* time's up: just clamp */
        for (k = 0; k < order; k++) {
            int x = (lpc32[k] + 16) >> 5;
            lpc[k] = av_clip_int16(x);
            lpc32[k] = lpc[k] << 5; // shortcut mandated by the spec; drops lower 5 bits
        }
    } else {
        for (k = 0; k < order; k++)
            lpc[k] = (lpc32[k] + 16) >> 5;
    }

    /* if the prediction gain causes the LPC filter to become unstable,
       apply further bandwidth expansion on the Q17 coefficients */
    for (i = 1; i <= 16 && !silk_is_lpc_stable(lpc, order); i++) {
        unsigned int chirp, chirp_base;
        chirp_base = chirp = 65536 - (1 << i);

        for (k = 0; k < order; k++) {
            lpc32[k] = ROUND_MULL(lpc32[k], chirp, 16);
            lpc[k]   = (lpc32[k] + 16) >> 5;
            chirp    = (chirp_base * chirp + 32768) >> 16;
        }
    }

    for (i = 0; i < order; i++)
        lpcf[i] = lpc[i] / 4096.0f;
}
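/**
 * Decode the two-stage LSF indices for one frame, undo the backwards
 * prediction, reconstruct and stabilize the NLSF vector and convert it to
 * LPC coefficients. When 4 subframes are coded, an interpolated "leadin"
 * LPC set may also be produced for the first two subframes
 * (has_lpc_leadin is set accordingly).
 */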
static inline void silk_decode_lpc(SilkContext *s, SilkFrame *frame,
                                   OpusRangeCoder *rc,
                                   float lpc_leadin[16], float lpc[16],
                                   int *lpc_order, int *has_lpc_leadin, int voiced)
{
    int i;
    int order;                   // order of the LP polynomial; 10 for NB/MB and 16 for WB
    int8_t lsf_i1, lsf_i2[16];   // stage-1 and stage-2 codebook indices
    int16_t lsf_res[16];         // residual as a Q10 value
    int16_t nlsf[16];            // Q15

    *lpc_order = order = s->wb ? 16 : 10;

    /* obtain LSF stage-1 and stage-2 indices */
    lsf_i1 = ff_opus_rc_dec_cdf(rc, ff_silk_model_lsf_s1[s->wb][voiced]);
    for (i = 0; i < order; i++) {
        int index = s->wb ? ff_silk_lsf_s2_model_sel_wb  [lsf_i1][i] :
                            ff_silk_lsf_s2_model_sel_nbmb[lsf_i1][i];
        lsf_i2[i] = ff_opus_rc_dec_cdf(rc, ff_silk_model_lsf_s2[index]) - 4;
        if (lsf_i2[i] == -4)
            lsf_i2[i] -= ff_opus_rc_dec_cdf(rc, ff_silk_model_lsf_s2_ext);
        else if (lsf_i2[i] == 4)
            lsf_i2[i] += ff_opus_rc_dec_cdf(rc, ff_silk_model_lsf_s2_ext);
    }

    /* reverse the backwards-prediction step */
    for (i = order - 1; i >= 0; i--) {
        int qstep = s->wb ? 9830 : 11796;

        lsf_res[i] = lsf_i2[i] * 1024;
        if (lsf_i2[i] < 0)      lsf_res[i] += 102;
        else if (lsf_i2[i] > 0) lsf_res[i] -= 102;
        lsf_res[i] = (lsf_res[i] * qstep) >> 16;

        if (i + 1 < order) {
            int weight = s->wb ? ff_silk_lsf_pred_weights_wb  [ff_silk_lsf_weight_sel_wb  [lsf_i1][i]][i] :
                                 ff_silk_lsf_pred_weights_nbmb[ff_silk_lsf_weight_sel_nbmb[lsf_i1][i]][i];
            lsf_res[i] += (lsf_res[i+1] * weight) >> 8;
        }
    }

    /* reconstruct the NLSF coefficients from the supplied indices */
    for (i = 0; i < order; i++) {
        const uint8_t * codebook = s->wb ? ff_silk_lsf_codebook_wb  [lsf_i1] :
                                           ff_silk_lsf_codebook_nbmb[lsf_i1];
        int cur, prev, next, weight_sq, weight, ipart, fpart, y, value;

        /* find the weight of the residual */
        /* TODO: precompute */
        cur = codebook[i];
        prev = i ? codebook[i - 1] : 0;
        next = i + 1 < order ? codebook[i + 1] : 256;
        weight_sq = (1024 / (cur - prev) + 1024 / (next - cur)) << 16;

        /* approximate square-root with mandated fixed-point arithmetic */
        ipart = opus_ilog(weight_sq);
        fpart = (weight_sq >> (ipart-8)) & 127;
        y = ((ipart & 1) ? 32768 : 46214) >> ((32 - ipart)>>1);
        weight = y + ((213 * fpart * y) >> 16);

        value = cur * 128 + (lsf_res[i] * 16384) / weight;
        nlsf[i] = av_clip_uintp2(value, 15);
    }

    /* stabilize the NLSF coefficients */
    silk_stabilize_lsf(nlsf, order, s->wb ? ff_silk_lsf_min_spacing_wb :
                                            ff_silk_lsf_min_spacing_nbmb);

    /* produce an interpolation for the first 2 subframes, */
    /* and then convert both sets of NLSFs to LPC coefficients */
    *has_lpc_leadin = 0;
    if (s->subframes == 4) {
        int offset = ff_opus_rc_dec_cdf(rc, ff_silk_model_lsf_interpolation_offset);

        if (offset != 4 && frame->coded) {
            *has_lpc_leadin = 1;

            if (offset != 0) {
                int16_t nlsf_leadin[16];
                for (i = 0; i < order; i++)
                    nlsf_leadin[i] = frame->nlsf[i] +
                        ((nlsf[i] - frame->nlsf[i]) * offset >> 2);
                silk_lsf2lpc(nlsf_leadin, lpc_leadin, order);
            } else  /* avoid re-computation for a (roughly) 1-in-4 occurrence */
                memcpy(lpc_leadin, frame->lpc, 16 * sizeof(float));
        } else
            offset = 4;
        s->nlsf_interp_factor = offset;

        silk_lsf2lpc(nlsf, lpc, order);
    } else {
        s->nlsf_interp_factor = 4;
        silk_lsf2lpc(nlsf, lpc, order);
    }

    memcpy(frame->nlsf, nlsf, order * sizeof(nlsf[0]));
    memcpy(frame->lpc,  lpc,  order * sizeof(lpc[0]));
}
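/**
 * Split a shell block's pulse total between the two halves of the current
 * partition, decoding the split from the pulse-location model.
 */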
static inline void silk_count_children(OpusRangeCoder *rc, int model, int32_t total,
                                       int32_t child[2])
{
    if (total != 0) {
        child[0] = ff_opus_rc_dec_cdf(rc,
                       ff_silk_model_pulse_location[model] + (((total - 1 + 5) * (total - 1)) >> 1));
        child[1] = total - child[0];
    } else {
        child[0] = 0;
        child[1] = 0;
    }
}
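/**
 * Decode the excitation signal for an entire frame: LCG seed, per-shell-block
 * pulse counts, pulse locations, extra LSBs and signs; then apply the
 * quantization offset and pseudorandom sign inversion, writing the result as
 * floats scaled down from Q23.
 */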
static inline void silk_decode_excitation(SilkContext *s, OpusRangeCoder *rc,
                                          float* excitationf,
                                          int qoffset_high, int active, int voiced)
{
    int i;
    uint32_t seed;
    int shellblocks;
    int ratelevel;
    uint8_t pulsecount[20];     // total pulses in each shell block
    uint8_t lsbcount[20] = {0}; // raw lsbits defined for each pulse in each shell block
    int32_t excitation[320];    // Q23

    /* excitation parameters */
    seed = ff_opus_rc_dec_cdf(rc, ff_silk_model_lcg_seed);
    shellblocks = ff_silk_shell_blocks[s->bandwidth][s->subframes >> 2];
    ratelevel = ff_opus_rc_dec_cdf(rc, ff_silk_model_exc_rate[voiced]);

    for (i = 0; i < shellblocks; i++) {
        pulsecount[i] = ff_opus_rc_dec_cdf(rc, ff_silk_model_pulse_count[ratelevel]);
        if (pulsecount[i] == 17) {
            while (pulsecount[i] == 17 && ++lsbcount[i] != 10)
                pulsecount[i] = ff_opus_rc_dec_cdf(rc, ff_silk_model_pulse_count[9]);
            if (lsbcount[i] == 10)
                pulsecount[i] = ff_opus_rc_dec_cdf(rc, ff_silk_model_pulse_count[10]);
        }
    }

    /* decode pulse locations using PVQ */
    for (i = 0; i < shellblocks; i++) {
        if (pulsecount[i] != 0) {
            int a, b, c, d;
            int32_t * location = excitation + 16*i;
            int32_t branch[4][2];
            branch[0][0] = pulsecount[i];

            /* unrolled tail recursion */
            for (a = 0; a < 1; a++) {
                silk_count_children(rc, 0, branch[0][a], branch[1]);
                for (b = 0; b < 2; b++) {
                    silk_count_children(rc, 1, branch[1][b], branch[2]);
                    for (c = 0; c < 2; c++) {
                        silk_count_children(rc, 2, branch[2][c], branch[3]);
                        for (d = 0; d < 2; d++) {
                            silk_count_children(rc, 3, branch[3][d], location);
                            location += 2;
                        }
                    }
                }
            }
        } else
            memset(excitation + 16*i, 0, 16*sizeof(int32_t));
    }

    /* decode least significant bits */
    for (i = 0; i < shellblocks << 4; i++) {
        int bit;
        for (bit = 0; bit < lsbcount[i >> 4]; bit++)
            excitation[i] = (excitation[i] << 1) |
                            ff_opus_rc_dec_cdf(rc, ff_silk_model_excitation_lsb);
    }

    /* decode signs */
    for (i = 0; i < shellblocks << 4; i++) {
        if (excitation[i] != 0) {
            int sign = ff_opus_rc_dec_cdf(rc, ff_silk_model_excitation_sign[active +
                                          voiced][qoffset_high][FFMIN(pulsecount[i >> 4], 6)]);
            if (sign == 0)
                excitation[i] *= -1;
        }
    }

    /* assemble the excitation */
    for (i = 0; i < shellblocks << 4; i++) {
        int value = excitation[i];
        excitation[i] = value * 256 | ff_silk_quant_offset[voiced][qoffset_high];
        if (value < 0)      excitation[i] += 20;
        else if (value > 0) excitation[i] -= 20;

        /* invert samples pseudorandomly */
        seed = 196314165 * seed + 907633515;
        if (seed & 0x80000000)
            excitation[i] *= -1;
        seed += value;

        excitationf[i] = excitation[i] / 8388608.0f;
    }
}
/** Maximum residual history according to 4.2.7.6.1 */
#define SILK_MAX_LAG   (288 + LTP_ORDER / 2)

/** Order of the LTP filter */
#define LTP_ORDER 5
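/**
 * Decode a single SILK frame for one channel: stereo weights and mid-only
 * flag (mid channel of a stereo stream only), frame type, subframe gains,
 * LPC coefficients, pitch lags and LTP filter for voiced frames, and the
 * excitation, followed by LTP and LPC synthesis into the output history.
 */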
static void silk_decode_frame(SilkContext *s, OpusRangeCoder *rc,
                              int frame_num, int channel, int coded_channels,
                              int active, int active1)
{
    /* per frame */
    int voiced;       // combines with active to indicate inactive, active, or active+voiced
    int qoffset_high;
    int order;        // order of the LPC coefficients
    float lpc_leadin[16], lpc_body[16], residual[SILK_MAX_LAG + SILK_HISTORY];
    int has_lpc_leadin;
    float ltpscale;

    /* per subframe */
    struct {
        float gain;
        int pitchlag;
        float ltptaps[5];
    } sf[4];

    SilkFrame * const frame = s->frame + channel;

    int i;

    /* obtain stereo weights */
    if (coded_channels == 2 && channel == 0) {
        int n, wi[2], ws[2], w[2];
        n     = ff_opus_rc_dec_cdf(rc, ff_silk_model_stereo_s1);
        wi[0] = ff_opus_rc_dec_cdf(rc, ff_silk_model_stereo_s2) + 3 * (n / 5);
        ws[0] = ff_opus_rc_dec_cdf(rc, ff_silk_model_stereo_s3);
        wi[1] = ff_opus_rc_dec_cdf(rc, ff_silk_model_stereo_s2) + 3 * (n % 5);
        ws[1] = ff_opus_rc_dec_cdf(rc, ff_silk_model_stereo_s3);

        for (i = 0; i < 2; i++)
            w[i] = ff_silk_stereo_weights[wi[i]] +
                   (((ff_silk_stereo_weights[wi[i] + 1] - ff_silk_stereo_weights[wi[i]]) * 6554) >> 16)
                    * (ws[i]*2 + 1);

        s->stereo_weights[0] = (w[0] - w[1]) / 8192.0;
        s->stereo_weights[1] = w[1]          / 8192.0;

        /* and read the mid-only flag */
        s->midonly = active1 ? 0 : ff_opus_rc_dec_cdf(rc, ff_silk_model_mid_only);
    }

    /* obtain frame type */
    if (!active) {
        qoffset_high = ff_opus_rc_dec_cdf(rc, ff_silk_model_frame_type_inactive);
        voiced = 0;
    } else {
        int type = ff_opus_rc_dec_cdf(rc, ff_silk_model_frame_type_active);
        qoffset_high = type & 1;
        voiced = type >> 1;
    }

    /* obtain subframe quantization gains */
    for (i = 0; i < s->subframes; i++) {
        int log_gain;     //Q7
        int ipart, fpart, lingain;

        if (i == 0 && (frame_num == 0 || !frame->coded)) {
            /* gain is coded absolute */
            int x = ff_opus_rc_dec_cdf(rc, ff_silk_model_gain_highbits[active + voiced]);
            log_gain = (x<<3) | ff_opus_rc_dec_cdf(rc, ff_silk_model_gain_lowbits);

            if (frame->coded)
                log_gain = FFMAX(log_gain, frame->log_gain - 16);
        } else {
            /* gain is coded relative */
            int delta_gain = ff_opus_rc_dec_cdf(rc, ff_silk_model_gain_delta);
            log_gain = av_clip_uintp2(FFMAX((delta_gain<<1) - 16,
                                     frame->log_gain + delta_gain - 4), 6);
        }

        frame->log_gain = log_gain;

        /* approximate 2**(x/128) with a Q7 (i.e. non-integer) input */
        log_gain = (log_gain * 0x1D1C71 >> 16) + 2090;
        ipart = log_gain >> 7;
        fpart = log_gain & 127;
        lingain = (1 << ipart) + ((-174 * fpart * (128-fpart) >>16) + fpart) * ((1<<ipart) >> 7);
        sf[i].gain = lingain / 65536.0f;
    }

    /* obtain LPC filter coefficients */
    silk_decode_lpc(s, frame, rc, lpc_leadin, lpc_body, &order, &has_lpc_leadin, voiced);

    /* obtain pitch lags, if this is a voiced frame */
    if (voiced) {
        int lag_absolute = (!frame_num || !frame->prev_voiced);
        int primarylag;         // primary pitch lag for the entire SILK frame
        int ltpfilter;
        const int8_t * offsets;

        if (!lag_absolute) {
            int delta = ff_opus_rc_dec_cdf(rc, ff_silk_model_pitch_delta);
            if (delta)
                primarylag = frame->primarylag + delta - 9;
            else
                lag_absolute = 1;
        }

        if (lag_absolute) {
            /* primary lag is coded absolute */
            int highbits, lowbits;
            static const uint16_t * const model[] = {
                ff_silk_model_pitch_lowbits_nb, ff_silk_model_pitch_lowbits_mb,
                ff_silk_model_pitch_lowbits_wb
            };
            highbits = ff_opus_rc_dec_cdf(rc, ff_silk_model_pitch_highbits);
            lowbits  = ff_opus_rc_dec_cdf(rc, model[s->bandwidth]);

            primarylag = ff_silk_pitch_min_lag[s->bandwidth] +
                         highbits*ff_silk_pitch_scale[s->bandwidth] + lowbits;
        }
        frame->primarylag = primarylag;

        if (s->subframes == 2)
            offsets = (s->bandwidth == OPUS_BANDWIDTH_NARROWBAND)
                     ? ff_silk_pitch_offset_nb10ms[ff_opus_rc_dec_cdf(rc,
                                                   ff_silk_model_pitch_contour_nb10ms)]
                     : ff_silk_pitch_offset_mbwb10ms[ff_opus_rc_dec_cdf(rc,
                                                   ff_silk_model_pitch_contour_mbwb10ms)];
        else
            offsets = (s->bandwidth == OPUS_BANDWIDTH_NARROWBAND)
                     ? ff_silk_pitch_offset_nb20ms[ff_opus_rc_dec_cdf(rc,
                                                   ff_silk_model_pitch_contour_nb20ms)]
                     : ff_silk_pitch_offset_mbwb20ms[ff_opus_rc_dec_cdf(rc,
                                                   ff_silk_model_pitch_contour_mbwb20ms)];

        for (i = 0; i < s->subframes; i++)
            sf[i].pitchlag = av_clip(primarylag + offsets[i],
                                     ff_silk_pitch_min_lag[s->bandwidth],
                                     ff_silk_pitch_max_lag[s->bandwidth]);

        /* obtain LTP filter coefficients */
        ltpfilter = ff_opus_rc_dec_cdf(rc, ff_silk_model_ltp_filter);
        for (i = 0; i < s->subframes; i++) {
            int index, j;
            static const uint16_t * const filter_sel[] = {
                ff_silk_model_ltp_filter0_sel, ff_silk_model_ltp_filter1_sel,
                ff_silk_model_ltp_filter2_sel
            };
            static const int8_t (* const filter_taps[])[5] = {
                ff_silk_ltp_filter0_taps, ff_silk_ltp_filter1_taps, ff_silk_ltp_filter2_taps
            };
            index = ff_opus_rc_dec_cdf(rc, filter_sel[ltpfilter]);
            for (j = 0; j < 5; j++)
                sf[i].ltptaps[j] = filter_taps[ltpfilter][index][j] / 128.0f;
        }
    }

    /* obtain LTP scale factor */
    if (voiced && frame_num == 0)
        ltpscale = ff_silk_ltp_scale_factor[ff_opus_rc_dec_cdf(rc,
                                            ff_silk_model_ltp_scale_index)] / 16384.0f;
    else ltpscale = 15565.0f/16384.0f;

    /* generate the excitation signal for the entire frame */
    silk_decode_excitation(s, rc, residual + SILK_MAX_LAG, qoffset_high,
                           active, voiced);

    /* skip synthesising the side channel if we want mono-only */
    if (s->output_channels == channel)
        return;

    /* generate the output signal */
    for (i = 0; i < s->subframes; i++) {
        const float * lpc_coeff = (i < 2 && has_lpc_leadin) ? lpc_leadin : lpc_body;
        float *dst    = frame->output      + SILK_HISTORY + i * s->sflength;
        float *resptr = residual           + SILK_MAX_LAG + i * s->sflength;
        float *lpc    = frame->lpc_history + SILK_HISTORY + i * s->sflength;
        float sum;
        int j, k;

        if (voiced) {
            int out_end;
            float scale;

            if (i < 2 || s->nlsf_interp_factor == 4) {
                out_end = -i * s->sflength;
                scale   = ltpscale;
            } else {
                out_end = -(i - 2) * s->sflength;
                scale   = 1.0f;
            }

            /* when the LPC coefficients change, a re-whitening filter is used */
            /* to produce a residual that accounts for the change */
            for (j = - sf[i].pitchlag - LTP_ORDER/2; j < out_end; j++) {
                sum = dst[j];
                for (k = 0; k < order; k++)
                    sum -= lpc_coeff[k] * dst[j - k - 1];
                resptr[j] = av_clipf(sum, -1.0f, 1.0f) * scale / sf[i].gain;
            }

            if (out_end) {
                float rescale = sf[i-1].gain / sf[i].gain;
                for (j = out_end; j < 0; j++)
                    resptr[j] *= rescale;
            }

            /* LTP synthesis */
            for (j = 0; j < s->sflength; j++) {
                sum = resptr[j];
                for (k = 0; k < LTP_ORDER; k++)
                    sum += sf[i].ltptaps[k] * resptr[j - sf[i].pitchlag + LTP_ORDER/2 - k];
                resptr[j] = sum;
            }
        }

        /* LPC synthesis */
        for (j = 0; j < s->sflength; j++) {
            sum = resptr[j] * sf[i].gain;
            for (k = 1; k <= order; k++)
                sum += lpc_coeff[k - 1] * lpc[j - k];

            lpc[j] = sum;
            dst[j] = av_clipf(sum, -1.0f, 1.0f);
        }
    }

    frame->prev_voiced = voiced;
    memmove(frame->lpc_history, frame->lpc_history + s->flength, SILK_HISTORY * sizeof(float));
    memmove(frame->output,      frame->output      + s->flength, SILK_HISTORY * sizeof(float));

    frame->coded = 1;
}
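/**
 * Convert the decoded mid/side pair to left/right output, interpolating the
 * stereo prediction weights over the first few samples of the frame.
 */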
static void silk_unmix_ms(SilkContext *s, float *l, float *r)
{
    float *mid    = s->frame[0].output + SILK_HISTORY - s->flength;
    float *side   = s->frame[1].output + SILK_HISTORY - s->flength;
    float w0_prev = s->prev_stereo_weights[0];
    float w1_prev = s->prev_stereo_weights[1];
    float w0      = s->stereo_weights[0];
    float w1      = s->stereo_weights[1];
    int n1        = ff_silk_stereo_interp_len[s->bandwidth];
    int i;

    for (i = 0; i < n1; i++) {
        float interp0 = w0_prev + i * (w0 - w0_prev) / n1;
        float interp1 = w1_prev + i * (w1 - w1_prev) / n1;
        float p0 = 0.25 * (mid[i - 2] + 2 * mid[i - 1] + mid[i]);

        l[i] = av_clipf((1 + interp1) * mid[i - 1] + side[i - 1] + interp0 * p0, -1.0, 1.0);
        r[i] = av_clipf((1 - interp1) * mid[i - 1] - side[i - 1] - interp0 * p0, -1.0, 1.0);
    }

    for (; i < s->flength; i++) {
        float p0 = 0.25 * (mid[i - 2] + 2 * mid[i - 1] + mid[i]);

        l[i] = av_clipf((1 + w1) * mid[i - 1] + side[i - 1] + w0 * p0, -1.0, 1.0);
        r[i] = av_clipf((1 - w1) * mid[i - 1] - side[i - 1] - w0 * p0, -1.0, 1.0);
    }

    memcpy(s->prev_stereo_weights, s->stereo_weights, sizeof(s->stereo_weights));
}
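/* Reset the inter-frame state of one channel: history buffers, LPC/NLSF
   memory, gain and pitch state. */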
static void silk_flush_frame(SilkFrame *frame)
{
    if (!frame->coded)
        return;

    memset(frame->output,      0, sizeof(frame->output));
    memset(frame->lpc_history, 0, sizeof(frame->lpc_history));

    memset(frame->lpc,  0, sizeof(frame->lpc));
    memset(frame->nlsf, 0, sizeof(frame->nlsf));

    frame->log_gain = 0;

    frame->primarylag  = 0;
    frame->prev_voiced = 0;
    frame->coded       = 0;
}
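/**
 * Decode a SILK superframe (1 to 3 consecutive frames per coded channel).
 * Returns the number of decoded samples per channel, or a negative error code.
 */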
int ff_silk_decode_superframe(SilkContext *s, OpusRangeCoder *rc,
                              float *output[2],
                              enum OpusBandwidth bandwidth,
                              int coded_channels,
                              int duration_ms)
{
    int active[2][6], redundancy[2];
    int nb_frames, i, j;

    if (bandwidth > OPUS_BANDWIDTH_WIDEBAND ||
        coded_channels > 2 || duration_ms > 60) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid parameters passed "
               "to the SILK decoder.\n");
        return AVERROR(EINVAL);
    }

    nb_frames = 1 + (duration_ms > 20) + (duration_ms > 40);
    s->subframes = duration_ms / nb_frames / 5;         // 5ms subframes
    s->sflength  = 20 * (bandwidth + 2);
    s->flength   = s->sflength * s->subframes;
    s->bandwidth = bandwidth;
    s->wb        = bandwidth == OPUS_BANDWIDTH_WIDEBAND;

    /* make sure to flush the side channel when switching from mono to stereo */
    if (coded_channels > s->prev_coded_channels)
        silk_flush_frame(&s->frame[1]);
    s->prev_coded_channels = coded_channels;

    /* read the LP-layer header bits */
    for (i = 0; i < coded_channels; i++) {
        for (j = 0; j < nb_frames; j++)
            active[i][j] = ff_opus_rc_dec_log(rc, 1);

        redundancy[i] = ff_opus_rc_dec_log(rc, 1);
        if (redundancy[i]) {
            avpriv_report_missing_feature(s->avctx, "LBRR frames");
            return AVERROR_PATCHWELCOME;
        }
    }

    for (i = 0; i < nb_frames; i++) {
        for (j = 0; j < coded_channels && !s->midonly; j++)
            silk_decode_frame(s, rc, i, j, coded_channels, active[j][i], active[1][i]);

        /* reset the side channel if it is not coded */
        if (s->midonly && s->frame[1].coded)
            silk_flush_frame(&s->frame[1]);

        if (coded_channels == 1 || s->output_channels == 1) {
            for (j = 0; j < s->output_channels; j++) {
                memcpy(output[j] + i * s->flength,
                       s->frame[0].output + SILK_HISTORY - s->flength - 2,
                       s->flength * sizeof(float));
            }
        } else {
            silk_unmix_ms(s, output[0] + i * s->flength, output[1] + i * s->flength);
        }

        s->midonly = 0;
    }

    return nb_frames * s->flength;
}
void ff_silk_free(SilkContext **ps)
{
    av_freep(ps);
}

void ff_silk_flush(SilkContext *s)
{
    silk_flush_frame(&s->frame[0]);
    silk_flush_frame(&s->frame[1]);

    memset(s->prev_stereo_weights, 0, sizeof(s->prev_stereo_weights));
}

int ff_silk_init(AVCodecContext *avctx, SilkContext **ps, int output_channels)
{
    SilkContext *s;

    if (output_channels != 1 && output_channels != 2) {
        av_log(avctx, AV_LOG_ERROR, "Invalid number of output channels: %d\n",
               output_channels);
        return AVERROR(EINVAL);
    }

    s = av_mallocz(sizeof(*s));
    if (!s)
        return AVERROR(ENOMEM);

    s->avctx           = avctx;
    s->output_channels = output_channels;

    ff_silk_flush(s);

    *ps = s;

    return 0;
}