/*
 * Copyright (c) 2007-2008 CSIRO
 * Copyright (c) 2007-2009 Xiph.Org Foundation
 * Copyright (c) 2008-2009 Gregory Maxwell
 * Copyright (c) 2012 Andrew D'Addesio
 * Copyright (c) 2013-2014 Mozilla Corporation
 * Copyright (c) 2017 Rostislav Pehlivanov <atomnuker@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h>                 /* FLT_EPSILON */

#include "libavutil/mathematics.h" /* M_PI, M_SQRT1_2 fallbacks */

#include "opustab.h"
#include "opus_pvq.h"

#define CELT_PVQ_U(n, k) (ff_celt_pvq_u_row[FFMIN(n, k)][FFMAX(n, k)])
#define CELT_PVQ_V(n, k) (CELT_PVQ_U(n, k) + CELT_PVQ_U(n, (k) + 1))
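
/* Fixed-point helpers for the mid/side angle (theta) split below. celt_cos()
 * is, roughly, a Q15 polynomial approximation of cos() for an angle given in
 * the 0..16384 range (about 0..pi/2), and celt_log2tan() approximates
 * log2(isin/icos) in Q11; both feed the mid/side bit-allocation delta. */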
static inline int16_t celt_cos(int16_t x)
{
    x = (MUL16(x, x) + 4096) >> 13;
    x = (32767-x) + ROUND_MUL16(x, (-7651 + ROUND_MUL16(x, (8277 + ROUND_MUL16(-626, x)))));
    return x + 1;
}

static inline int celt_log2tan(int isin, int icos)
{
    int lc, ls;
    lc = opus_ilog(icos);
    ls = opus_ilog(isin);
    icos <<= 15 - lc;
    isin <<= 15 - ls;
    return (ls << 11) - (lc << 11) +
           ROUND_MUL16(isin, ROUND_MUL16(isin, -2597) + 7932) -
           ROUND_MUL16(icos, ROUND_MUL16(icos, -2597) + 7932);
}
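
/* The per-band "cache" table maps a pulse count to its cost in 1/8 bit units.
 * celt_bits2pulses() binary-searches that table for the pulse count whose cost
 * is closest to the given bit budget; celt_pulses2bits() is the inverse lookup. */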
static inline int celt_bits2pulses(const uint8_t *cache, int bits)
{
    // TODO: Find the size of cache and make it into an array in the parameters list
    int i, low = 0, high;
    high = cache[0];
    bits--;
    for (i = 0; i < 6; i++) {
        int center = (low + high + 1) >> 1;
        if (cache[center] >= bits)
            high = center;
        else
            low = center;
    }
    return (bits - (low == 0 ? -1 : cache[low]) <= cache[high] - bits) ? low : high;
}

static inline int celt_pulses2bits(const uint8_t *cache, int pulses)
{
    // TODO: Find the size of cache and make it into an array in the parameters list
    return (pulses == 0) ? 0 : cache[pulses] + 1;
}

static inline void celt_normalize_residual(const int * av_restrict iy, float * av_restrict X,
                                           int N, float g)
{
    int i;
    for (i = 0; i < N; i++)
        X[i] = g * iy[i];
}
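
/* Spreading rotation: before the PVQ search the encoder rotates the band so
 * that quantization pulses do not all line up with individual bins (which
 * tends to sound tonal); the decoder applies the inverse rotation afterwards.
 * The rotation angle depends on the spread setting, the band length and the
 * number of pulses K. */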
static void celt_exp_rotation_impl(float *X, uint32_t len, uint32_t stride,
                                   float c, float s)
{
    float *Xptr;
    int i;
    Xptr = X;
    for (i = 0; i < len - stride; i++) {
        float x1 = Xptr[0];
        float x2 = Xptr[stride];
        Xptr[stride] = c * x2 + s * x1;
        *Xptr++      = c * x1 - s * x2;
    }
    Xptr = &X[len - 2 * stride - 1];
    for (i = len - 2 * stride - 1; i >= 0; i--) {
        float x1 = Xptr[0];
        float x2 = Xptr[stride];
        Xptr[stride] = c * x2 + s * x1;
        *Xptr--      = c * x1 - s * x2;
    }
}

static inline void celt_exp_rotation(float *X, uint32_t len,
                                     uint32_t stride, uint32_t K,
                                     enum CeltSpread spread, const int encode)
{
    uint32_t stride2 = 0;
    float c, s;
    float gain, theta;
    int i;

    if (2*K >= len || spread == CELT_SPREAD_NONE)
        return;

    gain = (float)len / (len + (20 - 5*spread) * K);
    theta = M_PI * gain * gain / 4;

    c = cosf(theta);
    s = sinf(theta);

    if (len >= stride << 3) {
        stride2 = 1;
        /* This is just a simple (equivalent) way of computing sqrt(len/stride) with rounding.
           It's basically incrementing stride2 as long as (stride2+0.5)^2 < len/stride. */
        while ((stride2 * stride2 + stride2) * stride + (stride >> 2) < len)
            stride2++;
    }

    len /= stride;
    for (i = 0; i < stride; i++) {
        if (encode) {
            celt_exp_rotation_impl(X + i * len, len, 1, c, -s);
            if (stride2)
                celt_exp_rotation_impl(X + i * len, len, stride2, s, -c);
        } else {
            if (stride2)
                celt_exp_rotation_impl(X + i * len, len, stride2, s, c);
            celt_exp_rotation_impl(X + i * len, len, 1, c, s);
        }
    }
}

static inline uint32_t celt_extract_collapse_mask(const int *iy, uint32_t N, uint32_t B)
{
    int i, j, N0 = N / B;
    uint32_t collapse_mask = 0;
    if (B <= 1)
        return 1;
    for (i = 0; i < B; i++)
        for (j = 0; j < N0; j++)
            collapse_mask |= (!!iy[i*N0+j]) << i;
    return collapse_mask;
}
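
/* Rebuilds the two output channels from the coded mid (X) and side (Y)
 * signals and renormalizes them; if either combined channel ends up with
 * (near-)zero energy, Y is simply set equal to X instead. */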
static inline void celt_stereo_merge(float *X, float *Y, float mid, int N)
{
    int i;
    float xp = 0, side = 0;
    float E[2];
    float mid2;
    float gain[2];

    /* Compute the norm of X+Y and X-Y as |X|^2 + |Y|^2 +/- sum(xy) */
    for (i = 0; i < N; i++) {
        xp   += X[i] * Y[i];
        side += Y[i] * Y[i];
    }

    /* Compensating for the mid normalization */
    xp *= mid;
    mid2 = mid;
    E[0] = mid2 * mid2 + side - 2 * xp;
    E[1] = mid2 * mid2 + side + 2 * xp;
    if (E[0] < 6e-4f || E[1] < 6e-4f) {
        for (i = 0; i < N; i++)
            Y[i] = X[i];
        return;
    }

    gain[0] = 1.0f / sqrtf(E[0]);
    gain[1] = 1.0f / sqrtf(E[1]);

    for (i = 0; i < N; i++) {
        float value[2];
        /* Apply mid scaling (side is already scaled) */
        value[0] = mid * X[i];
        value[1] = Y[i];
        X[i] = gain[0] * (value[0] - value[1]);
        Y[i] = gain[1] * (value[0] + value[1]);
    }
}

static void celt_interleave_hadamard(float *tmp, float *X, int N0,
                                     int stride, int hadamard)
{
    int i, j, N = N0*stride;
    const uint8_t *order = &ff_celt_hadamard_order[hadamard ? stride - 2 : 30];
    for (i = 0; i < stride; i++)
        for (j = 0; j < N0; j++)
            tmp[j*stride+i] = X[order[i]*N0+j];
    memcpy(X, tmp, N*sizeof(float));
}

static void celt_deinterleave_hadamard(float *tmp, float *X, int N0,
                                       int stride, int hadamard)
{
    int i, j, N = N0*stride;
    const uint8_t *order = &ff_celt_hadamard_order[hadamard ? stride - 2 : 30];
    for (i = 0; i < stride; i++)
        for (j = 0; j < N0; j++)
            tmp[order[i]*N0+j] = X[j*stride+i];
    memcpy(X, tmp, N*sizeof(float));
}
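
/* One level of a Haar transform: each pair of samples (taken at the given
 * stride) is replaced by its scaled sum and difference. Used to trade time
 * resolution against frequency resolution within a band. */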
static void celt_haar1(float *X, int N0, int stride)
{
    int i, j;
    N0 >>= 1;
    for (i = 0; i < stride; i++) {
        for (j = 0; j < N0; j++) {
            float x0 = X[stride * (2 * j + 0) + i];
            float x1 = X[stride * (2 * j + 1) + i];
            X[stride * (2 * j + 0) + i] = (x0 + x1) * M_SQRT1_2;
            X[stride * (2 * j + 1) + i] = (x0 - x1) * M_SQRT1_2;
        }
    }
}
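
/* Number of quantization levels available for the theta (mid/side) angle of a
 * split, derived from the remaining bit budget b (in 1/8 bit units), the band
 * size and the pulse cap. Returns 1 when essentially no bits can be spared. */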
static inline int celt_compute_qn(int N, int b, int offset, int pulse_cap,
                                  int stereo)
{
    int qn, qb;
    int N2 = 2 * N - 1;
    if (stereo && N == 2)
        N2--;

    /* The upper limit ensures that in a stereo split with itheta==16384, we'll
     * always have enough bits left over to code at least one pulse in the
     * side; otherwise it would collapse, since it doesn't get folded. */
    qb = FFMIN3(b - pulse_cap - (4 << 3), (b + N2 * offset) / N2, 8 << 3);
    qn = (qb < (1 << 3 >> 1)) ? 1 : ((ff_celt_qn_exp2[qb & 0x7] >> (14 - (qb >> 3))) + 1) >> 1 << 1;
    return qn;
}

/* Convert the quantized vector to an index */
static inline uint32_t celt_icwrsi(uint32_t N, uint32_t K, const int *y)
{
    int i, idx = 0, sum = 0;
    for (i = N - 1; i >= 0; i--) {
        const uint32_t i_s = CELT_PVQ_U(N - i, sum + FFABS(y[i]) + 1);
        idx += CELT_PVQ_U(N - i, sum) + (y[i] < 0)*i_s;
        sum += FFABS(y[i]);
    }
    return idx;
}
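
/* Inverse of celt_icwrsi(): expands a CWRS index i back into the pulse vector
 * y of N integers whose absolute values sum to K, and returns the squared
 * norm of y (used by the caller to derive the normalization gain). */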
// this code was adapted from libopus
static inline uint64_t celt_cwrsi(uint32_t N, uint32_t K, uint32_t i, int *y)
{
    uint64_t norm = 0;
    uint32_t q, p;
    int s, val;
    int k0;

    while (N > 2) {
        /*Lots of pulses case:*/
        if (K >= N) {
            const uint32_t *row = ff_celt_pvq_u_row[N];

            /* Are the pulses in this dimension negative? */
            p  = row[K + 1];
            s  = -(i >= p);
            i -= p & s;

            /*Count how many pulses were placed in this dimension.*/
            k0 = K;
            q = row[N];
            if (q > i) {
                K = N;
                do {
                    p = ff_celt_pvq_u_row[--K][N];
                } while (p > i);
            } else
                for (p = row[K]; p > i; p = row[K])
                    K--;

            i    -= p;
            val   = (k0 - K + s) ^ s;
            norm += val * val;
            *y++  = val;
        } else { /*Lots of dimensions case:*/
            /*Are there any pulses in this dimension at all?*/
            p = ff_celt_pvq_u_row[K    ][N];
            q = ff_celt_pvq_u_row[K + 1][N];

            if (p <= i && i < q) {
                i -= p;
                *y++ = 0;
            } else {
                /*Are the pulses in this dimension negative?*/
                s  = -(i >= q);
                i -= q & s;

                /*Count how many pulses were placed in this dimension.*/
                k0 = K;
                do p = ff_celt_pvq_u_row[--K][N];
                while (p > i);

                i    -= p;
                val   = (k0 - K + s) ^ s;
                norm += val * val;
                *y++  = val;
            }
        }
        N--;
    }

    /* N == 2 */
    p  = 2 * K + 1;
    s  = -(i >= p);
    i -= p & s;
    k0 = K;
    K  = (i + 1) / 2;

    if (K)
        i -= 2 * K - 1;

    val   = (k0 - K + s) ^ s;
    norm += val * val;
    *y++  = val;

    /* N==1 */
    s     = -i;
    val   = (K + s) ^ s;
    norm += val * val;
    *y    = val;

    return norm;
}

static inline void celt_encode_pulses(OpusRangeCoder *rc, int *y, uint32_t N, uint32_t K)
{
    ff_opus_rc_enc_uint(rc, celt_icwrsi(N, K, y), CELT_PVQ_V(N, K));
}

static inline float celt_decode_pulses(OpusRangeCoder *rc, int *y, uint32_t N, uint32_t K)
{
    const uint32_t idx = ff_opus_rc_dec_uint(rc, CELT_PVQ_V(N, K));
    return celt_cwrsi(N, K, idx, y);
}

/*
 * Faster than libopus's search, operates entirely in the signed domain.
 * Slightly worse/better depending on N, K and the input vector.
 */
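/* The search first projects X onto the pyramid (rounding K*X[i]/sum|X|), then
 * greedily adds or removes single pulses, each time picking the position that
 * maximizes the correlation-squared over energy ratio, until exactly K pulses
 * remain. Returns the squared norm of the resulting integer vector y. */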
static float ppp_pvq_search_c(float *X, int *y, int K, int N)
{
    int i, y_norm = 0;
    float res = 0.0f, xy_norm = 0.0f;

    for (i = 0; i < N; i++)
        res += FFABS(X[i]);

    res = K/(res + FLT_EPSILON);

    for (i = 0; i < N; i++) {
        y[i] = lrintf(res*X[i]);
        y_norm  += y[i]*y[i];
        xy_norm += y[i]*X[i];
        K -= FFABS(y[i]);
    }

    while (K) {
        int max_idx = 0, phase = FFSIGN(K);
        float max_num = 0.0f;
        float max_den = 1.0f;
        y_norm += 1.0f;

        for (i = 0; i < N; i++) {
            /* If the sum has been overshot and the best place has 0 pulses allocated
             * to it, attempting to decrease it further will actually increase the
             * sum. Prevent this by disregarding any 0 positions when decrementing. */
            const int ca = 1 ^ ((y[i] == 0) & (phase < 0));
            const int y_new = y_norm + 2*phase*FFABS(y[i]);
            float xy_new = xy_norm + 1*phase*FFABS(X[i]);
            xy_new = xy_new * xy_new;
            if (ca && (max_den*xy_new) > (y_new*max_num)) {
                max_den = y_new;
                max_num = xy_new;
                max_idx = i;
            }
        }

        K -= phase;

        phase *= FFSIGN(X[max_idx]);
        xy_norm += 1*phase*X[max_idx];
        y_norm  += 2*phase*y[max_idx];
        y[max_idx] += phase;
    }

    return (float)y_norm;
}

static uint32_t celt_alg_quant(OpusRangeCoder *rc, float *X, uint32_t N, uint32_t K,
                               enum CeltSpread spread, uint32_t blocks, float gain,
                               CeltPVQ *pvq)
{
    int *y = pvq->qcoeff;

    celt_exp_rotation(X, N, blocks, K, spread, 1);
    gain /= sqrtf(pvq->pvq_search(X, y, K, N));
    celt_encode_pulses(rc, y, N, K);
    celt_normalize_residual(y, X, N, gain);
    celt_exp_rotation(X, N, blocks, K, spread, 0);
    return celt_extract_collapse_mask(y, N, blocks);
}

/** Decode pulse vector and combine the result with the pitch vector to produce
    the final normalised signal in the current band. */
static uint32_t celt_alg_unquant(OpusRangeCoder *rc, float *X, uint32_t N, uint32_t K,
                                 enum CeltSpread spread, uint32_t blocks, float gain,
                                 CeltPVQ *pvq)
{
    int *y = pvq->qcoeff;

    gain /= sqrtf(celt_decode_pulses(rc, y, N, K));
    celt_normalize_residual(y, X, N, gain);
    celt_exp_rotation(X, N, blocks, K, spread, 0);
    return celt_extract_collapse_mask(y, N, blocks);
}

static int celt_calc_theta(const float *X, const float *Y, int coupling, int N)
{
    int i;
    float e[2] = { 0.0f, 0.0f };
    if (coupling) { /* Coupling case */
        for (i = 0; i < N; i++) {
            e[0] += (X[i] + Y[i])*(X[i] + Y[i]);
            e[1] += (X[i] - Y[i])*(X[i] - Y[i]);
        }
    } else {
        for (i = 0; i < N; i++) {
            e[0] += X[i]*X[i];
            e[1] += Y[i]*Y[i];
        }
    }
    return lrintf(32768.0f*atan2f(sqrtf(e[1]), sqrtf(e[0]))/M_PI);
}

static void celt_stereo_is_decouple(float *X, float *Y, float e_l, float e_r, int N)
{
    int i;
    const float energy_n = 1.0f/(sqrtf(e_l*e_l + e_r*e_r) + FLT_EPSILON);
    e_l *= energy_n;
    e_r *= energy_n;
    for (i = 0; i < N; i++)
        X[i] = e_l*X[i] + e_r*Y[i];
}

static void celt_stereo_ms_decouple(float *X, float *Y, int N)
{
    int i;
    for (i = 0; i < N; i++) {
        const float Xret = X[i];
        X[i] = (X[i] + Y[i])*M_SQRT1_2;
        Y[i] = (Y[i] - Xret)*M_SQRT1_2;
    }
}
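
/* Shared worker for both directions of the band (de)quantizer: "quant"
 * selects encoding vs decoding and "rec" is the recursive callback used when
 * a band gets split. Handles the N=1 sign-only case, time/frequency
 * resolution changes, stereo and time splits with theta coding, the basic
 * no-split PVQ case and noise/folding fills, and finally stereo merging. */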
static av_always_inline uint32_t quant_band_template(CeltPVQ *pvq, CeltFrame *f,
                                                     OpusRangeCoder *rc,
                                                     const int band, float *X,
                                                     float *Y, int N, int b,
                                                     uint32_t blocks, float *lowband,
                                                     int duration, float *lowband_out,
                                                     int level, float gain,
                                                     float *lowband_scratch,
                                                     int fill, int quant,
                                                     QUANT_FN(*rec))
{
    int i;
    const uint8_t *cache;
    int stereo = !!Y, split = stereo;
    int imid = 0, iside = 0;
    uint32_t N0 = N;
    int N_B = N / blocks;
    int N_B0 = N_B;
    int B0 = blocks;
    int time_divide = 0;
    int recombine = 0;
    int inv = 0;
    float mid = 0, side = 0;
    int longblocks = (B0 == 1);
    uint32_t cm = 0;

    if (N == 1) {
        float *x = X;
        for (i = 0; i <= stereo; i++) {
            int sign = 0;
            if (f->remaining2 >= 1 << 3) {
                if (quant) {
                    sign = x[0] < 0;
                    ff_opus_rc_put_raw(rc, sign, 1);
                } else {
                    sign = ff_opus_rc_get_raw(rc, 1);
                }
                f->remaining2 -= 1 << 3;
            }
            x[0] = 1.0f - 2.0f*sign;
            x = Y;
        }
        if (lowband_out)
            lowband_out[0] = X[0];
        return 1;
    }

    if (!stereo && level == 0) {
        int tf_change = f->tf_change[band];
        int k;
        if (tf_change > 0)
            recombine = tf_change;

        /* Band recombining to increase frequency resolution */
        if (lowband &&
            (recombine || ((N_B & 1) == 0 && tf_change < 0) || B0 > 1)) {
            for (i = 0; i < N; i++)
                lowband_scratch[i] = lowband[i];
            lowband = lowband_scratch;
        }

        for (k = 0; k < recombine; k++) {
            if (quant || lowband)
                celt_haar1(quant ? X : lowband, N >> k, 1 << k);
            fill = ff_celt_bit_interleave[fill & 0xF] | ff_celt_bit_interleave[fill >> 4] << 2;
        }
        blocks >>= recombine;
        N_B <<= recombine;

        /* Increasing the time resolution */
        while ((N_B & 1) == 0 && tf_change < 0) {
            if (quant || lowband)
                celt_haar1(quant ? X : lowband, N_B, blocks);
            fill |= fill << blocks;
            blocks <<= 1;
            N_B >>= 1;
            time_divide++;
            tf_change++;
        }
        B0 = blocks;
        N_B0 = N_B;

        /* Reorganize the samples in time order instead of frequency order */
        if (B0 > 1 && (quant || lowband))
            celt_deinterleave_hadamard(pvq->hadamard_tmp, quant ? X : lowband,
                                       N_B >> recombine, B0 << recombine,
                                       longblocks);
    }

    /* If we need 1.5 more bits than we can produce, split the band in two. */
    cache = ff_celt_cache_bits +
            ff_celt_cache_index[(duration + 1) * CELT_MAX_BANDS + band];
    if (!stereo && duration >= 0 && b > cache[cache[0]] + 12 && N > 2) {
        N >>= 1;
        Y = X + N;
        split = 1;
        duration -= 1;
        if (blocks == 1)
            fill = (fill & 1) | (fill << 1);
        blocks = (blocks + 1) >> 1;
    }

    if (split) {
        int qn;
        int itheta = quant ? celt_calc_theta(X, Y, stereo, N) : 0;
        int mbits, sbits, delta;
        int qalloc;
        int pulse_cap;
        int offset;
        int orig_fill;
        int tell;

        /* Decide on the resolution to give to the split parameter theta */
        pulse_cap = ff_celt_log_freq_range[band] + duration * 8;
        offset = (pulse_cap >> 1) - (stereo && N == 2 ? CELT_QTHETA_OFFSET_TWOPHASE :
                                                        CELT_QTHETA_OFFSET);
        qn = (stereo && band >= f->intensity_stereo) ? 1 :
             celt_compute_qn(N, b, offset, pulse_cap, stereo);
        tell = opus_rc_tell_frac(rc);
        if (qn != 1) {
            if (quant)
                itheta = (itheta*qn + 8192) >> 14;

            /* Entropy coding of the angle. We use a uniform pdf for the
             * time split, a step for stereo, and a triangular one for the rest. */
            if (quant) {
                if (stereo && N > 2)
                    ff_opus_rc_enc_uint_step(rc, itheta, qn / 2);
                else if (stereo || B0 > 1)
                    ff_opus_rc_enc_uint(rc, itheta, qn + 1);
                else
                    ff_opus_rc_enc_uint_tri(rc, itheta, qn);
                itheta = itheta * 16384 / qn;
                if (stereo) {
                    if (itheta == 0)
                        celt_stereo_is_decouple(X, Y, f->block[0].lin_energy[band],
                                                f->block[1].lin_energy[band], N);
                    else
                        celt_stereo_ms_decouple(X, Y, N);
                }
            } else {
                if (stereo && N > 2)
                    itheta = ff_opus_rc_dec_uint_step(rc, qn / 2);
                else if (stereo || B0 > 1)
                    itheta = ff_opus_rc_dec_uint(rc, qn+1);
                else
                    itheta = ff_opus_rc_dec_uint_tri(rc, qn);
                itheta = itheta * 16384 / qn;
            }
        } else if (stereo) {
            if (quant) {
                inv = itheta > 8192;
                if (inv) {
                    for (i = 0; i < N; i++)
                        Y[i] *= -1;
                }
                celt_stereo_is_decouple(X, Y, f->block[0].lin_energy[band],
                                        f->block[1].lin_energy[band], N);

                if (b > 2 << 3 && f->remaining2 > 2 << 3) {
                    ff_opus_rc_enc_log(rc, inv, 2);
                } else {
                    inv = 0;
                }
            } else {
                inv = (b > 2 << 3 && f->remaining2 > 2 << 3) ? ff_opus_rc_dec_log(rc, 2) : 0;
            }
            itheta = 0;
        }
        qalloc = opus_rc_tell_frac(rc) - tell;
        b -= qalloc;

        orig_fill = fill;
        if (itheta == 0) {
            imid = 32767;
            iside = 0;
            fill = av_mod_uintp2(fill, blocks);
            delta = -16384;
        } else if (itheta == 16384) {
            imid = 0;
            iside = 32767;
            fill &= ((1 << blocks) - 1) << blocks;
            delta = 16384;
        } else {
            imid = celt_cos(itheta);
            iside = celt_cos(16384-itheta);
            /* This is the mid vs side allocation that minimizes squared error
               in that band. */
            delta = ROUND_MUL16((N - 1) << 7, celt_log2tan(iside, imid));
        }

        mid = imid / 32768.0f;
        side = iside / 32768.0f;

        /* This is a special case for N=2 that only works for stereo and takes
           advantage of the fact that mid and side are orthogonal to encode
           the side with just one bit. */
        if (N == 2 && stereo) {
            int c;
            int sign = 0;
            float tmp;
            float *x2, *y2;
            mbits = b;
            /* Only need one bit for the side */
            sbits = (itheta != 0 && itheta != 16384) ? 1 << 3 : 0;
            mbits -= sbits;
            c = (itheta > 8192);
            f->remaining2 -= qalloc+sbits;

            x2 = c ? Y : X;
            y2 = c ? X : Y;
            if (sbits) {
                if (quant) {
                    sign = x2[0]*y2[1] - x2[1]*y2[0] < 0;
                    ff_opus_rc_put_raw(rc, sign, 1);
                } else {
                    sign = ff_opus_rc_get_raw(rc, 1);
                }
            }
            sign = 1 - 2 * sign;
            /* We use orig_fill here because we want to fold the side, but if
               itheta==16384, we'll have cleared the low bits of fill. */
            cm = rec(pvq, f, rc, band, x2, NULL, N, mbits, blocks, lowband, duration,
                     lowband_out, level, gain, lowband_scratch, orig_fill);
            /* We don't split N=2 bands, so cm is either 1 or 0 (for a fold-collapse),
               and there's no need to worry about mixing with the other channel. */
            y2[0] = -sign * x2[1];
            y2[1] =  sign * x2[0];
            X[0] *= mid;
            X[1] *= mid;
            Y[0] *= side;
            Y[1] *= side;
            tmp = X[0];
            X[0] = tmp - Y[0];
            Y[0] = tmp + Y[0];
            tmp = X[1];
            X[1] = tmp - Y[1];
            Y[1] = tmp + Y[1];
        } else {
            /* "Normal" split code */
            float *next_lowband2     = NULL;
            float *next_lowband_out1 = NULL;
            int next_level = 0;
            int rebalance;
            uint32_t cmt;

            /* Give more bits to low-energy MDCTs than they would
             * otherwise deserve */
            if (B0 > 1 && !stereo && (itheta & 0x3fff)) {
                if (itheta > 8192)
                    /* Rough approximation for pre-echo masking */
                    delta -= delta >> (4 - duration);
                else
                    /* Corresponds to a forward-masking slope of
                     * 1.5 dB per 10 ms */
                    delta = FFMIN(0, delta + (N << 3 >> (5 - duration)));
            }
            mbits = av_clip((b - delta) / 2, 0, b);
            sbits = b - mbits;
            f->remaining2 -= qalloc;

            if (lowband && !stereo)
                next_lowband2 = lowband + N; /* >32-bit split case */

            /* Only stereo needs to pass on lowband_out.
             * Otherwise, it's handled at the end */
            if (stereo)
                next_lowband_out1 = lowband_out;
            else
                next_level = level + 1;

            rebalance = f->remaining2;
            if (mbits >= sbits) {
                /* In stereo mode, we do not apply a scaling to the mid
                 * because we need the normalized mid for folding later */
                cm = rec(pvq, f, rc, band, X, NULL, N, mbits, blocks, lowband,
                         duration, next_lowband_out1, next_level,
                         stereo ? 1.0f : (gain * mid), lowband_scratch, fill);
                rebalance = mbits - (rebalance - f->remaining2);
                if (rebalance > 3 << 3 && itheta != 0)
                    sbits += rebalance - (3 << 3);

                /* For a stereo split, the high bits of fill are always zero,
                 * so no folding will be done to the side. */
                cmt = rec(pvq, f, rc, band, Y, NULL, N, sbits, blocks, next_lowband2,
                          duration, NULL, next_level, gain * side, NULL,
                          fill >> blocks);
                cm |= cmt << ((B0 >> 1) & (stereo - 1));
            } else {
                /* For a stereo split, the high bits of fill are always zero,
                 * so no folding will be done to the side. */
                cm = rec(pvq, f, rc, band, Y, NULL, N, sbits, blocks, next_lowband2,
                         duration, NULL, next_level, gain * side, NULL, fill >> blocks);
                cm <<= ((B0 >> 1) & (stereo - 1));
                rebalance = sbits - (rebalance - f->remaining2);
                if (rebalance > 3 << 3 && itheta != 16384)
                    mbits += rebalance - (3 << 3);

                /* In stereo mode, we do not apply a scaling to the mid because
                 * we need the normalized mid for folding later */
                cm |= rec(pvq, f, rc, band, X, NULL, N, mbits, blocks, lowband, duration,
                          next_lowband_out1, next_level, stereo ? 1.0f : (gain * mid),
                          lowband_scratch, fill);
            }
        }
    } else {
        /* This is the basic no-split case */
        uint32_t q         = celt_bits2pulses(cache, b);
        uint32_t curr_bits = celt_pulses2bits(cache, q);
        f->remaining2 -= curr_bits;

        /* Ensures we can never bust the budget */
        while (f->remaining2 < 0 && q > 0) {
            f->remaining2 += curr_bits;
            curr_bits = celt_pulses2bits(cache, --q);
            f->remaining2 -= curr_bits;
        }

        if (q != 0) {
            /* Finally do the actual (de)quantization */
            if (quant) {
                cm = celt_alg_quant(rc, X, N, (q < 8) ? q : (8 + (q & 7)) << ((q >> 3) - 1),
                                    f->spread, blocks, gain, pvq);
            } else {
                cm = celt_alg_unquant(rc, X, N, (q < 8) ? q : (8 + (q & 7)) << ((q >> 3) - 1),
                                      f->spread, blocks, gain, pvq);
            }
        } else {
            /* If there's no pulse, fill the band anyway */
            uint32_t cm_mask = (1 << blocks) - 1;
            fill &= cm_mask;
            if (fill) {
                if (!lowband) {
                    /* Noise */
                    for (i = 0; i < N; i++)
                        X[i] = (((int32_t)celt_rng(f)) >> 20);
                    cm = cm_mask;
                } else {
                    /* Folded spectrum */
                    for (i = 0; i < N; i++) {
                        /* About 48 dB below the "normal" folding level */
                        X[i] = lowband[i] + (((celt_rng(f)) & 0x8000) ? 1.0f / 256 : -1.0f / 256);
                    }
                    cm = fill;
                }
                celt_renormalize_vector(X, N, gain);
            } else {
                memset(X, 0, N*sizeof(float));
            }
        }
    }

    /* This code is used by the decoder and by the resynthesis-enabled encoder */
    if (stereo) {
        if (N > 2)
            celt_stereo_merge(X, Y, mid, N);
        if (inv) {
            for (i = 0; i < N; i++)
                Y[i] *= -1;
        }
    } else if (level == 0) {
        int k;

        /* Undo the sample reorganization going from time order to frequency order */
        if (B0 > 1)
            celt_interleave_hadamard(pvq->hadamard_tmp, X, N_B >> recombine,
                                     B0 << recombine, longblocks);

        /* Undo time-freq changes that we did earlier */
        N_B = N_B0;
        blocks = B0;
        for (k = 0; k < time_divide; k++) {
            blocks >>= 1;
            N_B <<= 1;
            cm |= cm >> blocks;
            celt_haar1(X, N_B, blocks);
        }

        for (k = 0; k < recombine; k++) {
            cm = ff_celt_bit_deinterleave[cm];
            celt_haar1(X, N0>>k, 1<<k);
        }
        blocks <<= recombine;

        /* Scale output for later folding */
        if (lowband_out) {
            float n = sqrtf(N0);
            for (i = 0; i < N0; i++)
                lowband_out[i] = n * X[i];
        }
        cm = av_mod_uintp2(cm, blocks);
    }

    return cm;
}

static QUANT_FN(pvq_decode_band)
{
    return quant_band_template(pvq, f, rc, band, X, Y, N, b, blocks, lowband, duration,
                               lowband_out, level, gain, lowband_scratch, fill, 0,
                               pvq->decode_band);
}

static QUANT_FN(pvq_encode_band)
{
    return quant_band_template(pvq, f, rc, band, X, Y, N, b, blocks, lowband, duration,
                               lowband_out, level, gain, lowband_scratch, fill, 1,
                               pvq->encode_band);
}
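
/* Encoder rate/distortion helper: trial-encodes one band on a checkpointed
 * range coder, measures the distortion against the original coefficients and
 * the bits spent, then rolls the coder back and returns a lambda-weighted cost. */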
static float pvq_band_cost(CeltPVQ *pvq, CeltFrame *f, OpusRangeCoder *rc, int band,
                           float *bits, float lambda)
{
    int i, b = 0;
    uint32_t cm[2] = { (1 << f->blocks) - 1, (1 << f->blocks) - 1 };
    const int band_size = ff_celt_freq_range[band] << f->size;
    float buf[176 * 2], lowband_scratch[176], norm1[176], norm2[176];
    float dist, cost, err_x = 0.0f, err_y = 0.0f;
    float *X = buf;
    float *X_orig = f->block[0].coeffs + (ff_celt_freq_bands[band] << f->size);
    float *Y = (f->channels == 2) ? &buf[176] : NULL;
    float *Y_orig = f->block[1].coeffs + (ff_celt_freq_bands[band] << f->size);
    OPUS_RC_CHECKPOINT_SPAWN(rc);

    memcpy(X, X_orig, band_size*sizeof(float));
    if (Y)
        memcpy(Y, Y_orig, band_size*sizeof(float));

    f->remaining2 = ((f->framebits << 3) - f->anticollapse_needed) - opus_rc_tell_frac(rc) - 1;
    if (band <= f->coded_bands - 1) {
        int curr_balance = f->remaining / FFMIN(3, f->coded_bands - band);
        b = av_clip_uintp2(FFMIN(f->remaining2 + 1, f->pulses[band] + curr_balance), 14);
    }

    if (f->dual_stereo) {
        pvq->encode_band(pvq, f, rc, band, X, NULL, band_size, b / 2, f->blocks, NULL,
                         f->size, norm1, 0, 1.0f, lowband_scratch, cm[0]);

        pvq->encode_band(pvq, f, rc, band, Y, NULL, band_size, b / 2, f->blocks, NULL,
                         f->size, norm2, 0, 1.0f, lowband_scratch, cm[1]);
    } else {
        pvq->encode_band(pvq, f, rc, band, X, Y, band_size, b, f->blocks, NULL, f->size,
                         norm1, 0, 1.0f, lowband_scratch, cm[0] | cm[1]);
    }

    for (i = 0; i < band_size; i++) {
        err_x += (X[i] - X_orig[i])*(X[i] - X_orig[i]);
        if (Y)
            err_y += (Y[i] - Y_orig[i])*(Y[i] - Y_orig[i]);
    }

    dist = sqrtf(err_x) + sqrtf(err_y);
    cost = OPUS_RC_CHECKPOINT_BITS(rc)/8.0f;
    *bits += cost;

    OPUS_RC_CHECKPOINT_ROLLBACK(rc);

    return lambda*dist*cost;
}

int av_cold ff_celt_pvq_init(CeltPVQ **pvq)
{
    CeltPVQ *s = av_malloc(sizeof(CeltPVQ));
    if (!s)
        return AVERROR(ENOMEM);

    s->pvq_search  = ppp_pvq_search_c;
    s->decode_band = pvq_decode_band;
    s->encode_band = pvq_encode_band;
    s->band_cost   = pvq_band_cost;

    if (ARCH_X86)
        ff_opus_dsp_init_x86(s);

    *pvq = s;

    return 0;
}

void av_cold ff_celt_pvq_uninit(CeltPVQ **pvq)
{
    av_freep(pvq);
}