/*
 * Real Audio 1.0 (14.4K) encoder
 * Copyright (c) 2010 Francesco Lavra <francescolavra@interfree.it>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Real Audio 1.0 (14.4K) encoder
 * @author Francesco Lavra <francescolavra@interfree.it>
 */

#include <float.h>

#include "avcodec.h"
#include "put_bits.h"
#include "celp_filters.h"
#include "ra144.h"

static av_cold int ra144_encode_init(AVCodecContext * avctx)
{
    RA144Context *ractx;
    int ret;

    if (avctx->sample_fmt != AV_SAMPLE_FMT_S16) {
        av_log(avctx, AV_LOG_ERROR, "invalid sample format\n");
        return -1;
    }
    if (avctx->channels != 1) {
        av_log(avctx, AV_LOG_ERROR, "invalid number of channels: %d\n",
               avctx->channels);
        return -1;
    }
    avctx->frame_size = NBLOCKS * BLOCKSIZE;
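    /* Each frame of NBLOCKS * BLOCKSIZE samples is coded into FRAMESIZE
     * bytes, giving a constant bit rate of 8000 bps. */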
    avctx->bit_rate = 8000;
    ractx = avctx->priv_data;
    ractx->lpc_coef[0] = ractx->lpc_tables[0];
    ractx->lpc_coef[1] = ractx->lpc_tables[1];
    ractx->avctx = avctx;
    ret = ff_lpc_init(&ractx->lpc_ctx, avctx->frame_size, LPC_ORDER,
                      FF_LPC_TYPE_LEVINSON);
    return ret;
}

static av_cold int ra144_encode_close(AVCodecContext *avctx)
{
    RA144Context *ractx = avctx->priv_data;

    ff_lpc_end(&ractx->lpc_ctx);
    return 0;
}

/**
 * Quantize a value by searching a sorted table for the element with the
 * nearest value
 *
 * @param value value to quantize
 * @param table array containing the quantization table
 * @param size size of the quantization table
 * @return index of the quantization table corresponding to the element with the
 *         nearest value
 */
static int quantize(int value, const int16_t *table, unsigned int size)
{
    unsigned int low = 0, high = size - 1;

    while (1) {
        int index = (low + high) >> 1;
        int error = table[index] - value;
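
        /* Once the search interval has collapsed to two adjacent entries,
         * return the index of whichever one is closer to value. */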
        if (index == low)
            return table[high] + error > value ? low : high;
        if (error > 0) {
            high = index;
        } else {
            low = index;
        }
    }
}

/**
 * Orthogonalize a vector to another vector
 *
 * @param v vector to orthogonalize
 * @param u vector against which orthogonalization is performed
 */
static void orthogonalize(float *v, const float *u)
{
    int i;
    float num = 0, den = 0;

    for (i = 0; i < BLOCKSIZE; i++) {
        num += v[i] * u[i];
        den += u[i] * u[i];
    }
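    /* num / den is the projection coefficient <v,u> / <u,u>; subtracting that
     * multiple of u from v removes the component of v along u (one
     * Gram-Schmidt step). */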
    num /= den;
    for (i = 0; i < BLOCKSIZE; i++)
        v[i] -= num * u[i];
}

/**
 * Calculate match score and gain of an LPC-filtered vector with respect to
 * input data, possibly orthogonalizing it to up to 2 other vectors
 *
 * @param work array used to calculate the filtered vector
 * @param coefs coefficients of the LPC filter
 * @param vect original vector
 * @param ortho1 first vector against which orthogonalization is performed
 * @param ortho2 second vector against which orthogonalization is performed
 * @param data input data
 * @param score pointer to variable where match score is returned
 * @param gain pointer to variable where gain is returned
 */
static void get_match_score(float *work, const float *coefs, float *vect,
                            const float *ortho1, const float *ortho2,
                            const float *data, float *score, float *gain)
{
    float c, g;
    int i;

    ff_celp_lp_synthesis_filterf(work, coefs, vect, BLOCKSIZE, LPC_ORDER);
    if (ortho1)
        orthogonalize(work, ortho1);
    if (ortho2)
        orthogonalize(work, ortho2);
    c = g = 0;
    for (i = 0; i < BLOCKSIZE; i++) {
        g += work[i] * work[i];
        c += data[i] * work[i];
    }
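    /* c is the correlation of the filtered vector with the input and g its
     * energy: the optimal gain is c / g, and the score c * c / g measures how
     * much input energy this vector can remove. */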
    if (c <= 0) {
        *score = 0;
        return;
    }
    *gain = c / g;
    *score = *gain * c;
}

/**
 * Create a vector from the adaptive codebook at a given lag value
 *
 * @param vect array where vector is stored
 * @param cb adaptive codebook
 * @param lag lag value
 */
static void create_adapt_vect(float *vect, const int16_t *cb, int lag)
{
    int i;

    cb += BUFFERSIZE - lag;
    for (i = 0; i < FFMIN(BLOCKSIZE, lag); i++)
        vect[i] = cb[i];
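    /* For lags shorter than a block, periodically repeat the available
     * samples to fill the rest of the vector. */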
    if (lag < BLOCKSIZE)
        for (i = 0; i < BLOCKSIZE - lag; i++)
            vect[lag + i] = cb[i];
}

/**
 * Search the adaptive codebook for the best entry and gain and remove its
 * contribution from input data
 *
 * @param adapt_cb array from which the adaptive codebook is extracted
 * @param work array used to calculate LPC-filtered vectors
 * @param coefs coefficients of the LPC filter
 * @param data input data
 * @return index of the best entry of the adaptive codebook
 */
static int adaptive_cb_search(const int16_t *adapt_cb, float *work,
                              const float *coefs, float *data)
{
    int i, best_vect;
    float score, gain, best_score, best_gain;
    float exc[BLOCKSIZE];

    gain = best_score = 0;
    for (i = BLOCKSIZE / 2; i <= BUFFERSIZE; i++) {
        create_adapt_vect(exc, adapt_cb, i);
        get_match_score(work, coefs, exc, NULL, NULL, data, &score, &gain);
        if (score > best_score) {
            best_score = score;
            best_vect = i;
            best_gain = gain;
        }
    }
    if (!best_score)
        return 0;

    /**
     * Re-calculate the filtered vector from the vector with maximum match score
     * and remove its contribution from input data.
     */
    create_adapt_vect(exc, adapt_cb, best_vect);
    ff_celp_lp_synthesis_filterf(work, coefs, exc, BLOCKSIZE, LPC_ORDER);
    for (i = 0; i < BLOCKSIZE; i++)
        data[i] -= best_gain * work[i];
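
    /* Lags start at BLOCKSIZE / 2, so the returned codebook index is 1-based;
     * 0 (returned above) means the adaptive codebook is not used. */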
    return (best_vect - BLOCKSIZE / 2 + 1);
}

/**
 * Find the best vector of a fixed codebook by applying an LPC filter to
 * codebook entries, possibly orthogonalizing them to up to 2 other vectors and
 * matching the results with input data
 *
 * @param work array used to calculate the filtered vectors
 * @param coefs coefficients of the LPC filter
 * @param cb fixed codebook
 * @param ortho1 first vector against which orthogonalization is performed
 * @param ortho2 second vector against which orthogonalization is performed
 * @param data input data
 * @param idx pointer to variable where the index of the best codebook entry is
 *        returned
 * @param gain pointer to variable where the gain of the best codebook entry is
 *        returned
 */
static void find_best_vect(float *work, const float *coefs,
                           const int8_t cb[][BLOCKSIZE], const float *ortho1,
                           const float *ortho2, float *data, int *idx,
                           float *gain)
{
    int i, j;
    float g, score, best_score;
    float vect[BLOCKSIZE];

    *idx = *gain = best_score = 0;
    for (i = 0; i < FIXED_CB_SIZE; i++) {
        for (j = 0; j < BLOCKSIZE; j++)
            vect[j] = cb[i][j];
        get_match_score(work, coefs, vect, ortho1, ortho2, data, &score, &g);
        if (score > best_score) {
            best_score = score;
            *idx = i;
            *gain = g;
        }
    }
}

/**
 * Search the two fixed codebooks for the best entry and gain
 *
 * @param work array used to calculate LPC-filtered vectors
 * @param coefs coefficients of the LPC filter
 * @param data input data
 * @param cba_idx index of the best entry of the adaptive codebook
 * @param cb1_idx pointer to variable where the index of the best entry of the
 *        first fixed codebook is returned
 * @param cb2_idx pointer to variable where the index of the best entry of the
 *        second fixed codebook is returned
 */
static void fixed_cb_search(float *work, const float *coefs, float *data,
                            int cba_idx, int *cb1_idx, int *cb2_idx)
{
    int i, ortho_cb1;
    float gain;
    float cba_vect[BLOCKSIZE], cb1_vect[BLOCKSIZE];
    float vect[BLOCKSIZE];

    /**
     * The filtered vector from the adaptive codebook can be retrieved from
     * work, because this function is called just after adaptive_cb_search().
     */
    if (cba_idx)
        memcpy(cba_vect, work, sizeof(cba_vect));

    find_best_vect(work, coefs, ff_cb1_vects, cba_idx ? cba_vect : NULL, NULL,
                   data, cb1_idx, &gain);

    /**
     * Re-calculate the filtered vector from the vector with maximum match score
     * and remove its contribution from input data.
     */
    if (gain) {
        for (i = 0; i < BLOCKSIZE; i++)
            vect[i] = ff_cb1_vects[*cb1_idx][i];
        ff_celp_lp_synthesis_filterf(work, coefs, vect, BLOCKSIZE, LPC_ORDER);
        if (cba_idx)
            orthogonalize(work, cba_vect);
        for (i = 0; i < BLOCKSIZE; i++)
            data[i] -= gain * work[i];
        memcpy(cb1_vect, work, sizeof(cb1_vect));
        ortho_cb1 = 1;
    } else
        ortho_cb1 = 0;

    find_best_vect(work, coefs, ff_cb2_vects, cba_idx ? cba_vect : NULL,
                   ortho_cb1 ? cb1_vect : NULL, data, cb2_idx, &gain);
}

/**
 * Encode a subblock of the current frame
 *
 * @param ractx encoder context
 * @param sblock_data input data of the subblock
 * @param lpc_coefs coefficients of the LPC filter
 * @param rms RMS of the reflection coefficients
 * @param pb pointer to PutBitContext of the current frame
 */
static void ra144_encode_subblock(RA144Context *ractx,
                                  const int16_t *sblock_data,
                                  const int16_t *lpc_coefs, unsigned int rms,
                                  PutBitContext *pb)
{
    float data[BLOCKSIZE], work[LPC_ORDER + BLOCKSIZE];
    float coefs[LPC_ORDER];
    float zero[BLOCKSIZE], cba[BLOCKSIZE], cb1[BLOCKSIZE], cb2[BLOCKSIZE];
    int16_t cba_vect[BLOCKSIZE];
    int cba_idx, cb1_idx, cb2_idx, gain;
    int i, n, m[3];
    float g[3];
    float error, best_error;

    for (i = 0; i < LPC_ORDER; i++) {
        work[i] = ractx->curr_sblock[BLOCKSIZE + i];
        coefs[i] = lpc_coefs[i] * (1/4096.0);
    }

    /**
     * Calculate the zero-input response of the LPC filter and subtract it from
     * input data.
     */
    memset(data, 0, sizeof(data));
    ff_celp_lp_synthesis_filterf(work + LPC_ORDER, coefs, data, BLOCKSIZE,
                                 LPC_ORDER);
    for (i = 0; i < BLOCKSIZE; i++) {
        zero[i] = work[LPC_ORDER + i];
        data[i] = sblock_data[i] - zero[i];
    }

    /**
     * Codebook search is performed without taking into account the contribution
     * of the previous subblock, since it has been just subtracted from input
     * data.
     */
    memset(work, 0, LPC_ORDER * sizeof(*work));

    cba_idx = adaptive_cb_search(ractx->adapt_cb, work + LPC_ORDER, coefs,
                                 data);
    if (cba_idx) {
        /**
         * The filtered vector from the adaptive codebook can be retrieved from
         * work, see implementation of adaptive_cb_search().
         */
        memcpy(cba, work + LPC_ORDER, sizeof(cba));

        ff_copy_and_dup(cba_vect, ractx->adapt_cb, cba_idx + BLOCKSIZE / 2 - 1);
        m[0] = (ff_irms(cba_vect) * rms) >> 12;
    }
    fixed_cb_search(work + LPC_ORDER, coefs, data, cba_idx, &cb1_idx, &cb2_idx);
    for (i = 0; i < BLOCKSIZE; i++) {
        cb1[i] = ff_cb1_vects[cb1_idx][i];
        cb2[i] = ff_cb2_vects[cb2_idx][i];
    }
    ff_celp_lp_synthesis_filterf(work + LPC_ORDER, coefs, cb1, BLOCKSIZE,
                                 LPC_ORDER);
    memcpy(cb1, work + LPC_ORDER, sizeof(cb1));
    m[1] = (ff_cb1_base[cb1_idx] * rms) >> 8;
    ff_celp_lp_synthesis_filterf(work + LPC_ORDER, coefs, cb2, BLOCKSIZE,
                                 LPC_ORDER);
    memcpy(cb2, work + LPC_ORDER, sizeof(cb2));
    m[2] = (ff_cb2_base[cb2_idx] * rms) >> 8;
    best_error = FLT_MAX;
    gain = 0;
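
    /* Exhaustively try all 256 entries of the gain codebook and keep the
     * combination of gains that minimizes the squared error between the
     * reconstructed and the original subblock. */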
    for (n = 0; n < 256; n++) {
        g[1] = ((ff_gain_val_tab[n][1] * m[1]) >> ff_gain_exp_tab[n]) *
               (1/4096.0);
        g[2] = ((ff_gain_val_tab[n][2] * m[2]) >> ff_gain_exp_tab[n]) *
               (1/4096.0);
        error = 0;
        if (cba_idx) {
            g[0] = ((ff_gain_val_tab[n][0] * m[0]) >> ff_gain_exp_tab[n]) *
                   (1/4096.0);
            for (i = 0; i < BLOCKSIZE; i++) {
                data[i] = zero[i] + g[0] * cba[i] + g[1] * cb1[i] +
                          g[2] * cb2[i];
                error += (data[i] - sblock_data[i]) *
                         (data[i] - sblock_data[i]);
            }
        } else {
            for (i = 0; i < BLOCKSIZE; i++) {
                data[i] = zero[i] + g[1] * cb1[i] + g[2] * cb2[i];
                error += (data[i] - sblock_data[i]) *
                         (data[i] - sblock_data[i]);
            }
        }
        if (error < best_error) {
            best_error = error;
            gain = n;
        }
    }
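
    /* Each subblock is coded with 29 bits: 7 for the adaptive codebook index,
     * 8 for the gain index and 7 for each fixed codebook index. */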
    put_bits(pb, 7, cba_idx);
    put_bits(pb, 8, gain);
    put_bits(pb, 7, cb1_idx);
    put_bits(pb, 7, cb2_idx);

    ff_subblock_synthesis(ractx, lpc_coefs, cba_idx, cb1_idx, cb2_idx, rms,
                          gain);
}

static int ra144_encode_frame(AVCodecContext *avctx, uint8_t *frame,
                              int buf_size, void *data)
{
    static const uint8_t sizes[LPC_ORDER] = {64, 32, 32, 16, 16, 8, 8, 8, 8, 4};
    static const uint8_t bit_sizes[LPC_ORDER] = {6, 5, 5, 4, 4, 3, 3, 3, 3, 2};
    RA144Context *ractx;
    PutBitContext pb;
    int32_t lpc_data[NBLOCKS * BLOCKSIZE];
    int32_t lpc_coefs[LPC_ORDER][MAX_LPC_ORDER];
    int shift[LPC_ORDER];
    int16_t block_coefs[NBLOCKS][LPC_ORDER];
    int lpc_refl[LPC_ORDER];        /**< reflection coefficients of the frame */
    unsigned int refl_rms[NBLOCKS]; /**< RMS of the reflection coefficients */
    int energy = 0;
    int i, idx;

    if (buf_size < FRAMESIZE) {
        av_log(avctx, AV_LOG_ERROR, "output buffer too small\n");
        return 0;
    }
    ractx = avctx->priv_data;

    /**
     * Since the LPC coefficients are calculated on a frame centered over the
     * fourth subframe, to encode a given frame, data from the next frame is
     * needed. In each call to this function, the previous frame (whose data are
     * saved in the encoder context) is encoded, and data from the current frame
     * are saved in the encoder context to be used in the next function call.
     */
    for (i = 0; i < (2 * BLOCKSIZE + BLOCKSIZE / 2); i++) {
        lpc_data[i] = ractx->curr_block[BLOCKSIZE + BLOCKSIZE / 2 + i];
        energy += (lpc_data[i] * lpc_data[i]) >> 4;
    }
    for (i = 2 * BLOCKSIZE + BLOCKSIZE / 2; i < NBLOCKS * BLOCKSIZE; i++) {
        lpc_data[i] = *((int16_t *)data + i - 2 * BLOCKSIZE - BLOCKSIZE / 2) >>
                      2;
        energy += (lpc_data[i] * lpc_data[i]) >> 4;
    }
    energy = ff_energy_tab[quantize(ff_t_sqrt(energy >> 5) >> 10, ff_energy_tab,
                                    32)];

    ff_lpc_calc_coefs(&ractx->lpc_ctx, lpc_data, NBLOCKS * BLOCKSIZE, LPC_ORDER,
                      LPC_ORDER, 16, lpc_coefs, shift, FF_LPC_TYPE_LEVINSON,
                      0, ORDER_METHOD_EST, 12, 0);
    for (i = 0; i < LPC_ORDER; i++)
        block_coefs[NBLOCKS - 1][i] = -(lpc_coefs[LPC_ORDER - 1][i] <<
                                        (12 - shift[LPC_ORDER - 1]));

    /**
     * TODO: apply perceptual weighting of the input speech through bandwidth
     * expansion of the LPC filter.
     */

    if (ff_eval_refl(lpc_refl, block_coefs[NBLOCKS - 1], avctx)) {
        /**
         * The filter is unstable: use the coefficients of the previous frame.
         */
        ff_int_to_int16(block_coefs[NBLOCKS - 1], ractx->lpc_coef[1]);
        ff_eval_refl(lpc_refl, block_coefs[NBLOCKS - 1], avctx);
    }
    init_put_bits(&pb, frame, buf_size);
    for (i = 0; i < LPC_ORDER; i++) {
        idx = quantize(lpc_refl[i], ff_lpc_refl_cb[i], sizes[i]);
        put_bits(&pb, bit_sizes[i], idx);
        lpc_refl[i] = ff_lpc_refl_cb[i][idx];
    }
    ractx->lpc_refl_rms[0] = ff_rms(lpc_refl);
    ff_eval_coefs(ractx->lpc_coef[0], lpc_refl);
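
    /* The first three subblocks use LPC coefficients interpolated between the
     * previous and the current frame; the last subblock uses the current
     * frame's coefficients. */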
    refl_rms[0] = ff_interp(ractx, block_coefs[0], 1, 1, ractx->old_energy);
    refl_rms[1] = ff_interp(ractx, block_coefs[1], 2,
                            energy <= ractx->old_energy,
                            ff_t_sqrt(energy * ractx->old_energy) >> 12);
    refl_rms[2] = ff_interp(ractx, block_coefs[2], 3, 0, energy);
    refl_rms[3] = ff_rescale_rms(ractx->lpc_refl_rms[0], energy);
    ff_int_to_int16(block_coefs[NBLOCKS - 1], ractx->lpc_coef[0]);

    put_bits(&pb, 5, quantize(energy, ff_energy_tab, 32));
    for (i = 0; i < NBLOCKS; i++)
        ra144_encode_subblock(ractx, ractx->curr_block + i * BLOCKSIZE,
                              block_coefs[i], refl_rms[i], &pb);
    flush_put_bits(&pb);

    ractx->old_energy = energy;
    ractx->lpc_refl_rms[1] = ractx->lpc_refl_rms[0];
    FFSWAP(unsigned int *, ractx->lpc_coef[0], ractx->lpc_coef[1]);
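
    /* Save the current frame's input samples, scaled down by 2 bits, so they
     * can be encoded in the next call. */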
    for (i = 0; i < NBLOCKS * BLOCKSIZE; i++)
        ractx->curr_block[i] = *((int16_t *)data + i) >> 2;
    return FRAMESIZE;
}

AVCodec ff_ra_144_encoder = {
    .name           = "real_144",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = CODEC_ID_RA_144,
    .priv_data_size = sizeof(RA144Context),
    .init           = ra144_encode_init,
    .encode         = ra144_encode_frame,
    .close          = ra144_encode_close,
    .long_name      = NULL_IF_CONFIG_SMALL("RealAudio 1.0 (14.4K) encoder"),
};