/*
 * Real Audio 1.0 (14.4K) encoder
 * Copyright (c) 2010 Francesco Lavra <francescolavra@interfree.it>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Real Audio 1.0 (14.4K) encoder
 * @author Francesco Lavra <francescolavra@interfree.it>
 */
#include <float.h>

#include "avcodec.h"
#include "audio_frame_queue.h"
#include "internal.h"
#include "put_bits.h"
#include "celp_filters.h"
#include "ra144.h"

static av_cold int ra144_encode_close(AVCodecContext *avctx)
{
    RA144Context *ractx = avctx->priv_data;

    ff_lpc_end(&ractx->lpc_ctx);
    ff_af_queue_close(&ractx->afq);
#if FF_API_OLD_ENCODE_AUDIO
    av_freep(&avctx->coded_frame);
#endif
    return 0;
}

static av_cold int ra144_encode_init(AVCodecContext *avctx)
{
    RA144Context *ractx;
    int ret;

    if (avctx->channels != 1) {
        av_log(avctx, AV_LOG_ERROR, "invalid number of channels: %d\n",
               avctx->channels);
        return -1;
    }
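
    /*
     * One frame is NBLOCKS * BLOCKSIZE = 160 samples; at the codec's nominal
     * 8 kHz sample rate that is 20 ms of audio, matching the fixed
     * 8000 bit/s rate (20 bytes per frame). A full frame of delay is needed
     * because the LPC analysis window extends into the next frame.
     */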
    avctx->frame_size = NBLOCKS * BLOCKSIZE;
    avctx->delay      = avctx->frame_size;
    avctx->bit_rate   = 8000;
    ractx = avctx->priv_data;
    ractx->lpc_coef[0] = ractx->lpc_tables[0];
    ractx->lpc_coef[1] = ractx->lpc_tables[1];
    ractx->avctx = avctx;
    ret = ff_lpc_init(&ractx->lpc_ctx, avctx->frame_size, LPC_ORDER,
                      FF_LPC_TYPE_LEVINSON);
    if (ret < 0)
        goto error;

    ff_af_queue_init(avctx, &ractx->afq);

#if FF_API_OLD_ENCODE_AUDIO
    avctx->coded_frame = avcodec_alloc_frame();
    if (!avctx->coded_frame) {
        ret = AVERROR(ENOMEM);
        goto error;
    }
#endif

    return 0;
error:
    ra144_encode_close(avctx);
    return ret;
}

/**
 * Quantize a value by searching a sorted table for the element with the
 * nearest value.
 *
 * @param value value to quantize
 * @param table array containing the quantization table
 * @param size  size of the quantization table
 * @return index of the quantization table corresponding to the element with
 *         the nearest value
 */
static int quantize(int value, const int16_t *table, unsigned int size)
{
    unsigned int low = 0, high = size - 1;

    while (1) {
        int index = (low + high) >> 1;
        int error = table[index] - value;
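
        /*
         * Once the search interval has narrowed to two adjacent entries,
         * pick whichever of table[low] and table[high] is closer to value:
         * table[high] + error > value  <=>  table[low] + table[high] > 2 * value.
         */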
        if (index == low)
            return table[high] + error > value ? low : high;
        if (error > 0) {
            high = index;
        } else {
            low = index;
        }
    }
}

/**
 * Orthogonalize a vector to another vector.
 *
 * @param v vector to orthogonalize
 * @param u vector against which orthogonalization is performed
 */
static void orthogonalize(float *v, const float *u)
{
    int i;
    float num = 0, den = 0;

    for (i = 0; i < BLOCKSIZE; i++) {
        num += v[i] * u[i];
        den += u[i] * u[i];
    }
    num /= den;
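
    /*
     * num is now (v . u) / (u . u); subtracting num * u removes the
     * component of v along u (a single Gram-Schmidt step).
     */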
    for (i = 0; i < BLOCKSIZE; i++)
        v[i] -= num * u[i];
}

/**
 * Calculate match score and gain of an LPC-filtered vector with respect to
 * input data, possibly orthogonalizing it to up to two other vectors.
 *
 * @param work   array used to calculate the filtered vector
 * @param coefs  coefficients of the LPC filter
 * @param vect   original vector
 * @param ortho1 first vector against which orthogonalization is performed
 * @param ortho2 second vector against which orthogonalization is performed
 * @param data   input data
 * @param score  pointer to variable where match score is returned
 * @param gain   pointer to variable where gain is returned
 */
static void get_match_score(float *work, const float *coefs, float *vect,
                            const float *ortho1, const float *ortho2,
                            const float *data, float *score, float *gain)
{
    float c, g;
    int i;

    ff_celp_lp_synthesis_filterf(work, coefs, vect, BLOCKSIZE, LPC_ORDER);
    if (ortho1)
        orthogonalize(work, ortho1);
    if (ortho2)
        orthogonalize(work, ortho2);
    c = g = 0;
    for (i = 0; i < BLOCKSIZE; i++) {
        g += work[i] * work[i];
        c += data[i] * work[i];
    }
    if (c <= 0) {
        *score = 0;
        return;
    }
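
    /*
     * The gain c / g minimizes the squared error between data and
     * gain * work; the corresponding reduction of the error energy is
     * c^2 / g = gain * c, which is used as the match score.
     */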
    *gain  = c / g;
    *score = *gain * c;
}

/**
 * Create a vector from the adaptive codebook at a given lag value.
 *
 * @param vect array where vector is stored
 * @param cb   adaptive codebook
 * @param lag  lag value
 */
static void create_adapt_vect(float *vect, const int16_t *cb, int lag)
{
    int i;
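
    /*
     * Take up to BLOCKSIZE samples starting lag samples before the end of
     * the adaptive codebook; if lag < BLOCKSIZE, repeat that segment
     * periodically to fill the whole block.
     */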
    cb += BUFFERSIZE - lag;
    for (i = 0; i < FFMIN(BLOCKSIZE, lag); i++)
        vect[i] = cb[i];
    if (lag < BLOCKSIZE)
        for (i = 0; i < BLOCKSIZE - lag; i++)
            vect[lag + i] = cb[i];
}

/**
 * Search the adaptive codebook for the best entry and gain and remove its
 * contribution from input data.
 *
 * @param adapt_cb array from which the adaptive codebook is extracted
 * @param work     array used to calculate LPC-filtered vectors
 * @param coefs    coefficients of the LPC filter
 * @param data     input data
 * @return index of the best entry of the adaptive codebook
 */
static int adaptive_cb_search(const int16_t *adapt_cb, float *work,
                              const float *coefs, float *data)
{
    int i, best_vect;
    float score, gain, best_score, best_gain;
    float exc[BLOCKSIZE];

    gain = best_score = 0;
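
    /*
     * Try every lag from BLOCKSIZE / 2 up to BUFFERSIZE samples (the whole
     * history kept in the adaptive codebook). The index returned below is
     * 1-based, so that 0 can signal that the adaptive codebook is not used.
     */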
    for (i = BLOCKSIZE / 2; i <= BUFFERSIZE; i++) {
        create_adapt_vect(exc, adapt_cb, i);
        get_match_score(work, coefs, exc, NULL, NULL, data, &score, &gain);
        if (score > best_score) {
            best_score = score;
            best_vect  = i;
            best_gain  = gain;
        }
    }
    if (!best_score)
        return 0;

    /**
     * Re-calculate the filtered vector from the vector with maximum match
     * score and remove its contribution from input data.
     */
    create_adapt_vect(exc, adapt_cb, best_vect);
    ff_celp_lp_synthesis_filterf(work, coefs, exc, BLOCKSIZE, LPC_ORDER);
    for (i = 0; i < BLOCKSIZE; i++)
        data[i] -= best_gain * work[i];

    return best_vect - BLOCKSIZE / 2 + 1;
}

/**
 * Find the best vector of a fixed codebook by applying an LPC filter to
 * codebook entries, possibly orthogonalizing them to up to two other vectors
 * and matching the results with input data.
 *
 * @param work   array used to calculate the filtered vectors
 * @param coefs  coefficients of the LPC filter
 * @param cb     fixed codebook
 * @param ortho1 first vector against which orthogonalization is performed
 * @param ortho2 second vector against which orthogonalization is performed
 * @param data   input data
 * @param idx    pointer to variable where the index of the best codebook
 *               entry is returned
 * @param gain   pointer to variable where the gain of the best codebook
 *               entry is returned
 */
static void find_best_vect(float *work, const float *coefs,
                           const int8_t cb[][BLOCKSIZE], const float *ortho1,
                           const float *ortho2, float *data, int *idx,
                           float *gain)
{
    int i, j;
    float g, score, best_score;
    float vect[BLOCKSIZE];

    *idx = *gain = best_score = 0;
    for (i = 0; i < FIXED_CB_SIZE; i++) {
        for (j = 0; j < BLOCKSIZE; j++)
            vect[j] = cb[i][j];
        get_match_score(work, coefs, vect, ortho1, ortho2, data, &score, &g);
        if (score > best_score) {
            best_score = score;
            *idx  = i;
            *gain = g;
        }
    }
}

/**
 * Search the two fixed codebooks for the best entry and gain.
 *
 * @param work    array used to calculate LPC-filtered vectors
 * @param coefs   coefficients of the LPC filter
 * @param data    input data
 * @param cba_idx index of the best entry of the adaptive codebook
 * @param cb1_idx pointer to variable where the index of the best entry of the
 *                first fixed codebook is returned
 * @param cb2_idx pointer to variable where the index of the best entry of the
 *                second fixed codebook is returned
 */
static void fixed_cb_search(float *work, const float *coefs, float *data,
                            int cba_idx, int *cb1_idx, int *cb2_idx)
{
    int i, ortho_cb1;
    float gain;
    float cba_vect[BLOCKSIZE], cb1_vect[BLOCKSIZE];
    float vect[BLOCKSIZE];

    /**
     * The filtered vector from the adaptive codebook can be retrieved from
     * work, because this function is called just after adaptive_cb_search().
     */
    if (cba_idx)
        memcpy(cba_vect, work, sizeof(cba_vect));

    find_best_vect(work, coefs, ff_cb1_vects, cba_idx ? cba_vect : NULL, NULL,
                   data, cb1_idx, &gain);

    /**
     * Re-calculate the filtered vector from the vector with maximum match
     * score and remove its contribution from input data.
     */
    if (gain) {
        for (i = 0; i < BLOCKSIZE; i++)
            vect[i] = ff_cb1_vects[*cb1_idx][i];
        ff_celp_lp_synthesis_filterf(work, coefs, vect, BLOCKSIZE, LPC_ORDER);
        if (cba_idx)
            orthogonalize(work, cba_vect);
        for (i = 0; i < BLOCKSIZE; i++)
            data[i] -= gain * work[i];
        memcpy(cb1_vect, work, sizeof(cb1_vect));
        ortho_cb1 = 1;
    } else
        ortho_cb1 = 0;
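
    /*
     * Search the second fixed codebook, orthogonalizing its entries against
     * the adaptive-codebook contribution (if any) and the first fixed
     * codebook contribution (if its gain was nonzero).
     */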
    find_best_vect(work, coefs, ff_cb2_vects, cba_idx ? cba_vect : NULL,
                   ortho_cb1 ? cb1_vect : NULL, data, cb2_idx, &gain);
}

/**
 * Encode a subblock of the current frame.
 *
 * @param ractx       encoder context
 * @param sblock_data input data of the subblock
 * @param lpc_coefs   coefficients of the LPC filter
 * @param rms         RMS of the reflection coefficients
 * @param pb          pointer to PutBitContext of the current frame
 */
static void ra144_encode_subblock(RA144Context *ractx,
                                  const int16_t *sblock_data,
                                  const int16_t *lpc_coefs, unsigned int rms,
                                  PutBitContext *pb)
{
    float data[BLOCKSIZE] = { 0 }, work[LPC_ORDER + BLOCKSIZE];
    float coefs[LPC_ORDER];
    float zero[BLOCKSIZE], cba[BLOCKSIZE], cb1[BLOCKSIZE], cb2[BLOCKSIZE];
    int16_t cba_vect[BLOCKSIZE];
    int cba_idx, cb1_idx, cb2_idx, gain;
    int i, n, m[3];
    float g[3];
    float error, best_error;
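
    /*
     * Seed the filter memory with the last LPC_ORDER reconstructed samples
     * of the previous subblock and convert the Q12 integer LPC coefficients
     * to float.
     */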
    for (i = 0; i < LPC_ORDER; i++) {
        work[i]  = ractx->curr_sblock[BLOCKSIZE + i];
        coefs[i] = lpc_coefs[i] * (1/4096.0);
    }

    /**
     * Calculate the zero-input response of the LPC filter and subtract it
     * from input data.
     */
    ff_celp_lp_synthesis_filterf(work + LPC_ORDER, coefs, data, BLOCKSIZE,
                                 LPC_ORDER);
    for (i = 0; i < BLOCKSIZE; i++) {
        zero[i] = work[LPC_ORDER + i];
        data[i] = sblock_data[i] - zero[i];
    }

    /**
     * Codebook search is performed without taking into account the
     * contribution of the previous subblock, since it has just been
     * subtracted from input data.
     */
    memset(work, 0, LPC_ORDER * sizeof(*work));

    cba_idx = adaptive_cb_search(ractx->adapt_cb, work + LPC_ORDER, coefs,
                                 data);
    if (cba_idx) {
        /**
         * The filtered vector from the adaptive codebook can be retrieved
         * from work, see the implementation of adaptive_cb_search().
         */
        memcpy(cba, work + LPC_ORDER, sizeof(cba));

        ff_copy_and_dup(cba_vect, ractx->adapt_cb, cba_idx + BLOCKSIZE / 2 - 1);
        m[0] = (ff_irms(cba_vect) * rms) >> 12;
    }
    fixed_cb_search(work + LPC_ORDER, coefs, data, cba_idx, &cb1_idx, &cb2_idx);
    for (i = 0; i < BLOCKSIZE; i++) {
        cb1[i] = ff_cb1_vects[cb1_idx][i];
        cb2[i] = ff_cb2_vects[cb2_idx][i];
    }
    ff_celp_lp_synthesis_filterf(work + LPC_ORDER, coefs, cb1, BLOCKSIZE,
                                 LPC_ORDER);
    memcpy(cb1, work + LPC_ORDER, sizeof(cb1));
    m[1] = (ff_cb1_base[cb1_idx] * rms) >> 8;
    ff_celp_lp_synthesis_filterf(work + LPC_ORDER, coefs, cb2, BLOCKSIZE,
                                 LPC_ORDER);
    memcpy(cb2, work + LPC_ORDER, sizeof(cb2));
    m[2] = (ff_cb2_base[cb2_idx] * rms) >> 8;

    best_error = FLT_MAX;
    gain = 0;
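
    /*
     * Exhaustive search of the 256-entry gain codebook: for each candidate,
     * reconstruct the subblock from the zero-input response plus the scaled
     * codebook contributions and keep the candidate with the smallest
     * squared error. The 1/4096 factor converts the fixed-point gains to
     * float.
     */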
    for (n = 0; n < 256; n++) {
        g[1] = ((ff_gain_val_tab[n][1] * m[1]) >> ff_gain_exp_tab[n]) *
               (1/4096.0);
        g[2] = ((ff_gain_val_tab[n][2] * m[2]) >> ff_gain_exp_tab[n]) *
               (1/4096.0);
        error = 0;
        if (cba_idx) {
            g[0] = ((ff_gain_val_tab[n][0] * m[0]) >> ff_gain_exp_tab[n]) *
                   (1/4096.0);
            for (i = 0; i < BLOCKSIZE; i++) {
                data[i] = zero[i] + g[0] * cba[i] + g[1] * cb1[i] +
                          g[2] * cb2[i];
                error  += (data[i] - sblock_data[i]) *
                          (data[i] - sblock_data[i]);
            }
        } else {
            for (i = 0; i < BLOCKSIZE; i++) {
                data[i] = zero[i] + g[1] * cb1[i] + g[2] * cb2[i];
                error  += (data[i] - sblock_data[i]) *
                          (data[i] - sblock_data[i]);
            }
        }
        if (error < best_error) {
            best_error = error;
            gain       = n;
        }
    }
    put_bits(pb, 7, cba_idx);
    put_bits(pb, 8, gain);
    put_bits(pb, 7, cb1_idx);
    put_bits(pb, 7, cb2_idx);
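
    /*
     * Run the synthesis routine shared with the decoder so that the
     * encoder's adaptive codebook and filter memory track the state a
     * decoder would have after decoding this subblock.
     */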
    ff_subblock_synthesis(ractx, lpc_coefs, cba_idx, cb1_idx, cb2_idx, rms,
                          gain);
}

static int ra144_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                              const AVFrame *frame, int *got_packet_ptr)
{
    static const uint8_t sizes[LPC_ORDER]     = {64, 32, 32, 16, 16, 8, 8, 8, 8, 4};
    static const uint8_t bit_sizes[LPC_ORDER] = { 6,  5,  5,  4,  4, 3, 3, 3, 3, 2};
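    /*
     * The reflection coefficient codebooks shrink with increasing order; the
     * bit widths above sum to 38 bits, which together with the 5-bit energy
     * index and four 29-bit (7 + 8 + 7 + 7) subblocks gives 159 bits per
     * frame.
     */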
    RA144Context *ractx = avctx->priv_data;
    PutBitContext pb;
    int32_t lpc_data[NBLOCKS * BLOCKSIZE];
    int32_t lpc_coefs[LPC_ORDER][MAX_LPC_ORDER];
    int shift[LPC_ORDER];
    int16_t block_coefs[NBLOCKS][LPC_ORDER];
    int lpc_refl[LPC_ORDER];        /**< reflection coefficients of the frame */
    unsigned int refl_rms[NBLOCKS]; /**< RMS of the reflection coefficients */
    const int16_t *samples = frame ? (const int16_t *)frame->data[0] : NULL;
    int energy = 0;
    int i, idx, ret;

    if (ractx->last_frame)
        return 0;

    if ((ret = ff_alloc_packet2(avctx, avpkt, FRAMESIZE)))
        return ret;

    /**
     * Since the LPC coefficients are calculated on a frame centered over the
     * fourth subframe, to encode a given frame, data from the next frame is
     * needed. In each call to this function, the previous frame (whose data
     * are saved in the encoder context) is encoded, and data from the current
     * frame are saved in the encoder context to be used in the next function
     * call.
     */
    for (i = 0; i < (2 * BLOCKSIZE + BLOCKSIZE / 2); i++) {
        lpc_data[i] = ractx->curr_block[BLOCKSIZE + BLOCKSIZE / 2 + i];
        energy += (lpc_data[i] * lpc_data[i]) >> 4;
    }
    if (frame) {
        int j;
        for (j = 0; j < frame->nb_samples && i < NBLOCKS * BLOCKSIZE; i++, j++) {
            lpc_data[i] = samples[j] >> 2;
            energy += (lpc_data[i] * lpc_data[i]) >> 4;
        }
    }
    if (i < NBLOCKS * BLOCKSIZE)
        memset(&lpc_data[i], 0, (NBLOCKS * BLOCKSIZE - i) * sizeof(*lpc_data));

    energy = ff_energy_tab[quantize(ff_t_sqrt(energy >> 5) >> 10, ff_energy_tab,
                                    32)];

    ff_lpc_calc_coefs(&ractx->lpc_ctx, lpc_data, NBLOCKS * BLOCKSIZE, LPC_ORDER,
                      LPC_ORDER, 16, lpc_coefs, shift, FF_LPC_TYPE_LEVINSON,
                      0, ORDER_METHOD_EST, 12, 0);
    for (i = 0; i < LPC_ORDER; i++)
        block_coefs[NBLOCKS - 1][i] = -(lpc_coefs[LPC_ORDER - 1][i] <<
                                        (12 - shift[LPC_ORDER - 1]));

    /**
     * TODO: apply perceptual weighting of the input speech through bandwidth
     * expansion of the LPC filter.
     */

    if (ff_eval_refl(lpc_refl, block_coefs[NBLOCKS - 1], avctx)) {
        /**
         * The filter is unstable: use the coefficients of the previous frame.
         */
        ff_int_to_int16(block_coefs[NBLOCKS - 1], ractx->lpc_coef[1]);
        if (ff_eval_refl(lpc_refl, block_coefs[NBLOCKS - 1], avctx)) {
            /* The filter is still unstable: set the reflection coefficients to zero. */
            memset(lpc_refl, 0, sizeof(lpc_refl));
        }
    }
    init_put_bits(&pb, avpkt->data, avpkt->size);
    for (i = 0; i < LPC_ORDER; i++) {
        idx = quantize(lpc_refl[i], ff_lpc_refl_cb[i], sizes[i]);
        put_bits(&pb, bit_sizes[i], idx);
        lpc_refl[i] = ff_lpc_refl_cb[i][idx];
    }
    ractx->lpc_refl_rms[0] = ff_rms(lpc_refl);
    ff_eval_coefs(ractx->lpc_coef[0], lpc_refl);
    refl_rms[0] = ff_interp(ractx, block_coefs[0], 1, 1, ractx->old_energy);
    refl_rms[1] = ff_interp(ractx, block_coefs[1], 2,
                            energy <= ractx->old_energy,
                            ff_t_sqrt(energy * ractx->old_energy) >> 12);
    refl_rms[2] = ff_interp(ractx, block_coefs[2], 3, 0, energy);
    refl_rms[3] = ff_rescale_rms(ractx->lpc_refl_rms[0], energy);
    ff_int_to_int16(block_coefs[NBLOCKS - 1], ractx->lpc_coef[0]);

    put_bits(&pb, 5, quantize(energy, ff_energy_tab, 32));

    for (i = 0; i < NBLOCKS; i++)
        ra144_encode_subblock(ractx, ractx->curr_block + i * BLOCKSIZE,
                              block_coefs[i], refl_rms[i], &pb);
    flush_put_bits(&pb);

    ractx->old_energy = energy;
    ractx->lpc_refl_rms[1] = ractx->lpc_refl_rms[0];
    FFSWAP(unsigned int *, ractx->lpc_coef[0], ractx->lpc_coef[1]);

    /* copy input samples to current block for processing in next call */
    i = 0;
    if (frame) {
        for (; i < frame->nb_samples; i++)
            ractx->curr_block[i] = samples[i] >> 2;

        if ((ret = ff_af_queue_add(&ractx->afq, frame)) < 0)
            return ret;
    } else
        ractx->last_frame = 1;
    memset(&ractx->curr_block[i], 0,
           (NBLOCKS * BLOCKSIZE - i) * sizeof(*ractx->curr_block));

    /* Get the next frame pts/duration */
    ff_af_queue_remove(&ractx->afq, avctx->frame_size, &avpkt->pts,
                       &avpkt->duration);

    avpkt->size = FRAMESIZE;
    *got_packet_ptr = 1;
    return 0;
}

AVCodec ff_ra_144_encoder = {
    .name           = "real_144",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_RA_144,
    .priv_data_size = sizeof(RA144Context),
    .init           = ra144_encode_init,
    .encode2        = ra144_encode_frame,
    .close          = ra144_encode_close,
    .capabilities   = CODEC_CAP_DELAY | CODEC_CAP_SMALL_LAST_FRAME,
    .sample_fmts    = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
                                                     AV_SAMPLE_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("RealAudio 1.0 (14.4K)"),
};