You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

319 lines
11KB

  1. /*
  2. * AAC encoder psychoacoustic model
  3. * Copyright (C) 2008 Konstantin Shishkov
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file libavcodec/aacpsy.c
  23. * AAC encoder psychoacoustic model
  24. */
  25. #include "avcodec.h"
  26. #include "aactab.h"
  27. #include "psymodel.h"
  28. /***********************************
  29. * TODOs:
  30. * thresholds linearization after their modifications for attaining given bitrate
  31. * try other bitrate controlling mechanism (maybe use ratecontrol.c?)
  32. * control quality for quality-based output
  33. **********************************/
  34. /**
  35. * constants for 3GPP AAC psychoacoustic model
  36. * @{
  37. */
  38. #define PSY_3GPP_SPREAD_LOW 1.5f // spreading factor for ascending threshold spreading (15 dB/Bark)
  39. #define PSY_3GPP_SPREAD_HI 3.0f // spreading factor for descending threshold spreading (30 dB/Bark)
  40. #define PSY_3GPP_RPEMIN 0.01f
  41. #define PSY_3GPP_RPELEV 2.0f
  42. /**
  43. * @}
  44. */
/**
 * information for single band used by 3GPP TS26.403-inspired psychoacoustic model
 */
typedef struct Psy3gppBand{
    float energy;    ///< band energy (normalized, see psy_3gpp_analyze())
    float ffac;      ///< form factor (NOTE(review): not written by this file — presumably filled by the encoder; confirm)
    float thr;       ///< energy threshold
    float min_snr;   ///< minimal SNR (NOTE(review): not written by this file either — confirm owner)
    float thr_quiet; ///< threshold in quiet
}Psy3gppBand;
/**
 * single/pair channel context for psychoacoustic model
 */
typedef struct Psy3gppChannel{
    Psy3gppBand band[128];               ///< bands information, indexed as window*16 + band (see psy_3gpp_analyze())
    Psy3gppBand prev_band[128];          ///< bands information from the previous frame, same layout
    float       win_energy;              ///< sliding average of channel energy, updated by the block switcher
    float       iir_state[2];            ///< hi-pass IIR filter state for attack detection
    uint8_t     next_grouping;           ///< stored grouping scheme for the next frame (in case of 8 short window sequence), one bit per window
    enum WindowSequence next_window_seq; ///< window sequence to be used in the next frame
}Psy3gppChannel;
/**
 * psychoacoustic model frame type-dependent coefficients,
 * precomputed once per window kind (long/short) in psy_3gpp_init()
 */
typedef struct Psy3gppCoeffs{
    float ath       [64]; ///< absolute threshold of hearing per band, stored relative to the global ATH minimum
    float barks     [64]; ///< Bark value for each spectral band in long frame
    float spread_low[64]; ///< spreading factor for low-to-high threshold spreading in long frame
    float spread_hi [64]; ///< spreading factor for high-to-low threshold spreading in long frame
}Psy3gppCoeffs;
/**
 * 3GPP TS26.403-inspired psychoacoustic model specific data
 */
typedef struct Psy3gppContext{
    Psy3gppCoeffs  psy_coef[2]; ///< [0] = long-window tables, [1] = short-window tables
    Psy3gppChannel *ch;         ///< per-channel state, one entry per coded channel
}Psy3gppContext;
  82. /**
  83. * Calculate Bark value for given line.
  84. */
  85. static av_cold float calc_bark(float f)
  86. {
  87. return 13.3f * atanf(0.00076f * f) + 3.5f * atanf((f / 7500.0f) * (f / 7500.0f));
  88. }
  89. #define ATH_ADD 4
  90. /**
  91. * Calculate ATH value for given frequency.
  92. * Borrowed from Lame.
  93. */
  94. static av_cold float ath(float f, float add)
  95. {
  96. f /= 1000.0f;
  97. return 3.64 * pow(f, -0.8)
  98. - 6.8 * exp(-0.6 * (f - 3.4) * (f - 3.4))
  99. + 6.0 * exp(-0.15 * (f - 8.7) * (f - 8.7))
  100. + (0.6 + 0.04 * add) * 0.001 * f * f * f * f;
  101. }
  102. static av_cold int psy_3gpp_init(FFPsyContext *ctx) {
  103. Psy3gppContext *pctx;
  104. float barks[1024];
  105. int i, j, g, start;
  106. float prev, minscale, minath;
  107. ctx->model_priv_data = av_mallocz(sizeof(Psy3gppContext));
  108. pctx = (Psy3gppContext*) ctx->model_priv_data;
  109. for (i = 0; i < 1024; i++)
  110. barks[i] = calc_bark(i * ctx->avctx->sample_rate / 2048.0);
  111. minath = ath(3410, ATH_ADD);
  112. for (j = 0; j < 2; j++) {
  113. Psy3gppCoeffs *coeffs = &pctx->psy_coef[j];
  114. i = 0;
  115. prev = 0.0;
  116. for (g = 0; g < ctx->num_bands[j]; g++) {
  117. i += ctx->bands[j][g];
  118. coeffs->barks[g] = (barks[i - 1] + prev) / 2.0;
  119. prev = barks[i - 1];
  120. }
  121. for (g = 0; g < ctx->num_bands[j] - 1; g++) {
  122. coeffs->spread_low[g] = pow(10.0, -(coeffs->barks[g+1] - coeffs->barks[g]) * PSY_3GPP_SPREAD_LOW);
  123. coeffs->spread_hi [g] = pow(10.0, -(coeffs->barks[g+1] - coeffs->barks[g]) * PSY_3GPP_SPREAD_HI);
  124. }
  125. start = 0;
  126. for (g = 0; g < ctx->num_bands[j]; g++) {
  127. minscale = ath(ctx->avctx->sample_rate * start / 1024.0, ATH_ADD);
  128. for (i = 1; i < ctx->bands[j][g]; i++)
  129. minscale = FFMIN(minscale, ath(ctx->avctx->sample_rate * (start + i) / 1024.0 / 2.0, ATH_ADD));
  130. coeffs->ath[g] = minscale - minath;
  131. start += ctx->bands[j][g];
  132. }
  133. }
  134. pctx->ch = av_mallocz(sizeof(Psy3gppChannel) * ctx->avctx->channels);
  135. return 0;
  136. }
  137. /**
  138. * IIR filter used in block switching decision
  139. */
  140. static float iir_filter(int in, float state[2])
  141. {
  142. float ret;
  143. ret = 0.7548f * (in - state[0]) + 0.5095f * state[1];
  144. state[0] = in;
  145. state[1] = ret;
  146. return ret;
  147. }
/**
 * window grouping information stored as bits (0 - new group, 1 - group continues);
 * indexed by the attack position (0 = no attack, 1..8 = attack in that subwindow)
 */
static const uint8_t window_grouping[9] = {
    0xB6, 0x6C, 0xD8, 0xB2, 0x66, 0xC6, 0x96, 0x36, 0x36
};
/**
 * Tell encoder which window types to use.
 * @see 3GPP TS26.403 5.4.1 "Blockswitching"
 *
 * @param ctx       psychoacoustic model context
 * @param audio     current frame samples (interleaved; stride = channel count)
 * @param la        lookahead samples, may be NULL (e.g. at end of stream)
 * @param channel   channel index into the model's per-channel state
 * @param prev_type window sequence used for the previous frame
 * @return          window info (types, shape, grouping) for the current frame
 */
static FFPsyWindowInfo psy_3gpp_window(FFPsyContext *ctx,
                                       const int16_t *audio, const int16_t *la,
                                       int channel, int prev_type)
{
    int i, j;
    int br = ctx->avctx->bit_rate / ctx->avctx->channels;
    /* low bitrates get a more sensitive attack threshold */
    int attack_ratio = br <= 16000 ? 18 : 10;
    Psy3gppContext *pctx = (Psy3gppContext*) ctx->model_priv_data;
    Psy3gppChannel *pch  = &pctx->ch[channel];
    uint8_t grouping = 0;
    FFPsyWindowInfo wi;

    memset(&wi, 0, sizeof(wi));
    if (la) {
        /* lookahead is available: run the attack detector */
        float s[8], v;
        int switch_to_eight = 0;
        float sum = 0.0, sum2 = 0.0;
        int attack_n = 0;
        /* high-pass filter the input and accumulate energy over eight
         * 128-sample subwindows.
         * NOTE(review): sum is never reset between subwindows, so s[i] holds
         * the cumulative energy of subwindows 0..i rather than the energy of
         * subwindow i alone — verify this is intentional */
        for (i = 0; i < 8; i++) {
            for (j = 0; j < 128; j++) {
                v = iir_filter(audio[(i*128+j)*ctx->avctx->channels], pch->iir_state);
                sum += v*v;
            }
            s[i] = sum;
            sum2 += sum;
        }
        /* an attack is the first subwindow whose energy exceeds the sliding
         * channel-energy average by attack_ratio */
        for (i = 0; i < 8; i++) {
            if (s[i] > pch->win_energy * attack_ratio) {
                attack_n = i + 1;
                switch_to_eight = 1;
                break;
            }
        }
        /* update sliding channel-energy average (7/8 decay) */
        pch->win_energy = pch->win_energy*7/8 + sum2/64;

        wi.window_type[1] = prev_type;
        /* window sequence state machine:
         * ONLY_LONG -> LONG_START -> EIGHT_SHORT -> LONG_STOP -> ONLY_LONG */
        switch (prev_type) {
        case ONLY_LONG_SEQUENCE:
            wi.window_type[0] = switch_to_eight ? LONG_START_SEQUENCE : ONLY_LONG_SEQUENCE;
            break;
        case LONG_START_SEQUENCE:
            /* a start window is always followed by short windows */
            wi.window_type[0] = EIGHT_SHORT_SEQUENCE;
            grouping = pch->next_grouping;
            break;
        case LONG_STOP_SEQUENCE:
            wi.window_type[0] = ONLY_LONG_SEQUENCE;
            break;
        case EIGHT_SHORT_SEQUENCE:
            /* stay short while attacks persist, otherwise transition out */
            wi.window_type[0] = switch_to_eight ? EIGHT_SHORT_SEQUENCE : LONG_STOP_SEQUENCE;
            grouping = switch_to_eight ? pch->next_grouping : 0;
            break;
        }
        /* remember the grouping implied by the attack position for the frame
         * that will actually code the short windows */
        pch->next_grouping = window_grouping[attack_n];
    } else {
        /* no lookahead: keep the previous window type */
        for (i = 0; i < 3; i++)
            wi.window_type[i] = prev_type;
        grouping = (prev_type == EIGHT_SHORT_SEQUENCE) ? window_grouping[0] : 0;
    }

    /* NOTE(review): window shape is hard-coded to 1 — presumably the KBD
     * window; confirm against the encoder's windowing code */
    wi.window_shape = 1;
    if (wi.window_type[0] != EIGHT_SHORT_SEQUENCE) {
        wi.num_windows = 1;
        wi.grouping[0] = 1;
    } else {
        /* expand the grouping bitmask (0 bit = new group starts) into
         * per-group window counts */
        int lastgrp = 0;
        wi.num_windows = 8;
        for (i = 0; i < 8; i++) {
            if (!((grouping >> i) & 1))
                lastgrp = i;
            wi.grouping[lastgrp]++;
        }
    }
    return wi;
}
/**
 * Calculate band thresholds as suggested in 3GPP TS26.403
 *
 * @param ctx     psychoacoustic model context
 * @param channel channel index into the model's per-channel state
 * @param coefs   spectral coefficients for this channel
 * @param wi      window info produced by psy_3gpp_window()
 */
static void psy_3gpp_analyze(FFPsyContext *ctx, int channel,
                             const float *coefs, FFPsyWindowInfo *wi)
{
    Psy3gppContext *pctx = (Psy3gppContext*) ctx->model_priv_data;
    Psy3gppChannel *pch  = &pctx->ch[channel];
    int start = 0;
    int i, w, g;
    /* index 1 selects the short-window band tables when 8 windows are coded */
    const int num_bands = ctx->num_bands[wi->num_windows == 8];
    const uint8_t* band_sizes = ctx->bands[wi->num_windows == 8];
    Psy3gppCoeffs *coeffs = &pctx->psy_coef[wi->num_windows == 8];

    //calculate energies, initial thresholds and related values - 5.4.2 "Threshold Calculation"
    /* bands are laid out 16 per window: band index = window*16 + g */
    for (w = 0; w < wi->num_windows*16; w += 16) {
        for (g = 0; g < num_bands; g++) {
            Psy3gppBand *band = &pch->band[w+g];
            band->energy = 0.0f;
            for (i = 0; i < band_sizes[g]; i++)
                band->energy += coefs[start+i] * coefs[start+i];
            /* normalize energy; assumes coefficients are scaled by 512 — TODO confirm */
            band->energy *= 1.0f / (512*512);
            /* initial threshold ~29 dB below band energy (0.001258925 ≈ 10^-2.9) */
            band->thr = band->energy * 0.001258925f;
            start += band_sizes[g];

            ctx->psy_bands[channel*PSY_MAX_BANDS+w+g].energy = band->energy;
        }
    }

    //modify thresholds - spread, threshold in quiet - 5.4.3 "Spreaded Energy Calculation"
    for (w = 0; w < wi->num_windows*16; w += 16) {
        Psy3gppBand *band = &pch->band[w];
        /* spread energy upward (low band masks higher bands) ... */
        for (g = 1; g < num_bands; g++)
            band[g].thr = FFMAX(band[g].thr, band[g-1].thr * coeffs->spread_low[g-1]);
        /* ... and downward (high band masks lower bands) */
        for (g = num_bands - 2; g >= 0; g--)
            band[g].thr = FFMAX(band[g].thr, band[g+1].thr * coeffs->spread_hi [g]);
        for (g = 0; g < num_bands; g++) {
            /* clamp against the absolute threshold of hearing */
            band[g].thr_quiet = FFMAX(band[g].thr, coeffs->ath[g]);
            /* for long windows not adjacent to a short sequence, limit the
             * threshold by the previous frame's value (RPELEV/RPEMIN bounds) */
            if (wi->num_windows != 8 && wi->window_type[1] != EIGHT_SHORT_SEQUENCE)
                band[g].thr_quiet = FFMAX(PSY_3GPP_RPEMIN*band[g].thr_quiet,
                                          FFMIN(band[g].thr_quiet,
                                                PSY_3GPP_RPELEV*pch->prev_band[w+g].thr_quiet));
            band[g].thr = FFMAX(band[g].thr, band[g].thr_quiet * 0.25);

            ctx->psy_bands[channel*PSY_MAX_BANDS+w+g].threshold = band[g].thr;
        }
    }
    /* keep this frame's bands for next frame's threshold limiting */
    memcpy(pch->prev_band, pch->band, sizeof(pch->band));
}
  274. static av_cold void psy_3gpp_end(FFPsyContext *apc)
  275. {
  276. Psy3gppContext *pctx = (Psy3gppContext*) apc->model_priv_data;
  277. av_freep(&pctx->ch);
  278. av_freep(&apc->model_priv_data);
  279. }
/* model descriptor exposed to the generic psychoacoustic layer (psymodel.h) */
const FFPsyModel ff_aac_psy_model =
{
    .name    = "3GPP TS 26.403-inspired model",
    .init    = psy_3gpp_init,
    .window  = psy_3gpp_window,
    .analyze = psy_3gpp_analyze,
    .end     = psy_3gpp_end,
};