The JUCE cross-platform C++ framework, with DISTRHO/KXStudio specific changes
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

668 lines
25KB

  1. /* libFLAC - Free Lossless Audio Codec library
  2. * Copyright (C) 2000-2009 Josh Coalson
  3. * Copyright (C) 2011-2023 Xiph.Org Foundation
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions
  7. * are met:
  8. *
  9. * - Redistributions of source code must retain the above copyright
  10. * notice, this list of conditions and the following disclaimer.
  11. *
  12. * - Redistributions in binary form must reproduce the above copyright
  13. * notice, this list of conditions and the following disclaimer in the
  14. * documentation and/or other materials provided with the distribution.
  15. *
  16. * - Neither the name of the Xiph.org Foundation nor the names of its
  17. * contributors may be used to endorse or promote products derived from
  18. * this software without specific prior written permission.
  19. *
  20. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  21. * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  22. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  23. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
  24. * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  25. * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  26. * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
  27. * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  28. * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  29. * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  30. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  31. */
  32. #ifdef HAVE_CONFIG_H
  33. # include <config.h>
  34. #endif
  35. #include <math.h>
  36. #include <string.h>
  37. #include "../compat.h"
  38. #include "include/private/bitmath.h"
  39. #include "include/private/fixed.h"
  40. #include "../assert.h"
/*
 * Local absolute-value helpers, returning the magnitude as an unsigned
 * value so the full range of the signed type can be represented.
 * NOTE: `-(x)` is undefined behavior when x is the minimum value of its
 * type (INT32_MIN / INT64_MIN); callers must never pass that value —
 * the *_limit_residual functions below explicitly invalidate orders
 * whose residuals could reach it.
 * NOTE: function-like macros — the argument is evaluated twice, so do
 * not pass expressions with side effects.
 */
#ifdef local_abs
#undef local_abs
#endif
#define local_abs(x) ((uint32_t)((x)<0? -(x) : (x)))
#ifdef local_abs64
#undef local_abs64
#endif
#define local_abs64(x) ((uint64_t)((x)<0? -(x) : (x)))
  49. #ifdef FLAC__INTEGER_ONLY_LIBRARY
/* rbps stands for residual bits per sample
 *
 *              ( ln(2) * err )
 * rbps = log2 ( ------------ )
 *              (      n      )
 *
 * i.e. the expected number of bits per residual sample, where err is the
 * sum of |residual| over the block and n is the number of samples.
 * Returns the estimate as a FLAC__fixedpoint with 16 fractional bits,
 * or 0 when err/n <= 1 (estimate would be non-positive).
 */
static FLAC__fixedpoint local__compute_rbps_integerized(FLAC__uint32 err, FLAC__uint32 n)
{
	FLAC__uint32 rbps;
	uint32_t bits; /* the number of bits required to represent a number */
	int fracbits; /* the number of bits of rbps that comprise the fractional part */

	FLAC__ASSERT(sizeof(rbps) == sizeof(FLAC__fixedpoint));
	FLAC__ASSERT(err > 0);
	FLAC__ASSERT(n > 0);
	FLAC__ASSERT(n <= FLAC__MAX_BLOCK_SIZE);

	if(err <= n)
		return 0;
	/*
	 * The above two things tell us 1) n fits in 16 bits; 2) err/n > 1.
	 * These allow us later to know we won't lose too much precision in the
	 * fixed-point division (err<<fracbits)/n.
	 */
	/* use every leading zero bit of err as fractional precision */
	fracbits = (8*sizeof(err)) - (FLAC__bitmath_ilog2(err)+1);
	err <<= fracbits;
	err /= n;
	/* err now holds err/n with fracbits fractional bits */
	/*
	 * Whittle err down to 16 bits max. 16 significant bits is enough for
	 * our purposes.
	 */
	FLAC__ASSERT(err > 0);
	bits = FLAC__bitmath_ilog2(err)+1;
	if(bits > 16) {
		err >>= (bits-16);
		fracbits -= (bits-16);
	}
	rbps = (FLAC__uint32)err;

	/* Multiply by fixed-point version of ln(2), with 16 fractional bits */
	rbps *= FLAC__FP_LN2;
	fracbits += 16;
	FLAC__ASSERT(fracbits >= 0);

	/* FLAC__fixedpoint_log2 requires fracbits%4 to be 0 */
	{
		const int f = fracbits & 3;
		if(f) {
			rbps >>= f;
			fracbits -= f;
		}
	}

	rbps = FLAC__fixedpoint_log2(rbps, fracbits, (uint32_t)(-1));

	/* log2 of a value <= 1 (in fixed point) comes back as 0 */
	if(rbps == 0)
		return 0;

	/*
	 * The return value must have 16 fractional bits. Since the whole part
	 * of the base-2 log of a 32 bit number must fit in 5 bits, and fracbits
	 * must be >= -3, these assertion allows us to be able to shift rbps
	 * left if necessary to get 16 fracbits without losing any bits of the
	 * whole part of rbps.
	 *
	 * There is a slight chance due to accumulated error that the whole part
	 * will require 6 bits, so we use 6 in the assertion. Really though as
	 * long as it fits in 13 bits (32 - (16 - (-3))) we are fine.
	 */
	FLAC__ASSERT((int)FLAC__bitmath_ilog2(rbps)+1 <= fracbits + 6);
	FLAC__ASSERT(fracbits >= -3);

	/* now shift the decimal point into place */
	if(fracbits < 16)
		return rbps << (16-fracbits);
	else if(fracbits > 16)
		return rbps >> (fracbits-16);
	else
		return rbps;
}
/*
 * 64-bit-error variant of local__compute_rbps_integerized(): identical
 * algorithm, but err arrives as a FLAC__uint64 (so the initial
 * normalization and division are done in 64 bits before the value is
 * whittled down to <= 16 significant bits and handed to the same 32-bit
 * fixed-point log2). Returns rbps with 16 fractional bits, or 0 when
 * err/n <= 1.
 */
static FLAC__fixedpoint local__compute_rbps_wide_integerized(FLAC__uint64 err, FLAC__uint32 n)
{
	FLAC__uint32 rbps;
	uint32_t bits; /* the number of bits required to represent a number */
	int fracbits; /* the number of bits of rbps that comprise the fractional part */

	FLAC__ASSERT(sizeof(rbps) == sizeof(FLAC__fixedpoint));
	FLAC__ASSERT(err > 0);
	FLAC__ASSERT(n > 0);
	FLAC__ASSERT(n <= FLAC__MAX_BLOCK_SIZE);

	if(err <= n)
		return 0;
	/*
	 * The above two things tell us 1) n fits in 16 bits; 2) err/n > 1.
	 * These allow us later to know we won't lose too much precision in the
	 * fixed-point division (err<<fracbits)/n.
	 */
	/* use every leading zero bit of the 64-bit err as fractional precision */
	fracbits = (8*sizeof(err)) - (FLAC__bitmath_ilog2_wide(err)+1);
	err <<= fracbits;
	err /= n;
	/* err now holds err/n with fracbits fractional bits */
	/*
	 * Whittle err down to 16 bits max. 16 significant bits is enough for
	 * our purposes.
	 */
	FLAC__ASSERT(err > 0);
	bits = FLAC__bitmath_ilog2_wide(err)+1;
	if(bits > 16) {
		err >>= (bits-16);
		fracbits -= (bits-16);
	}
	/* <= 16 significant bits remain, so this narrowing loses nothing */
	rbps = (FLAC__uint32)err;

	/* Multiply by fixed-point version of ln(2), with 16 fractional bits */
	rbps *= FLAC__FP_LN2;
	fracbits += 16;
	FLAC__ASSERT(fracbits >= 0);

	/* FLAC__fixedpoint_log2 requires fracbits%4 to be 0 */
	{
		const int f = fracbits & 3;
		if(f) {
			rbps >>= f;
			fracbits -= f;
		}
	}

	rbps = FLAC__fixedpoint_log2(rbps, fracbits, (uint32_t)(-1));

	if(rbps == 0)
		return 0;

	/*
	 * The return value must have 16 fractional bits. Since the whole part
	 * of the base-2 log of a 32 bit number must fit in 5 bits, and fracbits
	 * must be >= -3, these assertion allows us to be able to shift rbps
	 * left if necessary to get 16 fracbits without losing any bits of the
	 * whole part of rbps.
	 *
	 * There is a slight chance due to accumulated error that the whole part
	 * will require 6 bits, so we use 6 in the assertion. Really though as
	 * long as it fits in 13 bits (32 - (16 - (-3))) we are fine.
	 */
	FLAC__ASSERT((int)FLAC__bitmath_ilog2(rbps)+1 <= fracbits + 6);
	FLAC__ASSERT(fracbits >= -3);

	/* now shift the decimal point into place */
	if(fracbits < 16)
		return rbps << (16-fracbits);
	else if(fracbits > 16)
		return rbps >> (fracbits-16);
	else
		return rbps;
}
  190. #endif
#ifndef FLAC__INTEGER_ONLY_LIBRARY
uint32_t FLAC__fixed_compute_best_predictor(const FLAC__int32 data[], uint32_t data_len, float residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
#else
uint32_t FLAC__fixed_compute_best_predictor(const FLAC__int32 data[], uint32_t data_len, FLAC__fixedpoint residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
#endif
{
	/*
	 * Evaluate all fixed predictor orders 0..4 over data[0..data_len-1]
	 * in a single pass, return the order with the smallest total
	 * |residual| (lower order wins ties), and fill
	 * residual_bits_per_sample[0..4] with the estimated bits/sample for
	 * each order. The order-k residual at index i reads back to
	 * data[i-k], so the caller must provide 4 valid warmup samples
	 * before data[0].
	 *
	 * NOTE: the accumulators here are only 32 bits wide and may overflow
	 * for erratic signals with large blocksize/bits-per-sample; the
	 * _wide variant below uses 64-bit accumulators for that case.
	 */
	FLAC__uint32 total_error_0 = 0, total_error_1 = 0, total_error_2 = 0, total_error_3 = 0, total_error_4 = 0;
	uint32_t order;
#if 0
	/* This code has been around a long time, and was written when compilers weren't able
	 * to vectorize code. These days, compilers are better in optimizing the next block
	 * which is also much more readable
	 */
	FLAC__int32 last_error_0 = data[-1];
	FLAC__int32 last_error_1 = data[-1] - data[-2];
	FLAC__int32 last_error_2 = last_error_1 - (data[-2] - data[-3]);
	FLAC__int32 last_error_3 = last_error_2 - (data[-2] - 2*data[-3] + data[-4]);
	FLAC__int32 error, save;
	uint32_t i;
	/* historical incremental formulation: each order-k residual is the
	 * difference of consecutive order-(k-1) residuals
	 */
	for(i = 0; i < data_len; i++) {
		error = data[i] ; total_error_0 += local_abs(error); save = error;
		error -= last_error_0; total_error_1 += local_abs(error); last_error_0 = save; save = error;
		error -= last_error_1; total_error_2 += local_abs(error); last_error_1 = save; save = error;
		error -= last_error_2; total_error_3 += local_abs(error); last_error_2 = save; save = error;
		error -= last_error_3; total_error_4 += local_abs(error); last_error_3 = save;
	}
#else
	int i;
	/* the order-k residual coefficients are the alternating binomial
	 * coefficients (k-th difference of the signal) */
	for(i = 0; i < (int)data_len; i++) {
		total_error_0 += local_abs(data[i]);
		total_error_1 += local_abs(data[i] - data[i-1]);
		total_error_2 += local_abs(data[i] - 2 * data[i-1] + data[i-2]);
		total_error_3 += local_abs(data[i] - 3 * data[i-1] + 3 * data[i-2] - data[i-3]);
		total_error_4 += local_abs(data[i] - 4 * data[i-1] + 6 * data[i-2] - 4 * data[i-3] + data[i-4]);
	}
#endif
	/* prefer lower order */
	if(total_error_0 <= flac_min(flac_min(flac_min(total_error_1, total_error_2), total_error_3), total_error_4))
		order = 0;
	else if(total_error_1 <= flac_min(flac_min(total_error_2, total_error_3), total_error_4))
		order = 1;
	else if(total_error_2 <= flac_min(total_error_3, total_error_4))
		order = 2;
	else if(total_error_3 <= total_error_4)
		order = 3;
	else
		order = 4;
	/* Estimate the expected number of bits per residual signal sample. */
	/* 'total_error*' is linearly related to the variance of the residual */
	/* signal, so we use it directly to compute E(|x|) */
	FLAC__ASSERT(data_len > 0 || total_error_0 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_1 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_2 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_3 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_4 == 0);
#ifndef FLAC__INTEGER_ONLY_LIBRARY
	/* rbps = log2(ln(2) * total_error / n); 0 when the error sum is 0 */
	residual_bits_per_sample[0] = (float)((total_error_0 > 0) ? log(M_LN2 * (double)total_error_0 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[1] = (float)((total_error_1 > 0) ? log(M_LN2 * (double)total_error_1 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[2] = (float)((total_error_2 > 0) ? log(M_LN2 * (double)total_error_2 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[3] = (float)((total_error_3 > 0) ? log(M_LN2 * (double)total_error_3 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[4] = (float)((total_error_4 > 0) ? log(M_LN2 * (double)total_error_4 / (double)data_len) / M_LN2 : 0.0);
#else
	residual_bits_per_sample[0] = (total_error_0 > 0) ? local__compute_rbps_integerized(total_error_0, data_len) : 0;
	residual_bits_per_sample[1] = (total_error_1 > 0) ? local__compute_rbps_integerized(total_error_1, data_len) : 0;
	residual_bits_per_sample[2] = (total_error_2 > 0) ? local__compute_rbps_integerized(total_error_2, data_len) : 0;
	residual_bits_per_sample[3] = (total_error_3 > 0) ? local__compute_rbps_integerized(total_error_3, data_len) : 0;
	residual_bits_per_sample[4] = (total_error_4 > 0) ? local__compute_rbps_integerized(total_error_4, data_len) : 0;
#endif
	return order;
}
  265. #ifndef FLAC__INTEGER_ONLY_LIBRARY
  266. uint32_t FLAC__fixed_compute_best_predictor_wide(const FLAC__int32 data[], uint32_t data_len, float residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
  267. #else
  268. uint32_t FLAC__fixed_compute_best_predictor_wide(const FLAC__int32 data[], uint32_t data_len, FLAC__fixedpoint residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
  269. #endif
  270. {
  271. FLAC__uint64 total_error_0 = 0, total_error_1 = 0, total_error_2 = 0, total_error_3 = 0, total_error_4 = 0;
  272. uint32_t order;
  273. int i;
  274. for(i = 0; i < (int)data_len; i++) {
  275. total_error_0 += local_abs(data[i]);
  276. total_error_1 += local_abs(data[i] - data[i-1]);
  277. total_error_2 += local_abs(data[i] - 2 * data[i-1] + data[i-2]);
  278. total_error_3 += local_abs(data[i] - 3 * data[i-1] + 3 * data[i-2] - data[i-3]);
  279. total_error_4 += local_abs(data[i] - 4 * data[i-1] + 6 * data[i-2] - 4 * data[i-3] + data[i-4]);
  280. }
  281. /* prefer lower order */
  282. if(total_error_0 <= flac_min(flac_min(flac_min(total_error_1, total_error_2), total_error_3), total_error_4))
  283. order = 0;
  284. else if(total_error_1 <= flac_min(flac_min(total_error_2, total_error_3), total_error_4))
  285. order = 1;
  286. else if(total_error_2 <= flac_min(total_error_3, total_error_4))
  287. order = 2;
  288. else if(total_error_3 <= total_error_4)
  289. order = 3;
  290. else
  291. order = 4;
  292. /* Estimate the expected number of bits per residual signal sample. */
  293. /* 'total_error*' is linearly related to the variance of the residual */
  294. /* signal, so we use it directly to compute E(|x|) */
  295. FLAC__ASSERT(data_len > 0 || total_error_0 == 0);
  296. FLAC__ASSERT(data_len > 0 || total_error_1 == 0);
  297. FLAC__ASSERT(data_len > 0 || total_error_2 == 0);
  298. FLAC__ASSERT(data_len > 0 || total_error_3 == 0);
  299. FLAC__ASSERT(data_len > 0 || total_error_4 == 0);
  300. #ifndef FLAC__INTEGER_ONLY_LIBRARY
  301. residual_bits_per_sample[0] = (float)((total_error_0 > 0) ? log(M_LN2 * (double)total_error_0 / (double)data_len) / M_LN2 : 0.0);
  302. residual_bits_per_sample[1] = (float)((total_error_1 > 0) ? log(M_LN2 * (double)total_error_1 / (double)data_len) / M_LN2 : 0.0);
  303. residual_bits_per_sample[2] = (float)((total_error_2 > 0) ? log(M_LN2 * (double)total_error_2 / (double)data_len) / M_LN2 : 0.0);
  304. residual_bits_per_sample[3] = (float)((total_error_3 > 0) ? log(M_LN2 * (double)total_error_3 / (double)data_len) / M_LN2 : 0.0);
  305. residual_bits_per_sample[4] = (float)((total_error_4 > 0) ? log(M_LN2 * (double)total_error_4 / (double)data_len) / M_LN2 : 0.0);
  306. #else
  307. residual_bits_per_sample[0] = (total_error_0 > 0) ? local__compute_rbps_wide_integerized(total_error_0, data_len) : 0;
  308. residual_bits_per_sample[1] = (total_error_1 > 0) ? local__compute_rbps_wide_integerized(total_error_1, data_len) : 0;
  309. residual_bits_per_sample[2] = (total_error_2 > 0) ? local__compute_rbps_wide_integerized(total_error_2, data_len) : 0;
  310. residual_bits_per_sample[3] = (total_error_3 > 0) ? local__compute_rbps_wide_integerized(total_error_3, data_len) : 0;
  311. residual_bits_per_sample[4] = (total_error_4 > 0) ? local__compute_rbps_wide_integerized(total_error_4, data_len) : 0;
  312. #endif
  313. return order;
  314. }
  315. #ifndef FLAC__INTEGER_ONLY_LIBRARY
  316. #define CHECK_ORDER_IS_VALID(macro_order) \
  317. if(order_##macro_order##_is_valid && total_error_##macro_order < smallest_error) { \
  318. order = macro_order; \
  319. smallest_error = total_error_##macro_order ; \
  320. residual_bits_per_sample[ macro_order ] = (float)((total_error_0 > 0) ? log(M_LN2 * (double)total_error_0 / (double)data_len) / M_LN2 : 0.0); \
  321. } \
  322. else \
  323. residual_bits_per_sample[ macro_order ] = 34.0f;
  324. #else
  325. #define CHECK_ORDER_IS_VALID(macro_order) \
  326. if(order_##macro_order##_is_valid && total_error_##macro_order < smallest_error) { \
  327. order = macro_order; \
  328. smallest_error = total_error_##macro_order ; \
  329. residual_bits_per_sample[ macro_order ] = (total_error_##macro_order > 0) ? local__compute_rbps_wide_integerized(total_error_##macro_order, data_len) : 0; \
  330. } \
  331. else \
  332. residual_bits_per_sample[ macro_order ] = 34 * FLAC__FP_ONE;
  333. #endif
#ifndef FLAC__INTEGER_ONLY_LIBRARY
uint32_t FLAC__fixed_compute_best_predictor_limit_residual(const FLAC__int32 data[], uint32_t data_len, float residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
#else
uint32_t FLAC__fixed_compute_best_predictor_limit_residual(const FLAC__int32 data[], uint32_t data_len, FLAC__fixedpoint residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
#endif
{
	/*
	 * Like FLAC__fixed_compute_best_predictor_wide(), but additionally
	 * rejects any order whose residual would not fit in an FLAC__int32:
	 * such an order is marked invalid, is never selected, and gets a
	 * sentinel estimate of 34 bits/sample via CHECK_ORDER_IS_VALID().
	 * Returns the best valid order 0..4 (lower order wins ties via the
	 * strict '<' in the macro); returns 0 if no order is valid.
	 */
	FLAC__uint64 total_error_0 = 0, total_error_1 = 0, total_error_2 = 0, total_error_3 = 0, total_error_4 = 0, smallest_error = UINT64_MAX;
	FLAC__uint64 error_0, error_1, error_2, error_3, error_4;
	FLAC__bool order_0_is_valid = true, order_1_is_valid = true, order_2_is_valid = true, order_3_is_valid = true, order_4_is_valid = true;
	uint32_t order = 0;
	int i;

	/* The loop starts at i = -4 so the warmup region is covered too: each
	 * order k begins accumulating at the first index where its residual
	 * reaches back to data[-4] (the (i > ...) guards hold an order off
	 * until every sample it references exists). The caller must provide
	 * 4 valid samples before data[0]. All intermediate arithmetic is
	 * widened to FLAC__int64 so per-sample terms cannot overflow 32 bits. */
	for(i = -4; i < (int)data_len; i++) {
		error_0 = local_abs64((FLAC__int64)data[i]);
		error_1 = (i > -4) ? local_abs64((FLAC__int64)data[i] - data[i-1]) : 0 ;
		error_2 = (i > -3) ? local_abs64((FLAC__int64)data[i] - 2 * (FLAC__int64)data[i-1] + data[i-2]) : 0;
		error_3 = (i > -2) ? local_abs64((FLAC__int64)data[i] - 3 * (FLAC__int64)data[i-1] + 3 * (FLAC__int64)data[i-2] - data[i-3]) : 0;
		error_4 = (i > -1) ? local_abs64((FLAC__int64)data[i] - 4 * (FLAC__int64)data[i-1] + 6 * (FLAC__int64)data[i-2] - 4 * (FLAC__int64)data[i-3] + data[i-4]) : 0;

		total_error_0 += error_0;
		total_error_1 += error_1;
		total_error_2 += error_2;
		total_error_3 += error_3;
		total_error_4 += error_4;

		/* residual must not be INT32_MIN because abs(INT32_MIN) is undefined */
		if(error_0 > INT32_MAX)
			order_0_is_valid = false;
		if(error_1 > INT32_MAX)
			order_1_is_valid = false;
		if(error_2 > INT32_MAX)
			order_2_is_valid = false;
		if(error_3 > INT32_MAX)
			order_3_is_valid = false;
		if(error_4 > INT32_MAX)
			order_4_is_valid = false;
	}

	/* select the valid order with the smallest total error, low to high */
	CHECK_ORDER_IS_VALID(0);
	CHECK_ORDER_IS_VALID(1);
	CHECK_ORDER_IS_VALID(2);
	CHECK_ORDER_IS_VALID(3);
	CHECK_ORDER_IS_VALID(4);

	return order;
}
#ifndef FLAC__INTEGER_ONLY_LIBRARY
uint32_t FLAC__fixed_compute_best_predictor_limit_residual_33bit(const FLAC__int64 data[], uint32_t data_len, float residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
#else
uint32_t FLAC__fixed_compute_best_predictor_limit_residual_33bit(const FLAC__int64 data[], uint32_t data_len, FLAC__fixedpoint residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
#endif
{
	/*
	 * Same as FLAC__fixed_compute_best_predictor_limit_residual(), but
	 * for samples that arrive as FLAC__int64 (signals wider than 32
	 * bits, so all arithmetic is natively 64-bit). Orders whose
	 * residual would not fit in an FLAC__int32 are invalidated and get
	 * a sentinel of 34 bits/sample; returns the best valid order 0..4,
	 * preferring lower orders, or 0 if none is valid.
	 */
	FLAC__uint64 total_error_0 = 0, total_error_1 = 0, total_error_2 = 0, total_error_3 = 0, total_error_4 = 0, smallest_error = UINT64_MAX;
	FLAC__uint64 error_0, error_1, error_2, error_3, error_4;
	FLAC__bool order_0_is_valid = true, order_1_is_valid = true, order_2_is_valid = true, order_3_is_valid = true, order_4_is_valid = true;
	uint32_t order = 0;
	int i;

	/* i starts at -4 so each order's accumulation also covers the warmup
	 * region back to data[-4]; the (i > ...) guards keep an order out of
	 * the sum until all samples it references exist. The caller must
	 * provide 4 valid samples before data[0]. */
	for(i = -4; i < (int)data_len; i++) {
		error_0 = local_abs64(data[i]);
		error_1 = (i > -4) ? local_abs64(data[i] - data[i-1]) : 0 ;
		error_2 = (i > -3) ? local_abs64(data[i] - 2 * data[i-1] + data[i-2]) : 0;
		error_3 = (i > -2) ? local_abs64(data[i] - 3 * data[i-1] + 3 * data[i-2] - data[i-3]) : 0;
		error_4 = (i > -1) ? local_abs64(data[i] - 4 * data[i-1] + 6 * data[i-2] - 4 * data[i-3] + data[i-4]) : 0;

		total_error_0 += error_0;
		total_error_1 += error_1;
		total_error_2 += error_2;
		total_error_3 += error_3;
		total_error_4 += error_4;

		/* residual must not be INT32_MIN because abs(INT32_MIN) is undefined */
		if(error_0 > INT32_MAX)
			order_0_is_valid = false;
		if(error_1 > INT32_MAX)
			order_1_is_valid = false;
		if(error_2 > INT32_MAX)
			order_2_is_valid = false;
		if(error_3 > INT32_MAX)
			order_3_is_valid = false;
		if(error_4 > INT32_MAX)
			order_4_is_valid = false;
	}

	/* select the valid order with the smallest total error, low to high */
	CHECK_ORDER_IS_VALID(0);
	CHECK_ORDER_IS_VALID(1);
	CHECK_ORDER_IS_VALID(2);
	CHECK_ORDER_IS_VALID(3);
	CHECK_ORDER_IS_VALID(4);

	return order;
}
  416. void FLAC__fixed_compute_residual(const FLAC__int32 data[], uint32_t data_len, uint32_t order, FLAC__int32 residual[])
  417. {
  418. const int idata_len = (int)data_len;
  419. int i;
  420. switch(order) {
  421. case 0:
  422. FLAC__ASSERT(sizeof(residual[0]) == sizeof(data[0]));
  423. memcpy(residual, data, sizeof(residual[0])*data_len);
  424. break;
  425. case 1:
  426. for(i = 0; i < idata_len; i++)
  427. residual[i] = data[i] - data[i-1];
  428. break;
  429. case 2:
  430. for(i = 0; i < idata_len; i++)
  431. residual[i] = data[i] - 2*data[i-1] + data[i-2];
  432. break;
  433. case 3:
  434. for(i = 0; i < idata_len; i++)
  435. residual[i] = data[i] - 3*data[i-1] + 3*data[i-2] - data[i-3];
  436. break;
  437. case 4:
  438. for(i = 0; i < idata_len; i++)
  439. residual[i] = data[i] - 4*data[i-1] + 6*data[i-2] - 4*data[i-3] + data[i-4];
  440. break;
  441. default:
  442. FLAC__ASSERT(0);
  443. }
  444. }
  445. void FLAC__fixed_compute_residual_wide(const FLAC__int32 data[], uint32_t data_len, uint32_t order, FLAC__int32 residual[])
  446. {
  447. const int idata_len = (int)data_len;
  448. int i;
  449. switch(order) {
  450. case 0:
  451. FLAC__ASSERT(sizeof(residual[0]) == sizeof(data[0]));
  452. memcpy(residual, data, sizeof(residual[0])*data_len);
  453. break;
  454. case 1:
  455. for(i = 0; i < idata_len; i++)
  456. residual[i] = (FLAC__int64)data[i] - data[i-1];
  457. break;
  458. case 2:
  459. for(i = 0; i < idata_len; i++)
  460. residual[i] = (FLAC__int64)data[i] - 2*(FLAC__int64)data[i-1] + data[i-2];
  461. break;
  462. case 3:
  463. for(i = 0; i < idata_len; i++)
  464. residual[i] = (FLAC__int64)data[i] - 3*(FLAC__int64)data[i-1] + 3*(FLAC__int64)data[i-2] - data[i-3];
  465. break;
  466. case 4:
  467. for(i = 0; i < idata_len; i++)
  468. residual[i] = (FLAC__int64)data[i] - 4*(FLAC__int64)data[i-1] + 6*(FLAC__int64)data[i-2] - 4*(FLAC__int64)data[i-3] + data[i-4];
  469. break;
  470. default:
  471. FLAC__ASSERT(0);
  472. }
  473. }
  474. void FLAC__fixed_compute_residual_wide_33bit(const FLAC__int64 data[], uint32_t data_len, uint32_t order, FLAC__int32 residual[])
  475. {
  476. const int idata_len = (int)data_len;
  477. int i;
  478. switch(order) {
  479. case 0:
  480. for(i = 0; i < idata_len; i++)
  481. residual[i] = data[i];
  482. break;
  483. case 1:
  484. for(i = 0; i < idata_len; i++)
  485. residual[i] = data[i] - data[i-1];
  486. break;
  487. case 2:
  488. for(i = 0; i < idata_len; i++)
  489. residual[i] = data[i] - 2*data[i-1] + data[i-2];
  490. break;
  491. case 3:
  492. for(i = 0; i < idata_len; i++)
  493. residual[i] = data[i] - 3*data[i-1] + 3*data[i-2] - data[i-3];
  494. break;
  495. case 4:
  496. for(i = 0; i < idata_len; i++)
  497. residual[i] = data[i] - 4*data[i-1] + 6*data[i-2] - 4*data[i-3] + data[i-4];
  498. break;
  499. default:
  500. FLAC__ASSERT(0);
  501. }
  502. }
  503. #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && !defined(FUZZING_BUILD_MODE_FLAC_SANITIZE_SIGNED_INTEGER_OVERFLOW)
  504. /* The attribute below is to silence the undefined sanitizer of oss-fuzz.
  505. * Because fuzzing feeds bogus predictors and residual samples to the
  506. * decoder, having overflows in this section is unavoidable. Also,
  507. * because the calculated values are audio path only, there is no
  508. * potential for security problems */
  509. __attribute__((no_sanitize("signed-integer-overflow")))
  510. #endif
  511. void FLAC__fixed_restore_signal(const FLAC__int32 residual[], uint32_t data_len, uint32_t order, FLAC__int32 data[])
  512. {
  513. int i, idata_len = (int)data_len;
  514. switch(order) {
  515. case 0:
  516. FLAC__ASSERT(sizeof(residual[0]) == sizeof(data[0]));
  517. memcpy(data, residual, sizeof(residual[0])*data_len);
  518. break;
  519. case 1:
  520. for(i = 0; i < idata_len; i++)
  521. data[i] = residual[i] + data[i-1];
  522. break;
  523. case 2:
  524. for(i = 0; i < idata_len; i++)
  525. data[i] = residual[i] + 2*data[i-1] - data[i-2];
  526. break;
  527. case 3:
  528. for(i = 0; i < idata_len; i++)
  529. data[i] = residual[i] + 3*data[i-1] - 3*data[i-2] + data[i-3];
  530. break;
  531. case 4:
  532. for(i = 0; i < idata_len; i++)
  533. data[i] = residual[i] + 4*data[i-1] - 6*data[i-2] + 4*data[i-3] - data[i-4];
  534. break;
  535. default:
  536. FLAC__ASSERT(0);
  537. }
  538. }
  539. void FLAC__fixed_restore_signal_wide(const FLAC__int32 residual[], uint32_t data_len, uint32_t order, FLAC__int32 data[])
  540. {
  541. int i, idata_len = (int)data_len;
  542. switch(order) {
  543. case 0:
  544. FLAC__ASSERT(sizeof(residual[0]) == sizeof(data[0]));
  545. memcpy(data, residual, sizeof(residual[0])*data_len);
  546. break;
  547. case 1:
  548. for(i = 0; i < idata_len; i++)
  549. data[i] = (FLAC__int64)residual[i] + (FLAC__int64)data[i-1];
  550. break;
  551. case 2:
  552. for(i = 0; i < idata_len; i++)
  553. data[i] = (FLAC__int64)residual[i] + 2*(FLAC__int64)data[i-1] - (FLAC__int64)data[i-2];
  554. break;
  555. case 3:
  556. for(i = 0; i < idata_len; i++)
  557. data[i] = (FLAC__int64)residual[i] + 3*(FLAC__int64)data[i-1] - 3*(FLAC__int64)data[i-2] + (FLAC__int64)data[i-3];
  558. break;
  559. case 4:
  560. for(i = 0; i < idata_len; i++)
  561. data[i] = (FLAC__int64)residual[i] + 4*(FLAC__int64)data[i-1] - 6*(FLAC__int64)data[i-2] + 4*(FLAC__int64)data[i-3] - (FLAC__int64)data[i-4];
  562. break;
  563. default:
  564. FLAC__ASSERT(0);
  565. }
  566. }
  567. #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && !defined(FUZZING_BUILD_MODE_FLAC_SANITIZE_SIGNED_INTEGER_OVERFLOW)
  568. /* The attribute below is to silence the undefined sanitizer of oss-fuzz.
  569. * Because fuzzing feeds bogus predictors and residual samples to the
  570. * decoder, having overflows in this section is unavoidable. Also,
  571. * because the calculated values are audio path only, there is no
  572. * potential for security problems */
  573. __attribute__((no_sanitize("signed-integer-overflow")))
  574. #endif
  575. void FLAC__fixed_restore_signal_wide_33bit(const FLAC__int32 residual[], uint32_t data_len, uint32_t order, FLAC__int64 data[])
  576. {
  577. int i, idata_len = (int)data_len;
  578. switch(order) {
  579. case 0:
  580. for(i = 0; i < idata_len; i++)
  581. data[i] = residual[i];
  582. break;
  583. case 1:
  584. for(i = 0; i < idata_len; i++)
  585. data[i] = (FLAC__int64)residual[i] + data[i-1];
  586. break;
  587. case 2:
  588. for(i = 0; i < idata_len; i++)
  589. data[i] = (FLAC__int64)residual[i] + 2*data[i-1] - data[i-2];
  590. break;
  591. case 3:
  592. for(i = 0; i < idata_len; i++)
  593. data[i] = (FLAC__int64)residual[i] + 3*data[i-1] - 3*data[i-2] + data[i-3];
  594. break;
  595. case 4:
  596. for(i = 0; i < idata_len; i++)
  597. data[i] = (FLAC__int64)residual[i] + 4*data[i-1] - 6*data[i-2] + 4*data[i-3] - data[i-4];
  598. break;
  599. default:
  600. FLAC__ASSERT(0);
  601. }
  602. }