jack2 codebase

/*
    Copyright (C) 2000 Paul Davis

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#define _ISOC9X_SOURCE 1
#define _ISOC99_SOURCE 1
#define __USE_ISOC9X 1
#define __USE_ISOC99 1

#include <stdio.h>
#include <string.h>
#include <math.h>
#include <memory.h>
#include <stdlib.h>
#include <stdint.h>
#include <limits.h>
#ifdef __linux__
#include <endian.h>
#endif

#include "memops.h"

#if defined (__SSE2__) && !defined (__sun__)
#include <emmintrin.h>
#ifdef __SSE4_1__
#include <smmintrin.h>
#endif
#endif

#if defined (__ARM_NEON__) || defined (__ARM_NEON)
#include <arm_neon.h>
#endif
/* Notes about these *_SCALING values.

   The MAX_<N>BIT values are floating point. When multiplied by
   a full-scale normalized floating point sample value (-1.0..+1.0)
   they should give the maximum value representable with an integer
   sample type of N bits. Note that this is asymmetric. Sample ranges
   for signed integer, 2's complement values are -(2^(N-1)) to +(2^(N-1)-1).

   Complications
   -------------
   If we use +2^(N-1) for the scaling factors, we run into a problem:

   if we start with a normalized float value of -1.0, scaling
   to 24 bits would give -8388608 (-2^23), which is ideal.
   But with +1.0, we get +8388608, which is technically out of range.

   We never multiply a full range normalized value by this constant,
   but we could multiply it by a positive value that is close enough to +1.0
   to produce a value > +(2^(N-1)-1).

   There is no way around this paradox without wasting CPU cycles to determine
   which scaling factor to use (i.e. determine if it's negative or not,
   use the right factor).

   So, for now (October 2008) we use 2^(N-1)-1 as the scaling factor.
*/
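
/* Worked example of the asymmetry described above (illustrative note, not
 * part of the original file): with the symmetric factor actually chosen,
 * SAMPLE_24BIT_SCALING = 8388607.0f = 2^23 - 1, we get
 *
 *     f_round (-1.0f * 8388607.0f) == -8388607
 *     f_round (+1.0f * 8388607.0f) == +8388607
 *
 * so both extremes fit a signed 24 bit sample; the price is that the most
 * negative code, -8388608 (-2^23), is never produced.
 */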
#define SAMPLE_32BIT_SCALING 2147483647.0
#define SAMPLE_24BIT_SCALING 8388607.0f
#define SAMPLE_16BIT_SCALING 32767.0f

/* these are just values to use if the floating point value was out of range

   advice from Fons Adriaensen: make the limits symmetrical
*/

#define SAMPLE_32BIT_MAX 2147483647
#define SAMPLE_32BIT_MIN -2147483647
#define SAMPLE_32BIT_MAX_D 2147483647.0
#define SAMPLE_32BIT_MIN_D -2147483647.0

#define SAMPLE_24BIT_MAX 8388607
#define SAMPLE_24BIT_MIN -8388607
#define SAMPLE_24BIT_MAX_F 8388607.0f
#define SAMPLE_24BIT_MIN_F -8388607.0f

#define SAMPLE_16BIT_MAX 32767
#define SAMPLE_16BIT_MIN -32767
#define SAMPLE_16BIT_MAX_F 32767.0f
#define SAMPLE_16BIT_MIN_F -32767.0f

/* these mark the outer edges of the range considered "within" range
   for a floating point sample value. values outside (and on the boundaries)
   of this range will be clipped before conversion; values within this
   range will be scaled to appropriate values for the target sample
   type.
*/

#define NORMALIZED_FLOAT_MIN -1.0f
#define NORMALIZED_FLOAT_MAX  1.0f

/* define this in case we end up on a platform that is missing
   the real lrintf functions
*/

#define f_round(f) lrintf(f)
#define d_round(f) lrint(f)

#define float_16(s, d)\
    if ((s) <= NORMALIZED_FLOAT_MIN) {\
        (d) = SAMPLE_16BIT_MIN;\
    } else if ((s) >= NORMALIZED_FLOAT_MAX) {\
        (d) = SAMPLE_16BIT_MAX;\
    } else {\
        (d) = f_round ((s) * SAMPLE_16BIT_SCALING);\
    }
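
/* Usage sketch for the float_16 macro above (illustrative note, not part of
 * the original file; "in" and "out" are hypothetical names). The conversion
 * macros below all follow the same clip-then-scale pattern:
 *
 *     jack_default_audio_sample_t in = 0.5f;
 *     int16_t out;
 *     float_16 (in, out);    // out == 16384, i.e. f_round (0.5f * 32767.0f)
 *     float_16 (1.7f, out);  // out == SAMPLE_16BIT_MAX: out-of-range clips
 */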
/* call this when "s" has already been scaled (e.g. when dithering)
*/

#define float_16_scaled(s, d)\
    if ((s) <= SAMPLE_16BIT_MIN_F) {\
        (d) = SAMPLE_16BIT_MIN;\
    } else if ((s) >= SAMPLE_16BIT_MAX_F) { \
        (d) = SAMPLE_16BIT_MAX;\
    } else {\
        (d) = f_round ((s));\
    }
#define float_24u32(s, d) \
    if ((s) <= NORMALIZED_FLOAT_MIN) {\
        (d) = SAMPLE_24BIT_MIN << 8;\
    } else if ((s) >= NORMALIZED_FLOAT_MAX) {\
        (d) = SAMPLE_24BIT_MAX << 8;\
    } else {\
        (d) = f_round ((s) * SAMPLE_24BIT_SCALING) << 8;\
    }

#define float_24l32(s, d) \
    if ((s) <= NORMALIZED_FLOAT_MIN) {\
        (d) = SAMPLE_24BIT_MIN; \
    } else if ((s) >= NORMALIZED_FLOAT_MAX) {\
        (d) = SAMPLE_24BIT_MAX; \
    } else {\
        (d) = f_round ((s) * SAMPLE_24BIT_SCALING); \
    }

#define float_32(s, d) \
    do { \
        double clipped = fmin(NORMALIZED_FLOAT_MAX, \
                              fmax((double)(s), NORMALIZED_FLOAT_MIN)); \
        double scaled = clipped * SAMPLE_32BIT_MAX_D; \
        (d) = d_round(scaled); \
    } \
    while (0)
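
/* Note (explanatory, not in the original): unlike the bare if/else macros
 * above, float_32 is wrapped in do { ... } while (0) so it expands to a
 * single statement, which keeps a construct like the following well formed
 * ("cond" and "something_else" are hypothetical):
 *
 *     if (cond)
 *         float_32 (in, out);
 *     else
 *         something_else ();
 */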
/* call this when "s" has already been scaled (e.g. when dithering)
*/

#define float_24u32_scaled(s, d)\
    if ((s) <= SAMPLE_24BIT_MIN_F) {\
        (d) = SAMPLE_24BIT_MIN << 8;\
    } else if ((s) >= SAMPLE_24BIT_MAX_F) { \
        (d) = SAMPLE_24BIT_MAX << 8; \
    } else {\
        (d) = f_round ((s)) << 8; \
    }

#define float_24(s, d) \
    if ((s) <= NORMALIZED_FLOAT_MIN) {\
        (d) = SAMPLE_24BIT_MIN;\
    } else if ((s) >= NORMALIZED_FLOAT_MAX) {\
        (d) = SAMPLE_24BIT_MAX;\
    } else {\
        (d) = f_round ((s) * SAMPLE_24BIT_SCALING);\
    }

/* call this when "s" has already been scaled (e.g. when dithering)
*/

#define float_24_scaled(s, d)\
    if ((s) <= SAMPLE_24BIT_MIN_F) {\
        (d) = SAMPLE_24BIT_MIN;\
    } else if ((s) >= SAMPLE_24BIT_MAX_F) { \
        (d) = SAMPLE_24BIT_MAX; \
    } else {\
        (d) = f_round ((s)); \
    }
#if defined (__SSE2__) && !defined (__sun__)

/* generates the same as _mm_set_ps(1.f, 1.f, 1.f, 1.f) but faster */
static inline __m128 gen_one(void)
{
    volatile __m128i x = { 0 }; /* shut up, GCC */
    __m128i ones = _mm_cmpeq_epi32(x, x);
    return (__m128)_mm_slli_epi32 (_mm_srli_epi32(ones, 25), 23);
}
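
/* How gen_one works (explanatory note, not in the original): comparing a
 * register with itself yields all-ones lanes, and the two logical shifts
 * then carve out exactly the IEEE-754 exponent bits of 1.0f:
 *
 *     0xFFFFFFFF >> 25 == 0x0000007F
 *     0x0000007F << 23 == 0x3F800000   (the bit pattern of 1.0f)
 */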
static inline __m128 clip(__m128 s, __m128 min, __m128 max)
{
    return _mm_min_ps(max, _mm_max_ps(s, min));
}

static inline __m128d clip_double(__m128d s, __m128d min, __m128d max)
{
    return _mm_min_pd(max, _mm_max_pd(s, min));
}

static inline __m128i float_24_sse(__m128 s)
{
    const __m128 upper_bound = gen_one(); /* NORMALIZED_FLOAT_MAX */
    const __m128 lower_bound = _mm_sub_ps(_mm_setzero_ps(), upper_bound);

    __m128 clipped = clip(s, lower_bound, upper_bound);
    __m128 scaled = _mm_mul_ps(clipped, _mm_set1_ps(SAMPLE_24BIT_SCALING));
    return _mm_cvtps_epi32(scaled);
}
#endif

#if defined (__ARM_NEON__) || defined (__ARM_NEON)
static inline float32x4_t clip(float32x4_t s, float32x4_t min, float32x4_t max)
{
    return vminq_f32(max, vmaxq_f32(s, min));
}

static inline int32x4_t float_24_neon(float32x4_t s)
{
    const float32x4_t upper_bound = vdupq_n_f32(NORMALIZED_FLOAT_MAX);
    const float32x4_t lower_bound = vdupq_n_f32(NORMALIZED_FLOAT_MIN);

    float32x4_t clipped = clip(s, lower_bound, upper_bound);
    float32x4_t scaled = vmulq_f32(clipped, vdupq_n_f32(SAMPLE_24BIT_SCALING));
    return vcvtq_s32_f32(scaled);
}

static inline int16x4_t float_16_neon(float32x4_t s)
{
    const float32x4_t upper_bound = vdupq_n_f32(NORMALIZED_FLOAT_MAX);
    const float32x4_t lower_bound = vdupq_n_f32(NORMALIZED_FLOAT_MIN);

    float32x4_t clipped = clip(s, lower_bound, upper_bound);
    float32x4_t scaled = vmulq_f32(clipped, vdupq_n_f32(SAMPLE_16BIT_SCALING));
    return vmovn_s32(vcvtq_s32_f32(scaled));
}
#endif
/* Linear Congruential noise generator. From the music-dsp list
 * less random than rand(), but good enough and 10x faster
 */
static unsigned int seed = 22222;

static inline unsigned int fast_rand() {
    seed = (seed * 196314165) + 907633515;
    return seed;
}
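
/* How fast_rand() is used below (explanatory note, not in the original):
 * dividing by UINT_MAX maps the raw 32 bit state onto [0, 1], so
 *
 *     fast_rand() / (float) UINT_MAX - 0.5f
 *
 * is a rectangular (uniform) dither sample in roughly [-0.5, +0.5], and the
 * sum of two such terms minus 1.0f gives the triangular dither used by the
 * *_dither_tri_* and *_dither_shaped_* converters further down.
 */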
/* functions for native float sample data */

void sample_move_floatLE_sSs (jack_default_audio_sample_t *dst, char *src, unsigned long nsamples, unsigned long src_skip) {
    while (nsamples--) {
        *dst = *((float *) src);
        dst++;
        src += src_skip;
    }
}

void sample_move_dS_floatLE (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state) {
    while (nsamples--) {
        *((float *) dst) = *src;
        dst += dst_skip;
        src++;
    }
}

/* NOTES on function naming:

   foo_bar_d<TYPE>_s<TYPE>

   the "d<TYPE>" component defines the destination type for the operation
   the "s<TYPE>" component defines the source type for the operation

   TYPE can be one of:

   S      - sample is a jack_default_audio_sample_t, currently (October 2008) a 32 bit floating point value
   Ss     - like S but reverse endian from the host CPU
   32     - sample is a signed 32 bit integer value
   32u24  - sample is a signed 32 bit integer value, but data is in upper 24 bits only
   32u24s - like 32u24 but reverse endian from the host CPU
   32l24  - sample is a signed 32 bit integer value, but data is in lower 24 bits only
   32l24s - like 32l24 but reverse endian from the host CPU
   24     - sample is a signed 24 bit integer value
   24s    - like 24 but reverse endian from the host CPU
   16     - sample is a signed 16 bit integer value
   16s    - like 16 but reverse endian from the host CPU

   For obvious reasons, the reverse endian versions only show as source types.

   This covers all known sample formats at 16 bits or larger.
*/
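
/* Example of decoding a name under this scheme (explanatory note, not in
 * the original): sample_move_dS_s16s fills jack float samples ("dS") from
 * 16 bit integer sources that are reverse endian from the host ("s16s"),
 * while sample_move_d16_sS goes the other way, writing native-endian 16 bit
 * integers from jack float sources.
 */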
/* functions for native integer sample data */

void sample_move_d32_sSs (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state)
{
    while (nsamples--) {
        int32_t z;
        float_32(*src, z);
#if __BYTE_ORDER == __LITTLE_ENDIAN
        dst[0]=(char)(z>>24);
        dst[1]=(char)(z>>16);
        dst[2]=(char)(z>>8);
        dst[3]=(char)(z);
#elif __BYTE_ORDER == __BIG_ENDIAN
        dst[0]=(char)(z);
        dst[1]=(char)(z>>8);
        dst[2]=(char)(z>>16);
        dst[3]=(char)(z>>24);
#endif
        dst += dst_skip;
        src++;
    }
}

void sample_move_d32_sS (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state)
{
    while (nsamples--) {
        float_32(*src, *(int32_t *)dst);
        dst += dst_skip;
        src++;
    }
}

void sample_move_d32u24_sSs (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state)
{
#if defined (__ARM_NEON__) || defined (__ARM_NEON)
    unsigned long unrolled = nsamples / 4;
    nsamples = nsamples & 3;

    while (unrolled--) {
        float32x4_t samples = vld1q_f32(src);
        int32x4_t converted = float_24_neon(samples);
        int32x4_t shifted = vshlq_n_s32(converted, 8);
        shifted = vreinterpretq_s32_u8(vrev32q_u8(vreinterpretq_u8_s32(shifted)));

        switch(dst_skip) {
        case 4:
            vst1q_s32((int32_t*)dst, shifted);
            break;
        default:
            vst1q_lane_s32((int32_t*)(dst), shifted, 0);
            vst1q_lane_s32((int32_t*)(dst+dst_skip), shifted, 1);
            vst1q_lane_s32((int32_t*)(dst+2*dst_skip), shifted, 2);
            vst1q_lane_s32((int32_t*)(dst+3*dst_skip), shifted, 3);
            break;
        }
        dst += 4*dst_skip;
        src += 4;
    }
#endif

    int32_t z;

    while (nsamples--) {
        float_24u32 (*src, z);
#if __BYTE_ORDER == __LITTLE_ENDIAN
        dst[0]=(char)(z>>24);
        dst[1]=(char)(z>>16);
        dst[2]=(char)(z>>8);
        dst[3]=(char)(z);
#elif __BYTE_ORDER == __BIG_ENDIAN
        dst[0]=(char)(z);
        dst[1]=(char)(z>>8);
        dst[2]=(char)(z>>16);
        dst[3]=(char)(z>>24);
#endif
        dst += dst_skip;
        src++;
    }
}
void sample_move_d32u24_sS (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state)
{
#if defined (__SSE2__) && !defined (__sun__)
    __m128 int_max = _mm_set1_ps(SAMPLE_24BIT_MAX_F);
    __m128 int_min = _mm_sub_ps(_mm_setzero_ps(), int_max);
    __m128 factor = int_max;

    unsigned long unrolled = nsamples / 4;
    nsamples = nsamples & 3;

    while (unrolled--) {
        __m128 in = _mm_load_ps(src);
        __m128 scaled = _mm_mul_ps(in, factor);
        __m128 clipped = clip(scaled, int_min, int_max);

        __m128i y = _mm_cvttps_epi32(clipped);
        __m128i shifted = _mm_slli_epi32(y, 8);

#ifdef __SSE4_1__
        *(int32_t*)dst = _mm_extract_epi32(shifted, 0);
        *(int32_t*)(dst+dst_skip) = _mm_extract_epi32(shifted, 1);
        *(int32_t*)(dst+2*dst_skip) = _mm_extract_epi32(shifted, 2);
        *(int32_t*)(dst+3*dst_skip) = _mm_extract_epi32(shifted, 3);
#else
        __m128i shuffled1 = _mm_shuffle_epi32(shifted, _MM_SHUFFLE(0, 3, 2, 1));
        __m128i shuffled2 = _mm_shuffle_epi32(shifted, _MM_SHUFFLE(1, 0, 3, 2));
        __m128i shuffled3 = _mm_shuffle_epi32(shifted, _MM_SHUFFLE(2, 1, 0, 3));

        _mm_store_ss((float*)dst, (__m128)shifted);
        _mm_store_ss((float*)(dst+dst_skip), (__m128)shuffled1);
        _mm_store_ss((float*)(dst+2*dst_skip), (__m128)shuffled2);
        _mm_store_ss((float*)(dst+3*dst_skip), (__m128)shuffled3);
#endif
        dst += 4*dst_skip;
        src += 4;
    }

    while (nsamples--) {
        __m128 in = _mm_load_ss(src);
        __m128 scaled = _mm_mul_ss(in, factor);
        __m128 clipped = _mm_min_ss(int_max, _mm_max_ss(scaled, int_min));

        int y = _mm_cvttss_si32(clipped);
        *((int *) dst) = y<<8;

        dst += dst_skip;
        src++;
    }
#elif defined (__ARM_NEON__) || defined (__ARM_NEON)
    unsigned long unrolled = nsamples / 4;
    nsamples = nsamples & 3;

    while (unrolled--) {
        float32x4_t samples = vld1q_f32(src);
        int32x4_t converted = float_24_neon(samples);
        int32x4_t shifted = vshlq_n_s32(converted, 8);

        switch(dst_skip) {
        case 4:
            vst1q_s32((int32_t*)dst, shifted);
            break;
        default:
            vst1q_lane_s32((int32_t*)(dst), shifted, 0);
            vst1q_lane_s32((int32_t*)(dst+dst_skip), shifted, 1);
            vst1q_lane_s32((int32_t*)(dst+2*dst_skip), shifted, 2);
            vst1q_lane_s32((int32_t*)(dst+3*dst_skip), shifted, 3);
            break;
        }
        dst += 4*dst_skip;
        src += 4;
    }
#endif

#if !defined (__SSE2__)
    while (nsamples--) {
        float_24u32 (*src, *((int32_t*) dst));
        dst += dst_skip;
        src++;
    }
#endif
}
void sample_move_dS_s32u24s (jack_default_audio_sample_t *dst, char *src, unsigned long nsamples, unsigned long src_skip)
{
#if defined (__ARM_NEON__) || defined (__ARM_NEON)
    float32x4_t factor = vdupq_n_f32(1.0 / SAMPLE_24BIT_SCALING);
    unsigned long unrolled = nsamples / 4;
    while (unrolled--) {
        int32x4_t src128;
        switch(src_skip) {
        case 4:
            src128 = vld1q_s32((int32_t*)src);
            break;
        case 8:
            src128 = vld2q_s32((int32_t*)src).val[0];
            break;
        default:
            src128 = vld1q_lane_s32((int32_t*)src, src128, 0);
            src128 = vld1q_lane_s32((int32_t*)(src+src_skip), src128, 1);
            src128 = vld1q_lane_s32((int32_t*)(src+2*src_skip), src128, 2);
            src128 = vld1q_lane_s32((int32_t*)(src+3*src_skip), src128, 3);
            break;
        }
        src128 = vreinterpretq_s32_u8(vrev32q_u8(vreinterpretq_u8_s32(src128)));
        int32x4_t shifted = vshrq_n_s32(src128, 8);
        float32x4_t as_float = vcvtq_f32_s32(shifted);
        float32x4_t divided = vmulq_f32(as_float, factor);
        vst1q_f32(dst, divided);

        src += 4*src_skip;
        dst += 4;
    }
    nsamples = nsamples & 3;
#endif

    /* ALERT: signed sign-extension portability !!! */

    const jack_default_audio_sample_t scaling = 1.0/SAMPLE_24BIT_SCALING;
    while (nsamples--) {
        int x;
#if __BYTE_ORDER == __LITTLE_ENDIAN
        x = (unsigned char)(src[0]);
        x <<= 8;
        x |= (unsigned char)(src[1]);
        x <<= 8;
        x |= (unsigned char)(src[2]);
        x <<= 8;
        x |= (unsigned char)(src[3]);
#elif __BYTE_ORDER == __BIG_ENDIAN
        x = (unsigned char)(src[3]);
        x <<= 8;
        x |= (unsigned char)(src[2]);
        x <<= 8;
        x |= (unsigned char)(src[1]);
        x <<= 8;
        x |= (unsigned char)(src[0]);
#endif
        *dst = (x >> 8) * scaling;
        dst++;
        src += src_skip;
    }
}
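
/* Note on the scalar loop above (explanatory, not in the original): the four
 * source bytes are assembled into a full 32 bit int, and the arithmetic right
 * shift in "(x >> 8) * scaling" both discards the unused low byte and keeps
 * the sign, e.g. (int32_t)0x80000000 >> 8 == 0xFF800000 on two's complement
 * targets. The "ALERT" comment flags that right-shifting a negative int is
 * implementation-defined in C, hence the portability warning.
 */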
void sample_move_dS_s32u24 (jack_default_audio_sample_t *dst, char *src, unsigned long nsamples, unsigned long src_skip)
{
#if defined (__SSE2__) && !defined (__sun__)
    unsigned long unrolled = nsamples / 4;
    static float inv_sample_max_24bit = 1.0 / SAMPLE_24BIT_SCALING;
    __m128 factor = _mm_set1_ps(inv_sample_max_24bit);
    while (unrolled--)
    {
        int i1 = *((int *) src);
        src += src_skip;
        int i2 = *((int *) src);
        src += src_skip;
        int i3 = *((int *) src);
        src += src_skip;
        int i4 = *((int *) src);
        src += src_skip;

        /* use a distinct name so the "src" pointer above is not shadowed */
        __m128i src128 = _mm_set_epi32(i4, i3, i2, i1);
        __m128i shifted = _mm_srai_epi32(src128, 8);

        __m128 as_float = _mm_cvtepi32_ps(shifted);
        __m128 divided = _mm_mul_ps(as_float, factor);

        _mm_storeu_ps(dst, divided);

        dst += 4;
    }
    nsamples = nsamples & 3;
#elif defined (__ARM_NEON__) || defined (__ARM_NEON)
    unsigned long unrolled = nsamples / 4;
    float32x4_t factor = vdupq_n_f32(1.0 / SAMPLE_24BIT_SCALING);
    while (unrolled--) {
        int32x4_t src128;
        switch(src_skip) {
        case 4:
            src128 = vld1q_s32((int32_t*)src);
            break;
        case 8:
            src128 = vld2q_s32((int32_t*)src).val[0];
            break;
        default:
            src128 = vld1q_lane_s32((int32_t*)src, src128, 0);
            src128 = vld1q_lane_s32((int32_t*)(src+src_skip), src128, 1);
            src128 = vld1q_lane_s32((int32_t*)(src+2*src_skip), src128, 2);
            src128 = vld1q_lane_s32((int32_t*)(src+3*src_skip), src128, 3);
            break;
        }
        int32x4_t shifted = vshrq_n_s32(src128, 8);
        float32x4_t as_float = vcvtq_f32_s32(shifted);
        float32x4_t divided = vmulq_f32(as_float, factor);
        vst1q_f32(dst, divided);

        src += 4*src_skip;
        dst += 4;
    }
    nsamples = nsamples & 3;
#endif

    /* ALERT: signed sign-extension portability !!! */

    const jack_default_audio_sample_t scaling = 1.0/SAMPLE_24BIT_SCALING;
    while (nsamples--) {
        *dst = (*((int *) src) >> 8) * scaling;
        dst++;
        src += src_skip;
    }
}
void sample_move_d32l24_sSs (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state)
{
#if defined (__ARM_NEON__) || defined (__ARM_NEON)
    unsigned long unrolled = nsamples / 4;
    nsamples = nsamples & 3;

    while (unrolled--) {
        float32x4_t samples = vld1q_f32(src);
        int32x4_t converted = float_24_neon(samples);
        converted = vreinterpretq_s32_u8(vrev32q_u8(vreinterpretq_u8_s32(converted)));

        switch(dst_skip) {
        case 4:
            vst1q_s32((int32_t*)dst, converted);
            break;
        default:
            vst1q_lane_s32((int32_t*)(dst), converted, 0);
            vst1q_lane_s32((int32_t*)(dst+dst_skip), converted, 1);
            vst1q_lane_s32((int32_t*)(dst+2*dst_skip), converted, 2);
            vst1q_lane_s32((int32_t*)(dst+3*dst_skip), converted, 3);
            break;
        }
        dst += 4*dst_skip;
        src += 4;
    }
#endif

    int32_t z;

    while (nsamples--) {
        float_24l32 (*src, z);
#if __BYTE_ORDER == __LITTLE_ENDIAN
        dst[0]=(char)(z>>24);
        dst[1]=(char)(z>>16);
        dst[2]=(char)(z>>8);
        dst[3]=(char)(z);
#elif __BYTE_ORDER == __BIG_ENDIAN
        dst[0]=(char)(z);
        dst[1]=(char)(z>>8);
        dst[2]=(char)(z>>16);
        dst[3]=(char)(z>>24);
#endif
        dst += dst_skip;
        src++;
    }
}
void sample_move_d32l24_sS (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state)
{
#if defined (__SSE2__) && !defined (__sun__)
    __m128 int_max = _mm_set1_ps(SAMPLE_24BIT_MAX_F);
    __m128 int_min = _mm_sub_ps(_mm_setzero_ps(), int_max);
    __m128 factor = int_max;

    unsigned long unrolled = nsamples / 4;
    nsamples = nsamples & 3;

    while (unrolled--) {
        __m128 in = _mm_load_ps(src);
        __m128 scaled = _mm_mul_ps(in, factor);
        __m128 clipped = clip(scaled, int_min, int_max);

        __m128i shifted = _mm_cvttps_epi32(clipped);

#ifdef __SSE4_1__
        *(int32_t*)dst = _mm_extract_epi32(shifted, 0);
        *(int32_t*)(dst+dst_skip) = _mm_extract_epi32(shifted, 1);
        *(int32_t*)(dst+2*dst_skip) = _mm_extract_epi32(shifted, 2);
        *(int32_t*)(dst+3*dst_skip) = _mm_extract_epi32(shifted, 3);
#else
        __m128i shuffled1 = _mm_shuffle_epi32(shifted, _MM_SHUFFLE(0, 3, 2, 1));
        __m128i shuffled2 = _mm_shuffle_epi32(shifted, _MM_SHUFFLE(1, 0, 3, 2));
        __m128i shuffled3 = _mm_shuffle_epi32(shifted, _MM_SHUFFLE(2, 1, 0, 3));

        _mm_store_ss((float*)dst, (__m128)shifted);
        _mm_store_ss((float*)(dst+dst_skip), (__m128)shuffled1);
        _mm_store_ss((float*)(dst+2*dst_skip), (__m128)shuffled2);
        _mm_store_ss((float*)(dst+3*dst_skip), (__m128)shuffled3);
#endif
        dst += 4*dst_skip;
        src += 4;
    }

    while (nsamples--) {
        __m128 in = _mm_load_ss(src);
        __m128 scaled = _mm_mul_ss(in, factor);
        __m128 clipped = _mm_min_ss(int_max, _mm_max_ss(scaled, int_min));

        int y = _mm_cvttss_si32(clipped);
        *((int *) dst) = y<<8;

        dst += dst_skip;
        src++;
    }
#elif defined (__ARM_NEON__) || defined (__ARM_NEON)
    unsigned long unrolled = nsamples / 4;
    nsamples = nsamples & 3;

    while (unrolled--) {
        float32x4_t samples = vld1q_f32(src);
        int32x4_t converted = float_24_neon(samples);

        switch(dst_skip) {
        case 4:
            vst1q_s32((int32_t*)dst, converted);
            break;
        default:
            vst1q_lane_s32((int32_t*)(dst), converted, 0);
            vst1q_lane_s32((int32_t*)(dst+dst_skip), converted, 1);
            vst1q_lane_s32((int32_t*)(dst+2*dst_skip), converted, 2);
            vst1q_lane_s32((int32_t*)(dst+3*dst_skip), converted, 3);
            break;
        }
        dst += 4*dst_skip;
        src += 4;
    }
#endif

#if !defined (__SSE2__)
    while (nsamples--) {
        float_24l32 (*src, *((int32_t*) dst));
        dst += dst_skip;
        src++;
    }
#endif
}
void sample_move_dS_s32s (jack_default_audio_sample_t *dst, char *src, unsigned long nsamples, unsigned long src_skip)
{
    const jack_default_audio_sample_t scaling = 1.0/SAMPLE_32BIT_SCALING;
    while (nsamples--) {
        int32_t x;
#if __BYTE_ORDER == __LITTLE_ENDIAN
        x = (unsigned char)(src[0]);
        x <<= 8;
        x |= (unsigned char)(src[1]);
        x <<= 8;
        x |= (unsigned char)(src[2]);
        x <<= 8;
        x |= (unsigned char)(src[3]);
#elif __BYTE_ORDER == __BIG_ENDIAN
        x = (unsigned char)(src[3]);
        x <<= 8;
        x |= (unsigned char)(src[2]);
        x <<= 8;
        x |= (unsigned char)(src[1]);
        x <<= 8;
        x |= (unsigned char)(src[0]);
#endif
        double extended = x * scaling;
        *dst = (float)extended;
        dst++;
        src += src_skip;
    }
}
void sample_move_dS_s32l24s (jack_default_audio_sample_t *dst, char *src, unsigned long nsamples, unsigned long src_skip)
{
#if defined (__ARM_NEON__) || defined (__ARM_NEON)
    float32x4_t factor = vdupq_n_f32(1.0 / SAMPLE_24BIT_SCALING);
    unsigned long unrolled = nsamples / 4;
    while (unrolled--) {
        uint32x4_t src128;
        switch(src_skip) {
        case 4:
            src128 = vld1q_u32((uint32_t*)src);
            break;
        case 8:
            src128 = vld2q_u32((uint32_t*)src).val[0];
            break;
        default:
            src128 = vld1q_lane_u32((uint32_t*)src, src128, 0);
            src128 = vld1q_lane_u32((uint32_t*)(src+src_skip), src128, 1);
            src128 = vld1q_lane_u32((uint32_t*)(src+2*src_skip), src128, 2);
            src128 = vld1q_lane_u32((uint32_t*)(src+3*src_skip), src128, 3);
            break;
        }
        src128 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(src128)));
        uint32x4_t toupper = vshlq_n_u32(src128, 8);
        int32x4_t shifted = vshrq_n_s32((int32x4_t)toupper, 8);
        float32x4_t as_float = vcvtq_f32_s32(shifted);
        float32x4_t divided = vmulq_f32(as_float, factor);
        vst1q_f32(dst, divided);

        src += 4*src_skip;
        dst += 4;
    }
    nsamples = nsamples & 3;
#endif

    /* ALERT: signed sign-extension portability !!! */

    const jack_default_audio_sample_t scaling = 1.0/SAMPLE_24BIT_SCALING;
    while (nsamples--) {
        int32_t x;
#if __BYTE_ORDER == __LITTLE_ENDIAN
        x = (unsigned char)(src[0]);
        x <<= 8;
        x |= (unsigned char)(src[1]);
        x <<= 8;
        x |= (unsigned char)(src[2]);
        x <<= 8;
        x |= (unsigned char)(src[3]);
#elif __BYTE_ORDER == __BIG_ENDIAN
        x = (unsigned char)(src[3]);
        x <<= 8;
        x |= (unsigned char)(src[2]);
        x <<= 8;
        x |= (unsigned char)(src[1]);
        x <<= 8;
        x |= (unsigned char)(src[0]);
#endif
        /* sign-extend from bit 23, matching the NEON path above and the
           scalar path of sample_move_dS_s32l24 below */
        if (x & 0x800000) x |= 0xFF000000U;
        *dst = x * scaling;
        dst++;
        src += src_skip;
    }
}
void sample_move_dS_s32 (jack_default_audio_sample_t *dst, char *src, unsigned long nsamples, unsigned long src_skip)
{
    const double scaling = 1.0 / SAMPLE_32BIT_SCALING;
    while (nsamples--) {
        int32_t val = (*((int32_t*)src));
        double extended = val * scaling;
        *dst = (float)extended;
        dst++;
        src += src_skip;
    }
}
void sample_move_dS_s32l24 (jack_default_audio_sample_t *dst, char *src, unsigned long nsamples, unsigned long src_skip)
{
#if defined (__SSE2__) && !defined (__sun__)
    unsigned long unrolled = nsamples / 4;
    static float inv_sample_max_24bit = 1.0 / SAMPLE_24BIT_SCALING;
    __m128 factor = _mm_set1_ps(inv_sample_max_24bit);
    while (unrolled--)
    {
        int i1 = *((int *) src);
        src += src_skip;
        int i2 = *((int *) src);
        src += src_skip;
        int i3 = *((int *) src);
        src += src_skip;
        int i4 = *((int *) src);
        src += src_skip;

        __m128i src128 = _mm_set_epi32(i4, i3, i2, i1);
        /* sign-extend the lower 24 bits: shift up, then arithmetic shift
           back down, mirroring the NEON path below and the scalar loop */
        __m128i shifted = _mm_srai_epi32(_mm_slli_epi32(src128, 8), 8);
        __m128 as_float = _mm_cvtepi32_ps(shifted);
        __m128 divided = _mm_mul_ps(as_float, factor);

        _mm_storeu_ps(dst, divided);

        dst += 4;
    }
    nsamples = nsamples & 3;
#elif defined (__ARM_NEON__) || defined (__ARM_NEON)
    unsigned long unrolled = nsamples / 4;
    float32x4_t factor = vdupq_n_f32(1.0 / SAMPLE_24BIT_SCALING);
    while (unrolled--) {
        uint32x4_t src128;
        switch(src_skip) {
        case 4:
            src128 = vld1q_u32((uint32_t*)src);
            break;
        case 8:
            src128 = vld2q_u32((uint32_t*)src).val[0];
            break;
        default:
            src128 = vld1q_lane_u32((uint32_t*)src, src128, 0);
            src128 = vld1q_lane_u32((uint32_t*)(src+src_skip), src128, 1);
            src128 = vld1q_lane_u32((uint32_t*)(src+2*src_skip), src128, 2);
            src128 = vld1q_lane_u32((uint32_t*)(src+3*src_skip), src128, 3);
            break;
        }
        // Sign extension by moving to upper as unsigned, then down
        uint32x4_t toupper = vshlq_n_u32(src128, 8);
        int32x4_t shifted = vshrq_n_s32((int32x4_t)toupper, 8);
        float32x4_t as_float = vcvtq_f32_s32(shifted);
        float32x4_t divided = vmulq_f32(as_float, factor);
        vst1q_f32(dst, divided);

        src += 4*src_skip;
        dst += 4;
    }
    nsamples = nsamples & 3;
#endif

    /* ALERT: signed sign-extension portability !!! */

    const jack_default_audio_sample_t scaling = 1.0/SAMPLE_24BIT_SCALING;
    while (nsamples--) {
        uint32_t val = (*((uint32_t*)src));
        if (val & 0x800000u) val |= 0xFF000000u;
        *dst = (*((int32_t *) &val)) * scaling;
        dst++;
        src += src_skip;
    }
}
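
/* Note on the 24-in-32 sign handling above (explanatory, not in the
 * original): a lower-24-bit sample stores its sign in bit 23, so a raw
 * 0x00800000 must become 0xFF800000 before the int32 can be trusted.
 * The SIMD paths get the same effect with a shift pair,
 * (int32_t)(x << 8) >> 8, which moves bit 23 into the sign position and
 * arithmetic-shifts it back.
 */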
void sample_move_d24_sSs (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state)
{
#if defined (__ARM_NEON__) || defined (__ARM_NEON)
    unsigned long unrolled = nsamples / 4;
    while (unrolled--) {
        int i;
        int32_t z[4];
        float32x4_t samples = vld1q_f32(src);
        int32x4_t converted = float_24_neon(samples);
        converted = vreinterpretq_s32_u8(vrev32q_u8(vreinterpretq_u8_s32(converted)));
        vst1q_s32(z, converted);

        for (i = 0; i != 4; ++i) {
            memcpy (dst, ((char*)(z+i))+1, 3);
            dst += dst_skip;
        }
        src += 4;
    }
    nsamples = nsamples & 3;
#endif

    int32_t z;

    while (nsamples--) {
        float_24 (*src, z);
#if __BYTE_ORDER == __LITTLE_ENDIAN
        dst[0]=(char)(z>>16);
        dst[1]=(char)(z>>8);
        dst[2]=(char)(z);
#elif __BYTE_ORDER == __BIG_ENDIAN
        dst[0]=(char)(z);
        dst[1]=(char)(z>>8);
        dst[2]=(char)(z>>16);
#endif
        dst += dst_skip;
        src++;
    }
}
void sample_move_d24_sS (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state)
{
#if defined (__SSE2__) && !defined (__sun__)
    _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
    while (nsamples >= 4) {
        int i;
        int32_t z[4];
        __m128 samples = _mm_loadu_ps(src);
        __m128i converted = float_24_sse(samples);

#ifdef __SSE4_1__
        z[0] = _mm_extract_epi32(converted, 0);
        z[1] = _mm_extract_epi32(converted, 1);
        z[2] = _mm_extract_epi32(converted, 2);
        z[3] = _mm_extract_epi32(converted, 3);
#else
        __m128i shuffled1 = _mm_shuffle_epi32(converted, _MM_SHUFFLE(0, 3, 2, 1));
        __m128i shuffled2 = _mm_shuffle_epi32(converted, _MM_SHUFFLE(1, 0, 3, 2));
        __m128i shuffled3 = _mm_shuffle_epi32(converted, _MM_SHUFFLE(2, 1, 0, 3));

        _mm_store_ss((float*)z, (__m128)converted);
        _mm_store_ss((float*)z+1, (__m128)shuffled1);
        _mm_store_ss((float*)z+2, (__m128)shuffled2);
        _mm_store_ss((float*)z+3, (__m128)shuffled3);
#endif
        for (i = 0; i != 4; ++i) {
            memcpy (dst, z+i, 3);
            dst += dst_skip;
        }

        nsamples -= 4;
        src += 4;
    }
#elif defined (__ARM_NEON__) || defined (__ARM_NEON)
    unsigned long unrolled = nsamples / 4;
    while (unrolled--) {
        int i;
        int32_t z[4];
        float32x4_t samples = vld1q_f32(src);
        int32x4_t converted = float_24_neon(samples);
        vst1q_s32(z, converted);

        for (i = 0; i != 4; ++i) {
            memcpy (dst, z+i, 3);
            dst += dst_skip;
        }
        src += 4;
    }
    nsamples = nsamples & 3;
#endif

    int32_t z;

    while (nsamples--) {
        float_24 (*src, z);
#if __BYTE_ORDER == __LITTLE_ENDIAN
        memcpy (dst, &z, 3);
#elif __BYTE_ORDER == __BIG_ENDIAN
        memcpy (dst, (char *)&z + 1, 3);
#endif
        dst += dst_skip;
        src++;
    }
}
void sample_move_dS_s24s (jack_default_audio_sample_t *dst, char *src, unsigned long nsamples, unsigned long src_skip)
{
    const jack_default_audio_sample_t scaling = 1.0/SAMPLE_24BIT_SCALING;
#if defined (__ARM_NEON__) || defined (__ARM_NEON)
    // we shift 8 to the right by dividing by 256.0, so no extra sign handling is needed
    const float32x4_t vscaling = vdupq_n_f32(scaling/256.0);
    int32_t x[4];
    memset(x, 0, sizeof(x));
    unsigned long unrolled = nsamples / 4;
    while (unrolled--) {
#if __BYTE_ORDER == __BIG_ENDIAN /* ARM big endian?? */
        // right aligned / inverse sequence below -> *256
        memcpy(((char*)&x[0])+1, src, 3);
        memcpy(((char*)&x[1])+1, src+src_skip, 3);
        memcpy(((char*)&x[2])+1, src+2*src_skip, 3);
        memcpy(((char*)&x[3])+1, src+3*src_skip, 3);
#else
        memcpy(&x[0], src, 3);
        memcpy(&x[1], src+src_skip, 3);
        memcpy(&x[2], src+2*src_skip, 3);
        memcpy(&x[3], src+3*src_skip, 3);
#endif
        src += 4 * src_skip;

        int32x4_t source = vld1q_s32(x);
        source = vreinterpretq_s32_u8(vrev32q_u8(vreinterpretq_u8_s32(source)));
        float32x4_t converted = vcvtq_f32_s32(source);
        float32x4_t scaled = vmulq_f32(converted, vscaling);
        vst1q_f32(dst, scaled);
        dst += 4;
    }
    nsamples = nsamples & 3;
#endif

    /* ALERT: signed sign-extension portability !!! */

    while (nsamples--) {
        int x;
#if __BYTE_ORDER == __LITTLE_ENDIAN
        x = (unsigned char)(src[0]);
        x <<= 8;
        x |= (unsigned char)(src[1]);
        x <<= 8;
        x |= (unsigned char)(src[2]);
        /* correct sign bit and the rest of the top byte */
        if (src[0] & 0x80) {
            x |= 0xff << 24;
        }
#elif __BYTE_ORDER == __BIG_ENDIAN
        x = (unsigned char)(src[2]);
        x <<= 8;
        x |= (unsigned char)(src[1]);
        x <<= 8;
        x |= (unsigned char)(src[0]);
        /* correct sign bit and the rest of the top byte */
        if (src[2] & 0x80) {
            x |= 0xff << 24;
        }
#endif
        *dst = x * scaling;
        dst++;
        src += src_skip;
    }
}
void sample_move_dS_s24 (jack_default_audio_sample_t *dst, char *src, unsigned long nsamples, unsigned long src_skip)
{
    const jack_default_audio_sample_t scaling = 1.f/SAMPLE_24BIT_SCALING;
#if defined (__SSE2__) && !defined (__sun__)
    const __m128 scaling_block = _mm_set_ps1(scaling);
    while (nsamples >= 4) {
        int x0, x1, x2, x3;

        memcpy((char*)&x0 + 1, src, 3);
        memcpy((char*)&x1 + 1, src+src_skip, 3);
        memcpy((char*)&x2 + 1, src+2*src_skip, 3);
        memcpy((char*)&x3 + 1, src+3*src_skip, 3);
        src += 4 * src_skip;

        const __m128i block_i = _mm_set_epi32(x3, x2, x1, x0);
        const __m128i shifted = _mm_srai_epi32(block_i, 8);
        const __m128 converted = _mm_cvtepi32_ps (shifted);
        const __m128 scaled = _mm_mul_ps(converted, scaling_block);
        _mm_storeu_ps(dst, scaled);
        dst += 4;
        nsamples -= 4;
    }
#elif defined (__ARM_NEON__) || defined (__ARM_NEON)
    // we shift 8 to the right by dividing by 256.0, so no extra sign handling is needed
    const float32x4_t vscaling = vdupq_n_f32(scaling/256.0);
    int32_t x[4];
    memset(x, 0, sizeof(x));
    unsigned long unrolled = nsamples / 4;
    while (unrolled--) {
#if __BYTE_ORDER == __BIG_ENDIAN /* ARM big endian?? */
        // left aligned -> *256
        memcpy(&x[0], src, 3);
        memcpy(&x[1], src+src_skip, 3);
        memcpy(&x[2], src+2*src_skip, 3);
        memcpy(&x[3], src+3*src_skip, 3);
#else
        memcpy(((char*)&x[0])+1, src, 3);
        memcpy(((char*)&x[1])+1, src+src_skip, 3);
        memcpy(((char*)&x[2])+1, src+2*src_skip, 3);
        memcpy(((char*)&x[3])+1, src+3*src_skip, 3);
#endif
        src += 4 * src_skip;

        int32x4_t source = vld1q_s32(x);
        float32x4_t converted = vcvtq_f32_s32(source);
        float32x4_t scaled = vmulq_f32(converted, vscaling);
        vst1q_f32(dst, scaled);
        dst += 4;
    }
    nsamples = nsamples & 3;
#endif

    while (nsamples--) {
        int x;
#if __BYTE_ORDER == __LITTLE_ENDIAN
        memcpy((char*)&x + 1, src, 3);
#elif __BYTE_ORDER == __BIG_ENDIAN
        memcpy(&x, src, 3);
#endif
        x >>= 8;
        *dst = x * scaling;
        dst++;
        src += src_skip;
    }
}
void sample_move_d16_sSs (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state)
{
#if defined (__ARM_NEON__) || defined (__ARM_NEON)
    unsigned long unrolled = nsamples / 4;
    nsamples = nsamples & 3;

    while (unrolled--) {
        float32x4_t samples = vld1q_f32(src);
        int16x4_t converted = float_16_neon(samples);
        converted = vreinterpret_s16_u8(vrev16_u8(vreinterpret_u8_s16(converted)));

        switch(dst_skip) {
        case 2:
            vst1_s16((int16_t*)dst, converted);
            break;
        default:
            vst1_lane_s16((int16_t*)(dst), converted, 0);
            vst1_lane_s16((int16_t*)(dst+dst_skip), converted, 1);
            vst1_lane_s16((int16_t*)(dst+2*dst_skip), converted, 2);
            vst1_lane_s16((int16_t*)(dst+3*dst_skip), converted, 3);
            break;
        }
        dst += 4*dst_skip;
        src += 4;
    }
#endif

    int16_t tmp;

    while (nsamples--) {
        // float_16 (*src, tmp);
        if (*src <= NORMALIZED_FLOAT_MIN) {
            tmp = SAMPLE_16BIT_MIN;
        } else if (*src >= NORMALIZED_FLOAT_MAX) {
            tmp = SAMPLE_16BIT_MAX;
        } else {
            tmp = (int16_t) f_round (*src * SAMPLE_16BIT_SCALING);
        }
#if __BYTE_ORDER == __LITTLE_ENDIAN
        dst[0]=(char)(tmp>>8);
        dst[1]=(char)(tmp);
#elif __BYTE_ORDER == __BIG_ENDIAN
        dst[0]=(char)(tmp);
        dst[1]=(char)(tmp>>8);
#endif
        dst += dst_skip;
        src++;
    }
}
void sample_move_d16_sS (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state)
{
#if defined (__ARM_NEON__) || defined (__ARM_NEON)
    unsigned long unrolled = nsamples / 4;
    nsamples = nsamples & 3;

    while (unrolled--) {
        float32x4_t samples = vld1q_f32(src);
        int16x4_t converted = float_16_neon(samples);

        switch(dst_skip) {
        case 2:
            vst1_s16((int16_t*)dst, converted);
            break;
        default:
            vst1_lane_s16((int16_t*)(dst), converted, 0);
            vst1_lane_s16((int16_t*)(dst+dst_skip), converted, 1);
            vst1_lane_s16((int16_t*)(dst+2*dst_skip), converted, 2);
            vst1_lane_s16((int16_t*)(dst+3*dst_skip), converted, 3);
            break;
        }
        dst += 4*dst_skip;
        src += 4;
    }
#endif

    while (nsamples--) {
        float_16 (*src, *((int16_t*) dst));
        dst += dst_skip;
        src++;
    }
}
void sample_move_dither_rect_d16_sSs (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state)
{
    jack_default_audio_sample_t val;
    int16_t tmp;

    while (nsamples--) {
        val = (*src * SAMPLE_16BIT_SCALING) + fast_rand() / (float) UINT_MAX - 0.5f;
        float_16_scaled (val, tmp);
#if __BYTE_ORDER == __LITTLE_ENDIAN
        dst[0]=(char)(tmp>>8);
        dst[1]=(char)(tmp);
#elif __BYTE_ORDER == __BIG_ENDIAN
        dst[0]=(char)(tmp);
        dst[1]=(char)(tmp>>8);
#endif
        dst += dst_skip;
        src++;
    }
}

void sample_move_dither_rect_d16_sS (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state)
{
    jack_default_audio_sample_t val;

    while (nsamples--) {
        val = (*src * SAMPLE_16BIT_SCALING) + fast_rand() / (float)UINT_MAX - 0.5f;
        float_16_scaled (val, *((int16_t*) dst));
        dst += dst_skip;
        src++;
    }
}
void sample_move_dither_tri_d16_sSs (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state)
{
    jack_default_audio_sample_t val;
    int16_t tmp;

    while (nsamples--) {
        val = (*src * SAMPLE_16BIT_SCALING) + ((float)fast_rand() + (float)fast_rand()) / (float)UINT_MAX - 1.0f;
        float_16_scaled (val, tmp);
#if __BYTE_ORDER == __LITTLE_ENDIAN
        dst[0]=(char)(tmp>>8);
        dst[1]=(char)(tmp);
#elif __BYTE_ORDER == __BIG_ENDIAN
        dst[0]=(char)(tmp);
        dst[1]=(char)(tmp>>8);
#endif
        dst += dst_skip;
        src++;
    }
}

void sample_move_dither_tri_d16_sS (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state)
{
    jack_default_audio_sample_t val;

    while (nsamples--) {
        val = (*src * SAMPLE_16BIT_SCALING) + ((float)fast_rand() + (float)fast_rand()) / (float)UINT_MAX - 1.0f;
        float_16_scaled (val, *((int16_t*) dst));
        dst += dst_skip;
        src++;
    }
}
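
/* Why two calls to fast_rand() (explanatory note, not in the original):
 * the sum of two independent uniform variables has a triangular
 * distribution, so
 *
 *     ((float)fast_rand() + (float)fast_rand()) / (float)UINT_MAX - 1.0f
 *
 * is TPDF dither spanning roughly [-1, +1] LSB around the already-scaled
 * sample, which decorrelates quantization error better than the rectangular
 * dither of the *_dither_rect_* functions above.
 */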
void sample_move_dither_shaped_d16_sSs (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state)
{
    jack_default_audio_sample_t x;
    jack_default_audio_sample_t xe; /* the input sample - filtered error */
    jack_default_audio_sample_t xp; /* x' */
    float        r;
    float        rm1 = state->rm1;
    unsigned int idx = state->idx;
    int16_t      tmp;

    while (nsamples--) {
        x = *src * SAMPLE_16BIT_SCALING;
        r = ((float)fast_rand() + (float)fast_rand()) / (float)UINT_MAX - 1.0f;
        /* Filter the error with Lipshitz's minimally audible FIR:
           [2.033 -2.165 1.959 -1.590 0.6149] */
        xe = x
             - state->e[idx] * 2.033f
             + state->e[(idx - 1) & DITHER_BUF_MASK] * 2.165f
             - state->e[(idx - 2) & DITHER_BUF_MASK] * 1.959f
             + state->e[(idx - 3) & DITHER_BUF_MASK] * 1.590f
             - state->e[(idx - 4) & DITHER_BUF_MASK] * 0.6149f;
        xp = xe + r - rm1;
        rm1 = r;
        float_16_scaled (xp, tmp);
        /* Intrinsic z^-1 delay */
        idx = (idx + 1) & DITHER_BUF_MASK;
        state->e[idx] = xp - xe;
#if __BYTE_ORDER == __LITTLE_ENDIAN
        dst[0]=(char)(tmp>>8);
        dst[1]=(char)(tmp);
#elif __BYTE_ORDER == __BIG_ENDIAN
        dst[0]=(char)(tmp);
        dst[1]=(char)(tmp>>8);
#endif
        dst += dst_skip;
        src++;
    }
    state->rm1 = rm1;
    state->idx = idx;
}
void sample_move_dither_shaped_d16_sS (char *dst, jack_default_audio_sample_t *src, unsigned long nsamples, unsigned long dst_skip, dither_state_t *state)
{
    jack_default_audio_sample_t x;
    jack_default_audio_sample_t xe; /* the input sample - filtered error */
    jack_default_audio_sample_t xp; /* x' */
    float        r;
    float        rm1 = state->rm1;
    unsigned int idx = state->idx;

    while (nsamples--) {
        x = *src * SAMPLE_16BIT_SCALING;
        r = ((float)fast_rand() + (float)fast_rand()) / (float)UINT_MAX - 1.0f;
        /* Filter the error with Lipshitz's minimally audible FIR:
           [2.033 -2.165 1.959 -1.590 0.6149] */
        xe = x
             - state->e[idx] * 2.033f
             + state->e[(idx - 1) & DITHER_BUF_MASK] * 2.165f
             - state->e[(idx - 2) & DITHER_BUF_MASK] * 1.959f
             + state->e[(idx - 3) & DITHER_BUF_MASK] * 1.590f
             - state->e[(idx - 4) & DITHER_BUF_MASK] * 0.6149f;
        xp = xe + r - rm1;
        rm1 = r;
        float_16_scaled (xp, *((int16_t*) dst));
        /* Intrinsic z^-1 delay */
        idx = (idx + 1) & DITHER_BUF_MASK;
        state->e[idx] = *((int16_t*) dst) - xe;
        dst += dst_skip;
        src++;
    }
    state->rm1 = rm1;
    state->idx = idx;
}
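
/* Sketch of the noise-shaping loop above (explanatory note, not in the
 * original): each output is quantized from xp = xe + r - rm1, where
 * r - rm1 is high-pass filtered TPDF dither, and the running error stored
 * in state->e[] is fed back through the 5-tap Lipshitz FIR so that the
 * quantization error spectrum is pushed toward frequencies where hearing
 * is least sensitive.
 */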
void sample_move_dS_s16s (jack_default_audio_sample_t *dst, char *src, unsigned long nsamples, unsigned long src_skip)
{
    short z;
    const jack_default_audio_sample_t scaling = 1.0/SAMPLE_16BIT_SCALING;
#if defined (__ARM_NEON__) || defined (__ARM_NEON)
    const float32x4_t vscaling = vdupq_n_f32(scaling);
    unsigned long unrolled = nsamples / 4;
    while (unrolled--) {
        int16x4_t source16x4;
        switch(src_skip) {
        case 2:
            source16x4 = vld1_s16((int16_t*)src);
            break;
        case 4:
            source16x4 = vld2_s16((int16_t*)src).val[0];
            break;
        default:
            source16x4 = vld1_lane_s16((int16_t*)src, source16x4, 0);
            source16x4 = vld1_lane_s16((int16_t*)(src+src_skip), source16x4, 1);
            source16x4 = vld1_lane_s16((int16_t*)(src+2*src_skip), source16x4, 2);
            source16x4 = vld1_lane_s16((int16_t*)(src+3*src_skip), source16x4, 3);
            break;
        }
        source16x4 = vreinterpret_s16_u8(vrev16_u8(vreinterpret_u8_s16(source16x4)));
        int32x4_t source32x4 = vmovl_s16(source16x4);
        src += 4 * src_skip;

        float32x4_t converted = vcvtq_f32_s32(source32x4);
        float32x4_t scaled = vmulq_f32(converted, vscaling);
        vst1q_f32(dst, scaled);
        dst += 4;
    }
    nsamples = nsamples & 3;
#endif

    /* ALERT: signed sign-extension portability !!! */

    while (nsamples--) {
#if __BYTE_ORDER == __LITTLE_ENDIAN
        z = (unsigned char)(src[0]);
        z <<= 8;
        z |= (unsigned char)(src[1]);
#elif __BYTE_ORDER == __BIG_ENDIAN
        z = (unsigned char)(src[1]);
        z <<= 8;
        z |= (unsigned char)(src[0]);
#endif
        *dst = z * scaling;
        dst++;
        src += src_skip;
    }
}
void sample_move_dS_s16 (jack_default_audio_sample_t *dst, char *src, unsigned long nsamples, unsigned long src_skip)
{
    /* ALERT: signed sign-extension portability !!! */
    const jack_default_audio_sample_t scaling = 1.0/SAMPLE_16BIT_SCALING;
#if defined (__ARM_NEON__) || defined (__ARM_NEON)
    const float32x4_t vscaling = vdupq_n_f32(scaling);
    unsigned long unrolled = nsamples / 4;
    while (unrolled--) {
        int16x4_t source16x4;
        switch(src_skip) {
        case 2:
            source16x4 = vld1_s16((int16_t*)src);
            break;
        case 4:
            source16x4 = vld2_s16((int16_t*)src).val[0];
            break;
        default:
            source16x4 = vld1_lane_s16((int16_t*)src, source16x4, 0);
            source16x4 = vld1_lane_s16((int16_t*)(src+src_skip), source16x4, 1);
            source16x4 = vld1_lane_s16((int16_t*)(src+2*src_skip), source16x4, 2);
            source16x4 = vld1_lane_s16((int16_t*)(src+3*src_skip), source16x4, 3);
            break;
        }
        int32x4_t source32x4 = vmovl_s16(source16x4);
        src += 4 * src_skip;

        float32x4_t converted = vcvtq_f32_s32(source32x4);
        float32x4_t scaled = vmulq_f32(converted, vscaling);
        vst1q_f32(dst, scaled);
        dst += 4;
    }
    nsamples = nsamples & 3;
#endif

    while (nsamples--) {
        *dst = (*((short *) src)) * scaling;
        dst++;
        src += src_skip;
    }
}
void memset_interleave (char *dst, char val, unsigned long bytes,
                        unsigned long unit_bytes,
                        unsigned long skip_bytes)
{
    switch (unit_bytes) {
    case 1:
        while (bytes--) {
            *dst = val;
            dst += skip_bytes;
        }
        break;
    case 2:
        /* note: the 2- and 4-byte cases store (short)val / (int)val rather
           than a val-filled byte pattern, so they match memset() semantics
           only for val == 0 (the typical use: writing silence) */
        while (bytes) {
            *((short *) dst) = (short) val;
            dst += skip_bytes;
            bytes -= 2;
        }
        break;
    case 4:
        while (bytes) {
            *((int *) dst) = (int) val;
            dst += skip_bytes;
            bytes -= 4;
        }
        break;
    default:
        while (bytes) {
            memset(dst, val, unit_bytes);
            dst += skip_bytes;
            bytes -= unit_bytes;
        }
        break;
    }
}
/* COPY FUNCTIONS: used to move data from an input channel to an
   output channel. Note that we assume that the skip distance
   is the same for both channels. This is completely fine
   unless the input and output were on different audio interfaces that
   were interleaved differently. We don't try to handle that.
*/
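
/* Usage sketch (illustrative note, not in the original; buffer names and the
 * stereo layout are hypothetical): copying one 16 bit channel out of a
 * 2-channel interleaved buffer, where each frame is 2 channels * 2 bytes:
 *
 *     memcpy_interleave_d16_s16 (dst, src, nframes * 2, 4, 4);
 *
 * src_bytes counts payload bytes (2 per frame), while the skip arguments
 * advance whole frames in both buffers.
 */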
void
memcpy_fake (char *dst, char *src, unsigned long src_bytes, unsigned long foo, unsigned long bar)
{
    memcpy (dst, src, src_bytes);
}

void
memcpy_interleave_d16_s16 (char *dst, char *src, unsigned long src_bytes,
                           unsigned long dst_skip_bytes, unsigned long src_skip_bytes)
{
    while (src_bytes) {
        *((short *) dst) = *((short *) src);
        dst += dst_skip_bytes;
        src += src_skip_bytes;
        src_bytes -= 2;
    }
}

void
memcpy_interleave_d24_s24 (char *dst, char *src, unsigned long src_bytes,
                           unsigned long dst_skip_bytes, unsigned long src_skip_bytes)
{
    while (src_bytes) {
        memcpy(dst, src, 3);
        dst += dst_skip_bytes;
        src += src_skip_bytes;
        src_bytes -= 3;
    }
}

void
memcpy_interleave_d32_s32 (char *dst, char *src, unsigned long src_bytes,
                           unsigned long dst_skip_bytes, unsigned long src_skip_bytes)
{
    while (src_bytes) {
        *((int *) dst) = *((int *) src);
        dst += dst_skip_bytes;
        src += src_skip_bytes;
        src_bytes -= 4;
    }
}