Audio plugin host https://kx.studio/carla

juce_fallback_SIMDNativeOps.h 10KB

/*
  ==============================================================================

   This file is part of the JUCE library.
   Copyright (c) 2022 - Raw Material Software Limited

   JUCE is an open source library subject to commercial or open-source
   licensing.

   By using JUCE, you agree to the terms of both the JUCE 7 End-User License
   Agreement and JUCE Privacy Policy.

   End User License Agreement: www.juce.com/juce-7-licence
   Privacy Policy: www.juce.com/juce-privacy-policy

   Or: You may also use this code under the terms of the GPL v3 (see
   www.gnu.org/licenses).

   JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
   EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
   DISCLAIMED.

  ==============================================================================
*/

namespace juce
{
namespace dsp
{

/** A template specialisation to find corresponding mask type for primitives. */
namespace SIMDInternal
{
    template <typename Primitive> struct MaskTypeFor        { using type = Primitive; };
    template <> struct MaskTypeFor <float>                  { using type = uint32_t; };
    template <> struct MaskTypeFor <double>                 { using type = uint64_t; };
    template <> struct MaskTypeFor <char>                   { using type = uint8_t;  };
    template <> struct MaskTypeFor <int8_t>                 { using type = uint8_t;  };
    template <> struct MaskTypeFor <int16_t>                { using type = uint16_t; };
    template <> struct MaskTypeFor <int32_t>                { using type = uint32_t; };
    template <> struct MaskTypeFor <int64_t>                { using type = uint64_t; };
    template <> struct MaskTypeFor <std::complex<float>>    { using type = uint32_t; };
    template <> struct MaskTypeFor <std::complex<double>>   { using type = uint64_t; };

    template <typename Primitive> struct PrimitiveType                          { using type = typename std::remove_cv<Primitive>::type; };
    template <typename Primitive> struct PrimitiveType<std::complex<Primitive>> { using type = typename std::remove_cv<Primitive>::type; };

    template <int n> struct Log2Helper    { enum { value = Log2Helper<n / 2>::value + 1 }; };
    template <>      struct Log2Helper<1> { enum { value = 0 }; };
}
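// For example, MaskTypeFor<float>::type is uint32_t (an unsigned integer of the same
// width as a float lane, able to hold an all-ones comparison mask), and
// Log2Helper<4>::value is 2, the number of bits needed to index four lanes.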
/**
    Useful fallback routines to use if the native SIMD op is not supported. You
    should never need to use this directly. Use juce_SIMDRegister instead.

    @tags{DSP}
*/
template <typename ScalarType, typename vSIMDType>
struct SIMDFallbackOps
{
    static constexpr size_t n    = sizeof (vSIMDType) / sizeof (ScalarType);
    static constexpr size_t mask = (sizeof (vSIMDType) / sizeof (ScalarType)) - 1;
    static constexpr size_t bits = SIMDInternal::Log2Helper<(int) n>::value;

    // helper types
    using MaskType = typename SIMDInternal::MaskTypeFor<ScalarType>::type;
    union UnionType     { vSIMDType v; ScalarType s[n]; };
    union UnionMaskType { vSIMDType v; MaskType   m[n]; };

    // fallback methods
    static forcedinline vSIMDType add        (vSIMDType a, vSIMDType b) noexcept { return apply<ScalarAdd> (a, b); }
    static forcedinline vSIMDType sub        (vSIMDType a, vSIMDType b) noexcept { return apply<ScalarSub> (a, b); }
    static forcedinline vSIMDType mul        (vSIMDType a, vSIMDType b) noexcept { return apply<ScalarMul> (a, b); }
    static forcedinline vSIMDType bit_and    (vSIMDType a, vSIMDType b) noexcept { return bitapply<ScalarAnd> (a, b); }
    static forcedinline vSIMDType bit_or     (vSIMDType a, vSIMDType b) noexcept { return bitapply<ScalarOr > (a, b); }
    static forcedinline vSIMDType bit_xor    (vSIMDType a, vSIMDType b) noexcept { return bitapply<ScalarXor> (a, b); }
    static forcedinline vSIMDType bit_notand (vSIMDType a, vSIMDType b) noexcept { return bitapply<ScalarNot> (a, b); }
    static forcedinline vSIMDType min        (vSIMDType a, vSIMDType b) noexcept { return apply<ScalarMin> (a, b); }
    static forcedinline vSIMDType max        (vSIMDType a, vSIMDType b) noexcept { return apply<ScalarMax> (a, b); }
    static forcedinline vSIMDType equal              (vSIMDType a, vSIMDType b) noexcept { return cmp<ScalarEq > (a, b); }
    static forcedinline vSIMDType notEqual           (vSIMDType a, vSIMDType b) noexcept { return cmp<ScalarNeq> (a, b); }
    static forcedinline vSIMDType greaterThan        (vSIMDType a, vSIMDType b) noexcept { return cmp<ScalarGt > (a, b); }
    static forcedinline vSIMDType greaterThanOrEqual (vSIMDType a, vSIMDType b) noexcept { return cmp<ScalarGeq> (a, b); }

    static forcedinline ScalarType get (vSIMDType v, size_t i) noexcept
    {
        UnionType u {v};
        return u.s[i];
    }

    static forcedinline vSIMDType set (vSIMDType v, size_t i, ScalarType s) noexcept
    {
        UnionType u {v};
        u.s[i] = s;
        return u.v;
    }

    static forcedinline vSIMDType bit_not (vSIMDType av) noexcept
    {
        UnionMaskType a {av};

        for (size_t i = 0; i < n; ++i)
            a.m[i] = ~a.m[i];

        return a.v;
    }

    static forcedinline ScalarType sum (vSIMDType av) noexcept
    {
        UnionType a {av};
        auto retval = static_cast<ScalarType> (0);

        for (size_t i = 0; i < n; ++i)
            retval = static_cast<ScalarType> (retval + a.s[i]);

        return retval;
    }

    static forcedinline vSIMDType truncate (vSIMDType av) noexcept
    {
        UnionType a {av};

        for (size_t i = 0; i < n; ++i)
            a.s[i] = static_cast<ScalarType> (static_cast<int> (a.s[i]));

        return a.v;
    }

    static forcedinline vSIMDType multiplyAdd (vSIMDType av, vSIMDType bv, vSIMDType cv) noexcept
    {
        UnionType a {av}, b {bv}, c {cv};

        for (size_t i = 0; i < n; ++i)
            a.s[i] += b.s[i] * c.s[i];

        return a.v;
    }

    //==============================================================================
    static forcedinline bool allEqual (vSIMDType av, vSIMDType bv) noexcept
    {
        UnionType a {av}, b {bv};

        for (size_t i = 0; i < n; ++i)
            if (a.s[i] != b.s[i])
                return false;

        return true;
    }

    //==============================================================================
    static forcedinline vSIMDType cmplxmul (vSIMDType av, vSIMDType bv) noexcept
    {
        UnionType a {av}, b {bv}, r;

        const int m = n >> 1;
        for (int i = 0; i < m; ++i)
        {
            std::complex<ScalarType> result
                  = std::complex<ScalarType> (a.s[i<<1], a.s[(i<<1)|1])
                  * std::complex<ScalarType> (b.s[i<<1], b.s[(i<<1)|1]);

            r.s[i<<1]     = result.real();
            r.s[(i<<1)|1] = result.imag();
        }

        return r.v;
    }
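    // cmplxmul interprets each register as interleaved (real, imag) pairs: a 4-float
    // vector { x.re, x.im, y.re, y.im } multiplied with { u.re, u.im, v.re, v.im }
    // gives { (x*u).re, (x*u).im, (y*v).re, (y*v).im }, one std::complex product per pair.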
    struct ScalarAdd { static forcedinline ScalarType op (ScalarType a, ScalarType b) noexcept { return a + b; } };
    struct ScalarSub { static forcedinline ScalarType op (ScalarType a, ScalarType b) noexcept { return a - b; } };
    struct ScalarMul { static forcedinline ScalarType op (ScalarType a, ScalarType b) noexcept { return a * b; } };
    struct ScalarMin { static forcedinline ScalarType op (ScalarType a, ScalarType b) noexcept { return jmin (a, b); } };
    struct ScalarMax { static forcedinline ScalarType op (ScalarType a, ScalarType b) noexcept { return jmax (a, b); } };
    struct ScalarAnd { static forcedinline MaskType   op (MaskType a,   MaskType b)   noexcept { return a & b; } };
    struct ScalarOr  { static forcedinline MaskType   op (MaskType a,   MaskType b)   noexcept { return a | b; } };
    struct ScalarXor { static forcedinline MaskType   op (MaskType a,   MaskType b)   noexcept { return a ^ b; } };
    struct ScalarNot { static forcedinline MaskType   op (MaskType a,   MaskType b)   noexcept { return (~a) & b; } };
    struct ScalarEq  { static forcedinline bool       op (ScalarType a, ScalarType b) noexcept { return (a == b); } };
    struct ScalarNeq { static forcedinline bool       op (ScalarType a, ScalarType b) noexcept { return (a != b); } };
    struct ScalarGt  { static forcedinline bool       op (ScalarType a, ScalarType b) noexcept { return (a > b); } };
    struct ScalarGeq { static forcedinline bool       op (ScalarType a, ScalarType b) noexcept { return (a >= b); } };

    // generic apply routines for operations above
    template <typename Op>
    static forcedinline vSIMDType apply (vSIMDType av, vSIMDType bv) noexcept
    {
        UnionType a {av}, b {bv};

        for (size_t i = 0; i < n; ++i)
            a.s[i] = Op::op (a.s[i], b.s[i]);

        return a.v;
    }
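    // cmp builds a lane mask from a scalar predicate: each lane becomes all-ones
    // (e.g. 0xFFFFFFFF for a float-sized lane) where Op::op returns true, and zero
    // otherwise, mirroring the result of native SIMD compare instructions.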
    template <typename Op>
    static forcedinline vSIMDType cmp (vSIMDType av, vSIMDType bv) noexcept
    {
        UnionType a {av}, b {bv};
        UnionMaskType r;

        for (size_t i = 0; i < n; ++i)
            r.m[i] = Op::op (a.s[i], b.s[i]) ? static_cast<MaskType> (-1) : static_cast<MaskType> (0);

        return r.v;
    }

    template <typename Op>
    static forcedinline vSIMDType bitapply (vSIMDType av, vSIMDType bv) noexcept
    {
        UnionMaskType a {av}, b {bv};

        for (size_t i = 0; i < n; ++i)
            a.m[i] = Op::op (a.m[i], b.m[i]);

        return a.v;
    }

    static forcedinline vSIMDType expand (ScalarType s) noexcept
    {
        UnionType r;

        for (size_t i = 0; i < n; ++i)
            r.s[i] = s;

        return r.v;
    }

    static forcedinline vSIMDType load (const ScalarType* a) noexcept
    {
        UnionType r;

        for (size_t i = 0; i < n; ++i)
            r.s[i] = a[i];

        return r.v;
    }

    static forcedinline void store (vSIMDType av, ScalarType* dest) noexcept
    {
        UnionType a {av};

        for (size_t i = 0; i < n; ++i)
            dest[i] = a.s[i];
    }

    template <unsigned int shuffle_idx>
    static forcedinline vSIMDType shuffle (vSIMDType av) noexcept
    {
        UnionType a {av}, r;

        // the compiler will unroll this loop and the index can
        // be computed at compile-time, so this will be super fast
        for (size_t i = 0; i < n; ++i)
            r.s[i] = a.s[(shuffle_idx >> (bits * i)) & mask];

        return r.v;
    }
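    // For example, with four float lanes (bits == 2, mask == 3) the template argument
    // packs one 2-bit source-lane index per destination lane: shuffle<0b11100100> (v)
    // returns v unchanged, while shuffle<0b00000000> (v) broadcasts lane 0 to every lane.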
};
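/*  A minimal usage sketch (illustrative only; Vec4 is a hypothetical vector type, not
    part of JUCE). It drives the fallback ops directly to show the lane-wise semantics;
    real code should use juce::dsp::SIMDRegister, which uses native SIMD ops where they
    are available and these fallbacks otherwise.

        struct Vec4 { float data[4]; };               // hypothetical 16-byte vSIMDType
        using Ops = SIMDFallbackOps<float, Vec4>;     // n == 4, bits == 2, mask == 3

        float in1[] = { 1.0f, 2.0f, 3.0f, 4.0f };
        float in2[] = { 10.0f, 20.0f, 30.0f, 40.0f };
        float out[4];

        auto a = Ops::load (in1);
        auto b = Ops::load (in2);
        Ops::store (Ops::add (a, b), out);            // out == { 11, 22, 33, 44 }
        float dot = Ops::sum (Ops::mul (a, b));       // 10 + 40 + 90 + 160 == 300
*/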
} // namespace dsp
} // namespace juce