The JUCE cross-platform C++ framework, with DISTRHO/KXStudio specific changes

/*
  ==============================================================================

   This file is part of the JUCE library.
   Copyright (c) 2017 - ROLI Ltd.

   JUCE is an open source library subject to commercial or open-source
   licensing.

   By using JUCE, you agree to the terms of both the JUCE 5 End-User License
   Agreement and JUCE 5 Privacy Policy (both updated and effective as of the
   27th April 2017).

   End User License Agreement: www.juce.com/juce-5-licence
   Privacy Policy: www.juce.com/juce-5-privacy-policy

   Or: You may also use this code under the terms of the GPL v3 (see
   www.gnu.org/licenses).

   JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
   EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
   DISCLAIMED.

  ==============================================================================
*/
namespace juce
{
namespace dsp
{

/** A template specialisation to find the corresponding mask type for primitives. */
namespace SIMDInternal
{
    template <typename Primitive> struct MaskTypeFor        { typedef Primitive type; };
    template <> struct MaskTypeFor <float>                  { typedef uint32_t  type; };
    template <> struct MaskTypeFor <double>                 { typedef uint64_t  type; };
    template <> struct MaskTypeFor <char>                   { typedef uint8_t   type; };
    template <> struct MaskTypeFor <int8_t>                 { typedef uint8_t   type; };
    template <> struct MaskTypeFor <int16_t>                { typedef uint16_t  type; };
    template <> struct MaskTypeFor <int32_t>                { typedef uint32_t  type; };
    template <> struct MaskTypeFor <int64_t>                { typedef uint64_t  type; };
    template <> struct MaskTypeFor <std::complex<float>>    { typedef uint32_t  type; };
    template <> struct MaskTypeFor <std::complex<double>>   { typedef uint64_t  type; };

    template <typename Primitive> struct PrimitiveType                           { typedef Primitive type; };
    template <typename Primitive> struct PrimitiveType<std::complex<Primitive>>  { typedef Primitive type; };

    template <int n> struct Log2Helper     { enum { value = Log2Helper<n/2>::value + 1 }; };
    template <>      struct Log2Helper<1>  { enum { value = 0 }; };
}
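
// Illustrative note (not part of the original header): MaskTypeFor maps each
// scalar type to an unsigned integer of the same width, e.g.
// MaskTypeFor<float>::type is uint32_t, and Log2Helper computes an integer
// log2 at compile time, e.g. Log2Helper<4>::value == 2. These properties could
// be checked with static_asserts such as:
//
//     static_assert (std::is_same<SIMDInternal::MaskTypeFor<float>::type, uint32_t>::value, "");
//     static_assert (SIMDInternal::Log2Helper<4>::value == 2, "");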

/**
    Useful fallback routines to use if the native SIMD op is not supported. You
    should never need to use this directly. Use juce_SIMDRegister instead.

    @tags{DSP}
*/
template <typename ScalarType, typename vSIMDType>
struct SIMDFallbackOps
{
    static constexpr size_t n    = sizeof (vSIMDType) / sizeof (ScalarType);
    static constexpr size_t mask = (sizeof (vSIMDType) / sizeof (ScalarType)) - 1;
    static constexpr size_t bits = SIMDInternal::Log2Helper<n>::value;

    // corresponding mask type
    typedef typename SIMDInternal::MaskTypeFor<ScalarType>::type MaskType;

    // fallback methods
    static forcedinline vSIMDType add        (vSIMDType a, vSIMDType b) noexcept { return apply<ScalarAdd> (a, b); }
    static forcedinline vSIMDType sub        (vSIMDType a, vSIMDType b) noexcept { return apply<ScalarSub> (a, b); }
    static forcedinline vSIMDType mul        (vSIMDType a, vSIMDType b) noexcept { return apply<ScalarMul> (a, b); }
    static forcedinline vSIMDType bit_and    (vSIMDType a, vSIMDType b) noexcept { return bitapply<ScalarAnd> (a, b); }
    static forcedinline vSIMDType bit_or     (vSIMDType a, vSIMDType b) noexcept { return bitapply<ScalarOr > (a, b); }
    static forcedinline vSIMDType bit_xor    (vSIMDType a, vSIMDType b) noexcept { return bitapply<ScalarXor> (a, b); }
    static forcedinline vSIMDType bit_notand (vSIMDType a, vSIMDType b) noexcept { return bitapply<ScalarNot> (a, b); }

    static forcedinline vSIMDType min                (vSIMDType a, vSIMDType b) noexcept { return apply<ScalarMin> (a, b); }
    static forcedinline vSIMDType max                (vSIMDType a, vSIMDType b) noexcept { return apply<ScalarMax> (a, b); }
    static forcedinline vSIMDType equal              (vSIMDType a, vSIMDType b) noexcept { return cmp<ScalarEq > (a, b); }
    static forcedinline vSIMDType notEqual           (vSIMDType a, vSIMDType b) noexcept { return cmp<ScalarNeq> (a, b); }
    static forcedinline vSIMDType greaterThan        (vSIMDType a, vSIMDType b) noexcept { return cmp<ScalarGt > (a, b); }
    static forcedinline vSIMDType greaterThanOrEqual (vSIMDType a, vSIMDType b) noexcept { return cmp<ScalarGeq> (a, b); }

    static forcedinline vSIMDType bit_not (vSIMDType a) noexcept
    {
        vSIMDType retval;
        auto* dst  = reinterpret_cast<MaskType*> (&retval);
        auto* aSrc = reinterpret_cast<const MaskType*> (&a);

        for (size_t i = 0; i < n; ++i)
            dst [i] = ~aSrc [i];

        return retval;
    }

    static forcedinline ScalarType sum (vSIMDType a) noexcept
    {
        auto retval = static_cast<ScalarType> (0);
        auto* aSrc = reinterpret_cast<const ScalarType*> (&a);

        for (size_t i = 0; i < n; ++i)
            retval += aSrc [i];

        return retval;
    }

    static forcedinline vSIMDType multiplyAdd (vSIMDType a, vSIMDType b, vSIMDType c) noexcept
    {
        vSIMDType retval;
        auto* dst  = reinterpret_cast<ScalarType*> (&retval);
        auto* aSrc = reinterpret_cast<const ScalarType*> (&a);
        auto* bSrc = reinterpret_cast<const ScalarType*> (&b);
        auto* cSrc = reinterpret_cast<const ScalarType*> (&c);

        for (size_t i = 0; i < n; ++i)
            dst [i] = aSrc [i] + (bSrc [i] * cSrc [i]);

        return retval;
    }
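
    // Descriptive note (added for clarity, not in the original header):
    // multiplyAdd computes a + (b * c) for each lane, i.e. the accumulator
    // argument comes first, followed by the two factors.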
    //==============================================================================
    static forcedinline bool allEqual (vSIMDType a, vSIMDType b) noexcept
    {
        auto* aSrc = reinterpret_cast<const ScalarType*> (&a);
        auto* bSrc = reinterpret_cast<const ScalarType*> (&b);

        for (size_t i = 0; i < n; ++i)
            if (aSrc[i] != bSrc[i])
                return false;

        return true;
    }

    //==============================================================================
    static forcedinline vSIMDType cmplxmul (vSIMDType a, vSIMDType b) noexcept
    {
        vSIMDType retval;
        auto* dst  = reinterpret_cast<std::complex<ScalarType>*> (&retval);
        auto* aSrc = reinterpret_cast<const std::complex<ScalarType>*> (&a);
        auto* bSrc = reinterpret_cast<const std::complex<ScalarType>*> (&b);

        const int m = n >> 1;
        for (int i = 0; i < m; ++i)
            dst [i] = aSrc [i] * bSrc [i];

        return retval;
    }
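
    // Illustrative note (not part of the original header): cmplxmul reinterprets
    // the vector as n/2 interleaved std::complex values (real, imag, real, imag, ...)
    // and multiplies them pairwise, so e.g. (1 + 2i) * (3 + 4i) produces -5 + 10i
    // in the first complex lane.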

    struct ScalarAdd { static forcedinline ScalarType op (ScalarType a, ScalarType b) noexcept { return a + b; } };
    struct ScalarSub { static forcedinline ScalarType op (ScalarType a, ScalarType b) noexcept { return a - b; } };
    struct ScalarMul { static forcedinline ScalarType op (ScalarType a, ScalarType b) noexcept { return a * b; } };
    struct ScalarMin { static forcedinline ScalarType op (ScalarType a, ScalarType b) noexcept { return jmin (a, b); } };
    struct ScalarMax { static forcedinline ScalarType op (ScalarType a, ScalarType b) noexcept { return jmax (a, b); } };
    struct ScalarAnd { static forcedinline MaskType   op (MaskType   a, MaskType   b) noexcept { return a & b; } };
    struct ScalarOr  { static forcedinline MaskType   op (MaskType   a, MaskType   b) noexcept { return a | b; } };
    struct ScalarXor { static forcedinline MaskType   op (MaskType   a, MaskType   b) noexcept { return a ^ b; } };
    struct ScalarNot { static forcedinline MaskType   op (MaskType   a, MaskType   b) noexcept { return (~a) & b; } };
    struct ScalarEq  { static forcedinline bool       op (ScalarType a, ScalarType b) noexcept { return (a == b); } };
    struct ScalarNeq { static forcedinline bool       op (ScalarType a, ScalarType b) noexcept { return (a != b); } };
    struct ScalarGt  { static forcedinline bool       op (ScalarType a, ScalarType b) noexcept { return (a > b); } };
    struct ScalarGeq { static forcedinline bool       op (ScalarType a, ScalarType b) noexcept { return (a >= b); } };

    // generic apply routines for operations above
    template <typename Op>
    static forcedinline vSIMDType apply (vSIMDType a, vSIMDType b) noexcept
    {
        vSIMDType retval;
        auto* dst  = reinterpret_cast<ScalarType*> (&retval);
        auto* aSrc = reinterpret_cast<const ScalarType*> (&a);
        auto* bSrc = reinterpret_cast<const ScalarType*> (&b);

        for (size_t i = 0; i < n; ++i)
            dst [i] = Op::op (aSrc [i], bSrc [i]);

        return retval;
    }

    template <typename Op>
    static forcedinline vSIMDType cmp (vSIMDType a, vSIMDType b) noexcept
    {
        vSIMDType retval;
        auto* dst  = reinterpret_cast<MaskType*> (&retval);
        auto* aSrc = reinterpret_cast<const ScalarType*> (&a);
        auto* bSrc = reinterpret_cast<const ScalarType*> (&b);

        for (size_t i = 0; i < n; ++i)
            dst [i] = Op::op (aSrc [i], bSrc [i]) ? static_cast<MaskType> (-1) : static_cast<MaskType> (0);

        return retval;
    }

    template <typename Op>
    static forcedinline vSIMDType bitapply (vSIMDType a, vSIMDType b) noexcept
    {
        vSIMDType retval;
        auto* dst  = reinterpret_cast<MaskType*> (&retval);
        auto* aSrc = reinterpret_cast<const MaskType*> (&a);
        auto* bSrc = reinterpret_cast<const MaskType*> (&b);

        for (size_t i = 0; i < n; ++i)
            dst [i] = Op::op (aSrc [i], bSrc [i]);

        return retval;
    }

    static forcedinline vSIMDType expand (ScalarType s) noexcept
    {
        vSIMDType retval;
        auto* dst = reinterpret_cast<ScalarType*> (&retval);

        for (size_t i = 0; i < n; ++i)
            dst [i] = s;

        return retval;
    }

    static forcedinline vSIMDType load (const ScalarType* a) noexcept
    {
        vSIMDType retval;
        auto* dst = reinterpret_cast<ScalarType*> (&retval);

        for (size_t i = 0; i < n; ++i)
            dst [i] = a[i];

        return retval;
    }

    static forcedinline void store (vSIMDType value, ScalarType* dest) noexcept
    {
        const auto* src = reinterpret_cast<const ScalarType*> (&value);

        for (size_t i = 0; i < n; ++i)
            dest[i] = src[i];
    }

    template <unsigned int shuffle_idx>
    static forcedinline vSIMDType shuffle (vSIMDType a) noexcept
    {
        vSIMDType retval;
        auto* dst  = reinterpret_cast<ScalarType*> (&retval);
        auto* aSrc = reinterpret_cast<const ScalarType*> (&a);

        // the compiler will unroll this loop and the index can
        // be computed at compile-time, so this will be super fast
        for (size_t i = 0; i < n; ++i)
            dst [i] = aSrc [(shuffle_idx >> (bits * i)) & mask];

        return retval;
    }
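
    // Illustrative note (not part of the original header): shuffle_idx packs one
    // source-lane index per destination lane, using `bits` bits per lane. For a
    // four-lane float vector (n == 4, bits == 2, mask == 3), the index
    // 0b00011011 (0x1B) reverses the lanes: destination lane 0 reads source
    // lane 3, lane 1 reads lane 2, and so on, e.g.
    //
    //     auto reversed = shuffle<0x1B> (a);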
};

} // namespace dsp
} // namespace juce
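
// A minimal usage sketch (illustrative only, not part of this header):
// SIMDFallbackOps is normally used internally by the SIMDRegister wrappers,
// but its static methods can be exercised directly by treating any suitably
// sized POD type as the vector type. `FloatVec4` below is a hypothetical
// stand-in for a native 128-bit vector type.
//
//     struct FloatVec4 { float v[4]; };
//     using Ops = juce::dsp::SIMDFallbackOps<float, FloatVec4>;
//
//     float in[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
//     auto a = Ops::load (in);             // a = { 1, 2, 3, 4 }
//     auto b = Ops::expand (10.0f);        // b = { 10, 10, 10, 10 }
//     auto c = Ops::add (a, b);            // c = { 11, 12, 13, 14 }
//     float out[4];
//     Ops::store (c, out);
//     auto total = Ops::sum (c);           // total == 50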