/*
  ==============================================================================

   This file is part of the JUCE library.
   Copyright (c) 2017 - ROLI Ltd.

   JUCE is an open source library subject to commercial or open-source
   licensing.

   By using JUCE, you agree to the terms of both the JUCE 5 End-User License
   Agreement and JUCE 5 Privacy Policy (both updated and effective as of the
   27th April 2017).

   End User License Agreement: www.juce.com/juce-5-licence
   Privacy Policy: www.juce.com/juce-5-privacy-policy

   Or: You may also use this code under the terms of the GPL v3 (see
   www.gnu.org/licenses).

   JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
   EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
   DISCLAIMED.

  ==============================================================================
*/

namespace juce
{
namespace dsp
{

#ifndef DOXYGEN
// This class is needed internally.
template <typename Scalar>
struct CmplxSIMDOps;
#endif

//==============================================================================
/**
    A wrapper around the platform's native SIMD register type.

    This class is only available on SIMD machines. Use JUCE_USE_SIMD to query
    if SIMD is available for your system.

    SIMDRegister<Type> is a templated class representing the native
    vectorized version of FloatingType. SIMDRegister supports all numerical
    primitive types as well as std::complex<float> and std::complex<double>,
    and supports most operations of the corresponding primitive
    type. Additionally, SIMDRegister can be accessed like an array to extract
    the individual elements.

    If you are using SIMDRegister as a pointer, then you must ensure that the
    memory is sufficiently aligned for SIMD vector operations. Failing to do so
    will result in crashes or very slow code. Use SIMDRegister::isSIMDAligned
    to query if a pointer is sufficiently aligned for SIMD vector operations.

    Note that using SIMDRegister without enabling optimizations will result
    in code with very poor performance.
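
    A minimal usage sketch (assuming JUCE_USE_SIMD is enabled and that float
    is a supported element type on the target platform):

    @code
    SIMDRegister<float> gain (0.5f);      // broadcast 0.5f to every element
    SIMDRegister<float> sample (1.0f);    // broadcast 1.0f to every element

    sample *= gain;                       // element-wise multiply
    sample += 0.25f;                      // add a scalar to every element

    float first = sample[0];              // read an individual element
    float total = sample.sum();           // horizontal sum of all elements
    @endcode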

    @tags{DSP}
*/
template <typename Type>
struct SIMDRegister
{
    //==============================================================================
    /** The type that represents the individual constituents of the SIMD Register */
    typedef Type ElementType;

    /** STL compatible value_type definition (same as ElementType). */
    typedef ElementType value_type;

    /** The corresponding primitive integer type; for example, this will be int32_t
        if ElementType is float. */
    typedef typename SIMDInternal::MaskTypeFor<ElementType>::type MaskType;

    //==============================================================================
    // Here are some types which are needed internally

    /** The native primitive type (used internally). */
    typedef typename SIMDInternal::PrimitiveType<ElementType>::type PrimitiveType;

    /** The native operations for this platform and type combination (used internally). */
    typedef SIMDNativeOps<PrimitiveType> NativeOps;

    /** The native type (used internally). */
    typedef typename NativeOps::vSIMDType vSIMDType;

    /** The corresponding integer SIMDRegister type (used internally). */
    typedef SIMDRegister<MaskType> vMaskType;

    /** The internal native type for the corresponding mask type (used internally). */
    typedef typename vMaskType::vSIMDType vMaskSIMDType;

    /** Wrapper for operations which need to be handled differently for complex
        and scalar types (used internally). */
    typedef CmplxSIMDOps<ElementType> CmplxOps;

    /** Type which is returned when using the subscript operator. The returned type
        should be used just like the type ElementType. */
    struct ElementAccess;

    //==============================================================================
    /** The size in bytes of this register. */
    static constexpr size_t SIMDRegisterSize = sizeof (vSIMDType);

    /** The number of elements that this vector can hold. */
    static constexpr size_t SIMDNumElements = SIMDRegisterSize / sizeof (ElementType);

    vSIMDType value;

    /** Default constructor. */
    inline SIMDRegister() noexcept {}

    /** Constructs an object from the native SIMD type. */
    inline SIMDRegister (vSIMDType a) noexcept : value (a) {}

    /** Constructs an object from a scalar type by broadcasting it to all elements. */
    inline SIMDRegister (Type s) noexcept { *this = s; }

    /** Destructor. */
    inline ~SIMDRegister() noexcept {}

    //==============================================================================
    /** Returns the number of elements in this vector. */
    static constexpr size_t size() noexcept { return SIMDNumElements; }

    //==============================================================================
    /** Creates a new SIMDRegister from the corresponding scalar primitive.
        The scalar is extended to all elements of the vector. */
    inline static SIMDRegister JUCE_VECTOR_CALLTYPE expand (ElementType s) noexcept { return {CmplxOps::expand (s)}; }

    /** Creates a new SIMDRegister from the internal SIMD type (for example
        __m128 for single-precision floating point on SSE architectures). */
    inline static SIMDRegister JUCE_VECTOR_CALLTYPE fromNative (vSIMDType a) noexcept { return {a}; }

    /** Creates a new SIMDRegister from the first SIMDNumElements of a scalar array. */
    inline static SIMDRegister JUCE_VECTOR_CALLTYPE fromRawArray (const ElementType* a) noexcept
    {
        jassert (isSIMDAligned (a));
        return {CmplxOps::load (a)};
    }

    /** Copies the elements of the SIMDRegister to a scalar array in memory. */
    inline void JUCE_VECTOR_CALLTYPE copyToRawArray (ElementType* a) const noexcept
    {
        jassert (isSIMDAligned (a));
        CmplxOps::store (value, a);
    }
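
    /*  A minimal sketch of moving data between an aligned scalar array and a
        SIMDRegister (the element type and the added constant are illustrative
        assumptions):

            alignas (SIMDRegister<float>::SIMDRegisterSize)
                float data[SIMDRegister<float>::SIMDNumElements] = {};

            auto reg = SIMDRegister<float>::fromRawArray (data);   // aligned load
            reg += 1.0f;                                           // add 1 to every element
            reg.copyToRawArray (data);                             // aligned store back
    */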

    //==============================================================================
    /** Returns the idx-th element of the receiver. Note that this does not check if idx
        is larger than the native register size. */
    inline ElementType JUCE_VECTOR_CALLTYPE get (size_t idx) const noexcept
    {
        jassert (idx < SIMDNumElements);
        return CmplxOps::get (value, idx);
    }

    /** Sets the idx-th element of the receiver. Note that this does not check if idx
        is larger than the native register size. */
    inline void JUCE_VECTOR_CALLTYPE set (size_t idx, ElementType v) noexcept
    {
        jassert (idx < SIMDNumElements);
        value = CmplxOps::set (value, idx, v);
    }

    //==============================================================================
    /** Returns the idx-th element of the receiver. Note that this does not check if idx
        is larger than the native register size. */
    inline ElementType JUCE_VECTOR_CALLTYPE operator[] (size_t idx) const noexcept
    {
        return get (idx);
    }

    /** Returns the idx-th element of the receiver. Note that this does not check if idx
        is larger than the native register size. */
    inline ElementAccess JUCE_VECTOR_CALLTYPE operator[] (size_t idx) noexcept
    {
        jassert (idx < SIMDNumElements);
        return ElementAccess (*this, idx);
    }
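
    /*  A short sketch of element access (the element type and values are
        illustrative assumptions):

            SIMDRegister<float> r (0.0f);

            for (size_t i = 0; i < SIMDRegister<float>::size(); ++i)
                r.set (i, (float) i);              // write element i

            float last = r[r.size() - 1];          // read back via the subscript operator
    */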

    //==============================================================================
    /** Adds another SIMDRegister to the receiver. */
    inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator+= (SIMDRegister v) noexcept { value = NativeOps::add (value, v.value); return *this; }

    /** Subtracts another SIMDRegister from the receiver. */
    inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator-= (SIMDRegister v) noexcept { value = NativeOps::sub (value, v.value); return *this; }

    /** Multiplies the receiver by another SIMDRegister. */
    inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator*= (SIMDRegister v) noexcept { value = CmplxOps::mul (value, v.value); return *this; }

    //==============================================================================
    /** Broadcasts the scalar to all elements of the receiver. */
    inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator= (ElementType s) noexcept { value = CmplxOps::expand (s); return *this; }

    /** Adds a scalar to the receiver. */
    inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator+= (ElementType s) noexcept { value = NativeOps::add (value, CmplxOps::expand (s)); return *this; }

    /** Subtracts a scalar from the receiver. */
    inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator-= (ElementType s) noexcept { value = NativeOps::sub (value, CmplxOps::expand (s)); return *this; }

    /** Multiplies the receiver by a scalar. */
    inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator*= (ElementType s) noexcept { value = CmplxOps::mul (value, CmplxOps::expand (s)); return *this; }

    //==============================================================================
    /** Bit-and the receiver with SIMDRegister v and store the result in the receiver. */
    inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator&= (vMaskType v) noexcept { value = NativeOps::bit_and (value, toVecType (v.value)); return *this; }

    /** Bit-or the receiver with SIMDRegister v and store the result in the receiver. */
    inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator|= (vMaskType v) noexcept { value = NativeOps::bit_or (value, toVecType (v.value)); return *this; }

    /** Bit-xor the receiver with SIMDRegister v and store the result in the receiver. */
    inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator^= (vMaskType v) noexcept { value = NativeOps::bit_xor (value, toVecType (v.value)); return *this; }

    //==============================================================================
    /** Bit-and each element of the receiver with the scalar s and store the result in the receiver. */
    inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator&= (MaskType s) noexcept { value = NativeOps::bit_and (value, toVecType (s)); return *this; }

    /** Bit-or each element of the receiver with the scalar s and store the result in the receiver. */
    inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator|= (MaskType s) noexcept { value = NativeOps::bit_or (value, toVecType (s)); return *this; }

    /** Bit-xor each element of the receiver with the scalar s and store the result in the receiver. */
    inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator^= (MaskType s) noexcept { value = NativeOps::bit_xor (value, toVecType (s)); return *this; }

    //==============================================================================
    /** Returns the sum of the receiver and v. */
    inline SIMDRegister JUCE_VECTOR_CALLTYPE operator+ (SIMDRegister v) const noexcept { return { NativeOps::add (value, v.value) }; }

    /** Returns the difference of the receiver and v. */
    inline SIMDRegister JUCE_VECTOR_CALLTYPE operator- (SIMDRegister v) const noexcept { return { NativeOps::sub (value, v.value) }; }

    /** Returns the product of the receiver and v. */
    inline SIMDRegister JUCE_VECTOR_CALLTYPE operator* (SIMDRegister v) const noexcept { return { CmplxOps::mul (value, v.value) }; }

    //==============================================================================
    /** Returns a vector where each element is the sum of the corresponding element in the receiver and the scalar s. */
    inline SIMDRegister JUCE_VECTOR_CALLTYPE operator+ (ElementType s) const noexcept { return { NativeOps::add (value, CmplxOps::expand (s)) }; }

    /** Returns a vector where each element is the difference of the corresponding element in the receiver and the scalar s. */
    inline SIMDRegister JUCE_VECTOR_CALLTYPE operator- (ElementType s) const noexcept { return { NativeOps::sub (value, CmplxOps::expand (s)) }; }

    /** Returns a vector where each element is the product of the corresponding element in the receiver and the scalar s. */
    inline SIMDRegister JUCE_VECTOR_CALLTYPE operator* (ElementType s) const noexcept { return { CmplxOps::mul (value, CmplxOps::expand (s)) }; }

    //==============================================================================
    /** Returns the bit-and of the receiver and v. */
    inline SIMDRegister JUCE_VECTOR_CALLTYPE operator& (vMaskType v) const noexcept { return { NativeOps::bit_and (value, toVecType (v.value)) }; }

    /** Returns the bit-or of the receiver and v. */
    inline SIMDRegister JUCE_VECTOR_CALLTYPE operator| (vMaskType v) const noexcept { return { NativeOps::bit_or (value, toVecType (v.value)) }; }

    /** Returns the bit-xor of the receiver and v. */
    inline SIMDRegister JUCE_VECTOR_CALLTYPE operator^ (vMaskType v) const noexcept { return { NativeOps::bit_xor (value, toVecType (v.value)) }; }

    /** Returns a vector where each element is the bit-inverted value of the corresponding element in the receiver. */
    inline SIMDRegister JUCE_VECTOR_CALLTYPE operator~() const noexcept { return { NativeOps::bit_not (value) }; }

    //==============================================================================
    /** Returns a vector where each element is the bit-and'd value of the corresponding element in the receiver and the scalar s. */
    inline SIMDRegister JUCE_VECTOR_CALLTYPE operator& (MaskType s) const noexcept { return { NativeOps::bit_and (value, toVecType (s)) }; }

    /** Returns a vector where each element is the bit-or'd value of the corresponding element in the receiver and the scalar s. */
    inline SIMDRegister JUCE_VECTOR_CALLTYPE operator| (MaskType s) const noexcept { return { NativeOps::bit_or (value, toVecType (s)) }; }

    /** Returns a vector where each element is the bit-xor'd value of the corresponding element in the receiver and the scalar s. */
    inline SIMDRegister JUCE_VECTOR_CALLTYPE operator^ (MaskType s) const noexcept { return { NativeOps::bit_xor (value, toVecType (s)) }; }

    //==============================================================================
    /** Returns true if all element-wise comparisons return true. */
    inline bool JUCE_VECTOR_CALLTYPE operator== (SIMDRegister other) const noexcept { return NativeOps::allEqual (value, other.value); }

    /** Returns true if any element-wise comparison returns false. */
    inline bool JUCE_VECTOR_CALLTYPE operator!= (SIMDRegister other) const noexcept { return ! (*this == other); }

    /** Returns true if all elements are equal to the scalar. */
    inline bool JUCE_VECTOR_CALLTYPE operator== (Type s) const noexcept { return *this == SIMDRegister::expand (s); }

    /** Returns true if any elements are not equal to the scalar. */
    inline bool JUCE_VECTOR_CALLTYPE operator!= (Type s) const noexcept { return ! (*this == s); }

    //==============================================================================
    /** Returns a SIMDRegister of the corresponding integral type where each element has all bits set
        if the corresponding element of a is equal to the corresponding element of b, or zero otherwise.
        The result can then be used in the bit operations defined above to avoid branches in vector SIMD code. */
    static inline vMaskType JUCE_VECTOR_CALLTYPE equal (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::equal (a.value, b.value)); }

    /** Returns a SIMDRegister of the corresponding integral type where each element has all bits set
        if the corresponding element of a is not equal to the corresponding element of b, or zero otherwise.
        The result can then be used in the bit operations defined above to avoid branches in vector SIMD code. */
    static inline vMaskType JUCE_VECTOR_CALLTYPE notEqual (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::notEqual (a.value, b.value)); }

    /** Returns a SIMDRegister of the corresponding integral type where each element has all bits set
        if the corresponding element of a is less than the corresponding element of b, or zero otherwise.
        The result can then be used in the bit operations defined above to avoid branches in vector SIMD code. */
    static inline vMaskType JUCE_VECTOR_CALLTYPE lessThan (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::greaterThan (b.value, a.value)); }

    /** Returns a SIMDRegister of the corresponding integral type where each element has all bits set
        if the corresponding element of a is less than or equal to the corresponding element of b, or zero otherwise.
        The result can then be used in the bit operations defined above to avoid branches in vector SIMD code. */
    static inline vMaskType JUCE_VECTOR_CALLTYPE lessThanOrEqual (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::greaterThanOrEqual (b.value, a.value)); }

    /** Returns a SIMDRegister of the corresponding integral type where each element has all bits set
        if the corresponding element of a is greater than the corresponding element of b, or zero otherwise.
        The result can then be used in the bit operations defined above to avoid branches in vector SIMD code. */
    static inline vMaskType JUCE_VECTOR_CALLTYPE greaterThan (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::greaterThan (a.value, b.value)); }

    /** Returns a SIMDRegister of the corresponding integral type where each element has all bits set
        if the corresponding element of a is greater than or equal to the corresponding element of b, or zero otherwise.
        The result can then be used in the bit operations defined above to avoid branches in vector SIMD code. */
    static inline vMaskType JUCE_VECTOR_CALLTYPE greaterThanOrEqual (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::greaterThanOrEqual (a.value, b.value)); }
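
    /*  A sketch of using a comparison mask with the bit operations above to avoid
        a per-element branch (the threshold and input values are illustrative
        assumptions):

            using Reg = SIMDRegister<float>;

            Reg input (0.75f), threshold (0.5f);

            auto mask  = Reg::greaterThanOrEqual (input, threshold); // all bits set where input >= threshold
            auto gated = input & mask;                               // lanes below the threshold become zero
    */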

    //==============================================================================
    /** Returns a new vector where each element is the minimum of the corresponding element of a and b. */
    static inline SIMDRegister JUCE_VECTOR_CALLTYPE min (SIMDRegister a, SIMDRegister b) noexcept { return { NativeOps::min (a.value, b.value) }; }

    /** Returns a new vector where each element is the maximum of the corresponding element of a and b. */
    static inline SIMDRegister JUCE_VECTOR_CALLTYPE max (SIMDRegister a, SIMDRegister b) noexcept { return { NativeOps::max (a.value, b.value) }; }

    //==============================================================================
    /** Multiplies b and c and adds the result to a. */
    static inline SIMDRegister JUCE_VECTOR_CALLTYPE multiplyAdd (SIMDRegister a, const SIMDRegister b, SIMDRegister c) noexcept
    {
        return { CmplxOps::muladd (a.value, b.value, c.value) };
    }

    //==============================================================================
    /** Returns a scalar which is the sum of all elements of the receiver. */
    inline ElementType sum() const noexcept { return CmplxOps::sum (value); }
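
    /*  A sketch of accumulating products with multiplyAdd and then reducing with
        sum(), as in a simple dot product (the input values are illustrative
        assumptions):

            using Reg = SIMDRegister<float>;

            Reg acc (0.0f), x (2.0f), y (3.0f);

            acc = Reg::multiplyAdd (acc, x, y);    // acc = acc + (x * y), element-wise
            float dot = acc.sum();                 // horizontal sum of the accumulator
    */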

    //==============================================================================
    /** Checks if the given pointer is sufficiently aligned for using SIMD operations. */
    static inline bool isSIMDAligned (const ElementType* ptr) noexcept
    {
        uintptr_t bitmask = SIMDRegisterSize - 1;
        return (reinterpret_cast<uintptr_t> (ptr) & bitmask) == 0;
    }

    /** Returns the next position in memory where isSIMDAligned returns true.

        If the current position in memory is already aligned then this method
        will simply return the pointer.
    */
    static inline ElementType* getNextSIMDAlignedPtr (ElementType* ptr) noexcept
    {
        return snapPointerToAlignment (ptr, SIMDRegisterSize);
    }

#ifndef DOXYGEN
    static inline const ElementType* getNextSIMDAlignedPtr (const ElementType* ptr) noexcept
    {
        return snapPointerToAlignment (ptr, SIMDRegisterSize);
    }
#endif
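
    /*  A sketch of preparing an arbitrary float buffer for SIMD processing
        (processScalar is a hypothetical helper, not part of this class):

            void process (float* data, size_t numSamples)
            {
                auto* aligned = SIMDRegister<float>::getNextSIMDAlignedPtr (data);

                // Handle the unaligned samples before the first aligned address one
                // at a time; the rest can then be loaded with fromRawArray,
                // SIMDRegister<float>::SIMDNumElements elements per iteration.
                for (auto* p = data; p < aligned && p < data + numSamples; ++p)
                    processScalar (*p);
            }
    */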

private:
    static inline vMaskType JUCE_VECTOR_CALLTYPE toMaskType (vSIMDType a) noexcept
    {
        union
        {
            vSIMDType in;
            vMaskSIMDType out;
        } u;

        u.in = a;
        return vMaskType::fromNative (u.out);
    }

    static inline vSIMDType JUCE_VECTOR_CALLTYPE toVecType (vMaskSIMDType a) noexcept
    {
        union
        {
            vMaskSIMDType in;
            vSIMDType out;
        } u;

        u.in = a;
        return u.out;
    }

    static inline vSIMDType JUCE_VECTOR_CALLTYPE toVecType (MaskType a) noexcept
    {
        union
        {
            vMaskSIMDType in;
            vSIMDType out;
        } u;

        u.in = CmplxSIMDOps<MaskType>::expand (a);
        return u.out;
    }
};

} // namespace dsp
} // namespace juce

#ifndef DOXYGEN
 #include "juce_SIMDRegister_Impl.h"
#endif