/// @ref core
/// @file glm/detail/func_integer.inl

#include "../ext/vec1.hpp"
#include "type_vec2.hpp"
#include "type_vec3.hpp"
#include "type_vec4.hpp"
#include "type_int.hpp"
#include "_vectorize.hpp"
#if(GLM_ARCH & GLM_ARCH_X86 && GLM_COMPILER & GLM_COMPILER_VC)
#   include <intrin.h>
#   pragma intrinsic(_BitScanReverse)
#endif//(GLM_ARCH & GLM_ARCH_X86 && GLM_COMPILER & GLM_COMPILER_VC)
#include <limits>

#if !GLM_HAS_EXTENDED_INTEGER_TYPE
#   if GLM_COMPILER & GLM_COMPILER_GCC
#       pragma GCC diagnostic ignored "-Wlong-long"
#   endif
#   if (GLM_COMPILER & GLM_COMPILER_CLANG)
#       pragma clang diagnostic ignored "-Wc++11-long-long"
#   endif
#endif

namespace glm{
namespace detail
{
    template<typename T>
    GLM_FUNC_QUALIFIER T mask(T Bits)
    {
        return Bits >= static_cast<T>(sizeof(T) * 8) ? ~static_cast<T>(0) : (static_cast<T>(1) << Bits) - static_cast<T>(1);
    }
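
    // One step of the parallel bit reversal used by bitfieldReverse below: the primary
    // template is the disabled stage (EXEC == false) and passes the value through; the
    // specialization swaps the bit groups selected by Mask with their neighbours Shift bits away.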
    template<length_t L, typename T, qualifier Q, bool Aligned, bool EXEC>
    struct compute_bitfieldReverseStep
    {
        GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& v, T, T)
        {
            return v;
        }
    };

    template<length_t L, typename T, qualifier Q, bool Aligned>
    struct compute_bitfieldReverseStep<L, T, Q, Aligned, true>
    {
        GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& v, T Mask, T Shift)
        {
            return (v & Mask) << Shift | (v & (~Mask)) >> Shift;
        }
    };
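
    // One step of the SWAR population count used by bitCount below: when enabled, the
    // specialization sums adjacent bit fields selected by Mask pairwise in parallel.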
    template<length_t L, typename T, qualifier Q, bool Aligned, bool EXEC>
    struct compute_bitfieldBitCountStep
    {
        GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& v, T, T)
        {
            return v;
        }
    };

    template<length_t L, typename T, qualifier Q, bool Aligned>
    struct compute_bitfieldBitCountStep<L, T, Q, Aligned, true>
    {
        GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& v, T Mask, T Shift)
        {
            return (v & Mask) + ((v >> Shift) & Mask);
        }
    };
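
    // Generic findLSB: ~Value & (Value - 1) keeps exactly the bits below the lowest set bit,
    // so their population count is the index of that bit; zero maps to -1.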
    template<typename genIUType, size_t Bits>
    struct compute_findLSB
    {
        GLM_FUNC_QUALIFIER static int call(genIUType Value)
        {
            if(Value == 0)
                return -1;

            return glm::bitCount(~Value & (Value - static_cast<genIUType>(1)));
        }
    };

#   if GLM_HAS_BITSCAN_WINDOWS
        template<typename genIUType>
        struct compute_findLSB<genIUType, 32>
        {
            GLM_FUNC_QUALIFIER static int call(genIUType Value)
            {
                unsigned long Result(0);
                unsigned char IsNotNull = _BitScanForward(&Result, *reinterpret_cast<unsigned long*>(&Value));
                return IsNotNull ? int(Result) : -1;
            }
        };

#       if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32))
        template<typename genIUType>
        struct compute_findLSB<genIUType, 64>
        {
            GLM_FUNC_QUALIFIER static int call(genIUType Value)
            {
                unsigned long Result(0);
                unsigned char IsNotNull = _BitScanForward64(&Result, *reinterpret_cast<unsigned __int64*>(&Value));
                return IsNotNull ? int(Result) : -1;
            }
        };
#       endif
#   endif//GLM_HAS_BITSCAN_WINDOWS

    template<length_t L, typename T, qualifier Q, bool EXEC = true>
    struct compute_findMSB_step_vec
    {
        GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x, T Shift)
        {
            return x | (x >> Shift);
        }
    };

    template<length_t L, typename T, qualifier Q>
    struct compute_findMSB_step_vec<L, T, Q, false>
    {
        GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& x, T)
        {
            return x;
        }
    };
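
    // Generic findMSB: the repeated x | (x >> Shift) steps smear the highest set bit into
    // every lower position, so (bit width - 1) - bitCount(~x) is the index of that bit
    // and an input of zero yields -1.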
    template<length_t L, typename T, qualifier Q, int>
    struct compute_findMSB_vec
    {
        GLM_FUNC_QUALIFIER static vec<L, int, Q> call(vec<L, T, Q> const& v)
        {
            vec<L, T, Q> x(v);
            x = compute_findMSB_step_vec<L, T, Q, sizeof(T) * 8 >=  8>::call(x, static_cast<T>( 1));
            x = compute_findMSB_step_vec<L, T, Q, sizeof(T) * 8 >=  8>::call(x, static_cast<T>( 2));
            x = compute_findMSB_step_vec<L, T, Q, sizeof(T) * 8 >=  8>::call(x, static_cast<T>( 4));
            x = compute_findMSB_step_vec<L, T, Q, sizeof(T) * 8 >= 16>::call(x, static_cast<T>( 8));
            x = compute_findMSB_step_vec<L, T, Q, sizeof(T) * 8 >= 32>::call(x, static_cast<T>(16));
            x = compute_findMSB_step_vec<L, T, Q, sizeof(T) * 8 >= 64>::call(x, static_cast<T>(32));
            return vec<L, int, Q>(sizeof(T) * 8 - 1) - glm::bitCount(~x);
        }
    };

#   if GLM_HAS_BITSCAN_WINDOWS
        template<typename genIUType>
        GLM_FUNC_QUALIFIER int compute_findMSB_32(genIUType Value)
        {
            unsigned long Result(0);
            unsigned char IsNotNull = _BitScanReverse(&Result, *reinterpret_cast<unsigned long*>(&Value));
            return IsNotNull ? int(Result) : -1;
        }

        template<length_t L, typename T, qualifier Q>
        struct compute_findMSB_vec<L, T, Q, 32>
        {
            GLM_FUNC_QUALIFIER static vec<L, int, Q> call(vec<L, T, Q> const& x)
            {
                return detail::functor1<L, int, T, Q>::call(compute_findMSB_32, x);
            }
        };

#       if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32))
            template<typename genIUType>
            GLM_FUNC_QUALIFIER int compute_findMSB_64(genIUType Value)
            {
                unsigned long Result(0);
                unsigned char IsNotNull = _BitScanReverse64(&Result, *reinterpret_cast<unsigned __int64*>(&Value));
                return IsNotNull ? int(Result) : -1;
            }

            template<length_t L, typename T, qualifier Q>
            struct compute_findMSB_vec<L, T, Q, 64>
            {
                GLM_FUNC_QUALIFIER static vec<L, int, Q> call(vec<L, T, Q> const& x)
                {
                    return detail::functor1<L, int, T, Q>::call(compute_findMSB_64, x);
                }
            };
#       endif
#   endif//GLM_HAS_BITSCAN_WINDOWS
}//namespace detail

    // uaddCarry
    GLM_FUNC_QUALIFIER uint uaddCarry(uint const& x, uint const& y, uint & Carry)
    {
        uint64 const Value64(static_cast<uint64>(x) + static_cast<uint64>(y));
        uint64 const Max32((static_cast<uint64>(1) << static_cast<uint64>(32)) - static_cast<uint64>(1));
        Carry = Value64 > Max32 ? 1u : 0u;
        return static_cast<uint32>(Value64 % (Max32 + static_cast<uint64>(1)));
    }

    template<length_t L, qualifier Q>
    GLM_FUNC_QUALIFIER vec<L, uint, Q> uaddCarry(vec<L, uint, Q> const& x, vec<L, uint, Q> const& y, vec<L, uint, Q>& Carry)
    {
        vec<L, uint64, Q> Value64(vec<L, uint64, Q>(x) + vec<L, uint64, Q>(y));
        vec<L, uint64, Q> Max32((static_cast<uint64>(1) << static_cast<uint64>(32)) - static_cast<uint64>(1));
        Carry = mix(vec<L, uint32, Q>(0), vec<L, uint32, Q>(1), greaterThan(Value64, Max32));
        return vec<L, uint32, Q>(Value64 % (Max32 + static_cast<uint64>(1)));
    }

    // usubBorrow
    GLM_FUNC_QUALIFIER uint usubBorrow(uint const& x, uint const& y, uint & Borrow)
    {
        GLM_STATIC_ASSERT(sizeof(uint) == sizeof(uint32), "uint and uint32 size mismatch");

        Borrow = x >= y ? static_cast<uint32>(0) : static_cast<uint32>(1);
        if(y >= x)
            return y - x;
        else
            return static_cast<uint32>((static_cast<int64>(1) << static_cast<int64>(32)) + (static_cast<int64>(y) - static_cast<int64>(x)));
    }

    template<length_t L, qualifier Q>
    GLM_FUNC_QUALIFIER vec<L, uint, Q> usubBorrow(vec<L, uint, Q> const& x, vec<L, uint, Q> const& y, vec<L, uint, Q>& Borrow)
    {
        Borrow = mix(vec<L, uint, Q>(1), vec<L, uint, Q>(0), greaterThanEqual(x, y));
        vec<L, uint, Q> const YgeX(y - x);
        vec<L, uint, Q> const XgeY(vec<L, uint32, Q>((static_cast<int64>(1) << static_cast<int64>(32)) + (vec<L, int64, Q>(y) - vec<L, int64, Q>(x))));
        return mix(XgeY, YgeX, greaterThanEqual(y, x));
    }

    // umulExtended
    GLM_FUNC_QUALIFIER void umulExtended(uint const& x, uint const& y, uint & msb, uint & lsb)
    {
        GLM_STATIC_ASSERT(sizeof(uint) == sizeof(uint32), "uint and uint32 size mismatch");

        uint64 Value64 = static_cast<uint64>(x) * static_cast<uint64>(y);
        msb = static_cast<uint>(Value64 >> static_cast<uint64>(32));
        lsb = static_cast<uint>(Value64);
    }

    template<length_t L, qualifier Q>
    GLM_FUNC_QUALIFIER void umulExtended(vec<L, uint, Q> const& x, vec<L, uint, Q> const& y, vec<L, uint, Q>& msb, vec<L, uint, Q>& lsb)
    {
        GLM_STATIC_ASSERT(sizeof(uint) == sizeof(uint32), "uint and uint32 size mismatch");

        vec<L, uint64, Q> Value64(vec<L, uint64, Q>(x) * vec<L, uint64, Q>(y));
        msb = vec<L, uint32, Q>(Value64 >> static_cast<uint64>(32));
        lsb = vec<L, uint32, Q>(Value64);
    }

    // imulExtended
    GLM_FUNC_QUALIFIER void imulExtended(int x, int y, int& msb, int& lsb)
    {
        GLM_STATIC_ASSERT(sizeof(int) == sizeof(int32), "int and int32 size mismatch");

        int64 Value64 = static_cast<int64>(x) * static_cast<int64>(y);
        msb = static_cast<int>(Value64 >> static_cast<int64>(32));
        lsb = static_cast<int>(Value64);
    }

    template<length_t L, qualifier Q>
    GLM_FUNC_QUALIFIER void imulExtended(vec<L, int, Q> const& x, vec<L, int, Q> const& y, vec<L, int, Q>& msb, vec<L, int, Q>& lsb)
    {
        GLM_STATIC_ASSERT(sizeof(int) == sizeof(int32), "int and int32 size mismatch");

        vec<L, int64, Q> Value64(vec<L, int64, Q>(x) * vec<L, int64, Q>(y));
        lsb = vec<L, int32, Q>(Value64 & static_cast<int64>(0xFFFFFFFF));
        msb = vec<L, int32, Q>((Value64 >> static_cast<int64>(32)) & static_cast<int64>(0xFFFFFFFF));
    }

    // bitfieldExtract
    template<typename genIUType>
    GLM_FUNC_QUALIFIER genIUType bitfieldExtract(genIUType Value, int Offset, int Bits)
    {
        return bitfieldExtract(vec<1, genIUType>(Value), Offset, Bits).x;
    }

    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_QUALIFIER vec<L, T, Q> bitfieldExtract(vec<L, T, Q> const& Value, int Offset, int Bits)
    {
        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldExtract' only accept integer inputs");

        return (Value >> static_cast<T>(Offset)) & static_cast<T>(detail::mask(Bits));
    }

    // bitfieldInsert
    template<typename genIUType>
    GLM_FUNC_QUALIFIER genIUType bitfieldInsert(genIUType const& Base, genIUType const& Insert, int Offset, int Bits)
    {
        return bitfieldInsert(vec<1, genIUType>(Base), vec<1, genIUType>(Insert), Offset, Bits).x;
    }

    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_QUALIFIER vec<L, T, Q> bitfieldInsert(vec<L, T, Q> const& Base, vec<L, T, Q> const& Insert, int Offset, int Bits)
    {
        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'bitfieldInsert' only accept integer values");

        T const Mask = static_cast<T>(detail::mask(Bits) << Offset);
        return (Base & ~Mask) | (Insert & Mask);
    }

    // bitfieldReverse
    template<typename genType>
    GLM_FUNC_QUALIFIER genType bitfieldReverse(genType x)
    {
        return bitfieldReverse(glm::vec<1, genType, glm::defaultp>(x)).x;
    }
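
    // The vector overload reverses the bits by swapping groups of 1, 2, 4, 8, 16 and 32 bits;
    // each mask is truncated to the width of T, and stages wider than T are compiled out
    // through the EXEC flag of compute_bitfieldReverseStep.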
    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_QUALIFIER vec<L, T, Q> bitfieldReverse(vec<L, T, Q> const& v)
    {
        vec<L, T, Q> x(v);
        x = detail::compute_bitfieldReverseStep<L, T, Q, detail::is_aligned<Q>::value, sizeof(T) * 8 >=  2>::call(x, static_cast<T>(0x5555555555555555ull), static_cast<T>( 1));
        x = detail::compute_bitfieldReverseStep<L, T, Q, detail::is_aligned<Q>::value, sizeof(T) * 8 >=  4>::call(x, static_cast<T>(0x3333333333333333ull), static_cast<T>( 2));
        x = detail::compute_bitfieldReverseStep<L, T, Q, detail::is_aligned<Q>::value, sizeof(T) * 8 >=  8>::call(x, static_cast<T>(0x0F0F0F0F0F0F0F0Full), static_cast<T>( 4));
        x = detail::compute_bitfieldReverseStep<L, T, Q, detail::is_aligned<Q>::value, sizeof(T) * 8 >= 16>::call(x, static_cast<T>(0x00FF00FF00FF00FFull), static_cast<T>( 8));
        x = detail::compute_bitfieldReverseStep<L, T, Q, detail::is_aligned<Q>::value, sizeof(T) * 8 >= 32>::call(x, static_cast<T>(0x0000FFFF0000FFFFull), static_cast<T>(16));
        x = detail::compute_bitfieldReverseStep<L, T, Q, detail::is_aligned<Q>::value, sizeof(T) * 8 >= 64>::call(x, static_cast<T>(0x00000000FFFFFFFFull), static_cast<T>(32));
        return x;
    }

    // bitCount
    template<typename genType>
    GLM_FUNC_QUALIFIER int bitCount(genType x)
    {
        return bitCount(glm::vec<1, genType, glm::defaultp>(x)).x;
    }
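
    // The vector overload reinterprets the input as its unsigned counterpart and performs a
    // SWAR population count: adjacent field sums of width 1, 2, 4, 8, 16 and 32 bits are
    // accumulated until a single per-component total remains.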
    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_QUALIFIER vec<L, int, Q> bitCount(vec<L, T, Q> const& v)
    {
#       if GLM_COMPILER & GLM_COMPILER_VC
#           pragma warning(push)
#           pragma warning(disable : 4310) //cast truncates constant value
#       endif

        vec<L, typename detail::make_unsigned<T>::type, Q> x(*reinterpret_cast<vec<L, typename detail::make_unsigned<T>::type, Q> const *>(&v));
        x = detail::compute_bitfieldBitCountStep<L, typename detail::make_unsigned<T>::type, Q, detail::is_aligned<Q>::value, sizeof(T) * 8 >=  2>::call(x, typename detail::make_unsigned<T>::type(0x5555555555555555ull), typename detail::make_unsigned<T>::type( 1));
        x = detail::compute_bitfieldBitCountStep<L, typename detail::make_unsigned<T>::type, Q, detail::is_aligned<Q>::value, sizeof(T) * 8 >=  4>::call(x, typename detail::make_unsigned<T>::type(0x3333333333333333ull), typename detail::make_unsigned<T>::type( 2));
        x = detail::compute_bitfieldBitCountStep<L, typename detail::make_unsigned<T>::type, Q, detail::is_aligned<Q>::value, sizeof(T) * 8 >=  8>::call(x, typename detail::make_unsigned<T>::type(0x0F0F0F0F0F0F0F0Full), typename detail::make_unsigned<T>::type( 4));
        x = detail::compute_bitfieldBitCountStep<L, typename detail::make_unsigned<T>::type, Q, detail::is_aligned<Q>::value, sizeof(T) * 8 >= 16>::call(x, typename detail::make_unsigned<T>::type(0x00FF00FF00FF00FFull), typename detail::make_unsigned<T>::type( 8));
        x = detail::compute_bitfieldBitCountStep<L, typename detail::make_unsigned<T>::type, Q, detail::is_aligned<Q>::value, sizeof(T) * 8 >= 32>::call(x, typename detail::make_unsigned<T>::type(0x0000FFFF0000FFFFull), typename detail::make_unsigned<T>::type(16));
        x = detail::compute_bitfieldBitCountStep<L, typename detail::make_unsigned<T>::type, Q, detail::is_aligned<Q>::value, sizeof(T) * 8 >= 64>::call(x, typename detail::make_unsigned<T>::type(0x00000000FFFFFFFFull), typename detail::make_unsigned<T>::type(32));
        return vec<L, int, Q>(x);
#       if GLM_COMPILER & GLM_COMPILER_VC
#           pragma warning(pop)
#       endif
    }

    // findLSB
    template<typename genIUType>
    GLM_FUNC_QUALIFIER int findLSB(genIUType Value)
    {
        GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findLSB' only accept integer values");

        return detail::compute_findLSB<genIUType, sizeof(genIUType) * 8>::call(Value);
    }

    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_QUALIFIER vec<L, int, Q> findLSB(vec<L, T, Q> const& x)
    {
        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'findLSB' only accept integer values");

        return detail::functor1<L, int, T, Q>::call(findLSB, x);
    }

    // findMSB
    template<typename genIUType>
    GLM_FUNC_QUALIFIER int findMSB(genIUType v)
    {
        GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findMSB' only accept integer values");

        return findMSB(vec<1, genIUType>(v)).x;
    }

    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_QUALIFIER vec<L, int, Q> findMSB(vec<L, T, Q> const& v)
    {
        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'findMSB' only accept integer values");

        return detail::compute_findMSB_vec<L, T, Q, sizeof(T) * 8>::call(v);
    }
}//namespace glm

#if GLM_ARCH != GLM_ARCH_PURE && GLM_HAS_UNRESTRICTED_UNIONS
#   include "func_integer_simd.inl"
#endif