diff --git a/glm/detail/func_matrix.inl b/glm/detail/func_matrix.inl
index 8328b9eb..1493ae02 100644
--- a/glm/detail/func_matrix.inl
+++ b/glm/detail/func_matrix.inl
@@ -306,5 +306,11 @@ namespace detail
 		GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'inverse' only accept floating-point inputs");
 		return detail::compute_inverse(m);
 	}
-}//namespace glm
+}//namespace glm
+
+#if GLM_HAS_ANONYMOUS_UNION && GLM_NOT_BUGGY_VC32BITS
+#if GLM_ARCH & GLM_ARCH_SSE2
+#	include "func_matrix_sse2.inl"
+#endif
+#endif//
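Call sites do not change with this patch: when the build enables SSE2 (GLM_ARCH & GLM_ARCH_SSE2) and the aligned anonymous-union storage is available, the include above pulls in a specialization of glm::inverse() that runs the whole inversion in __m128 registers. A minimal usage sketch, not part of the patch; it assumes a build in which the default glm::mat4 maps to the precision the new specialization targets:

	// Sketch: the call site is identical with or without the SSE2 path.
	#include <glm/glm.hpp>
	#include <cstdio>

	int main()
	{
		glm::mat4 const A(
			glm::vec4(1, 0, 1, 0),
			glm::vec4(0, 1, 0, 0),
			glm::vec4(0, 0, 1, 0),
			glm::vec4(0, 0, 0, 1));

		glm::mat4 const B = glm::inverse(A);	// dispatches to the SSE2 specialization when available
		glm::mat4 const I = A * B;				// approximately the identity (see the _mm_rcp_ps note after the new file)

		std::printf("%f %f %f %f\n", I[0][0], I[1][1], I[2][2], I[3][3]);
		return 0;
	}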
diff --git a/glm/detail/func_matrix_sse2.inl b/glm/detail/func_matrix_sse2.inl
new file mode 100644
index 00000000..c0d5f395
--- /dev/null
+++ b/glm/detail/func_matrix_sse2.inl
@@ -0,0 +1,237 @@
+#include "type_mat4x4.hpp"
+
+namespace glm
+{
+	GLM_FUNC_QUALIFIER __m128 sse_dot_ps(__m128 v1, __m128 v2)
+	{
+		__m128 mul0 = _mm_mul_ps(v1, v2);
+		__m128 swp0 = _mm_shuffle_ps(mul0, mul0, _MM_SHUFFLE(2, 3, 0, 1));
+		__m128 add0 = _mm_add_ps(mul0, swp0);
+		__m128 swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3));
+		__m128 add1 = _mm_add_ps(add0, swp1);
+		return add1;
+	}
+
+	template <>
+	GLM_FUNC_QUALIFIER tmat4x4 inverse(tmat4x4 const& m)
+	{
+		__m128 Fac0;
+		{
+			// valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+			// valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+			// valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3];
+			// valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3];
+
+			__m128 Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(3, 3, 3, 3));
+			__m128 Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(2, 2, 2, 2));
+
+			__m128 Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(2, 2, 2, 2));
+			__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+			__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+			__m128 Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(3, 3, 3, 3));
+
+			__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+			__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+			Fac0 = _mm_sub_ps(Mul00, Mul01);
+		}
+
+		__m128 Fac1;
+		{
+			// valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+			// valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+			// valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3];
+			// valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3];
+
+			__m128 Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(3, 3, 3, 3));
+			__m128 Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(1, 1, 1, 1));
+
+			__m128 Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(1, 1, 1, 1));
+			__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+			__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+			__m128 Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(3, 3, 3, 3));
+
+			__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+			__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+			Fac1 = _mm_sub_ps(Mul00, Mul01);
+		}
+
+		__m128 Fac2;
+		{
+			// valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+			// valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+			// valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2];
+			// valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2];
+
+			__m128 Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(2, 2, 2, 2));
+			__m128 Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(1, 1, 1, 1));
+
+			__m128 Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(1, 1, 1, 1));
+			__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+			__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+			__m128 Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(2, 2, 2, 2));
+
+			__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+			__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+			Fac2 = _mm_sub_ps(Mul00, Mul01);
+		}
+
+		__m128 Fac3;
+		{
+			// valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+			// valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+			// valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3];
+			// valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3];
+
+			__m128 Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(3, 3, 3, 3));
+			__m128 Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(0, 0, 0, 0));
+
+			__m128 Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(0, 0, 0, 0));
+			__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+			__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+			__m128 Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(3, 3, 3, 3));
+
+			__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+			__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+			Fac3 = _mm_sub_ps(Mul00, Mul01);
+		}
+
+		__m128 Fac4;
+		{
+			// valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+			// valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+			// valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2];
+			// valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2];
+
+			__m128 Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(2, 2, 2, 2));
+			__m128 Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(0, 0, 0, 0));
+
+			__m128 Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(0, 0, 0, 0));
+			__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+			__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+			__m128 Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(2, 2, 2, 2));
+
+			__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+			__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+			Fac4 = _mm_sub_ps(Mul00, Mul01);
+		}
+
+		__m128 Fac5;
+		{
+			// valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+			// valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+			// valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1];
+			// valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1];
+
+			__m128 Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(1, 1, 1, 1));
+			__m128 Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(0, 0, 0, 0));
+
+			__m128 Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(0, 0, 0, 0));
+			__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+			__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+			__m128 Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(1, 1, 1, 1));
+
+			__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+			__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+			Fac5 = _mm_sub_ps(Mul00, Mul01);
+		}
+
+		__m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f);
+		__m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f);
+
+		// m[1][0]
+		// m[0][0]
+		// m[0][0]
+		// m[0][0]
+		__m128 Temp0 = _mm_shuffle_ps(m[1].data, m[0].data, _MM_SHUFFLE(0, 0, 0, 0));
+		__m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0));
+
+		// m[1][1]
+		// m[0][1]
+		// m[0][1]
+		// m[0][1]
+		__m128 Temp1 = _mm_shuffle_ps(m[1].data, m[0].data, _MM_SHUFFLE(1, 1, 1, 1));
+		__m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0));
+
+		// m[1][2]
+		// m[0][2]
+		// m[0][2]
+		// m[0][2]
+		__m128 Temp2 = _mm_shuffle_ps(m[1].data, m[0].data, _MM_SHUFFLE(2, 2, 2, 2));
+		__m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0));
+
+		// m[1][3]
+		// m[0][3]
+		// m[0][3]
+		// m[0][3]
+		__m128 Temp3 = _mm_shuffle_ps(m[1].data, m[0].data, _MM_SHUFFLE(3, 3, 3, 3));
+		__m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0));
+
+		// col0
+		// + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]),
+		// - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]),
+		// + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]),
+		// - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]),
+		__m128 Mul00 = _mm_mul_ps(Vec1, Fac0);
+		__m128 Mul01 = _mm_mul_ps(Vec2, Fac1);
+		__m128 Mul02 = _mm_mul_ps(Vec3, Fac2);
+		__m128 Sub00 = _mm_sub_ps(Mul00, Mul01);
+		__m128 Add00 = _mm_add_ps(Sub00, Mul02);
+		__m128 Inv0 = _mm_mul_ps(SignB, Add00);
+
+		// col1
+		// - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]),
+		// + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]),
+		// - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]),
+		// + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]),
+		__m128 Mul03 = _mm_mul_ps(Vec0, Fac0);
+		__m128 Mul04 = _mm_mul_ps(Vec2, Fac3);
+		__m128 Mul05 = _mm_mul_ps(Vec3, Fac4);
+		__m128 Sub01 = _mm_sub_ps(Mul03, Mul04);
+		__m128 Add01 = _mm_add_ps(Sub01, Mul05);
+		__m128 Inv1 = _mm_mul_ps(SignA, Add01);
+
+		// col2
+		// + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]),
+		// - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]),
+		// + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]),
+		// - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]),
+		__m128 Mul06 = _mm_mul_ps(Vec0, Fac1);
+		__m128 Mul07 = _mm_mul_ps(Vec1, Fac3);
+		__m128 Mul08 = _mm_mul_ps(Vec3, Fac5);
+		__m128 Sub02 = _mm_sub_ps(Mul06, Mul07);
+		__m128 Add02 = _mm_add_ps(Sub02, Mul08);
+		__m128 Inv2 = _mm_mul_ps(SignB, Add02);
+
+		// col3
+		// - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]),
+		// + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]),
+		// - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]),
+		// + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3]));
+		__m128 Mul09 = _mm_mul_ps(Vec0, Fac2);
+		__m128 Mul10 = _mm_mul_ps(Vec1, Fac4);
+		__m128 Mul11 = _mm_mul_ps(Vec2, Fac5);
+		__m128 Sub03 = _mm_sub_ps(Mul09, Mul10);
+		__m128 Add03 = _mm_add_ps(Sub03, Mul11);
+		__m128 Inv3 = _mm_mul_ps(SignA, Add03);
+
+		__m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0));
+		__m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0));
+		__m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0));
+
+		// valType Determinant = m[0][0] * Inverse[0][0]
+		//					+ m[0][1] * Inverse[1][0]
+		//					+ m[0][2] * Inverse[2][0]
+		//					+ m[0][3] * Inverse[3][0];
+		__m128 Det0 = sse_dot_ps(m[0].data, Row2);
+		__m128 Rcp0 = _mm_rcp_ps(Det0);
+		//__m128 Rcp0 = _mm_div_ps(one, Det0);
+		// Inverse /= Determinant;
+
+		tmat4x4 Result(uninitialize);
+		Result[0].data = _mm_mul_ps(Inv0, Rcp0);
+		Result[1].data = _mm_mul_ps(Inv1, Rcp0);
+		Result[2].data = _mm_mul_ps(Inv2, Rcp0);
+		Result[3].data = _mm_mul_ps(Inv3, Rcp0);
+		return Result;
+	}
+}//namespace glm
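Two notes on the new file. sse_dot_ps folds the four lane products with two shuffle/add rounds, so the dot product ends up broadcast into all four lanes of the result; that is what lets the code multiply the reciprocal determinant straight into every column. The reciprocal itself comes from _mm_rcp_ps, which is only a roughly 12-bit approximation, hence the 0.001 epsilon used by the new test further down. If tighter accuracy were wanted, the usual alternatives (sketch only, not part of this patch) are the commented-out _mm_div_ps or one Newton-Raphson refinement of the estimate:

	// Sketch, SSE1+: refine r ~= 1/x to near full float precision with one
	// Newton-Raphson step, r' = r * (2 - x * r).
	#include <xmmintrin.h>

	static inline __m128 rcp_refined(__m128 x)
	{
		__m128 r = _mm_rcp_ps(x);		// ~12-bit reciprocal estimate
		__m128 two = _mm_set1_ps(2.0f);
		return _mm_mul_ps(r, _mm_sub_ps(two, _mm_mul_ps(x, r)));
	}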
diff --git a/glm/detail/intrinsic_matrix.inl b/glm/detail/intrinsic_matrix.inl
index 380ffe4a..491b58b0 100644
--- a/glm/detail/intrinsic_matrix.inl
+++ b/glm/detail/intrinsic_matrix.inl
@@ -608,7 +608,7 @@ GLM_FUNC_QUALIFIER void sse_inverse_ps(__m128 const in[4], __m128 out[4])
 		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
 		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
 		Fac2 = _mm_sub_ps(Mul00, Mul01);
-	}
+	}
 
 	__m128 Fac3;
 	{
diff --git a/glm/detail/setup.hpp b/glm/detail/setup.hpp
index e599e177..1b3ccd4b 100644
--- a/glm/detail/setup.hpp
+++ b/glm/detail/setup.hpp
@@ -942,6 +942,11 @@
 #	define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef __declspec(align(alignment)) type name
 #	define GLM_RESTRICT_FUNC __declspec(restrict)
 #	define GLM_RESTRICT __restrict
+#	if GLM_COMPILER >= GLM_COMPILER_VC2013
+#		define GLM_VECTOR_CALL __vectorcall
+#	else
+#		define GLM_VECTOR_CALL
+#	endif
 #elif GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_APPLE_CLANG | GLM_COMPILER_LLVM | GLM_COMPILER_CUDA | GLM_COMPILER_INTEL)
 #	define GLM_DEPRECATED __attribute__((__deprecated__))
 #	define GLM_ALIGN(x) __attribute__((aligned(x)))
@@ -949,6 +954,15 @@
 #	define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef type name __attribute__((aligned(alignment)))
 #	define GLM_RESTRICT_FUNC __restrict__
 #	define GLM_RESTRICT __restrict__
+#	if GLM_COMPILER & GLM_COMPILER_LLVM
+#		if GLM_COMPILER >= GLM_COMPILER_LLVM37
+#			define GLM_VECTOR_CALL __vectorcall
+#		else
+#			define GLM_VECTOR_CALL
+#		endif
+#	else
+#		define GLM_VECTOR_CALL
+#	endif
 #else
 #	define GLM_DEPRECATED
 #	define GLM_ALIGN
@@ -956,6 +970,7 @@
 #	define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef type name
 #	define GLM_RESTRICT_FUNC
 #	define GLM_RESTRICT
+#	define GLM_VECTOR_CALL
 #endif//GLM_COMPILER
 
 #if GLM_HAS_DEFAULTED_FUNCTIONS
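The setup.hpp change introduces GLM_VECTOR_CALL: it expands to __vectorcall on Visual C++ 2013 and newer and on the Clang version the patch checks for (GLM_COMPILER_LLVM37), and to nothing everywhere else, so annotated declarations stay ordinary cdecl on other toolchains. Note that the LLVM branch must use #if rather than #ifdef; #ifdef only tests whether GLM_COMPILER is defined and would take that branch on every compiler. A small illustration of what the macro is meant to buy; the function name is made up and the snippet assumes this patch so that glm/glm.hpp pulls in the new macro:

	// With __vectorcall, vector arguments (such as the __m128 held inside vec4)
	// can be passed in XMM/YMM registers; with the empty expansion this is an
	// ordinary function.
	#include <glm/glm.hpp>

	glm::vec4 GLM_VECTOR_CALL mix_half(glm::vec4 a, glm::vec4 b)
	{
		return (a + b) * 0.5f;
	}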
diff --git a/glm/detail/type_mat4x4.inl b/glm/detail/type_mat4x4.inl
index bd80a126..e1771dd4 100644
--- a/glm/detail/type_mat4x4.inl
+++ b/glm/detail/type_mat4x4.inl
@@ -758,3 +758,9 @@ namespace detail
 		return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]) || (m1[3] != m2[3]);
 	}
 }//namespace glm
+
+#if GLM_HAS_ANONYMOUS_UNION && GLM_NOT_BUGGY_VC32BITS
+#if GLM_ARCH & GLM_ARCH_SSE2
+#	include "type_mat4x4_sse2.inl"
+#endif
+#endif//
diff --git a/glm/detail/type_mat4x4_sse2.inl b/glm/detail/type_mat4x4_sse2.inl
new file mode 100644
index 00000000..09d0b1f1
--- /dev/null
+++ b/glm/detail/type_mat4x4_sse2.inl
@@ -0,0 +1,7 @@
+/// @ref core
+/// @file glm/detail/type_mat4x4_sse2.inl
+
+namespace glm
+{
+
+}//namespace glm
diff --git a/glm/detail/type_vec4.hpp b/glm/detail/type_vec4.hpp
index 624bcf8f..98862759 100644
--- a/glm/detail/type_vec4.hpp
+++ b/glm/detail/type_vec4.hpp
@@ -259,10 +259,10 @@ namespace detail
 
 		// -- Unary arithmetic operators --
 
-		GLM_FUNC_DECL tvec4 & operator=(tvec4 const & v) GLM_DEFAULT;
+		GLM_FUNC_DECL tvec4 & GLM_VECTOR_CALL operator=(tvec4 const & v) GLM_DEFAULT;
 
 		template
-		GLM_FUNC_DECL tvec4 & operator=(tvec4 const & v);
+		GLM_FUNC_DECL tvec4 & GLM_VECTOR_CALL operator=(tvec4 const & v);
 		template
 		GLM_FUNC_DECL tvec4 & operator+=(U scalar);
 		template
@@ -358,7 +358,7 @@
 	GLM_FUNC_DECL tvec4 operator+(tvec1 const & v1, tvec4 const & v2);
 
 	template
-	GLM_FUNC_DECL tvec4 operator+(tvec4 const & v1, tvec4 const & v2);
+	GLM_FUNC_DECL tvec4 GLM_VECTOR_CALL operator+(tvec4 const & v1, tvec4 const & v2);
 
 	template
 	GLM_FUNC_DECL tvec4 operator-(tvec4 const & v, T scalar);
@@ -388,7 +388,7 @@
 	GLM_FUNC_DECL tvec4 operator*(tvec1 const & v1, tvec4 const & v2);
 
 	template
-	GLM_FUNC_DECL tvec4 operator*(tvec4 const & v1, tvec4 const & v2);
+	GLM_FUNC_DECL tvec4 GLM_VECTOR_CALL operator*(tvec4 const & v1, tvec4 const & v2);
 
 	template
 	GLM_FUNC_DECL tvec4 operator/(tvec4 const & v, T scalar);
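On MSVC the calling convention is part of the function type, so the annotation added to these declarations has to be repeated on the matching definitions in type_vec4.inl and on the explicit specializations in type_vec4_sse2.inl below, otherwise the compiler reports a redeclaration mismatch. A compiler-agnostic illustration with a made-up function name; it assumes GLM_VECTOR_CALL from the setup.hpp hunk above, pulled in via glm/glm.hpp:

	#include <glm/glm.hpp>	// brings in setup.hpp, which defines GLM_VECTOR_CALL after this change

	// Declaration, definition and explicit specialization must all repeat the
	// same calling-convention annotation, because it is part of the type.
	template <typename T>
	T GLM_VECTOR_CALL twice(T v);

	template <typename T>
	T GLM_VECTOR_CALL twice(T v) { return v + v; }

	template <>
	float GLM_VECTOR_CALL twice<float>(float v) { return v + v; }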
diff --git a/glm/detail/type_vec4.inl b/glm/detail/type_vec4.inl
index 625c515c..1d36a363 100644
--- a/glm/detail/type_vec4.inl
+++ b/glm/detail/type_vec4.inl
@@ -228,7 +228,7 @@ namespace glm
 #	if !GLM_HAS_DEFAULTED_FUNCTIONS
 		template
-		GLM_FUNC_QUALIFIER tvec4 & tvec4::operator=(tvec4 const & v)
+		GLM_FUNC_QUALIFIER tvec4 & GLM_VECTOR_CALL tvec4::operator=(tvec4 const & v)
 		{
 			this->x = v.x;
 			this->y = v.y;
@@ -240,7 +240,7 @@ namespace glm
 
 	template
 	template
-	GLM_FUNC_QUALIFIER tvec4 & tvec4::operator=(tvec4 const & v)
+	GLM_FUNC_QUALIFIER tvec4 & GLM_VECTOR_CALL tvec4::operator=(tvec4 const & v)
 	{
 		this->x = static_cast(v.x);
 		this->y = static_cast(v.y);
@@ -682,7 +682,7 @@ namespace glm
 	}
 
 	template
-	GLM_FUNC_QUALIFIER tvec4 operator+(tvec4 const & v1, tvec4 const & v2)
+	GLM_FUNC_QUALIFIER tvec4 GLM_VECTOR_CALL operator+(tvec4 const & v1, tvec4 const & v2)
 	{
 		return tvec4(
 			v1.x + v2.x,
@@ -782,7 +782,7 @@ namespace glm
 	}
 
 	template
-	GLM_FUNC_QUALIFIER tvec4 operator*(tvec4 const & v1, tvec4 const & v2)
+	GLM_FUNC_QUALIFIER tvec4 GLM_VECTOR_CALL operator*(tvec4 const & v1, tvec4 const & v2)
 	{
 		return tvec4(
 			v1.x * v2.x,
@@ -1181,13 +1181,11 @@
 }//namespace glm
 
 #if GLM_HAS_ANONYMOUS_UNION && GLM_NOT_BUGGY_VC32BITS
-#if GLM_ARCH & GLM_ARCH_SSE2
-#	include "type_vec4_sse2.inl"
-#endif
-#if GLM_ARCH & GLM_ARCH_AVX
-#	include "type_vec4_avx.inl"
-#endif
-#if GLM_ARCH & GLM_ARCH_AVX2
-#	include "type_vec4_avx2.inl"
-#endif
-#endif//
+#	if GLM_ARCH & GLM_ARCH_AVX2
+#		include "type_vec4_avx2.inl"
+#	elif GLM_ARCH & GLM_ARCH_AVX
+#		include "type_vec4_avx.inl"
+#	elif GLM_ARCH & GLM_ARCH_SSE2
+#		include "type_vec4_sse2.inl"
+#	endif
+#endif//GLM_HAS_ANONYMOUS_UNION && GLM_NOT_BUGGY_VC32BITS
diff --git a/glm/detail/type_vec4_sse2.inl b/glm/detail/type_vec4_sse2.inl
index 968e9fed..88dd4ed1 100644
--- a/glm/detail/type_vec4_sse2.inl
+++ b/glm/detail/type_vec4_sse2.inl
@@ -30,8 +30,8 @@
 /// @author Christophe Riccio
 ///////////////////////////////////////////////////////////////////////////////////
 
-namespace glm{
-
+namespace glm
+{
 #	if !GLM_HAS_DEFAULTED_FUNCTIONS
 	template <>
 	GLM_FUNC_QUALIFIER tvec4::tvec4()
@@ -74,4 +74,20 @@
 		this->data = _mm_add_ps(this->data, _mm_set_ps1(static_cast(v.x)));
 		return *this;
 	}
+
+	template <>
+	GLM_FUNC_QUALIFIER tvec4 GLM_VECTOR_CALL operator+(tvec4 const & v1, tvec4 const & v2)
+	{
+		tvec4 Result(uninitialize);
+		Result.data = _mm_add_ps(v1.data, v2.data);
+		return Result;
+	}
+
+	template <>
+	GLM_FUNC_QUALIFIER tvec4 GLM_VECTOR_CALL operator*(tvec4 const & v1, tvec4 const & v2)
+	{
+		tvec4 Result(uninitialize);
+		Result.data = _mm_mul_ps(v1.data, v2.data);
+		return Result;
+	}
 }//namespace glm
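The two specializations construct their result with the uninitialize tag so the scalar constructor does not zero the members that the SSE store is about to overwrite. A quick smoke test of the operators; whether a given glm::vec4 actually routes through these specializations depends on the precision they are instantiated for, so treat this as a sketch rather than a guarantee:

	#include <glm/glm.hpp>
	#include <cstdio>

	int main()
	{
		glm::vec4 const a(1.0f, 2.0f, 3.0f, 4.0f);
		glm::vec4 const b(4.0f, 3.0f, 2.0f, 1.0f);

		glm::vec4 const s = a + b;	// _mm_add_ps on the SSE2 path
		glm::vec4 const p = a * b;	// _mm_mul_ps on the SSE2 path

		std::printf("%f %f\n", s.x, p.w);	// expected: 5.000000 4.000000
		return 0;
	}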
diff --git a/test/core/core_func_matrix.cpp b/test/core/core_func_matrix.cpp
index 80562e0f..db09e72d 100644
--- a/test/core/core_func_matrix.cpp
+++ b/test/core/core_func_matrix.cpp
@@ -32,6 +32,7 @@
 #include
 #include
 #include
+#include <glm/gtc/epsilon.hpp>
 #include
 #include
 #include
@@ -105,14 +106,14 @@ int test_outerProduct()
 	{ glm::mat3 m = glm::outerProduct(glm::vec3(1.0f), glm::vec3(1.0f)); }
 	{ glm::mat4 m = glm::outerProduct(glm::vec4(1.0f), glm::vec4(1.0f)); }
 
-	{ glm::mat2x3 m = glm::outerProduct(glm::vec3(1.0f), glm::vec2(1.0f)); }
-	{ glm::mat2x4 m = glm::outerProduct(glm::vec4(1.0f), glm::vec2(1.0f)); }
+	{ glm::mat2x3 m = glm::outerProduct(glm::vec3(1.0f), glm::vec2(1.0f)); }
+	{ glm::mat2x4 m = glm::outerProduct(glm::vec4(1.0f), glm::vec2(1.0f)); }
 
-	{ glm::mat3x2 m = glm::outerProduct(glm::vec2(1.0f), glm::vec3(1.0f)); }
-	{ glm::mat3x4 m = glm::outerProduct(glm::vec4(1.0f), glm::vec3(1.0f)); }
+	{ glm::mat3x2 m = glm::outerProduct(glm::vec2(1.0f), glm::vec3(1.0f)); }
+	{ glm::mat3x4 m = glm::outerProduct(glm::vec4(1.0f), glm::vec3(1.0f)); }
 
-	{ glm::mat4x2 m = glm::outerProduct(glm::vec2(1.0f), glm::vec4(1.0f)); }
-	{ glm::mat4x3 m = glm::outerProduct(glm::vec3(1.0f), glm::vec4(1.0f)); }
+	{ glm::mat4x2 m = glm::outerProduct(glm::vec2(1.0f), glm::vec4(1.0f)); }
+	{ glm::mat4x3 m = glm::outerProduct(glm::vec3(1.0f), glm::vec4(1.0f)); }
 
 	return 0;
 }
@@ -213,7 +214,27 @@
 	glm::mat2x2 I2x2 = A2x2 * B2x2;
 	Failed += I2x2 == glm::mat2x2(1) ? 0 : 1;
 
+	return Failed;
+}
+int test_inverse_simd()
+{
+	int Failed(0);
+
+	glm::tmat4x4 const Identity(1);
+
+	glm::tmat4x4 const A4x4(
+		glm::tvec4(1, 0, 1, 0),
+		glm::tvec4(0, 1, 0, 0),
+		glm::tvec4(0, 0, 1, 0),
+		glm::tvec4(0, 0, 0, 1));
+	glm::tmat4x4 const B4x4 = glm::inverse(A4x4);
+	glm::tmat4x4 const I4x4 = A4x4 * B4x4;
+
+	Failed += glm::all(glm::epsilonEqual(I4x4[0], Identity[0], 0.001f)) ? 0 : 1;
+	Failed += glm::all(glm::epsilonEqual(I4x4[1], Identity[1], 0.001f)) ? 0 : 1;
+	Failed += glm::all(glm::epsilonEqual(I4x4[2], Identity[2], 0.001f)) ? 0 : 1;
+	Failed += glm::all(glm::epsilonEqual(I4x4[3], Identity[3], 0.001f)) ? 0 : 1;
 
 	return Failed;
 }
@@ -271,6 +292,7 @@ int main()
 	Error += test_transpose();
 	Error += test_determinant();
 	Error += test_inverse();
+	Error += test_inverse_simd();
 
 #	ifdef NDEBUG
 	std::size_t const Samples(1000);
diff --git a/test/core/core_type_vec4.cpp b/test/core/core_type_vec4.cpp
index 33ea274c..edaf051f 100644
--- a/test/core/core_type_vec4.cpp
+++ b/test/core/core_type_vec4.cpp
@@ -59,7 +59,6 @@ enum comp
 //	return _mm_shuffle_ps(Src, Src, mask<(int(W) << 6) | (int(Z) << 4) | (int(Y) << 2) | (int(X) << 0)>::value);
 //}
 
-
 int test_vec4_ctor()
 {
 	int Error = 0;
@@ -485,6 +484,21 @@ namespace heap
 	}
 }//namespace heap
 
+int test_vec4_simd()
+{
+	int Error = 0;
+
+	glm::tvec4 a(std::clock(), std::clock(), std::clock(), std::clock());
+	glm::tvec4 b(std::clock(), std::clock(), std::clock(), std::clock());
+
+	glm::tvec4 c(b * a);
+	glm::tvec4 d(a + c);
+
+	Error += glm::all(glm::greaterThan(d, glm::tvec4(0))) ? 0 : 1;
+
+	return Error;
+}
+
 int main()
 {
 	int Error(0);
@@ -503,6 +517,7 @@ int main()
 	Error += test_vec4_size();
 	Error += test_vec4_operators();
 	Error += test_vec4_swizzle_partial();
+	Error += test_vec4_simd();
 	Error += test_operator_increment();
 	Error += heap::test();
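One observation on test_vec4_simd: std::clock() can legitimately return 0 (or the same small value for every call) at program start, in which case a, b and therefore d may be all zeros and the greaterThan check fails for reasons unrelated to the SIMD operators. A possible hardening, sketched with an explicit integer vector and a made-up function name rather than the test's exact tvec4 instantiation, and not part of the patch:

	#include <glm/glm.hpp>
	#include <ctime>

	int test_vec4_simd_hardened()
	{
		// Seed the operands away from zero so the '> 0' check stays meaningful.
		int const t = static_cast<int>(std::clock());
		glm::ivec4 const a(t + 1, t + 2, t + 3, t + 4);
		glm::ivec4 const b(t + 5, t + 6, t + 7, t + 8);
		glm::ivec4 const d(a + b * a);
		return glm::all(glm::greaterThan(d, glm::ivec4(0))) ? 0 : 1;
	}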