Clean up inverse SSE code

Christophe Riccio 2016-05-01 03:42:47 +02:00
parent be8d6c9ccc
commit 5f05a5e953
8 changed files with 145 additions and 108 deletions

View File

@@ -199,3 +199,11 @@ namespace detail
return (eta * I - (eta * dotValue + std::sqrt(k)) * N) * static_cast<T>(k >= static_cast<T>(0));
}
}//namespace glm
#if GLM_HAS_ANONYMOUS_UNION && GLM_NOT_BUGGY_VC32BITS
# if GLM_ARCH & GLM_ARCH_AVX
# include "func_geometric_avx.inl"
# elif GLM_ARCH & GLM_ARCH_SSE2
# include "func_geometric_sse2.inl"
# endif
#endif//
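A note on the guard above: the SIMD overrides are only included when the compiler supports anonymous unions (and is not the buggy 32-bit VC case) and the detected architecture reports AVX or SSE2; otherwise only the generic templates defined earlier in this file are compiled. The same macros can be inspected to see which path a given build selects. A minimal sketch, assuming the GLM setup headers pulled in by <glm/glm.hpp> make GLM_HAS_ANONYMOUS_UNION, GLM_NOT_BUGGY_VC32BITS and GLM_ARCH visible (they are internal detail macros, so this is purely illustrative):

#include <cstdio>
#include <glm/glm.hpp> // pulls in the setup headers that define GLM_ARCH and friends

int main()
{
#if GLM_HAS_ANONYMOUS_UNION && GLM_NOT_BUGGY_VC32BITS && (GLM_ARCH & GLM_ARCH_AVX)
	std::printf("AVX geometric kernels selected\n");
#elif GLM_HAS_ANONYMOUS_UNION && GLM_NOT_BUGGY_VC32BITS && (GLM_ARCH & GLM_ARCH_SSE2)
	std::printf("SSE2 geometric kernels selected\n");
#else
	std::printf("generic geometric kernels selected\n");
#endif
	return 0;
}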

View File

View File

@@ -0,0 +1,38 @@
namespace glm{
namespace detail
{
GLM_FUNC_QUALIFIER __m128 dot_ps(__m128 v1, __m128 v2)
{
__m128 mul0 = _mm_mul_ps(v1, v2);
__m128 swp0 = _mm_shuffle_ps(mul0, mul0, _MM_SHUFFLE(2, 3, 0, 1));
__m128 add0 = _mm_add_ps(mul0, swp0);
__m128 swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3));
__m128 add1 = _mm_add_ps(add0, swp1);
return add1;
}
GLM_FUNC_QUALIFIER __m128 dot_ss(__m128 v1, __m128 v2)
{
__m128 mul0 = _mm_mul_ps(v1, v2);
__m128 mov0 = _mm_movehl_ps(mul0, mul0);
__m128 add0 = _mm_add_ps(mov0, mul0);
__m128 swp1 = _mm_shuffle_ps(add0, add0, 1);
__m128 add1 = _mm_add_ss(add0, swp1);
return add1;
}
template <>
struct compute_dot<tvec4, float, simd>
{
GLM_FUNC_QUALIFIER static float call(tvec4<float, simd> const& x, tvec4<float, simd> const& y)
{
__m128 const dot0 = dot_ss(x.data, y.data);
float Result = 0;
_mm_store_ss(&Result, dot0);
return Result;
}
};
}//namespace detail
}//namespace glm
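The new func_geometric_sse2.inl hosts the shared SSE dot-product helpers. dot_ps multiplies the two vectors lane-wise and folds the four products with two shuffle/add passes, so every lane of the result holds x0*y0 + x1*y1 + x2*y2 + x3*y3; dot_ss performs the same reduction but only guarantees the lowest lane, which is all the compute_dot specialization needs before extracting the scalar with _mm_store_ss. As a standalone illustration of the dot_ps-style reduction (hypothetical file and function names, not GLM code), checked against a plain scalar loop:

// dot4_sse.cpp - illustration only; names are hypothetical, not GLM's
#include <xmmintrin.h>
#include <cstdio>

static float dot4_sse(float const* a, float const* b)
{
	__m128 va   = _mm_loadu_ps(a);
	__m128 vb   = _mm_loadu_ps(b);
	__m128 mul  = _mm_mul_ps(va, vb);                                  // a0*b0, a1*b1, a2*b2, a3*b3
	__m128 swp0 = _mm_shuffle_ps(mul, mul, _MM_SHUFFLE(2, 3, 0, 1));   // swap the elements of each pair
	__m128 add0 = _mm_add_ps(mul, swp0);                               // pairwise sums, duplicated
	__m128 swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3)); // reverse the four lanes
	__m128 add1 = _mm_add_ps(add0, swp1);                              // full sum in every lane
	float Result = 0;
	_mm_store_ss(&Result, add1);
	return Result;
}

int main()
{
	float const a[4] = {1.f, 2.f, 3.f, 4.f};
	float const b[4] = {5.f, 6.f, 7.f, 8.f};
	float Scalar = 0.f;
	for(int i = 0; i < 4; ++i)
		Scalar += a[i] * b[i];
	std::printf("sse: %f scalar: %f\n", dot4_sse(a, b), Scalar);       // both print 70.000000
	return 0;
}

Both helpers live in glm::detail, which is what lets the matrix inverse below drop its private sse_dot_ps copy and call detail::dot_ps instead.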

View File

@@ -309,8 +309,8 @@ namespace detail
}//namespace glm
#if GLM_HAS_ANONYMOUS_UNION && GLM_NOT_BUGGY_VC32BITS
#if GLM_ARCH & GLM_ARCH_SSE2
# include "func_matrix_sse2.inl"
#endif
# if GLM_ARCH & GLM_ARCH_SSE2
# include "func_matrix_sse2.inl"
# endif
#endif//

View File

@@ -1,17 +1,8 @@
#include "type_mat4x4.hpp"
#include "func_geometric.hpp"
namespace glm
{
GLM_FUNC_QUALIFIER __m128 sse_dot_ps(__m128 v1, __m128 v2)
{
__m128 mul0 = _mm_mul_ps(v1, v2);
__m128 swp0 = _mm_shuffle_ps(mul0, mul0, _MM_SHUFFLE(2, 3, 0, 1));
__m128 add0 = _mm_add_ps(mul0, swp0);
__m128 swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3));
__m128 add1 = _mm_add_ps(add0, swp1);
return add1;
}
template <>
GLM_FUNC_QUALIFIER tmat4x4<float, simd> inverse(tmat4x4<float, simd> const& m)
{
@@ -22,16 +13,16 @@ namespace glm
// valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3];
// valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3];
__m128 Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(3, 3, 3, 3));
__m128 Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(2, 2, 2, 2));
__m128 const Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(3, 3, 3, 3));
__m128 const Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(2, 2, 2, 2));
__m128 Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(2, 2, 2, 2));
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
__m128 Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(3, 3, 3, 3));
__m128 const Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(2, 2, 2, 2));
__m128 const Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
__m128 const Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
__m128 const Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(3, 3, 3, 3));
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
__m128 const Mul00 = _mm_mul_ps(Swp00, Swp01);
__m128 const Mul01 = _mm_mul_ps(Swp02, Swp03);
Fac0 = _mm_sub_ps(Mul00, Mul01);
}
@@ -42,16 +33,16 @@ namespace glm
// valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3];
// valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3];
__m128 Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(3, 3, 3, 3));
__m128 Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(1, 1, 1, 1));
__m128 const Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(3, 3, 3, 3));
__m128 const Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(1, 1, 1, 1));
__m128 Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(1, 1, 1, 1));
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
__m128 Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(3, 3, 3, 3));
__m128 const Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(1, 1, 1, 1));
__m128 const Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
__m128 const Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
__m128 const Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(3, 3, 3, 3));
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
__m128 const Mul00 = _mm_mul_ps(Swp00, Swp01);
__m128 const Mul01 = _mm_mul_ps(Swp02, Swp03);
Fac1 = _mm_sub_ps(Mul00, Mul01);
}
@@ -62,16 +53,16 @@ namespace glm
// valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2];
// valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2];
__m128 Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(2, 2, 2, 2));
__m128 Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(1, 1, 1, 1));
__m128 const Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(2, 2, 2, 2));
__m128 const Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(1, 1, 1, 1));
__m128 Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(1, 1, 1, 1));
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
__m128 Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(2, 2, 2, 2));
__m128 const Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(1, 1, 1, 1));
__m128 const Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
__m128 const Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
__m128 const Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(2, 2, 2, 2));
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
__m128 const Mul00 = _mm_mul_ps(Swp00, Swp01);
__m128 const Mul01 = _mm_mul_ps(Swp02, Swp03);
Fac2 = _mm_sub_ps(Mul00, Mul01);
}
@@ -82,16 +73,16 @@ namespace glm
// valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3];
// valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3];
__m128 Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(3, 3, 3, 3));
__m128 Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(0, 0, 0, 0));
__m128 const Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(3, 3, 3, 3));
__m128 const Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(0, 0, 0, 0));
__m128 Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(0, 0, 0, 0));
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
__m128 Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(3, 3, 3, 3));
__m128 const Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(0, 0, 0, 0));
__m128 const Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
__m128 const Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
__m128 const Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(3, 3, 3, 3));
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
__m128 const Mul00 = _mm_mul_ps(Swp00, Swp01);
__m128 const Mul01 = _mm_mul_ps(Swp02, Swp03);
Fac3 = _mm_sub_ps(Mul00, Mul01);
}
@@ -102,16 +93,16 @@ namespace glm
// valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2];
// valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2];
__m128 Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(2, 2, 2, 2));
__m128 Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(0, 0, 0, 0));
__m128 const Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(2, 2, 2, 2));
__m128 const Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(0, 0, 0, 0));
__m128 Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(0, 0, 0, 0));
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
__m128 Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(2, 2, 2, 2));
__m128 const Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(0, 0, 0, 0));
__m128 const Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
__m128 const Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
__m128 const Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(2, 2, 2, 2));
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
__m128 const Mul00 = _mm_mul_ps(Swp00, Swp01);
__m128 const Mul01 = _mm_mul_ps(Swp02, Swp03);
Fac4 = _mm_sub_ps(Mul00, Mul01);
}
@@ -122,108 +113,108 @@ namespace glm
// valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1];
// valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1];
__m128 Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(1, 1, 1, 1));
__m128 Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(0, 0, 0, 0));
__m128 const Swp0a = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(1, 1, 1, 1));
__m128 const Swp0b = _mm_shuffle_ps(m[3].data, m[2].data, _MM_SHUFFLE(0, 0, 0, 0));
__m128 Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(0, 0, 0, 0));
__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
__m128 Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(1, 1, 1, 1));
__m128 const Swp00 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(0, 0, 0, 0));
__m128 const Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
__m128 const Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
__m128 const Swp03 = _mm_shuffle_ps(m[2].data, m[1].data, _MM_SHUFFLE(1, 1, 1, 1));
__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
__m128 const Mul00 = _mm_mul_ps(Swp00, Swp01);
__m128 const Mul01 = _mm_mul_ps(Swp02, Swp03);
Fac5 = _mm_sub_ps(Mul00, Mul01);
}
__m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f);
__m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f);
__m128 const SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f);
__m128 const SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f);
// m[1][0]
// m[0][0]
// m[0][0]
// m[0][0]
__m128 Temp0 = _mm_shuffle_ps(m[1].data, m[0].data, _MM_SHUFFLE(0, 0, 0, 0));
__m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0));
__m128 const Temp0 = _mm_shuffle_ps(m[1].data, m[0].data, _MM_SHUFFLE(0, 0, 0, 0));
__m128 const Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0));
// m[1][1]
// m[0][1]
// m[0][1]
// m[0][1]
__m128 Temp1 = _mm_shuffle_ps(m[1].data, m[0].data, _MM_SHUFFLE(1, 1, 1, 1));
__m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0));
__m128 const Temp1 = _mm_shuffle_ps(m[1].data, m[0].data, _MM_SHUFFLE(1, 1, 1, 1));
__m128 const Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0));
// m[1][2]
// m[0][2]
// m[0][2]
// m[0][2]
__m128 Temp2 = _mm_shuffle_ps(m[1].data, m[0].data, _MM_SHUFFLE(2, 2, 2, 2));
__m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0));
__m128 const Temp2 = _mm_shuffle_ps(m[1].data, m[0].data, _MM_SHUFFLE(2, 2, 2, 2));
__m128 const Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0));
// m[1][3]
// m[0][3]
// m[0][3]
// m[0][3]
__m128 Temp3 = _mm_shuffle_ps(m[1].data, m[0].data, _MM_SHUFFLE(3, 3, 3, 3));
__m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0));
__m128 const Temp3 = _mm_shuffle_ps(m[1].data, m[0].data, _MM_SHUFFLE(3, 3, 3, 3));
__m128 const Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0));
// col0
// + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]),
// - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]),
// + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]),
// - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]),
__m128 Mul00 = _mm_mul_ps(Vec1, Fac0);
__m128 Mul01 = _mm_mul_ps(Vec2, Fac1);
__m128 Mul02 = _mm_mul_ps(Vec3, Fac2);
__m128 Sub00 = _mm_sub_ps(Mul00, Mul01);
__m128 Add00 = _mm_add_ps(Sub00, Mul02);
__m128 Inv0 = _mm_mul_ps(SignB, Add00);
__m128 const Mul00 = _mm_mul_ps(Vec1, Fac0);
__m128 const Mul01 = _mm_mul_ps(Vec2, Fac1);
__m128 const Mul02 = _mm_mul_ps(Vec3, Fac2);
__m128 const Sub00 = _mm_sub_ps(Mul00, Mul01);
__m128 const Add00 = _mm_add_ps(Sub00, Mul02);
__m128 const Inv0 = _mm_mul_ps(SignB, Add00);
// col1
// - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]),
// + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]),
// - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]),
// + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]),
__m128 Mul03 = _mm_mul_ps(Vec0, Fac0);
__m128 Mul04 = _mm_mul_ps(Vec2, Fac3);
__m128 Mul05 = _mm_mul_ps(Vec3, Fac4);
__m128 Sub01 = _mm_sub_ps(Mul03, Mul04);
__m128 Add01 = _mm_add_ps(Sub01, Mul05);
__m128 Inv1 = _mm_mul_ps(SignA, Add01);
__m128 const Mul03 = _mm_mul_ps(Vec0, Fac0);
__m128 const Mul04 = _mm_mul_ps(Vec2, Fac3);
__m128 const Mul05 = _mm_mul_ps(Vec3, Fac4);
__m128 const Sub01 = _mm_sub_ps(Mul03, Mul04);
__m128 const Add01 = _mm_add_ps(Sub01, Mul05);
__m128 const Inv1 = _mm_mul_ps(SignA, Add01);
// col2
// + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]),
// - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]),
// + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]),
// - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]),
__m128 Mul06 = _mm_mul_ps(Vec0, Fac1);
__m128 Mul07 = _mm_mul_ps(Vec1, Fac3);
__m128 Mul08 = _mm_mul_ps(Vec3, Fac5);
__m128 Sub02 = _mm_sub_ps(Mul06, Mul07);
__m128 Add02 = _mm_add_ps(Sub02, Mul08);
__m128 Inv2 = _mm_mul_ps(SignB, Add02);
__m128 const Mul06 = _mm_mul_ps(Vec0, Fac1);
__m128 const Mul07 = _mm_mul_ps(Vec1, Fac3);
__m128 const Mul08 = _mm_mul_ps(Vec3, Fac5);
__m128 const Sub02 = _mm_sub_ps(Mul06, Mul07);
__m128 const Add02 = _mm_add_ps(Sub02, Mul08);
__m128 const Inv2 = _mm_mul_ps(SignB, Add02);
// col3
// - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]),
// + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]),
// - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]),
// + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3]));
__m128 Mul09 = _mm_mul_ps(Vec0, Fac2);
__m128 Mul10 = _mm_mul_ps(Vec1, Fac4);
__m128 Mul11 = _mm_mul_ps(Vec2, Fac5);
__m128 Sub03 = _mm_sub_ps(Mul09, Mul10);
__m128 Add03 = _mm_add_ps(Sub03, Mul11);
__m128 Inv3 = _mm_mul_ps(SignA, Add03);
__m128 const Mul09 = _mm_mul_ps(Vec0, Fac2);
__m128 const Mul10 = _mm_mul_ps(Vec1, Fac4);
__m128 const Mul11 = _mm_mul_ps(Vec2, Fac5);
__m128 const Sub03 = _mm_sub_ps(Mul09, Mul10);
__m128 const Add03 = _mm_add_ps(Sub03, Mul11);
__m128 const Inv3 = _mm_mul_ps(SignA, Add03);
__m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0));
__m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0));
__m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0));
__m128 const Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0));
__m128 const Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0));
__m128 const Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0));
// valType Determinant = m[0][0] * Inverse[0][0]
// + m[0][1] * Inverse[1][0]
// + m[0][2] * Inverse[2][0]
// + m[0][3] * Inverse[3][0];
__m128 Det0 = sse_dot_ps(m[0].data, Row2);
__m128 Rcp0 = _mm_rcp_ps(Det0);
__m128 const Det0 = detail::dot_ps(m[0].data, Row2);
__m128 const Rcp0 = _mm_rcp_ps(Det0);
//__m128 Rcp0 = _mm_div_ps(one, Det0);
// Inverse /= Determinant;
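Two details at the tail of this hunk: the file-local sse_dot_ps helper that used to sit at the top (removed above) is replaced by the shared detail::dot_ps, which computes the determinant as the dot product of m[0] with Row2, exactly as the commented scalar formula describes; and the division by the determinant is done with _mm_rcp_ps, a fast reciprocal approximation (relative error on the order of 2^-12), with the exact _mm_div_ps variant left commented out. If more accuracy were wanted without paying for a full divide, the usual refinement is one Newton-Raphson step on top of the approximation; a minimal sketch, not part of this commit:

// rcp_refine.cpp - illustration of refining _mm_rcp_ps; not GLM code
#include <xmmintrin.h>
#include <cstdio>

static __m128 rcp_refined(__m128 x)
{
	__m128 r = _mm_rcp_ps(x);                // ~12-bit approximation of 1/x
	// One Newton-Raphson iteration: r' = r * (2 - x * r)
	__m128 two = _mm_set1_ps(2.0f);
	return _mm_mul_ps(r, _mm_sub_ps(two, _mm_mul_ps(x, r)));
}

int main()
{
	__m128 Det = _mm_set1_ps(3.0f);
	float Approx = 0, Refined = 0;
	_mm_store_ss(&Approx, _mm_rcp_ps(Det));
	_mm_store_ss(&Refined, rcp_refined(Det));
	std::printf("rcp: %.9f refined: %.9f exact: %.9f\n", Approx, Refined, 1.0f / 3.0f);
	return 0;
}

One refinement step brings the reciprocal close to full single precision, which is the usual middle ground between the raw approximation and _mm_div_ps.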

View File

@@ -153,9 +153,9 @@ namespace detail
// -- Implicit basic constructors --
GLM_FUNC_DECL tvec4() GLM_DEFAULT_CTOR;
GLM_FUNC_DECL tvec4(tvec4<T, P> const & v) GLM_DEFAULT;
GLM_FUNC_DECL tvec4(tvec4<T, P> const& v) GLM_DEFAULT;
template <precision Q>
GLM_FUNC_DECL tvec4(tvec4<T, Q> const & v);
GLM_FUNC_DECL tvec4(tvec4<T, Q> const& v);
// -- Explicit basic constructors --
@@ -169,7 +169,7 @@ namespace detail
template <typename A, typename B, typename C, typename D>
GLM_FUNC_DECL tvec4(A a, B b, C c, D d);
template <typename A, typename B, typename C, typename D>
GLM_FUNC_DECL tvec4(tvec1<A, P> const & a, tvec1<B, P> const & b, tvec1<C, P> const & c, tvec1<D, P> const & d);
GLM_FUNC_DECL tvec4(tvec1<A, P> const& a, tvec1<B, P> const& b, tvec1<C, P> const& c, tvec1<D, P> const& d);
// -- Conversion vector constructors --
@@ -209,7 +209,7 @@ namespace detail
/// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification)
template <typename U, precision Q>
GLM_FUNC_DECL GLM_EXPLICIT tvec4(tvec4<U, Q> const & v);
GLM_FUNC_DECL GLM_EXPLICIT tvec4(tvec4<U, Q> const& v);
// -- Swizzle constructors --
@@ -278,9 +278,9 @@ namespace detail
template <typename U>
GLM_FUNC_DECL tvec4<T, P> & operator*=(U scalar);
template <typename U>
GLM_FUNC_DECL tvec4<T, P> & operator*=(tvec1<U, P> const & v);
GLM_FUNC_DECL tvec4<T, P> & operator*=(tvec1<U, P> const& v);
template <typename U>
GLM_FUNC_DECL tvec4<T, P> & operator*=(tvec4<U, P> const & v);
GLM_FUNC_DECL tvec4<T, P> & operator*=(tvec4<U, P> const& v);
template <typename U>
GLM_FUNC_DECL tvec4<T, P> & operator/=(U scalar);
template <typename U>

View File

@@ -478,7 +478,7 @@ int main()
Error += ceilPowerOfTwo_advanced::test();
# ifdef NDEBUG
Error += ceilPowerOfTwo::perf();
Error += ceilPowerOfTwo_advanced::perf();
# endif//NDEBUG
Error += floorMultiple::test();

View File

@@ -149,7 +149,7 @@ int test_isdenormal()
int main()
{
int Error(0);
int Error = 0;
Error += test_isdenormal();
Error += ::fmod_::test();