Uniformalize low level SIMD API

Christophe Riccio 2016-05-28 11:21:04 +02:00
parent 79894a58cc
commit 0ee3a79bfd
5 changed files with 88 additions and 71 deletions

View File

@@ -8,7 +8,7 @@ namespace detail
 	{
 		GLM_FUNC_QUALIFIER static float call(tvec4<float, P> const& x, tvec4<float, P> const& y)
 		{
-			__m128 const dot0 = glm_dot_ss(x.data, y.data);
+			__m128 const dot0 = glm_f32v1_dot(x.data, y.data);
 			return _mm_cvtss_f32(dot0);
 		}
 	};
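For context, the renamed scalar dot helper returns its result in the lowest SSE lane, which is why the specialization extracts it with _mm_cvtss_f32. A minimal, hypothetical standalone caller (the dot4 wrapper and the unaligned loads are illustrative assumptions; only glm_f32v1_dot and _mm_cvtss_f32 appear in the diff):

#include <xmmintrin.h>

// Illustrative only: dot product of two float[4] arrays through the renamed helper.
inline float dot4(float const a[4], float const b[4])
{
	__m128 const va = _mm_loadu_ps(a);
	__m128 const vb = _mm_loadu_ps(b);
	__m128 const d0 = glm_f32v1_dot(va, vb); // result held in lane 0
	return _mm_cvtss_f32(d0);
}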

View File

@@ -15,7 +15,7 @@ namespace detail
 	GLM_FUNC_QUALIFIER static tmat4x4<float, P> call(tmat4x4<float, P> const& m)
 	{
 		tmat4x4<float, P> Result(uninitialize);
-		glm_inverse_ps(*reinterpret_cast<__m128 const(*)[4]>(&m[0].data), *reinterpret_cast<__m128(*)[4]>(&Result[0].data));
+		glm_f32m4_inverse(*reinterpret_cast<__m128 const(*)[4]>(&m[0].data), *reinterpret_cast<__m128(*)[4]>(&Result[0].data));
 		return Result;
 	}
 };
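The renamed low-level inverse operates on raw arrays of four column registers, which is exactly what the reinterpret_casts above provide. A hedged sketch of feeding it from a plain column-major float[16] buffer (the invert4x4 wrapper and the load/store plumbing are assumptions; only the glm_f32m4_inverse(in[4], out[4]) signature comes from the commit):

#include <xmmintrin.h>

// Illustrative only: invert a column-major 4x4 matrix stored as 16 floats.
inline void invert4x4(float const m[16], float out[16])
{
	__m128 in[4], inv[4];
	for(int i = 0; i < 4; ++i)
		in[i] = _mm_loadu_ps(m + 4 * i); // one column per register
	glm_f32m4_inverse(in, inv);
	for(int i = 0; i < 4; ++i)
		_mm_storeu_ps(out + 4 * i, inv[i]);
}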

View File

@@ -10,17 +10,15 @@ static const __m128 GLM_VAR_USED glm_minus_one = _mm_set_ps1(-1.0f);
 static const __m128 GLM_VAR_USED glm_two = _mm_set_ps1(2.0f);
 static const __m128 GLM_VAR_USED glm_three = _mm_set_ps1(3.0f);
-static const __m128 GLM_VAR_USED glm_epi32_sign_mask = _mm_castsi128_ps(_mm_set1_epi32(static_cast<int>(0x80000000)));
 static const __m128 GLM_VAR_USED glm_ps_2pow23 = _mm_set_ps1(8388608.0f);
-static const __m128 GLM_VAR_USED glm_ps_1 = _mm_set_ps1(1.0f);

-GLM_FUNC_QUALIFIER __m128 glm_abs_ps(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_abs(__m128 x)
 {
 	return _mm_and_ps(x, _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF)));
 }

 //sign
-GLM_FUNC_QUALIFIER __m128 glm_sgn_ps(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_sgn(__m128 x)
 {
 	__m128 const Cmp0 = _mm_cmplt_ps(x, glm_zero);
 	__m128 const Cmp1 = _mm_cmpgt_ps(x, glm_zero);
@@ -30,9 +28,10 @@ GLM_FUNC_QUALIFIER __m128 glm_sgn_ps(__m128 x)
 }

 //round
-GLM_FUNC_QUALIFIER __m128 glm_rnd_ps(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_rnd(__m128 x)
 {
-	__m128 const and0 = _mm_and_ps(glm_epi32_sign_mask, x);
+	__m128 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(static_cast<int>(0x80000000)));
+	__m128 const and0 = _mm_and_ps(sgn0, x);
 	__m128 const or0 = _mm_or_ps(and0, glm_ps_2pow23);
 	__m128 const add0 = _mm_add_ps(x, or0);
 	__m128 const sub0 = _mm_sub_ps(add0, or0);
@@ -40,11 +39,11 @@ GLM_FUNC_QUALIFIER __m128 glm_rnd_ps(__m128 x)
 }

 //floor
-GLM_FUNC_QUALIFIER __m128 glm_flr_ps(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_flr(__m128 x)
 {
-	__m128 const rnd0 = glm_rnd_ps(x);
+	__m128 const rnd0 = glm_f32v4_rnd(x);
 	__m128 const cmp0 = _mm_cmplt_ps(x, rnd0);
-	__m128 const and0 = _mm_and_ps(cmp0, glm_ps_1);
+	__m128 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f));
 	__m128 const sub0 = _mm_sub_ps(rnd0, and0);
 	return sub0;
 }
@@ -56,50 +55,51 @@ GLM_FUNC_QUALIFIER __m128 glm_flr_ps(__m128 x)
 //}

 //roundEven
-GLM_FUNC_QUALIFIER __m128 glm_rde_ps(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_rde(__m128 x)
 {
-	__m128 const and0 = _mm_and_ps(glm_epi32_sign_mask, x);
+	__m128 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(static_cast<int>(0x80000000)));
+	__m128 const and0 = _mm_and_ps(sgn0, x);
 	__m128 const or0 = _mm_or_ps(and0, glm_ps_2pow23);
 	__m128 const add0 = _mm_add_ps(x, or0);
 	__m128 const sub0 = _mm_sub_ps(add0, or0);
 	return sub0;
 }

-GLM_FUNC_QUALIFIER __m128 glm_ceil_ps(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_ceil(__m128 x)
 {
-	__m128 const rnd0 = glm_rnd_ps(x);
+	__m128 const rnd0 = glm_f32v4_rnd(x);
 	__m128 const cmp0 = _mm_cmpgt_ps(x, rnd0);
-	__m128 const and0 = _mm_and_ps(cmp0, glm_ps_1);
+	__m128 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f));
 	__m128 const add0 = _mm_add_ps(rnd0, and0);
 	return add0;
 }

-GLM_FUNC_QUALIFIER __m128 glm_frc_ps(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_frc(__m128 x)
 {
-	__m128 const flr0 = glm_flr_ps(x);
+	__m128 const flr0 = glm_f32v4_flr(x);
 	__m128 const sub0 = _mm_sub_ps(x, flr0);
 	return sub0;
 }

-GLM_FUNC_QUALIFIER __m128 glm_mod_ps(__m128 x, __m128 y)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_mod(__m128 x, __m128 y)
 {
 	__m128 const div0 = _mm_div_ps(x, y);
-	__m128 const flr0 = glm_flr_ps(div0);
+	__m128 const flr0 = glm_f32v4_flr(div0);
 	__m128 const mul0 = _mm_mul_ps(y, flr0);
 	__m128 const sub0 = _mm_sub_ps(x, mul0);
 	return sub0;
 }

-GLM_FUNC_QUALIFIER __m128 glm_clp_ps(__m128 v, __m128 minVal, __m128 maxVal)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_clp(__m128 v, __m128 minVal, __m128 maxVal)
 {
 	__m128 const min0 = _mm_min_ps(v, maxVal);
 	__m128 const max0 = _mm_max_ps(min0, minVal);
 	return max0;
 }

-GLM_FUNC_QUALIFIER __m128 glm_mix_ps(__m128 v1, __m128 v2, __m128 a)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_mix(__m128 v1, __m128 v2, __m128 a)
 {
-	__m128 const sub0 = _mm_sub_ps(glm_one, a);
+	__m128 const sub0 = _mm_sub_ps(_mm_set1_ps(1.0f), a);
 	__m128 const mul0 = _mm_mul_ps(v1, sub0);
 	__m128 const mul1 = _mm_mul_ps(v2, a);
 	__m128 const add0 = _mm_add_ps(mul0, mul1);
@@ -107,19 +107,19 @@ GLM_FUNC_QUALIFIER __m128 glm_mix_ps(__m128 v1, __m128 v2, __m128 a)
 }

 //step
-GLM_FUNC_QUALIFIER __m128 glm_stp_ps(__m128 edge, __m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_stp(__m128 edge, __m128 x)
 {
 	__m128 const cmp = _mm_cmple_ps(x, edge);
-	return _mm_movemask_ps(cmp) == 0 ? glm_one : glm_zero;
+	return _mm_movemask_ps(cmp) == 0 ? _mm_set1_ps(1.0f) : _mm_set1_ps(0.0f);
 }

 // smoothstep
-GLM_FUNC_QUALIFIER __m128 glm_ssp_ps(__m128 edge0, __m128 edge1, __m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_ssp(__m128 edge0, __m128 edge1, __m128 x)
 {
 	__m128 const sub0 = _mm_sub_ps(x, edge0);
 	__m128 const sub1 = _mm_sub_ps(edge1, edge0);
 	__m128 const div0 = _mm_sub_ps(sub0, sub1);
-	__m128 const clp0 = glm_clp_ps(div0, glm_zero, glm_one);
+	__m128 const clp0 = glm_f32v4_clp(div0, _mm_set1_ps(0.0f), _mm_set1_ps(1.0f));
 	__m128 const mul0 = _mm_mul_ps(glm_two, clp0);
 	__m128 const sub2 = _mm_sub_ps(glm_three, mul0);
 	__m128 const mul1 = _mm_mul_ps(clp0, clp0);
@@ -128,7 +128,7 @@ GLM_FUNC_QUALIFIER __m128 glm_ssp_ps(__m128 edge0, __m128 edge1, __m128 x)
 }

 // Agner Fog method
-GLM_FUNC_QUALIFIER __m128 glm_nan_ps(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_nan(__m128 x)
 {
 	__m128i const t1 = _mm_castps_si128(x); // reinterpret as 32-bit integer
 	__m128i const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1)); // shift out sign bit
@@ -142,7 +142,7 @@ GLM_FUNC_QUALIFIER __m128 glm_nan_ps(__m128 x)
 }

 // Agner Fog method
-GLM_FUNC_QUALIFIER __m128 glm_inf_ps(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_inf(__m128 x)
 {
 	__m128i const t1 = _mm_castps_si128(x); // reinterpret as 32-bit integer
 	__m128i const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1)); // shift out sign bit
@@ -151,10 +151,27 @@ GLM_FUNC_QUALIFIER __m128 glm_inf_ps(__m128 x)
 // SSE scalar reciprocal sqrt using rsqrt op, plus one Newton-Rhaphson iteration
 // By Elan Ruskin, http://assemblyrequired.crashworks.org/
-GLM_FUNC_QUALIFIER __m128 glm_sqrt_wip_ss(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_f32v1_sqrt_wip(__m128 x)
 {
-	__m128 const recip = _mm_rsqrt_ss(x); // "estimate" opcode
-	__m128 const halfrecip = _mm_mul_ss(glm_half, recip);
-	__m128 const threeminus_xrr = _mm_sub_ss(glm_three, _mm_mul_ss(x, _mm_mul_ss(recip, recip)));
-	return _mm_mul_ss(halfrecip, threeminus_xrr);
+	__m128 const Rcp0 = _mm_rsqrt_ss(x); // "estimate" opcode
+	__m128 const Mul0 = _mm_mul_ss(_mm_set1_ps(0.5f), Rcp0);
+	__m128 const Mul1 = _mm_mul_ss(Rcp0, Rcp0);
+	__m128 const Mul2 = _mm_mul_ss(x, Mul1);
+	__m128 const Sub0 = _mm_sub_ss(_mm_set1_ps(3.0f), Mul2);
+	__m128 const Mul3 = _mm_mul_ss(Mul0, Sub0);
+	return Mul3;
 }
+
+// SSE scalar reciprocal sqrt using rsqrt op, plus one Newton-Rhaphson iteration
+// By Elan Ruskin, http://assemblyrequired.crashworks.org/
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_sqrt_wip(__m128 x)
+{
+	__m128 const Rcp0 = _mm_rsqrt_ps(x); // "estimate" opcode
+	__m128 const Mul0 = _mm_mul_ps(_mm_set1_ps(0.5f), Rcp0);
+	__m128 const Mul1 = _mm_mul_ps(Mul0, Mul0);
+	__m128 const Mul2 = _mm_mul_ps(x, Mul1);
+	__m128 const Sub0 = _mm_sub_ps(_mm_set1_ps(3.0f), Mul2);
+	__m128 const Mul3 = _mm_mul_ps(Mul0, Sub0);
+	return Mul3;
+}
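Two implementation notes on this file. glm_f32v4_rnd and glm_f32v4_rde rely on the 2^23 trick: adding and then subtracting a 8388608.0f constant that carries the input's sign (extracted with the 0x80000000 mask) forces rounding to the nearest integer for magnitudes below 2^23. The *_sqrt_wip helpers follow the usual refinement of the hardware rsqrt estimate: given an estimate y0 of 1/sqrt(x), one Newton-Raphson step yields y1 = 0.5 * y0 * (3 - x * y0 * y0). A scalar model of that step, purely illustrative and not part of the library:

// Illustrative only: one Newton-Raphson refinement of an rsqrt estimate,
// mirroring the mul/sub sequence used by glm_f32v1_sqrt_wip above.
inline float refine_rsqrt(float x, float y0)
{
	return 0.5f * y0 * (3.0f - x * y0 * y0); // improved estimate of 1/sqrt(x)
}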

View File

@@ -5,7 +5,7 @@

 #include "common.h"

-GLM_FUNC_QUALIFIER __m128 glm_dot_ps(__m128 v1, __m128 v2)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_dot(__m128 v1, __m128 v2)
 {
 #	if GLM_ARCH & GLM_ARCH_AVX
 		return _mm_dp_ps(v1, v2, 0xff);
@@ -24,7 +24,7 @@ GLM_FUNC_QUALIFIER __m128 glm_dot_ps(__m128 v1, __m128 v2)
 #	endif
 }

-GLM_FUNC_QUALIFIER __m128 glm_dot_ss(__m128 v1, __m128 v2)
+GLM_FUNC_QUALIFIER __m128 glm_f32v1_dot(__m128 v1, __m128 v2)
 {
 #	if GLM_ARCH & GLM_ARCH_AVX
 		return _mm_dp_ps(v1, v2, 0xff);
@@ -43,21 +43,21 @@ GLM_FUNC_QUALIFIER __m128 glm_dot_ss(__m128 v1, __m128 v2)
 #	endif
 }

-GLM_FUNC_QUALIFIER __m128 glm_len_ps(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_len(__m128 x)
 {
-	__m128 const dot0 = glm_dot_ps(x, x);
+	__m128 const dot0 = glm_f32v4_dot(x, x);
 	__m128 const sqt0 = _mm_sqrt_ps(dot0);
 	return sqt0;
 }

-GLM_FUNC_QUALIFIER __m128 glm_dst_ps(__m128 p0, __m128 p1)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_dst(__m128 p0, __m128 p1)
 {
 	__m128 sub0 = _mm_sub_ps(p0, p1);
-	__m128 len0 = glm_len_ps(sub0);
+	__m128 len0 = glm_f32v4_len(sub0);
 	return len0;
 }

-GLM_FUNC_QUALIFIER __m128 glm_xpd_ps(__m128 v1, __m128 v2)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_xpd(__m128 v1, __m128 v2)
 {
 	__m128 swp0 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 0, 2, 1));
 	__m128 swp1 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 1, 0, 2));
@@ -69,43 +69,43 @@ GLM_FUNC_QUALIFIER __m128 glm_xpd_ps(__m128 v1, __m128 v2)
 	return sub0;
 }

-GLM_FUNC_QUALIFIER __m128 glm_nrm_ps(__m128 v)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_nrm(__m128 v)
 {
-	__m128 dot0 = glm_dot_ps(v, v);
+	__m128 dot0 = glm_f32v4_dot(v, v);
 	__m128 isr0 = _mm_rsqrt_ps(dot0);
 	__m128 mul0 = _mm_mul_ps(v, isr0);
 	return mul0;
 }

-GLM_FUNC_QUALIFIER __m128 glm_ffd_ps(__m128 N, __m128 I, __m128 Nref)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_ffd(__m128 N, __m128 I, __m128 Nref)
 {
-	__m128 dot0 = glm_dot_ps(Nref, I);
-	__m128 sgn0 = glm_sgn_ps(dot0);
+	__m128 dot0 = glm_f32v4_dot(Nref, I);
+	__m128 sgn0 = glm_f32v4_sgn(dot0);
 	__m128 mul0 = _mm_mul_ps(sgn0, glm_minus_one);
 	__m128 mul1 = _mm_mul_ps(N, mul0);
 	return mul1;
 }

-GLM_FUNC_QUALIFIER __m128 glm_rfe_ps(__m128 I, __m128 N)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_rfe(__m128 I, __m128 N)
 {
-	__m128 dot0 = glm_dot_ps(N, I);
+	__m128 dot0 = glm_f32v4_dot(N, I);
 	__m128 mul0 = _mm_mul_ps(N, dot0);
 	__m128 mul1 = _mm_mul_ps(mul0, glm_two);
 	__m128 sub0 = _mm_sub_ps(I, mul1);
 	return sub0;
 }

-GLM_FUNC_QUALIFIER __m128 glm_rfa_ps(__m128 I, __m128 N, __m128 eta)
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_rfa(__m128 I, __m128 N, __m128 eta)
 {
-	__m128 dot0 = glm_dot_ps(N, I);
+	__m128 dot0 = glm_f32v4_dot(N, I);
 	__m128 mul0 = _mm_mul_ps(eta, eta);
 	__m128 mul1 = _mm_mul_ps(dot0, dot0);
 	__m128 sub0 = _mm_sub_ps(glm_one, mul0);
 	__m128 sub1 = _mm_sub_ps(glm_one, mul1);
 	__m128 mul2 = _mm_mul_ps(sub0, sub1);

-	if(_mm_movemask_ps(_mm_cmplt_ss(mul2, glm_zero)) == 0)
-		return glm_zero;
+	if(_mm_movemask_ps(_mm_cmplt_ss(mul2, _mm_set1_ps(0.0f))) == 0)
+		return _mm_set1_ps(0.0f);

 	__m128 sqt0 = _mm_sqrt_ps(mul2);
 	__m128 mul3 = _mm_mul_ps(eta, dot0);
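These geometric helpers compose naturally. One hedged example (the unit_cross wrapper and the (x, y, z, 0) lane convention are assumptions; glm_f32v4_xpd and glm_f32v4_nrm come from the diff, and since glm_f32v4_nrm uses the raw _mm_rsqrt_ps estimate, the result is only approximately unit length):

#include <xmmintrin.h>

// Illustrative only: approximate unit normal from two edge directions
// packed as (x, y, z, 0) in __m128 registers.
inline __m128 unit_cross(__m128 e0, __m128 e1)
{
	__m128 const c = glm_f32v4_xpd(e0, e1); // cross product
	return glm_f32v4_nrm(c);                // rsqrt-based normalize
}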

View File

@@ -9,7 +9,7 @@ static const __m128 GLM_VAR_USED _m128_rad_ps = _mm_set_ps1(3.141592653589793238
 static const __m128 GLM_VAR_USED _m128_deg_ps = _mm_set_ps1(180.f / 3.141592653589793238462643383279f);

 template <typename matType>
-GLM_FUNC_QUALIFIER matType glm_comp_mul_ps
+GLM_FUNC_QUALIFIER matType glm_comp_mul_f32m4
 (
 	__m128 const in1[4],
 	__m128 const in2[4],
@@ -22,7 +22,7 @@ GLM_FUNC_QUALIFIER matType glm_comp_mul_ps
 	out[3] = _mm_mul_ps(in1[3], in2[3]);
 }

-GLM_FUNC_QUALIFIER void glm_add_ps(__m128 const in1[4], __m128 const in2[4], __m128 out[4])
+GLM_FUNC_QUALIFIER void glm_add_f32m4(__m128 const in1[4], __m128 const in2[4], __m128 out[4])
 {
 	{
 		out[0] = _mm_add_ps(in1[0], in2[0]);
@@ -32,7 +32,7 @@ GLM_FUNC_QUALIFIER void glm_add_ps(__m128 const in1[4], __m128 const in2[4], __m
 	}
 }

-GLM_FUNC_QUALIFIER void glm_sub_ps(__m128 const in1[4], __m128 const in2[4], __m128 out[4])
+GLM_FUNC_QUALIFIER void glm_sub_f32v4(__m128 const in1[4], __m128 const in2[4], __m128 out[4])
 {
 	{
 		out[0] = _mm_sub_ps(in1[0], in2[0]);
@@ -42,7 +42,7 @@ GLM_FUNC_QUALIFIER void glm_sub_ps(__m128 const in1[4], __m128 const in2[4], __m
 	}
 }

-GLM_FUNC_QUALIFIER __m128 glm_mul_ps(__m128 const m[4], __m128 v)
+GLM_FUNC_QUALIFIER __m128 glm_mul_f32v4(__m128 const m[4], __m128 v)
 {
 	__m128 v0 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 0, 0, 0));
 	__m128 v1 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 1, 1, 1));
@@ -61,7 +61,7 @@ GLM_FUNC_QUALIFIER __m128 glm_mul_ps(__m128 const m[4], __m128 v)
 	return a2;
 }

-GLM_FUNC_QUALIFIER __m128 glm_mul_ps(__m128 v, __m128 const m[4])
+GLM_FUNC_QUALIFIER __m128 glm_mul_f32v4(__m128 v, __m128 const m[4])
 {
 	__m128 i0 = m[0];
 	__m128 i1 = m[1];
@@ -88,7 +88,7 @@ GLM_FUNC_QUALIFIER __m128 glm_mul_ps(__m128 v, __m128 const m[4])
 	return f2;
 }

-GLM_FUNC_QUALIFIER void glm_mul_ps(__m128 const in1[4], __m128 const in2[4], __m128 out[4])
+GLM_FUNC_QUALIFIER void glm_mul_f32v4(__m128 const in1[4], __m128 const in2[4], __m128 out[4])
 {
 	{
 		__m128 e0 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(0, 0, 0, 0));
@@ -164,7 +164,7 @@ GLM_FUNC_QUALIFIER void glm_mul_ps(__m128 const in1[4], __m128 const in2[4], __m
 	}
 }

-GLM_FUNC_QUALIFIER void glm_transpose_ps(__m128 const in[4], __m128 out[4])
+GLM_FUNC_QUALIFIER void glm_transpose_f32m4(__m128 const in[4], __m128 out[4])
 {
 	__m128 tmp0 = _mm_shuffle_ps(in[0], in[1], 0x44);
 	__m128 tmp2 = _mm_shuffle_ps(in[0], in[1], 0xEE);
@@ -177,7 +177,7 @@ GLM_FUNC_QUALIFIER void glm_transpose_ps(__m128 const in[4], __m128 out[4])
 	out[3] = _mm_shuffle_ps(tmp2, tmp3, 0xDD);
 }

-GLM_FUNC_QUALIFIER __m128 glm_slow_det_ps(__m128 const in[4])
+GLM_FUNC_QUALIFIER __m128 glm_det_highp_f32m4(__m128 const in[4])
 {
 	__m128 Fac0;
 	{
@@ -387,11 +387,11 @@ GLM_FUNC_QUALIFIER __m128 glm_slow_det_ps(__m128 const in[4])
 	//	+ m[0][1] * Inverse[1][0]
 	//	+ m[0][2] * Inverse[2][0]
 	//	+ m[0][3] * Inverse[3][0];
-	__m128 Det0 = glm_dot_ps(in[0], Row2);
+	__m128 Det0 = glm_f32v4_dot(in[0], Row2);
 	return Det0;
 }

-GLM_FUNC_QUALIFIER __m128 glm_detd_ps(__m128 const m[4])
+GLM_FUNC_QUALIFIER __m128 glm_detd_f32m4(__m128 const m[4])
 {
 	// _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(
@@ -451,10 +451,10 @@ GLM_FUNC_QUALIFIER __m128 glm_detd_ps(__m128 const m[4])
 	//	+ m[0][2] * DetCof[2]
 	//	+ m[0][3] * DetCof[3];
-	return glm_dot_ps(m[0], DetCof);
+	return glm_f32v4_dot(m[0], DetCof);
 }

-GLM_FUNC_QUALIFIER __m128 glm_det_ps(__m128 const m[4])
+GLM_FUNC_QUALIFIER __m128 glm_det_f32m4(__m128 const m[4])
 {
 	// _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(add)
@@ -514,10 +514,10 @@ GLM_FUNC_QUALIFIER __m128 glm_det_ps(__m128 const m[4])
 	//	+ m[0][2] * DetCof[2]
 	//	+ m[0][3] * DetCof[3];
-	return glm_dot_ps(m[0], DetCof);
+	return glm_f32v4_dot(m[0], DetCof);
 }

-GLM_FUNC_QUALIFIER void glm_inverse_ps(__m128 const in[4], __m128 out[4])
+GLM_FUNC_QUALIFIER void glm_f32m4_inverse(__m128 const in[4], __m128 out[4])
 {
 	__m128 Fac0;
 	{
@@ -727,7 +727,7 @@ GLM_FUNC_QUALIFIER void glm_inverse_ps(__m128 const in[4], __m128 out[4])
 	//	+ m[0][1] * Inverse[1][0]
 	//	+ m[0][2] * Inverse[2][0]
 	//	+ m[0][3] * Inverse[3][0];
-	__m128 Det0 = glm_dot_ps(in[0], Row2);
+	__m128 Det0 = glm_f32v4_dot(in[0], Row2);
 	__m128 Rcp0 = _mm_div_ps(glm_one, Det0);
 	//__m128 Rcp0 = _mm_rcp_ps(Det0);
@@ -738,7 +738,7 @@ GLM_FUNC_QUALIFIER void glm_inverse_ps(__m128 const in[4], __m128 out[4])
 	out[3] = _mm_mul_ps(Inv3, Rcp0);
 }

-GLM_FUNC_QUALIFIER void inverse_fast_ps(__m128 const in[4], __m128 out[4])
+GLM_FUNC_QUALIFIER void glm_lowp_f32v4_inverse(__m128 const in[4], __m128 out[4])
 {
 	__m128 Fac0;
 	{
@@ -948,7 +948,7 @@ GLM_FUNC_QUALIFIER void inverse_fast_ps(__m128 const in[4], __m128 out[4])
 	//	+ m[0][1] * Inverse[1][0]
 	//	+ m[0][2] * Inverse[2][0]
 	//	+ m[0][3] * Inverse[3][0];
-	__m128 Det0 = glm_dot_ps(in[0], Row2);
+	__m128 Det0 = glm_f32v4_dot(in[0], Row2);
 	__m128 Rcp0 = _mm_rcp_ps(Det0);
 	//__m128 Rcp0 = _mm_div_ps(one, Det0);
 	// Inverse /= Determinant;
@@ -958,7 +958,7 @@ GLM_FUNC_QUALIFIER void inverse_fast_ps(__m128 const in[4], __m128 out[4])
 	out[3] = _mm_mul_ps(Inv3, Rcp0);
 }

 /*
-GLM_FUNC_QUALIFIER void glm_rotate_ps(__m128 const in[4], float Angle, float const v[3], __m128 out[4])
+GLM_FUNC_QUALIFIER void glm_f32m4_rotate(__m128 const in[4], float Angle, float const v[3], __m128 out[4])
 {
 	float a = glm::radians(Angle);
 	float c = cos(a);
@@ -1028,7 +1028,7 @@ GLM_FUNC_QUALIFIER void glm_rotate_ps(__m128 const in[4], float Angle, float con
 	sse_mul_ps(in, Result, out);
 }
 */

-GLM_FUNC_QUALIFIER void glm_outer_ps(__m128 const & c, __m128 const & r, __m128 out[4])
+GLM_FUNC_QUALIFIER void glm_f32m4_outer(__m128 const & c, __m128 const & r, __m128 out[4])
 {
 	out[0] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(0, 0, 0, 0)));
 	out[1] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(1, 1, 1, 1)));
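For completeness, the matrix/vector overload glm_mul_f32v4(m, v) renamed in this file follows the usual column-combination pattern: each component of v is broadcast with _mm_shuffle_ps, multiplied against the matching column of m, and the partial products are summed. A hedged usage sketch (the transform_point wrapper and the trailing 1.0f homogeneous component are assumptions; only the glm_mul_f32v4(__m128 const m[4], __m128 v) overload is taken from the commit):

#include <xmmintrin.h>

// Illustrative only: transform a point by a column-major 4x4 matrix.
inline __m128 transform_point(__m128 const m[4], float x, float y, float z)
{
	__m128 const v = _mm_set_ps(1.0f, z, y, x); // lanes 0..3 hold (x, y, z, 1)
	return glm_mul_f32v4(m, v);                 // columns weighted by v, then summed
}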