From 1524fd74d1ddd92c7d80713551eb4b8e79fd3861 Mon Sep 17 00:00:00 2001 From: Christophe Riccio Date: Tue, 11 May 2010 12:23:48 +0100 Subject: [PATCH] Fix mix warning --- glm/core/func_common.inl | 2 +- glm/core/intrinsic_common.hpp | 60 ++ glm/core/intrinsic_common.inl | 280 +++++++++ glm/core/intrinsic_exponential.hpp | 34 ++ glm/core/intrinsic_exponential.inl | 0 glm/core/intrinsic_geometric.hpp | 45 ++ glm/core/intrinsic_geometric.inl | 117 ++++ glm/core/intrinsic_matrix.hpp | 36 ++ glm/core/intrinsic_matrix.inl | 704 +++++++++++++++++++++++ glm/core/intrinsic_trigonometric.hpp | 0 glm/core/intrinsic_trigonometric.inl | 0 glm/core/intrinsic_vector_relational.hpp | 18 + glm/core/intrinsic_vector_relational.inl | 347 +++++++++++ 13 files changed, 1642 insertions(+), 1 deletion(-) create mode 100644 glm/core/intrinsic_common.hpp create mode 100644 glm/core/intrinsic_common.inl create mode 100644 glm/core/intrinsic_exponential.hpp create mode 100644 glm/core/intrinsic_exponential.inl create mode 100644 glm/core/intrinsic_geometric.hpp create mode 100644 glm/core/intrinsic_geometric.inl create mode 100644 glm/core/intrinsic_matrix.hpp create mode 100644 glm/core/intrinsic_matrix.inl create mode 100644 glm/core/intrinsic_trigonometric.hpp create mode 100644 glm/core/intrinsic_trigonometric.inl create mode 100644 glm/core/intrinsic_vector_relational.hpp create mode 100644 glm/core/intrinsic_vector_relational.inl diff --git a/glm/core/func_common.inl b/glm/core/func_common.inl index de792803..e09649e4 100644 --- a/glm/core/func_common.inl +++ b/glm/core/func_common.inl @@ -833,7 +833,7 @@ namespace glm // detail::type::is_float); //return x + a * (y - x); - return genTypeU(x) + a * genTypeU(y - x); + return genTypeT(genTypeU(x) + a * genTypeU(y - x)); } template diff --git a/glm/core/intrinsic_common.hpp b/glm/core/intrinsic_common.hpp new file mode 100644 index 00000000..09b0c290 --- /dev/null +++ b/glm/core/intrinsic_common.hpp @@ -0,0 +1,60 @@ +/////////////////////////////////////////////////////////////////////////////////////////////////// +// OpenGL Mathematics Copyright (c) 2005 - 2010 G-Truc Creation (www.g-truc.net) +/////////////////////////////////////////////////////////////////////////////////////////////////// +// Created : 2009-05-11 +// Updated : 2009-05-11 +// Licence : This source is under MIT License +// File : glm/core/intrinsic_common.hpp +/////////////////////////////////////////////////////////////////////////////////////////////////// + +#ifndef GLM_DETAIL_INTRINSIC_COMMON_INCLUDED +#define GLM_DETAIL_INTRINSIC_COMMON_INCLUDED + +//#include +//#include +#include +#include + +__m128 _mm_abs_ps(__m128 x); + +__m128 _mm_sgn_ps(__m128 x); + +//floor +__m128 _mm_flr_ps(__m128 v); + +//trunc +__m128 _mm_trc_ps(__m128 v); + +//round +__m128 _mm_rnd_ps(__m128 v); + +//roundEven +__m128 _mm_rde_ps(__m128 v); + +__m128 _mm_ceil_ps(__m128 v); + +__m128 _mm_frc_ps(__m128 x); + +__m128 _mm_mod_ps(__m128 x, __m128 y); + +__m128 _mm_modf_ps(__m128 x, __m128i & i); + +//inline __m128 _mm_min_ps(__m128 x, __m128 y) + +//inline __m128 _mm_max_ps(__m128 x, __m128 y) + +__m128 _mm_clp_ps(__m128 v, __m128 minVal, __m128 maxVal); + +__m128 _mm_mix_ps(__m128 v1, __m128 v2, __m128 a); + +__m128 _mm_stp_ps(__m128 edge, __m128 x); + +__m128 _mm_ssp_ps(__m128 edge0, __m128 edge1, __m128 x); + +__m128 _mm_nan_ps(__m128 x); + +__m128 _mm_inf_ps(__m128 x); + +#include "intrinsic_common.inl" + +#endif//GLM_DETAIL_INTRINSIC_COMMON_INCLUDED diff --git a/glm/core/intrinsic_common.inl 
b/glm/core/intrinsic_common.inl new file mode 100644 index 00000000..aa7ea584 --- /dev/null +++ b/glm/core/intrinsic_common.inl @@ -0,0 +1,280 @@ +/////////////////////////////////////////////////////////////////////////////////////////////////// +// OpenGL Mathematics Copyright (c) 2005 - 2010 G-Truc Creation (www.g-truc.net) +/////////////////////////////////////////////////////////////////////////////////////////////////// +// Created : 2009-05-08 +// Updated : 2009-05-08 +// Licence : This source is under MIT License +// File : glm/core/intrinsic_common.inl +/////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace glm{ +namespace detail{ + + union ieee754_QNAN + { + const float f; + struct + { + const unsigned int mantissa:23, exp:8, sign:1; + }; + + ieee754_QNAN() : f(0.0), mantissa(0x7FFFFF), exp(0xFF), sign(0x0) {} + }; + + static const __m128 zero = _mm_setzero_ps(); + static const __m128 one = _mm_set_ps1(1.0f); + static const __m128 minus_one = _mm_set_ps1(-1.0f); + static const __m128 two = _mm_set_ps1(2.0f); + static const __m128 three = _mm_set_ps1(3.0f); + static const __m128 pi = _mm_set_ps1(3.1415926535897932384626433832795f); + static const __m128 hundred_eighty = _mm_set_ps1(180.f); + static const __m128 pi_over_hundred_eighty = _mm_set_ps1(0.017453292519943295769236907684886f); + static const __m128 hundred_eighty_over_pi = _mm_set_ps1(57.295779513082320876798154814105f); + + static const ieee754_QNAN absMask; + static const __m128 abs4Mask = _mm_set_ps1(absMask.f); + + //static const __m128 _epi32_sign_mask = _mm_castsi128_ps(_mm_set1_epi32(0x80000000)); + //static const __m128 _epi32_inv_sign_mask = _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF)); + //static const __m128 _epi32_mant_mask = _mm_castsi128_ps(_mm_set1_epi32(0x7F800000)); + //static const __m128 _epi32_inv_mant_mask = _mm_castsi128_ps(_mm_set1_epi32(0x807FFFFF)); + //static const __m128 _epi32_min_norm_pos = _mm_castsi128_ps(_mm_set1_epi32(0x00800000)); + static const __m128 _epi32_0 = _mm_set_ps1(0); + static const __m128 _epi32_1 = _mm_set_ps1(1); + static const __m128 _epi32_2 = _mm_set_ps1(2); + static const __m128 _epi32_3 = _mm_set_ps1(3); + static const __m128 _epi32_4 = _mm_set_ps1(4); + static const __m128 _epi32_5 = _mm_set_ps1(5); + static const __m128 _epi32_6 = _mm_set_ps1(6); + static const __m128 _epi32_7 = _mm_set_ps1(7); + static const __m128 _epi32_8 = _mm_set_ps1(8); + static const __m128 _epi32_9 = _mm_set_ps1(9); + static const __m128 _epi32_127 = _mm_set_ps1(127); + //static const __m128 _epi32_ninf = _mm_castsi128_ps(_mm_set1_epi32(0xFF800000)); + //static const __m128 _epi32_pinf = _mm_castsi128_ps(_mm_set1_epi32(0x7F800000)); + + static const __m128 _ps_1_3 = _mm_set_ps1(0.33333333333333333333333333333333f); + static const __m128 _ps_0p5 = _mm_set_ps1(0.5f); + static const __m128 _ps_1 = _mm_set_ps1(1.0f); + static const __m128 _ps_m1 = _mm_set_ps1(-1.0f); + static const __m128 _ps_2 = _mm_set_ps1(2.0f); + static const __m128 _ps_3 = _mm_set_ps1(3.0f); + static const __m128 _ps_127 = _mm_set_ps1(127.0f); + static const __m128 _ps_255 = _mm_set_ps1(255.0f); + static const __m128 _ps_2pow23 = _mm_set_ps1(8388608.0f); + + static const __m128 _ps_1_0_0_0 = _mm_set_ps(1.0f, 0.0f, 0.0f, 0.0f); + static const __m128 _ps_0_1_0_0 = _mm_set_ps(0.0f, 1.0f, 0.0f, 0.0f); + static const __m128 _ps_0_0_1_0 = _mm_set_ps(0.0f, 0.0f, 1.0f, 0.0f); + static const __m128 _ps_0_0_0_1 = _mm_set_ps(0.0f, 0.0f, 0.0f, 1.0f); + + static const __m128 _ps_pi 
= _mm_set_ps1(3.1415926535897932384626433832795f); + static const __m128 _ps_pi2 = _mm_set_ps1(6.283185307179586476925286766560f); + static const __m128 _ps_2_pi = _mm_set_ps1(0.63661977236758134307553505349006f); + static const __m128 _ps_pi_2 = _mm_set_ps1(1.5707963267948966192313216916398f); + static const __m128 _ps_4_pi = _mm_set_ps1(1.2732395447351626861510701069801f); + static const __m128 _ps_pi_4 = _mm_set_ps1(0.78539816339744830961566084581988f); + + static const __m128 _ps_sincos_p0 = _mm_set_ps1(0.15707963267948963959e1f); + static const __m128 _ps_sincos_p1 = _mm_set_ps1(-0.64596409750621907082e0f); + static const __m128 _ps_sincos_p2 = _mm_set_ps1(0.7969262624561800806e-1f); + static const __m128 _ps_sincos_p3 = _mm_set_ps1(-0.468175413106023168e-2f); + static const __m128 _ps_tan_p0 = _mm_set_ps1(-1.79565251976484877988e7f); + static const __m128 _ps_tan_p1 = _mm_set_ps1(1.15351664838587416140e6f); + static const __m128 _ps_tan_p2 = _mm_set_ps1(-1.30936939181383777646e4f); + static const __m128 _ps_tan_q0 = _mm_set_ps1(-5.38695755929454629881e7f); + static const __m128 _ps_tan_q1 = _mm_set_ps1(2.50083801823357915839e7f); + static const __m128 _ps_tan_q2 = _mm_set_ps1(-1.32089234440210967447e6f); + static const __m128 _ps_tan_q3 = _mm_set_ps1(1.36812963470692954678e4f); + static const __m128 _ps_tan_poleval = _mm_set_ps1(3.68935e19f); + static const __m128 _ps_atan_t0 = _mm_set_ps1(-0.91646118527267623468e-1f); + static const __m128 _ps_atan_t1 = _mm_set_ps1(-0.13956945682312098640e1f); + static const __m128 _ps_atan_t2 = _mm_set_ps1(-0.94393926122725531747e2f); + static const __m128 _ps_atan_t3 = _mm_set_ps1(0.12888383034157279340e2f); + static const __m128 _ps_atan_s0 = _mm_set_ps1(0.12797564625607904396e1f); + static const __m128 _ps_atan_s1 = _mm_set_ps1(0.21972168858277355914e1f); + static const __m128 _ps_atan_s2 = _mm_set_ps1(0.68193064729268275701e1f); + static const __m128 _ps_atan_s3 = _mm_set_ps1(0.28205206687035841409e2f); + + static const __m128 _ps_exp_hi = _mm_set_ps1(88.3762626647949f); + static const __m128 _ps_exp_lo = _mm_set_ps1(-88.3762626647949f); + static const __m128 _ps_exp_rln2 = _mm_set_ps1(1.4426950408889634073599f); + static const __m128 _ps_exp_p0 = _mm_set_ps1(1.26177193074810590878e-4f); + static const __m128 _ps_exp_p1 = _mm_set_ps1(3.02994407707441961300e-2f); + static const __m128 _ps_exp_q0 = _mm_set_ps1(3.00198505138664455042e-6f); + static const __m128 _ps_exp_q1 = _mm_set_ps1(2.52448340349684104192e-3f); + static const __m128 _ps_exp_q2 = _mm_set_ps1(2.27265548208155028766e-1f); + static const __m128 _ps_exp_q3 = _mm_set_ps1(2.00000000000000000009e0f); + static const __m128 _ps_exp_c1 = _mm_set_ps1(6.93145751953125e-1f); + static const __m128 _ps_exp_c2 = _mm_set_ps1(1.42860682030941723212e-6f); + static const __m128 _ps_exp2_hi = _mm_set_ps1(127.4999961853f); + static const __m128 _ps_exp2_lo = _mm_set_ps1(-127.4999961853f); + static const __m128 _ps_exp2_p0 = _mm_set_ps1(2.30933477057345225087e-2f); + static const __m128 _ps_exp2_p1 = _mm_set_ps1(2.02020656693165307700e1f); + static const __m128 _ps_exp2_p2 = _mm_set_ps1(1.51390680115615096133e3f); + static const __m128 _ps_exp2_q0 = _mm_set_ps1(2.33184211722314911771e2f); + static const __m128 _ps_exp2_q1 = _mm_set_ps1(4.36821166879210612817e3f); + static const __m128 _ps_log_p0 = _mm_set_ps1(-7.89580278884799154124e-1f); + static const __m128 _ps_log_p1 = _mm_set_ps1(1.63866645699558079767e1f); + static const __m128 _ps_log_p2 = _mm_set_ps1(-6.41409952958715622951e1f); + static const 
__m128 _ps_log_q0 = _mm_set_ps1(-3.56722798256324312549e1f); + static const __m128 _ps_log_q1 = _mm_set_ps1(3.12093766372244180303e2f); + static const __m128 _ps_log_q2 = _mm_set_ps1(-7.69691943550460008604e2f); + static const __m128 _ps_log_c0 = _mm_set_ps1(0.693147180559945f); + static const __m128 _ps_log2_c0 = _mm_set_ps1(1.44269504088896340735992f); + +}//namespace detail +}//namespace glm + +inline __m128 _mm_abs_ps(__m128 x) +{ + return _mm_and_ps(glm::detail::abs4Mask, x); +} + +inline __m128 _mm_sgn_ps(__m128 x) +{ + //__m128 cmp0 = _mm_cmpeq_ps(x, zero); + //__m128 cmp1 = _mm_cmple_ps(x, zero); + //__m128 cmp2 = _mm_cmpge_ps(x, zero); + + __m128 result; + __m128 cmp0 = _mm_cmpeq_ps(x, glm::detail::zero); + if(_mm_movemask_ps(cmp0) == 0) + result = glm::detail::zero; + else + { + __m128 cmp1 = _mm_cmpge_ps(x, glm::detail::zero); + //__m128 cmp2 = _mm_cmple_ps(x, glm::detail::zero); + if(_mm_movemask_ps(cmp1) > 0) + result = glm::detail::one; + else //if(_mm_movemask_ps(cmp2) > 0) + result = glm::detail::minus_one; + } + return result; +} + +//floor +inline __m128 _mm_flr_ps(__m128 x) +{ + __m128 rnd0 = _mm_rnd_ps(x); + __m128 cmp0 = _mm_cmplt_ps(x, rnd0); + __m128 and0 = _mm_and_ps(cmp0, glm::detail::_ps_1); + __m128 sub0 = _mm_sub_ps(rnd0, and0); + return sub0; +} + +//trunc +inline __m128 _mm_trc_ps(__m128 v) +{ + return __m128(); +} + +//round +inline __m128 _mm_rnd_ps(__m128 x) +{ + __m128 and0;// = _mm_and_ps(glm::detail::_epi32_sign_mask, x); + __m128 or0 = _mm_or_ps(and0, glm::detail::_ps_2pow23); + __m128 add0 = _mm_add_ps(x, or0); + __m128 sub0 = _mm_sub_ps(add0, or0); + return sub0; +} + +//roundEven +inline __m128 _mm_rde_ps(__m128 v) +{ + +} + +inline __m128 _mm_ceil_ps(__m128 x) +{ + __m128 rnd0 = _mm_rnd_ps(x); + __m128 cmp0 = _mm_cmpgt_ps(x, rnd0); + __m128 and0 = _mm_and_ps(cmp0, glm::detail::_ps_1); + __m128 add0 = _mm_add_ps(rnd0, and0); + return add0; +} + +inline __m128 _mm_frc_ps(__m128 x) +{ + __m128 flr0 = _mm_flr_ps(x); + __m128 sub0 = _mm_sub_ps(x, flr0); + return sub0; +} + +inline __m128 _mm_mod_ps(__m128 x, __m128 y) +{ + __m128 div0 = _mm_div_ps(x, y); + __m128 flr0 = _mm_flr_ps(div0); + __m128 mul0 = _mm_mul_ps(y, flr0); + __m128 sub0 = _mm_sub_ps(x, mul0); + return sub0; +} + +inline __m128 _mm_modf_ps(__m128 x, __m128i & i) +{ + +} + +//inline __m128 _mm_min_ps(__m128 x, __m128 y) + +//inline __m128 _mm_max_ps(__m128 x, __m128 y) + +inline __m128 _mm_clp_ps(__m128 v, __m128 minVal, __m128 maxVal) +{ + __m128 min0 = _mm_min_ps(v, maxVal); + __m128 max0 = _mm_max_ps(min0, minVal); + return max0; +} + +inline __m128 _mm_mix_ps(__m128 v1, __m128 v2, __m128 a) +{ + __m128 sub0 = _mm_sub_ps(glm::detail::one, a); + __m128 mul0 = _mm_mul_ps(v1, sub0); + __m128 mul1 = _mm_mul_ps(v2, a); + __m128 add0 = _mm_add_ps(mul0, mul1); + return add0; +} + +inline __m128 _mm_stp_ps(__m128 edge, __m128 x) +{ + __m128 cmp = _mm_cmple_ps(x, edge); + if(_mm_movemask_ps(cmp) == 0) + return glm::detail::one; + else + return glm::detail::zero; +} + +inline __m128 _mm_ssp_ps(__m128 edge0, __m128 edge1, __m128 x) +{ + __m128 sub0 = _mm_sub_ps(x, edge0); + __m128 sub1 = _mm_sub_ps(edge1, edge0); + __m128 div0 = _mm_sub_ps(sub0, sub1); + __m128 clp0 = _mm_clp_ps(div0, glm::detail::zero, glm::detail::one); + __m128 mul0 = _mm_mul_ps(glm::detail::two, clp0); + __m128 sub2 = _mm_sub_ps(glm::detail::three, mul0); + __m128 mul1 = _mm_mul_ps(clp0, clp0); + __m128 mul2 = _mm_mul_ps(mul1, sub2); + return mul2; +} + +inline __m128 _mm_nan_ps(__m128 x) +{ + +} + +inline __m128 
_mm_inf_ps(__m128 x) +{ + +} + +// SSE scalar reciprocal sqrt using rsqrt op, plus one Newton-Rhaphson iteration +// By Elan Ruskin, +inline __m128 _mm_sqrt_wip_ss(__m128 const x) +{ + __m128 recip = _mm_rsqrt_ss( x ); // "estimate" opcode + const static __m128 three = { 3, 3, 3, 3 }; // aligned consts for fast load + const static __m128 half = { 0.5,0.5,0.5,0.5 }; + __m128 halfrecip = _mm_mul_ss( half, recip ); + __m128 threeminus_xrr = _mm_sub_ss( three, _mm_mul_ss( x, _mm_mul_ss ( recip, recip ) ) ); + return _mm_mul_ss( halfrecip, threeminus_xrr ); +} diff --git a/glm/core/intrinsic_exponential.hpp b/glm/core/intrinsic_exponential.hpp new file mode 100644 index 00000000..a1dc450b --- /dev/null +++ b/glm/core/intrinsic_exponential.hpp @@ -0,0 +1,34 @@ +/* +inline __m128 _mm_rsqrt_nr_ss(__m128 const x) +{ + __m128 recip = _mm_rsqrt_ss( x ); // "estimate" opcode + const static __m128 three = { 3, 3, 3, 3 }; // aligned consts for fast load + const static __m128 half = { 0.5,0.5,0.5,0.5 }; + __m128 halfrecip = _mm_mul_ss( half, recip ); + __m128 threeminus_xrr = _mm_sub_ss( three, _mm_mul_ss( x, _mm_mul_ss ( recip, recip ) ) ); + return _mm_mul_ss( halfrecip, threeminus_xrr ); +} + +inline __m128 __mm_normalize_fast_ps( float * RESTRICT vOut, float * RESTRICT vIn ) +{ + __m128 x = _mm_load_ss(&vIn[0]); + __m128 y = _mm_load_ss(&vIn[1]); + __m128 z = _mm_load_ss(&vIn[2]); + + const __m128 l = // compute x*x + y*y + z*z + _mm_add_ss( + _mm_add_ss( _mm_mul_ss(x,x), + _mm_mul_ss(y,y) + ), + _mm_mul_ss( z, z ) + ); + + + const __m128 rsqt = _mm_rsqrt_nr_ss( l ); + _mm_store_ss( &vOut[0] , _mm_mul_ss( rsqt, x ) ); + _mm_store_ss( &vOut[1] , _mm_mul_ss( rsqt, y ) ); + _mm_store_ss( &vOut[2] , _mm_mul_ss( rsqt, z ) ); + + return _mm_mul_ss( l , rsqt ); +} +*/ \ No newline at end of file diff --git a/glm/core/intrinsic_exponential.inl b/glm/core/intrinsic_exponential.inl new file mode 100644 index 00000000..e69de29b diff --git a/glm/core/intrinsic_geometric.hpp b/glm/core/intrinsic_geometric.hpp new file mode 100644 index 00000000..c0e53438 --- /dev/null +++ b/glm/core/intrinsic_geometric.hpp @@ -0,0 +1,45 @@ +/////////////////////////////////////////////////////////////////////////////////////////////////// +// OpenGL Mathematics Copyright (c) 2005 - 2010 G-Truc Creation (www.g-truc.net) +/////////////////////////////////////////////////////////////////////////////////////////////////// +// Created : 2009-05-08 +// Updated : 2009-05-08 +// Licence : This source is under MIT License +// File : glm/core/intrinsic_geometric.hpp +/////////////////////////////////////////////////////////////////////////////////////////////////// + +#ifndef glm_core_intrinsic_geometric +#define glm_core_intrinsic_geometric + +#include "intrinsic_common.hpp" + +//length +__m128 _mm_len_ps(__m128 x); + +//distance +__m128 _mm_dst_ps(__m128 p0, __m128 p1); + +//dot +__m128 _mm_dot_ps(__m128 v1, __m128 v2); + +// SSE1 +__m128 _mm_dot_ss(__m128 v1, __m128 v2); + +//cross +__m128 _mm_xpd_ps(__m128 v1, __m128 v2); + +//normalize +__m128 _mm_nrm_ps(__m128 v); + +//faceforward +__m128 _mm_ffd_ps(__m128 N, __m128 I, __m128 Nref); + +//reflect +__m128 _mm_rfe_ps(__m128 I, __m128 N); + +//refract +__m128 _mm_rfa_ps(__m128 I, __m128 N, __m128 eta); + + +#include "intrinsic_geometric.inl" + +#endif//glm_core_intrinsic_geometric diff --git a/glm/core/intrinsic_geometric.inl b/glm/core/intrinsic_geometric.inl new file mode 100644 index 00000000..a85f98d0 --- /dev/null +++ b/glm/core/intrinsic_geometric.inl @@ -0,0 +1,117 @@ 
+/////////////////////////////////////////////////////////////////////////////////////////////////// +// OpenGL Mathematics Copyright (c) 2005 - 2010 G-Truc Creation (www.g-truc.net) +/////////////////////////////////////////////////////////////////////////////////////////////////// +// Created : 2009-05-08 +// Updated : 2009-05-08 +// Licence : This source is under MIT License +// File : glm/core/intrinsic_geometric.inl +/////////////////////////////////////////////////////////////////////////////////////////////////// + +//length +inline __m128 _mm_len_ps(__m128 x) +{ + __m128 dot0 = _mm_dot_ps(x, x); + __m128 sqt0 = _mm_sqrt_ps(dot0); + return sqt0; +} + +//distance +inline __m128 _mm_dst_ps(__m128 p0, __m128 p1) +{ + __m128 sub0 = _mm_sub_ps(p0, p1); + __m128 len0 = _mm_len_ps(sub0); + return len0; +} + +//dot +inline __m128 _mm_dot_ps(__m128 v1, __m128 v2) +{ + __m128 mul0 = _mm_mul_ps(v1, v2); + __m128 swp0 = _mm_shuffle_ps(mul0, mul0, _MM_SHUFFLE(2, 3, 0, 1)); + __m128 add0 = _mm_add_ps(mul0, swp0); + __m128 swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3)); + __m128 add1 = _mm_add_ps(add0, swp1); + return add1; +} + +// SSE1 +inline __m128 _mm_dot_ss(__m128 v1, __m128 v2) +{ + __m128 mul0 = _mm_mul_ps(v1, v2); + __m128 mov0 = _mm_movehl_ps(mul0, mul0); + __m128 add0 = _mm_add_ps(mov0, mul0); + __m128 swp1 = _mm_shuffle_ps(add0, add0, 1); + __m128 add1 = _mm_add_ss(add0, swp1); + return add1; +} + +//cross +inline __m128 _mm_xpd_ps(__m128 v1, __m128 v2) +{ + __m128 swp0 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 0, 2, 1)); + __m128 swp1 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 1, 0, 2)); + __m128 swp2 = _mm_shuffle_ps(v2, v2, _MM_SHUFFLE(3, 0, 2, 1)); + __m128 swp3 = _mm_shuffle_ps(v2, v2, _MM_SHUFFLE(3, 1, 0, 2)); + __m128 mul0 = _mm_mul_ps(swp0, swp3); + __m128 mul1 = _mm_mul_ps(swp1, swp2); + __m128 sub0 = _mm_sub_ps(mul0, mul1); + return sub0; +} + +//normalize +inline __m128 _mm_nrm_ps(__m128 v) +{ + __m128 dot0 = _mm_dot_ps(v, v); + __m128 isr0 = _mm_rsqrt_ps(dot0); + __m128 mul0 = _mm_mul_ps(v, isr0); + return mul0; +} + +//faceforward +inline __m128 _mm_ffd_ps(__m128 N, __m128 I, __m128 Nref) +{ + //__m128 dot0 = _mm_dot_ps(v, v); + //__m128 neg0 = _mm_neg_ps(N); + //__m128 sgn0 = _mm_sgn_ps(dot0); + //__m128 mix0 = _mm_mix_ps(N, neg0, sgn0); + //return mix0; + + __m128 dot0 = _mm_dot_ps(Nref, I); + __m128 sgn0 = _mm_sgn_ps(dot0); + __m128 mul0 = _mm_mul_ps(sgn0, glm::detail::minus_one); + __m128 mul1 = _mm_mul_ps(N, mul0); + return mul1; +} + +//reflect +inline __m128 _mm_rfe_ps(__m128 I, __m128 N) +{ + __m128 dot0 = _mm_dot_ps(N, I); + __m128 mul0 = _mm_mul_ps(N, I); + __m128 mul1 = _mm_mul_ps(mul0, glm::detail::two); + __m128 sub0 = _mm_sub_ps(I, mul1); + return sub0; +} + +//refract +inline __m128 _mm_rfa_ps(__m128 I, __m128 N, __m128 eta) +{ + __m128 dot0 = _mm_dot_ps(N, I); + __m128 mul0 = _mm_mul_ps(eta, eta); + __m128 mul1 = _mm_mul_ps(dot0, dot0); + __m128 sub0 = _mm_sub_ps(glm::detail::one, mul0); + __m128 sub1 = _mm_sub_ps(glm::detail::one, mul1); + __m128 mul2 = _mm_mul_ps(sub0, sub1); + + if(_mm_movemask_ps(_mm_cmplt_ss(mul2, glm::detail::zero)) == 0) + return glm::detail::zero; + + __m128 sqt0 = _mm_sqrt_ps(mul2); + __m128 mul3 = _mm_mul_ps(eta, dot0); + __m128 add0 = _mm_add_ps(mul3, sqt0); + __m128 mul4 = _mm_mul_ps(add0, N); + __m128 mul5 = _mm_mul_ps(eta, I); + __m128 sub2 = _mm_sub_ps(mul5, mul4); + + return sub2; +} diff --git a/glm/core/intrinsic_matrix.hpp b/glm/core/intrinsic_matrix.hpp new file mode 100644 index 00000000..b5bf575f --- /dev/null 
+++ b/glm/core/intrinsic_matrix.hpp @@ -0,0 +1,36 @@ +/////////////////////////////////////////////////////////////////////////////////////////////////// +// OpenGL Mathematics Copyright (c) 2005 - 2010 G-Truc Creation (www.g-truc.net) +/////////////////////////////////////////////////////////////////////////////////////////////////// +// Created : 2009-06-05 +// Updated : 2009-06-05 +// Licence : This source is under MIT License +// File : glm/core/intrinsic_common.hpp +/////////////////////////////////////////////////////////////////////////////////////////////////// + +#ifndef GLM_DETAIL_INTRINSIC_MATRIX_INCLUDED +#define GLM_DETAIL_INTRINSIC_MATRIX_INCLUDED + +#include "../glm.hpp" + +#include +#include + +void _mm_add_ps(__m128 in1[4], __m128 in2[4], __m128 out[4]); + +void _mm_sub_ps(__m128 in1[4], __m128 in2[4], __m128 out[4]); + +__m128 _mm_mul_ps(__m128 m[4], __m128 v); + +__m128 _mm_mul_ps(__m128 v, __m128 m[4]); + +void _mm_mul_ps(__m128 const in1[4], __m128 const in2[4], __m128 out[4]); + +void _mm_transpose_ps(__m128 const in[4], __m128 out[4]); + +void _mm_inverse_ps(__m128 const in[4], __m128 out[4]); + +void _mm_rotate_ps(__m128 const in[4], float Angle, float const v[3], __m128 out[4]); + +#include "intrinsic_matrix.inl" + +#endif//GLM_DETAIL_INTRINSIC_MATRIX_INCLUDED diff --git a/glm/core/intrinsic_matrix.inl b/glm/core/intrinsic_matrix.inl new file mode 100644 index 00000000..367b7929 --- /dev/null +++ b/glm/core/intrinsic_matrix.inl @@ -0,0 +1,704 @@ +/////////////////////////////////////////////////////////////////////////////////////////////////// +// OpenGL Mathematics Copyright (c) 2005 - 2010 G-Truc Creation (www.g-truc.net) +/////////////////////////////////////////////////////////////////////////////////////////////////// +// Created : 2009-06-05 +// Updated : 2009-06-05 +// Licence : This source is under MIT License +// File : glm/core/intrinsic_common.inl +/////////////////////////////////////////////////////////////////////////////////////////////////// + +static const __m128 one = _mm_set_ps1(1.0f); +static const __m128 pi = _mm_set_ps1(3.141592653589793238462643383279f); +static const __m128 _m128_rad_ps = _mm_set_ps1(3.141592653589793238462643383279f / 180.f); +static const __m128 _m128_deg_ps = _mm_set_ps1(180.f / 3.141592653589793238462643383279f); + +inline void _mm_add_ps(__m128 in1[4], __m128 in2[4], __m128 out[4]) +{ + { + out[0] = _mm_add_ps(in1[0], in2[0]); + out[1] = _mm_add_ps(in1[1], in2[1]); + out[2] = _mm_add_ps(in1[2], in2[2]); + out[3] = _mm_add_ps(in1[3], in2[3]); + } +} + +inline void _mm_sub_ps(__m128 in1[4], __m128 in2[4], __m128 out[4]) +{ + { + out[0] = _mm_sub_ps(in1[0], in2[0]); + out[1] = _mm_sub_ps(in1[1], in2[1]); + out[2] = _mm_sub_ps(in1[2], in2[2]); + out[3] = _mm_sub_ps(in1[3], in2[3]); + } +} + +inline __m128 _mm_mul_ps(__m128 m[4], __m128 v) +{ + __m128 v0 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 v1 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 1, 1, 1)); + __m128 v2 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 v3 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 m0 = _mm_mul_ps(m[0], v0); + __m128 m1 = _mm_mul_ps(m[1], v1); + __m128 m2 = _mm_mul_ps(m[2], v2); + __m128 m3 = _mm_mul_ps(m[3], v3); + + __m128 a0 = _mm_add_ps(m0, m1); + __m128 a1 = _mm_add_ps(m2, m3); + __m128 a2 = _mm_add_ps(a0, a1); + + return a2; +} + +inline __m128 _mm_mul_ps(__m128 v, __m128 m[4]) +{ + __m128 i0 = m[0]; + __m128 i1 = m[1]; + __m128 i2 = m[2]; + __m128 i3 = m[3]; + + __m128 m0 = _mm_mul_ps(v, i0); + __m128 m1 = 
_mm_mul_ps(v, i1); + __m128 m2 = _mm_mul_ps(v, i2); + __m128 m3 = _mm_mul_ps(v, i3); + + __m128 u0 = _mm_unpacklo_ps(m0, m1); + __m128 u1 = _mm_unpackhi_ps(m0, m1); + __m128 a0 = _mm_add_ps(u0, u1); + + __m128 u2 = _mm_unpacklo_ps(m2, m3); + __m128 u3 = _mm_unpackhi_ps(m2, m3); + __m128 a1 = _mm_add_ps(u2, u3); + + __m128 f0 = _mm_movelh_ps(a0, a1); + __m128 f1 = _mm_movehl_ps(a1, a0); + __m128 f2 = _mm_add_ps(f0, f1); + + return f2; +} + +inline void _mm_mul_ps(__m128 const in1[4], __m128 const in2[4], __m128 out[4]) +{ + { + __m128 e0 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 e1 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 e2 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 e3 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 m0 = _mm_mul_ps(in1[0], e0); + __m128 m1 = _mm_mul_ps(in1[1], e1); + __m128 m2 = _mm_mul_ps(in1[2], e2); + __m128 m3 = _mm_mul_ps(in1[3], e3); + + __m128 a0 = _mm_add_ps(m0, m1); + __m128 a1 = _mm_add_ps(m2, m3); + __m128 a2 = _mm_add_ps(a0, a1); + + out[0] = a2; + } + + { + __m128 e0 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 e1 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 e2 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 e3 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 m0 = _mm_mul_ps(in1[0], e0); + __m128 m1 = _mm_mul_ps(in1[1], e1); + __m128 m2 = _mm_mul_ps(in1[2], e2); + __m128 m3 = _mm_mul_ps(in1[3], e3); + + __m128 a0 = _mm_add_ps(m0, m1); + __m128 a1 = _mm_add_ps(m2, m3); + __m128 a2 = _mm_add_ps(a0, a1); + + out[1] = a2; + } + + { + __m128 e0 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 e1 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 e2 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 e3 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 m0 = _mm_mul_ps(in1[0], e0); + __m128 m1 = _mm_mul_ps(in1[1], e1); + __m128 m2 = _mm_mul_ps(in1[2], e2); + __m128 m3 = _mm_mul_ps(in1[3], e3); + + __m128 a0 = _mm_add_ps(m0, m1); + __m128 a1 = _mm_add_ps(m2, m3); + __m128 a2 = _mm_add_ps(a0, a1); + + out[2] = a2; + } + + { + //(__m128&)_mm_shuffle_epi32(__m128i&)in2[0], _MM_SHUFFLE(3, 3, 3, 3)) + __m128 e0 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 e1 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 e2 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 e3 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 m0 = _mm_mul_ps(in1[0], e0); + __m128 m1 = _mm_mul_ps(in1[1], e1); + __m128 m2 = _mm_mul_ps(in1[2], e2); + __m128 m3 = _mm_mul_ps(in1[3], e3); + + __m128 a0 = _mm_add_ps(m0, m1); + __m128 a1 = _mm_add_ps(m2, m3); + __m128 a2 = _mm_add_ps(a0, a1); + + out[3] = a2; + } +} + +inline void _mm_transpose_ps(__m128 const in[4], __m128 out[4]) +{ + __m128 tmp0 = _mm_shuffle_ps(in[0], in[1], 0x44); + __m128 tmp2 = _mm_shuffle_ps(in[0], in[1], 0xEE); + __m128 tmp1 = _mm_shuffle_ps(in[2], in[3], 0x44); + __m128 tmp3 = _mm_shuffle_ps(in[2], in[3], 0xEE); + + out[0] = _mm_shuffle_ps(tmp0, tmp1, 0x88); + out[1] = _mm_shuffle_ps(tmp0, tmp1, 0xDD); + out[2] = _mm_shuffle_ps(tmp2, tmp3, 0x88); + out[3] = _mm_shuffle_ps(tmp2, tmp3, 0xDD); +} + +inline void _mm_inverse_ps(__m128 const in[4], __m128 out[4]) +{ + __m128 Fac0; + { + // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * 
m[2][3]; + // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; + // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac0 = _mm_sub_ps(Mul00, Mul01); + + bool stop = true; + } + + __m128 Fac1; + { + // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; + // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac1 = _mm_sub_ps(Mul00, Mul01); + + bool stop = true; + } + + + __m128 Fac2; + { + // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; + // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac2 = _mm_sub_ps(Mul00, Mul01); + + bool stop = true; + } + + __m128 Fac3; + { + // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; + // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac3 = _mm_sub_ps(Mul00, Mul01); + + bool stop = true; + } + + __m128 Fac4; + { + // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * 
m[1][2]; + // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac4 = _mm_sub_ps(Mul00, Mul01); + + bool stop = true; + } + + __m128 Fac5; + { + // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; + // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac5 = _mm_sub_ps(Mul00, Mul01); + + bool stop = true; + } + + __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f); + __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f); + + // m[1][0] + // m[0][0] + // m[0][0] + // m[0][0] + __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][1] + // m[0][1] + // m[0][1] + // m[0][1] + __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][2] + // m[0][2] + // m[0][2] + // m[0][2] + __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][3] + // m[0][3] + // m[0][3] + // m[0][3] + __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0)); + + // col0 + // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]), + // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]), + // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]), + // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]), + __m128 Mul00 = _mm_mul_ps(Vec1, Fac0); + __m128 Mul01 = _mm_mul_ps(Vec2, Fac1); + __m128 Mul02 = _mm_mul_ps(Vec3, Fac2); + __m128 Sub00 = _mm_sub_ps(Mul00, Mul01); + __m128 Add00 = _mm_add_ps(Sub00, Mul02); + __m128 Inv0 = _mm_mul_ps(SignB, Add00); + + // col1 + // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]), + // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]), + // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]), + // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]), + __m128 Mul03 = _mm_mul_ps(Vec0, Fac0); + __m128 Mul04 = _mm_mul_ps(Vec2, Fac3); + __m128 Mul05 = _mm_mul_ps(Vec3, Fac4); + __m128 Sub01 = _mm_sub_ps(Mul03, Mul04); + __m128 Add01 = _mm_add_ps(Sub01, Mul05); + __m128 Inv1 = _mm_mul_ps(SignA, Add01); + + // col2 + // + (Vec0[0] * Fac1[0] - Vec1[0] * 
Fac3[0] + Vec3[0] * Fac5[0]), + // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]), + // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]), + // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]), + __m128 Mul06 = _mm_mul_ps(Vec0, Fac1); + __m128 Mul07 = _mm_mul_ps(Vec1, Fac3); + __m128 Mul08 = _mm_mul_ps(Vec3, Fac5); + __m128 Sub02 = _mm_sub_ps(Mul06, Mul07); + __m128 Add02 = _mm_add_ps(Sub02, Mul08); + __m128 Inv2 = _mm_mul_ps(SignB, Add02); + + // col3 + // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]), + // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]), + // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]), + // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3])); + __m128 Mul09 = _mm_mul_ps(Vec0, Fac2); + __m128 Mul10 = _mm_mul_ps(Vec1, Fac4); + __m128 Mul11 = _mm_mul_ps(Vec2, Fac5); + __m128 Sub03 = _mm_sub_ps(Mul09, Mul10); + __m128 Add03 = _mm_add_ps(Sub03, Mul11); + __m128 Inv3 = _mm_mul_ps(SignA, Add03); + + __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0)); + + // valType Determinant = m[0][0] * Inverse[0][0] + // + m[0][1] * Inverse[1][0] + // + m[0][2] * Inverse[2][0] + // + m[0][3] * Inverse[3][0]; + __m128 Det0 = _mm_dot_ps(in[0], Row2); + __m128 Rcp0 = _mm_div_ps(one, Det0); + //__m128 Rcp0 = _mm_rcp_ps(Det0); + + // Inverse /= Determinant; + out[0] = _mm_mul_ps(Inv0, Rcp0); + out[1] = _mm_mul_ps(Inv1, Rcp0); + out[2] = _mm_mul_ps(Inv2, Rcp0); + out[3] = _mm_mul_ps(Inv3, Rcp0); +} + +inline void _mm_inverse_fast_ps(__m128 const in[4], __m128 out[4]) +{ + __m128 Fac0; + { + // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; + // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac0 = _mm_sub_ps(Mul00, Mul01); + + bool stop = true; + } + + __m128 Fac1; + { + // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; + // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac1 = _mm_sub_ps(Mul00, Mul01); + + bool stop = true; + } + + + __m128 Fac2; + { + // valType SubFactor02 = m[2][1] * m[3][2] - 
m[3][1] * m[2][2]; + // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; + // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac2 = _mm_sub_ps(Mul00, Mul01); + + bool stop = true; + } + + __m128 Fac3; + { + // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; + // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac3 = _mm_sub_ps(Mul00, Mul01); + + bool stop = true; + } + + __m128 Fac4; + { + // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; + // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac4 = _mm_sub_ps(Mul00, Mul01); + + bool stop = true; + } + + __m128 Fac5; + { + // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; + // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac5 = _mm_sub_ps(Mul00, Mul01); + + bool stop = true; + } + + __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f); + __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f); + + // m[1][0] + // m[0][0] + // 
m[0][0] + // m[0][0] + __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][1] + // m[0][1] + // m[0][1] + // m[0][1] + __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][2] + // m[0][2] + // m[0][2] + // m[0][2] + __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][3] + // m[0][3] + // m[0][3] + // m[0][3] + __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0)); + + // col0 + // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]), + // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]), + // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]), + // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]), + __m128 Mul00 = _mm_mul_ps(Vec1, Fac0); + __m128 Mul01 = _mm_mul_ps(Vec2, Fac1); + __m128 Mul02 = _mm_mul_ps(Vec3, Fac2); + __m128 Sub00 = _mm_sub_ps(Mul00, Mul01); + __m128 Add00 = _mm_add_ps(Sub00, Mul02); + __m128 Inv0 = _mm_mul_ps(SignB, Add00); + + // col1 + // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]), + // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]), + // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]), + // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]), + __m128 Mul03 = _mm_mul_ps(Vec0, Fac0); + __m128 Mul04 = _mm_mul_ps(Vec2, Fac3); + __m128 Mul05 = _mm_mul_ps(Vec3, Fac4); + __m128 Sub01 = _mm_sub_ps(Mul03, Mul04); + __m128 Add01 = _mm_add_ps(Sub01, Mul05); + __m128 Inv1 = _mm_mul_ps(SignA, Add01); + + // col2 + // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]), + // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]), + // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]), + // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]), + __m128 Mul06 = _mm_mul_ps(Vec0, Fac1); + __m128 Mul07 = _mm_mul_ps(Vec1, Fac3); + __m128 Mul08 = _mm_mul_ps(Vec3, Fac5); + __m128 Sub02 = _mm_sub_ps(Mul06, Mul07); + __m128 Add02 = _mm_add_ps(Sub02, Mul08); + __m128 Inv2 = _mm_mul_ps(SignB, Add02); + + // col3 + // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]), + // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]), + // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]), + // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3])); + __m128 Mul09 = _mm_mul_ps(Vec0, Fac2); + __m128 Mul10 = _mm_mul_ps(Vec1, Fac4); + __m128 Mul11 = _mm_mul_ps(Vec2, Fac5); + __m128 Sub03 = _mm_sub_ps(Mul09, Mul10); + __m128 Add03 = _mm_add_ps(Sub03, Mul11); + __m128 Inv3 = _mm_mul_ps(SignA, Add03); + + __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0)); + + // valType Determinant = m[0][0] * Inverse[0][0] + // + m[0][1] * Inverse[1][0] + // + m[0][2] * Inverse[2][0] + // + m[0][3] * Inverse[3][0]; + __m128 Det0 = _mm_dot_ps(in[0], Row2); + __m128 Rcp0 = _mm_rcp_ps(Det0); + //__m128 Rcp0 = _mm_div_ps(one, Det0); + // Inverse /= Determinant; + out[0] = _mm_mul_ps(Inv0, Rcp0); + out[1] = _mm_mul_ps(Inv1, Rcp0); + out[2] = _mm_mul_ps(Inv2, Rcp0); + out[3] = _mm_mul_ps(Inv3, Rcp0); +} + + +void 
_mm_rotate_ps(__m128 const in[4], float Angle, float const v[3], __m128 out[4]) +{ + float a = glm::radians(Angle); + float c = cos(a); + float s = sin(a); + + glm::vec4 AxisA(v[0], v[1], v[2], float(0)); + __m128 AxisB = _mm_set_ps(AxisA.w, AxisA.z, AxisA.y, AxisA.x); + __m128 AxisC = _mm_nrm_ps(AxisB); + + __m128 Cos0 = _mm_set_ss(c); + __m128 CosA = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Sin0 = _mm_set_ss(s); + __m128 SinA = _mm_shuffle_ps(Sin0, Sin0, _MM_SHUFFLE(0, 0, 0, 0)); + + // detail::tvec3 temp = (valType(1) - c) * axis; + __m128 Temp0 = _mm_sub_ps(one, CosA); + __m128 Temp1 = _mm_mul_ps(Temp0, AxisC); + + //Rotate[0][0] = c + temp[0] * axis[0]; + //Rotate[0][1] = 0 + temp[0] * axis[1] + s * axis[2]; + //Rotate[0][2] = 0 + temp[0] * axis[2] - s * axis[1]; + __m128 Axis0 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 TmpA0 = _mm_mul_ps(Axis0, AxisC); + __m128 CosA0 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 1, 0)); + __m128 TmpA1 = _mm_add_ps(CosA0, TmpA0); + __m128 SinA0 = SinA;//_mm_set_ps(0.0f, s, -s, 0.0f); + __m128 TmpA2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 1, 2, 3)); + __m128 TmpA3 = _mm_mul_ps(SinA0, TmpA2); + __m128 TmpA4 = _mm_add_ps(TmpA1, TmpA3); + + //Rotate[1][0] = 0 + temp[1] * axis[0] - s * axis[2]; + //Rotate[1][1] = c + temp[1] * axis[1]; + //Rotate[1][2] = 0 + temp[1] * axis[2] + s * axis[0]; + __m128 Axis1 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(1, 1, 1, 1)); + __m128 TmpB0 = _mm_mul_ps(Axis1, AxisC); + __m128 CosA1 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 0, 1)); + __m128 TmpB1 = _mm_add_ps(CosA1, TmpB0); + __m128 SinB0 = SinA;//_mm_set_ps(-s, 0.0f, s, 0.0f); + __m128 TmpB2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 0, 3, 2)); + __m128 TmpB3 = _mm_mul_ps(SinA0, TmpB2); + __m128 TmpB4 = _mm_add_ps(TmpB1, TmpB3); + + //Rotate[2][0] = 0 + temp[2] * axis[0] + s * axis[1]; + //Rotate[2][1] = 0 + temp[2] * axis[1] - s * axis[0]; + //Rotate[2][2] = c + temp[2] * axis[2]; + __m128 Axis2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 TmpC0 = _mm_mul_ps(Axis2, AxisC); + __m128 CosA2 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 0, 1, 1)); + __m128 TmpC1 = _mm_add_ps(CosA2, TmpC0); + __m128 SinC0 = SinA;//_mm_set_ps(s, -s, 0.0f, 0.0f); + __m128 TmpC2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 3, 0, 1)); + __m128 TmpC3 = _mm_mul_ps(SinA0, TmpC2); + __m128 TmpC4 = _mm_add_ps(TmpC1, TmpC3); + + __m128 Result[4]; + Result[0] = TmpA4; + Result[1] = TmpB4; + Result[2] = TmpC4; + Result[2] = _mm_set_ps(1, 0, 0, 0); + + //detail::tmat4x4 Result(detail::tmat4x4::null); + //Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2]; + //Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2]; + //Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2]; + //Result[3] = m[3]; + //return Result; + _mm_mul_ps(in, Result, out); +} diff --git a/glm/core/intrinsic_trigonometric.hpp b/glm/core/intrinsic_trigonometric.hpp new file mode 100644 index 00000000..e69de29b diff --git a/glm/core/intrinsic_trigonometric.inl b/glm/core/intrinsic_trigonometric.inl new file mode 100644 index 00000000..e69de29b diff --git a/glm/core/intrinsic_vector_relational.hpp b/glm/core/intrinsic_vector_relational.hpp new file mode 100644 index 00000000..11b3d181 --- /dev/null +++ b/glm/core/intrinsic_vector_relational.hpp @@ -0,0 +1,18 @@ +/////////////////////////////////////////////////////////////////////////////////////////////////// +// OpenGL Mathematics Copyright 
(c) 2005 - 2010 G-Truc Creation (www.g-truc.net) +/////////////////////////////////////////////////////////////////////////////////////////////////// +// Created : 2009-06-09 +// Updated : 2009-06-09 +// Licence : This source is under MIT License +// File : glm/core/intrinsic_vector_relational.hpp +/////////////////////////////////////////////////////////////////////////////////////////////////// + +#ifndef GLM_DETAIL_INTRINSIC_VECTOR_RELATIONAL_INCLUDED +#define GLM_DETAIL_INTRINSIC_VECTOR_RELATIONAL_INCLUDED + +#include +#include + +#include "intrinsic_vector_relational.inl" + +#endif//GLM_DETAIL_INTRINSIC_VECTOR_RELATIONAL_INCLUDED diff --git a/glm/core/intrinsic_vector_relational.inl b/glm/core/intrinsic_vector_relational.inl new file mode 100644 index 00000000..28332e8d --- /dev/null +++ b/glm/core/intrinsic_vector_relational.inl @@ -0,0 +1,347 @@ +/////////////////////////////////////////////////////////////////////////////////////////////////// +// OpenGL Mathematics Copyright (c) 2005 - 2010 G-Truc Creation (www.g-truc.net) +/////////////////////////////////////////////////////////////////////////////////////////////////// +// Created : 2009-06-09 +// Updated : 2009-06-09 +// Licence : This source is under MIT License +// File : glm/core/intrinsic_vector_relational.inl +/////////////////////////////////////////////////////////////////////////////////////////////////// +// +//// lessThan +//template +//inline typename detail::tvec2::bool_type lessThan +//( +// detail::tvec2 const & x, +// detail::tvec2 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint); +// +// return typename detail::tvec2::bool_type(x.x < y.x, x.y < y.y); +//} +// +//template +//inline typename detail::tvec3::bool_type lessThan +//( +// detail::tvec3 const & x, +// detail::tvec3 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint); +// +// return typename detail::tvec3::bool_type(x.x < y.x, x.y < y.y, x.z < y.z); +//} +// +//template +//inline typename detail::tvec4::bool_type lessThan +//( +// detail::tvec4 const & x, +// detail::tvec4 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint); +// +// return typename detail::tvec4::bool_type(x.x < y.x, x.y < y.y, x.z < y.z, x.w < y.w); +//} +// +//// lessThanEqual +//template +//inline typename detail::tvec2::bool_type lessThanEqual +//( +// detail::tvec2 const & x, +// detail::tvec2 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint); +// +// return typename detail::tvec2::bool_type(x.x <= y.x, x.y <= y.y); +//} +// +//template +//inline typename detail::tvec3::bool_type lessThanEqual +//( +// detail::tvec3 const & x, +// detail::tvec3 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint); +// +// return typename detail::tvec3::bool_type(x.x <= y.x, x.y <= y.y, x.z <= y.z); +//} +// +//template +//inline typename detail::tvec4::bool_type lessThanEqual +//( +// detail::tvec4 const & x, +// detail::tvec4 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint); +// +// return typename detail::tvec4::bool_type(x.x <= y.x, x.y <= y.y, x.z <= y.z, x.w <= y.w); +//} +// +//// greaterThan +//template +//inline typename 
detail::tvec2::bool_type greaterThan +//( +// detail::tvec2 const & x, +// detail::tvec2 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint); +// +// return typename detail::tvec2::bool_type(x.x > y.x, x.y > y.y); +//} +// +//template +//inline typename detail::tvec3::bool_type greaterThan +//( +// detail::tvec3 const & x, +// detail::tvec3 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint); +// +// return typename detail::tvec3::bool_type(x.x > y.x, x.y > y.y, x.z > y.z); +//} +// +//template +//inline typename detail::tvec4::bool_type greaterThan +//( +// detail::tvec4 const & x, +// detail::tvec4 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint); +// +// return typename detail::tvec4::bool_type(x.x > y.x, x.y > y.y, x.z > y.z, x.w > y.w); +//} +// +//// greaterThanEqual +//template +//inline typename detail::tvec2::bool_type greaterThanEqual +//( +// detail::tvec2 const & x, +// detail::tvec2 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint); +// +// return typename detail::tvec2::bool_type(x.x >= y.x, x.y >= y.y); +//} +// +//template +//inline typename detail::tvec3::bool_type greaterThanEqual +//( +// detail::tvec3 const & x, +// detail::tvec3 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint); +// +// return typename detail::tvec3::bool_type(x.x >= y.x, x.y >= y.y, x.z >= y.z); +//} +// +//template +//inline typename detail::tvec4::bool_type greaterThanEqual +//( +// detail::tvec4 const & x, +// detail::tvec4 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint); +// +// return typename detail::tvec4::bool_type(x.x >= y.x, x.y >= y.y, x.z >= y.z, x.w >= y.w); +//} +// +//// equal +//template +//inline typename detail::tvec2::bool_type equal +//( +// detail::tvec2 const & x, +// detail::tvec2 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint || +// detail::type::is_bool); +// +// return typename detail::tvec2::bool_type(x.x == y.x, x.y == y.y); +//} +// +//template +//inline typename detail::tvec3::bool_type equal +//( +// detail::tvec3 const & x, +// detail::tvec3 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint || +// detail::type::is_bool); +// +// return typename detail::tvec3::bool_type(x.x == y.x, x.y == y.y, x.z == y.z); +//} +// +//template +//inline typename detail::tvec4::bool_type equal +//( +// detail::tvec4 const & x, +// detail::tvec4 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint || +// detail::type::is_bool); +// +// return typename detail::tvec4::bool_type(x.x == y.x, x.y == y.y, x.z == y.z, x.w == y.w); +//} +// +//// notEqual +//template +//inline typename detail::tvec2::bool_type notEqual +//( +// detail::tvec2 const & x, +// detail::tvec2 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint || +// detail::type::is_bool); +// +// return typename detail::tvec2::bool_type(x.x != y.x, x.y != y.y); +//} +// 
+//template +//inline typename detail::tvec3::bool_type notEqual +//( +// detail::tvec3 const & x, +// detail::tvec3 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint || +// detail::type::is_bool); +// +// return typename detail::tvec3::bool_type(x.x != y.x, x.y != y.y, x.z != y.z); +//} +// +//template +//inline typename detail::tvec4::bool_type notEqual +//( +// detail::tvec4 const & x, +// detail::tvec4 const & y +//) +//{ +// GLM_STATIC_ASSERT( +// detail::type::is_float || +// detail::type::is_int || +// detail::type::is_uint || +// detail::type::is_bool); +// +// return typename detail::tvec4::bool_type(x.x != y.x, x.y != y.y, x.z != y.z, x.w != y.w); +//} +// +//// any +//inline bool any(detail::tvec2 const & x) +//{ +// return x.x || x.y; +//} +// +//inline bool any(detail::tvec3 const & x) +//{ +// return x.x || x.y || x.z; +//} +// +//inline bool any(detail::tvec4 const & x) +//{ +// return x.x || x.y || x.z || x.w; +//} +// +//// all +//inline bool all(const detail::tvec2& x) +//{ +// return x.x && x.y; +//} +// +//inline bool all(const detail::tvec3& x) +//{ +// return x.x && x.y && x.z; +//} +// +//inline bool all(const detail::tvec4& x) +//{ +// return x.x && x.y && x.z && x.w; +//} +// +//// not +//inline detail::tvec2::bool_type not_ +//( +// detail::tvec2 const & v +//) +//{ +// return detail::tvec2::bool_type(!v.x, !v.y); +//} +// +//inline detail::tvec3::bool_type not_ +//( +// detail::tvec3 const & v +//) +//{ +// return detail::tvec3::bool_type(!v.x, !v.y, !v.z); +//} +// +//inline detail::tvec4::bool_type not_ +//( +// detail::tvec4 const & v +//) +//{ +// return detail::tvec4::bool_type(!v.x, !v.y, !v.z, !v.w); +//} \ No newline at end of file
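
The one functional change in this patch is the extra genTypeT(...) cast in mix() in func_common.inl. A minimal standalone sketch of why the cast matters, assuming a simplified two-parameter template rather than GLM's actual declaration: when genTypeT and genTypeU differ (say int and float), returning the genTypeU expression directly triggers an implicit-conversion warning, and wrapping it in genTypeT() makes the narrowing explicit.

#include <iostream>

template <typename genTypeT, typename genTypeU>
genTypeT mix(genTypeT x, genTypeT y, genTypeU a)
{
    // Without the outer cast, returning a genTypeU expression from a function
    // declared to return genTypeT triggers a conversion warning (e.g. float -> int).
    // The cast makes the narrowing explicit, which is what the patch does.
    return genTypeT(genTypeU(x) + a * genTypeU(y - x));
}

int main()
{
    std::cout << mix(0.0f, 10.0f, 0.25f) << "\n"; // 2.5
    std::cout << mix(0, 10, 0.25f) << "\n";       // 2 (int result)
    return 0;
}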
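
In intrinsic_common.inl, _mm_abs_ps clears the IEEE-754 sign bit by ANDing with a mask built through the ieee754_QNAN union. A self-contained sketch of the same trick, building the 0x7FFFFFFF mask with SSE2 instead of the union; abs_ps is an illustrative name, not part of the patch.

#include <emmintrin.h>
#include <cstdio>

static inline __m128 abs_ps(__m128 x)
{
    // All bits set except the sign bit of each lane.
    const __m128 mask = _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF));
    return _mm_and_ps(mask, x);
}

int main()
{
    float out[4];
    _mm_storeu_ps(out, abs_ps(_mm_set_ps(-4.0f, 3.0f, -2.0f, 1.0f)));
    std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); // 1 2 3 4
    return 0;
}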
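
_mm_rnd_ps relies on the 2^23 magic-number trick: adding and then subtracting 8388608.0f pushes the fractional bits out through the FPU's round-to-nearest mode, and _mm_flr_ps corrects the result downward where it overshot. A sketch of both with the sign-copy step written out explicitly (in the patch that AND is still commented out, leaving and0 uninitialized); rnd_ps and flr_ps are illustrative names, and the trick assumes |x| < 2^23 and the default rounding mode.

#include <emmintrin.h>
#include <cstdio>

static inline __m128 rnd_ps(__m128 x)
{
    // OR the sign of x into the 2^23 constant so negatives round correctly.
    const __m128 sgn   = _mm_castsi128_ps(_mm_set1_epi32(0x80000000));
    const __m128 two23 = _mm_set1_ps(8388608.0f); // 2^23
    __m128 magic = _mm_or_ps(_mm_and_ps(sgn, x), two23);
    return _mm_sub_ps(_mm_add_ps(x, magic), magic);
}

static inline __m128 flr_ps(__m128 x)
{
    // floor = round to nearest, then subtract 1 where the rounded value overshot x.
    __m128 r = rnd_ps(x);
    __m128 too_big = _mm_cmplt_ps(x, r);
    return _mm_sub_ps(r, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
}

int main()
{
    float out[4];
    _mm_storeu_ps(out, flr_ps(_mm_set_ps(-1.5f, 2.7f, -0.3f, 1.2f)));
    std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); // 1 -1 2 -2
    return 0;
}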
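
For comparison with _mm_stp_ps and _mm_ssp_ps, a per-component sketch of GLSL step() and smoothstep(). Two details differ from the patch: _mm_stp_ps picks a single all-ones or all-zeros result from one _mm_movemask_ps test over the whole vector, and _mm_ssp_ps computes its div0 term with _mm_sub_ps where the GLSL definition divides (x - edge0) by (edge1 - edge0). The helper names below are illustrative only.

#include <xmmintrin.h>
#include <cstdio>

static inline __m128 step_ps(__m128 edge, __m128 x)
{
    // 1.0 where x >= edge, else 0.0, decided per lane.
    return _mm_and_ps(_mm_cmpge_ps(x, edge), _mm_set1_ps(1.0f));
}

static inline __m128 smoothstep_ps(__m128 edge0, __m128 edge1, __m128 x)
{
    // t = clamp((x - edge0) / (edge1 - edge0), 0, 1); result = t*t*(3 - 2*t)
    __m128 t = _mm_div_ps(_mm_sub_ps(x, edge0), _mm_sub_ps(edge1, edge0));
    t = _mm_min_ps(_mm_max_ps(t, _mm_setzero_ps()), _mm_set1_ps(1.0f));
    __m128 k = _mm_sub_ps(_mm_set1_ps(3.0f), _mm_mul_ps(_mm_set1_ps(2.0f), t));
    return _mm_mul_ps(_mm_mul_ps(t, t), k);
}

int main()
{
    float out[4];
    _mm_storeu_ps(out, step_ps(_mm_set1_ps(0.5f), _mm_set_ps(0.9f, 0.5f, 0.2f, 0.7f)));
    std::printf("step:       %g %g %g %g\n", out[0], out[1], out[2], out[3]); // 1 0 1 1
    _mm_storeu_ps(out, smoothstep_ps(_mm_set1_ps(0.0f), _mm_set1_ps(1.0f),
                                     _mm_set_ps(1.5f, 0.75f, 0.5f, 0.25f)));
    std::printf("smoothstep: %g %g %g %g\n", out[0], out[1], out[2], out[3]); // 0.15625 0.5 0.84375 1
    return 0;
}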
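
The _mm_sqrt_wip_ss snippet at the end of intrinsic_common.inl refines the rsqrtss estimate with one Newton-Raphson step, y1 = 0.5 * y0 * (3 - x * y0 * y0); despite the name, the result is 1/sqrt(x), not sqrt(x). A standalone sketch of that refinement (rsqrt_nr_ss is an illustrative name).

#include <xmmintrin.h>
#include <cstdio>
#include <cmath>

static inline __m128 rsqrt_nr_ss(__m128 x)
{
    __m128 y0    = _mm_rsqrt_ss(x);                 // ~12-bit estimate
    __m128 half  = _mm_set_ss(0.5f);
    __m128 three = _mm_set_ss(3.0f);
    __m128 xyy   = _mm_mul_ss(x, _mm_mul_ss(y0, y0));
    // y1 = 0.5 * y0 * (3 - x * y0 * y0)
    return _mm_mul_ss(_mm_mul_ss(half, y0), _mm_sub_ss(three, xyy));
}

int main()
{
    float r;
    _mm_store_ss(&r, rsqrt_nr_ss(_mm_set_ss(2.0f)));
    std::printf("%.7f vs %.7f\n", r, 1.0f / std::sqrt(2.0f));
    return 0;
}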
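
_mm_dot_ps does its horizontal reduction with SSE1 shuffles only: multiply, swap the lanes pairwise and add, then reverse and add again, which leaves the dot product broadcast into every lane. The sketch below reuses that reduction and then applies the GLSL formula reflect(I, N) = I - 2 * dot(N, I) * N (the patch's _mm_rfe_ps computes dot0 but never uses it); dot_ps and reflect_ps are illustrative names, and N is assumed normalized.

#include <xmmintrin.h>
#include <cstdio>

static inline __m128 dot_ps(__m128 a, __m128 b)
{
    __m128 mul0 = _mm_mul_ps(a, b);
    __m128 swp0 = _mm_shuffle_ps(mul0, mul0, _MM_SHUFFLE(2, 3, 0, 1)); // swap within pairs
    __m128 add0 = _mm_add_ps(mul0, swp0);
    __m128 swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3)); // reverse all four
    return _mm_add_ps(add0, swp1);                                     // dot in every lane
}

static inline __m128 reflect_ps(__m128 I, __m128 N)
{
    __m128 d2 = _mm_mul_ps(_mm_set1_ps(2.0f), dot_ps(N, I));
    return _mm_sub_ps(I, _mm_mul_ps(d2, N));
}

int main()
{
    float out[4];
    _mm_storeu_ps(out, dot_ps(_mm_set_ps(4, 3, 2, 1), _mm_set_ps(8, 7, 6, 5)));
    std::printf("dot:     %g\n", out[0]); // 1*5 + 2*6 + 3*7 + 4*8 = 70

    __m128 I = _mm_set_ps(0, 0, -1, 1); // incoming ray (1,-1,0)
    __m128 N = _mm_set_ps(0, 0, 1, 0);  // plane normal (0, 1,0)
    _mm_storeu_ps(out, reflect_ps(I, N));
    std::printf("reflect: %g %g %g\n", out[0], out[1], out[2]); // 1 1 0
    return 0;
}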
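
_mm_xpd_ps implements the usual shuffle form of the cross product: rotate each operand into (y,z,x) and (z,x,y) order, multiply crosswise and subtract, ignoring the w lane. A standalone sketch (xpd_ps is an illustrative name).

#include <xmmintrin.h>
#include <cstdio>

static inline __m128 xpd_ps(__m128 a, __m128 b)
{
    __m128 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
    __m128 a_zxy = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 1, 0, 2));
    __m128 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1));
    __m128 b_zxy = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 1, 0, 2));
    // cross = a.yzx * b.zxy - a.zxy * b.yzx
    return _mm_sub_ps(_mm_mul_ps(a_yzx, b_zxy), _mm_mul_ps(a_zxy, b_yzx));
}

int main()
{
    __m128 x = _mm_set_ps(0.0f, 0.0f, 0.0f, 1.0f); // (1,0,0)
    __m128 y = _mm_set_ps(0.0f, 0.0f, 1.0f, 0.0f); // (0,1,0)
    float out[4];
    _mm_storeu_ps(out, xpd_ps(x, y));
    std::printf("%g %g %g\n", out[0], out[1], out[2]); // (0,0,1)
    return 0;
}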
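
The _mm_mul_ps(__m128 m[4], __m128 v) overload in intrinsic_matrix.inl treats m as four column vectors: it broadcasts each component of v with a shuffle and accumulates component * column. A sketch under that column-major assumption (mat4_mul_vec4 is an illustrative name).

#include <xmmintrin.h>
#include <cstdio>

static inline __m128 mat4_mul_vec4(const __m128 m[4], __m128 v)
{
    __m128 v0 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 0, 0, 0));
    __m128 v1 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 1, 1, 1));
    __m128 v2 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(2, 2, 2, 2));
    __m128 v3 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 3));
    __m128 r  = _mm_add_ps(_mm_mul_ps(m[0], v0), _mm_mul_ps(m[1], v1));
    return _mm_add_ps(r, _mm_add_ps(_mm_mul_ps(m[2], v2), _mm_mul_ps(m[3], v3)));
}

int main()
{
    // Columns of a matrix translating by (1, 2, 3).
    __m128 m[4] = {
        _mm_set_ps(0, 0, 0, 1),
        _mm_set_ps(0, 0, 1, 0),
        _mm_set_ps(0, 1, 0, 0),
        _mm_set_ps(1, 3, 2, 1)
    };
    float out[4];
    _mm_storeu_ps(out, mat4_mul_vec4(m, _mm_set_ps(1, 0, 0, 0))); // point (0,0,0)
    std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); // 1 2 3 1
    return 0;
}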
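
The shuffle constants 0x44, 0xEE, 0x88 and 0xDD in _mm_transpose_ps are the classic 4x4 transpose pattern that _MM_TRANSPOSE4_PS is commonly implemented with: build 2x2 blocks from pairs of columns, then interleave them into rows. A standalone sketch (transpose_ps is an illustrative name).

#include <xmmintrin.h>
#include <cstdio>

static inline void transpose_ps(const __m128 in[4], __m128 out[4])
{
    __m128 t0 = _mm_shuffle_ps(in[0], in[1], 0x44); // x0 y0 x1 y1
    __m128 t2 = _mm_shuffle_ps(in[0], in[1], 0xEE); // z0 w0 z1 w1
    __m128 t1 = _mm_shuffle_ps(in[2], in[3], 0x44); // x2 y2 x3 y3
    __m128 t3 = _mm_shuffle_ps(in[2], in[3], 0xEE); // z2 w2 z3 w3

    out[0] = _mm_shuffle_ps(t0, t1, 0x88); // x0 x1 x2 x3
    out[1] = _mm_shuffle_ps(t0, t1, 0xDD); // y0 y1 y2 y3
    out[2] = _mm_shuffle_ps(t2, t3, 0x88); // z0 z1 z2 z3
    out[3] = _mm_shuffle_ps(t2, t3, 0xDD); // w0 w1 w2 w3
}

int main()
{
    __m128 in[4], out[4];
    for (int c = 0; c < 4; ++c) // column c holds 4c, 4c+1, 4c+2, 4c+3
        in[c] = _mm_set_ps(c * 4 + 3.f, c * 4 + 2.f, c * 4 + 1.f, c * 4 + 0.f);
    transpose_ps(in, out);
    float row[4];
    for (int c = 0; c < 4; ++c)
    {
        _mm_storeu_ps(row, out[c]);
        std::printf("%2g %2g %2g %2g\n", row[0], row[1], row[2], row[3]);
    }
    return 0; // prints 0 4 8 12 / 1 5 9 13 / 2 6 10 14 / 3 7 11 15
}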