diff --git a/glm/detail/func_matrix_simd.inl b/glm/detail/func_matrix_simd.inl
index e76eafff..ce1d827f 100644
--- a/glm/detail/func_matrix_simd.inl
+++ b/glm/detail/func_matrix_simd.inl
@@ -14,7 +14,7 @@ namespace detail
 	template<qualifier Q>
 	struct compute_matrixCompMult<4, 4, float, Q, true>
 	{
-		GLM_STATIC_ASSERT(detail::is_aligned<P>::value, "Specialization requires aligned");
+		GLM_STATIC_ASSERT(detail::is_aligned<Q>::value, "Specialization requires aligned");
 
 		GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& x, mat<4, 4, float, Q> const& y)
 		{
diff --git a/glm/gtc/quaternion_simd.inl b/glm/gtc/quaternion_simd.inl
index 4a8e4211..ae884b31 100644
--- a/glm/gtc/quaternion_simd.inl
+++ b/glm/gtc/quaternion_simd.inl
@@ -8,9 +8,9 @@ namespace detail
 {
 /*
 	template<qualifier Q>
-	struct compute_quat_mul<float, P, true>
+	struct compute_quat_mul<float, Q, true>
 	{
-		static tquat<float, P> call(tquat<float, P> const& q1, tquat<float, P> const& q2)
+		static tquat<float, Q> call(tquat<float, Q> const& q1, tquat<float, Q> const& q2)
 		{
 			// SSE2 STATS: 11 shuffle, 8 mul, 8 add
 			// SSE4 STATS: 3 shuffle, 4 mul, 4 dpps
@@ -51,7 +51,7 @@ namespace detail
 			//
 			//return _mm_shuffle_ps(xxyy, zzww, _MM_SHUFFLE(2, 0, 2, 0));
 
-			tquat<float, P> Result;
+			tquat<float, Q> Result;
 			_mm_store_ss(&Result.x, add4);
 			_mm_store_ss(&Result.y, add5);
 			_mm_store_ss(&Result.z, add6);
@@ -62,7 +62,7 @@
 */
 
 	template<qualifier Q>
-	struct compute_dot<tquat<float, P>, float, true>
+	struct compute_dot<tquat<float, Q>, float, true>
 	{
 		static GLM_FUNC_QUALIFIER float call(tquat<float, Q> const& x, tquat<float, Q> const& y)
 		{
@@ -71,11 +71,11 @@
 	};
 
 	template<qualifier Q>
-	struct compute_quat_add<float, P, true>
+	struct compute_quat_add<float, Q, true>
 	{
-		static tquat<float, P> call(tquat<float, P> const& q, tquat<float, P> const& p)
+		static tquat<float, Q> call(tquat<float, Q> const& q, tquat<float, Q> const& p)
 		{
-			tquat<float, P> Result;
+			tquat<float, Q> Result;
 			Result.data = _mm_add_ps(q.data, p.data);
 			return Result;
 		}
@@ -83,11 +83,11 @@
 
 #	if GLM_ARCH & GLM_ARCH_AVX_BIT
 	template<qualifier Q>
-	struct compute_quat_add<double, P, true>
+	struct compute_quat_add<double, Q, true>
 	{
-		static tquat<double, P> call(tquat<double, P> const& a, tquat<double, P> const& b)
+		static tquat<double, Q> call(tquat<double, Q> const& a, tquat<double, Q> const& b)
 		{
-			tquat<double, P> Result;
+			tquat<double, Q> Result;
 			Result.data = _mm256_add_pd(a.data, b.data);
 			return Result;
 		}
@@ -95,11 +95,11 @@
 #	endif
 
 	template<qualifier Q>
-	struct compute_quat_sub<float, P, true>
+	struct compute_quat_sub<float, Q, true>
 	{
-		static tquat<float, P> call(tquat<float, P> const& q, tquat<float, P> const& p)
+		static tquat<float, Q> call(tquat<float, Q> const& q, tquat<float, Q> const& p)
 		{
-			vec<4, float, P> Result;
+			vec<4, float, Q> Result;
 			Result.data = _mm_sub_ps(q.data, p.data);
 			return Result;
 		}
@@ -107,11 +107,11 @@
 
 #	if GLM_ARCH & GLM_ARCH_AVX_BIT
 	template<qualifier Q>
-	struct compute_quat_sub<double, P, true>
+	struct compute_quat_sub<double, Q, true>
 	{
-		static tquat<double, P> call(tquat<double, P> const& a, tquat<double, P> const& b)
+		static tquat<double, Q> call(tquat<double, Q> const& a, tquat<double, Q> const& b)
 		{
-			tquat<double, P> Result;
+			tquat<double, Q> Result;
 			Result.data = _mm256_sub_pd(a.data, b.data);
 			return Result;
 		}
@@ -119,11 +119,11 @@
 #	endif
 
 	template<qualifier Q>
-	struct compute_quat_mul_scalar<float, P, true>
+	struct compute_quat_mul_scalar<float, Q, true>
 	{
-		static tquat<float, P> call(tquat<float, P> const& q, float s)
+		static tquat<float, Q> call(tquat<float, Q> const& q, float s)
 		{
-			vec<4, float, P> Result;
+			vec<4, float, Q> Result;
 			Result.data = _mm_mul_ps(q.data, _mm_set_ps1(s));
 			return Result;
 		}
@@ -131,11 +131,11 @@
 
 #	if GLM_ARCH & GLM_ARCH_AVX_BIT
 	template<qualifier Q>
-	struct compute_quat_mul_scalar<double, P, true>
+	struct compute_quat_mul_scalar<double, Q, true>
 	{
-		static tquat<double, P> call(tquat<double, P> const& q, double s)
+		static tquat<double, Q> call(tquat<double, Q> const& q, double s)
 		{
-			tquat<double, P> Result;
+			tquat<double, Q> Result;
 			Result.data = _mm256_mul_pd(q.data, _mm_set_ps1(s));
 			return Result;
 		}
@@ -143,11 +143,11 @@
 #	endif
 
 	template<qualifier Q>
-	struct compute_quat_div_scalar<float, P, true>
+	struct compute_quat_div_scalar<float, Q, true>
 	{
-		static tquat<float, P> call(tquat<float, P> const& q, float s)
+		static tquat<float, Q> call(tquat<float, Q> const& q, float s)
 		{
-			vec<4, float, P> Result;
+			vec<4, float, Q> Result;
 			Result.data = _mm_div_ps(q.data, _mm_set_ps1(s));
 			return Result;
 		}
@@ -155,11 +155,11 @@
 
 #	if GLM_ARCH & GLM_ARCH_AVX_BIT
 	template<qualifier Q>
-	struct compute_quat_div_scalar<double, P, true>
+	struct compute_quat_div_scalar<double, Q, true>
 	{
-		static tquat<double, P> call(tquat<double, P> const& q, double s)
+		static tquat<double, Q> call(tquat<double, Q> const& q, double s)
 		{
-			tquat<double, P> Result;
+			tquat<double, Q> Result;
 			Result.data = _mm256_div_pd(q.data, _mm_set_ps1(s));
 			return Result;
 		}
@@ -167,9 +167,9 @@
 #	endif
 
 	template<qualifier Q>
-	struct compute_quat_mul_vec4<float, P, true>
+	struct compute_quat_mul_vec4<float, Q, true>
 	{
-		static vec<4, float, P> call(tquat<float, P> const& q, vec<4, float, Q> const& v)
+		static vec<4, float, Q> call(tquat<float, Q> const& q, vec<4, float, Q> const& v)
 		{
 			__m128 const q_wwww = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 3, 3, 3));
 			__m128 const q_swp0 = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 0, 2, 1));
@@ -186,7 +186,7 @@ namespace detail
 			uv = _mm_mul_ps(uv, _mm_mul_ps(q_wwww, two));
 			uuv = _mm_mul_ps(uuv, two);
 
-			vec<4, float, P> Result;
+			vec<4, float, Q> Result;
 			Result.data = _mm_add_ps(v.Data, _mm_add_ps(uv, uuv));
 			return Result;
 		}
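
For reference, not part of the patch: the compute_quat_mul_vec4 specialization touched by the last two hunks vectorizes the standard two-cross-product form of quaternion rotation, v' = v + 2w(qv x v) + 2(qv x (qv x v)). A minimal scalar sketch of the same math against the public GLM API follows; the function name rotate_scalar is illustrative only and does not appear in the patch.

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    // Scalar equivalent of the SSE path above: the shuffle/mul/sub sequences in
    // the patch build the two cross products, q_wwww broadcasts the w component,
    // and the 'two' constant in the context lines supplies the factor of 2.
    glm::vec3 rotate_scalar(glm::quat const& q, glm::vec3 const& v)
    {
        glm::vec3 const qv(q.x, q.y, q.z);        // vector part of the quaternion
        glm::vec3 const uv  = glm::cross(qv, v);  // first cross product
        glm::vec3 const uuv = glm::cross(qv, uv); // second cross product
        return v + 2.0f * (q.w * uv + uuv);
    }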