diff --git a/glm/detail/func_matrix_simd.inl b/glm/detail/func_matrix_simd.inl
index f67ac66a..f9409f33 100644
--- a/glm/detail/func_matrix_simd.inl
+++ b/glm/detail/func_matrix_simd.inl
@@ -121,8 +121,10 @@ namespace glm {
 	}
 #endif // CXX11
 
+namespace detail
+{
 	template<qualifier Q>
-	struct detail::compute_inverse<4, 4, float, Q, true>
+	struct compute_inverse<4, 4, float, Q, true>
 	{
 		GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& m)
 		{
@@ -245,5 +247,6 @@ namespace glm {
 		return r;
 	}
 };
+}//namespace detail
 }//namespace glm
 #endif
diff --git a/glm/detail/type_vec4_simd.inl b/glm/detail/type_vec4_simd.inl
index 149c413e..0fcb64e9 100644
--- a/glm/detail/type_vec4_simd.inl
+++ b/glm/detail/type_vec4_simd.inl
@@ -499,7 +499,7 @@ namespace detail {
 		vec<4, int, Q> call(vec<4, int, Q> const& a, vec<4, int, Q> const& b)
 		{
-			vec<4, uint, Q> Result;
+			vec<4, int, Q> Result;
 			Result.data = vaddq_s32(a.data, b.data);
 			return Result;
 		}
@@ -593,7 +593,7 @@ namespace detail {
 			cmp = vpminq_u32(cmp, cmp);
 			uint32_t r = cmp[0];
 #else
-			uint32x2_t cmpx2 = vpmin_u32(vget_low_f32(cmp), vget_high_f32(cmp));
+			uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp));
 			cmpx2 = vpmin_u32(cmpx2, cmpx2);
 			uint32_t r = cmpx2[0];
 #endif
@@ -612,7 +612,7 @@ namespace detail {
 			cmp = vpminq_u32(cmp, cmp);
 			uint32_t r = cmp[0];
 #else
-			uint32x2_t cmpx2 = vpmin_u32(vget_low_f32(cmp), vget_high_f32(cmp));
+			uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp));
 			cmpx2 = vpmin_u32(cmpx2, cmpx2);
 			uint32_t r = cmpx2[0];
 #endif
@@ -631,7 +631,7 @@ namespace detail {
 			cmp = vpminq_u32(cmp, cmp);
 			uint32_t r = cmp[0];
 #else
-			uint32x2_t cmpx2 = vpmin_u32(vget_low_f32(cmp), vget_high_f32(cmp));
+			uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp));
 			cmpx2 = vpmin_u32(cmpx2, cmpx2);
 			uint32_t r = cmpx2[0];
 #endif
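
Notes on the changes above.

func_matrix_simd.inl: some compilers (notably GCC) refuse to declare the compute_inverse partial specialization through a qualified-id (struct detail::compute_inverse<...>) from the enclosing glm scope, emitting an error along the lines of "specialization of ... in different namespace"; the hunk therefore reopens namespace detail and declares the specialization inside it. A minimal standalone sketch of the rule, using hypothetical names rather than GLM's own:

	// Primary template lives in outer::inner.
	namespace outer { namespace inner {
		template<typename T, bool UseSimd>
		struct compute { static int call() { return 0; } };
	}}

	// GCC rejects this qualified-id form when written at outer scope:
	//   namespace outer {
	//       template<typename T>
	//       struct inner::compute<T, true> { /* ... */ };
	//   }

	// Portable form, mirroring the patch: reopen the inner namespace.
	namespace outer { namespace inner {
		template<typename T>
		struct compute<T, true> { static int call() { return 1; } };
	}}

	int main() { return outer::inner::compute<float, true>::call(); } // exits with 1, i.e. the specialization was picked

type_vec4_simd.inl: both fixes are type corrections. The integer-add specialization wraps vaddq_s32, so Result must be vec<4, int, Q>, not vec<4, uint, Q>. In the comparison reductions, cmp is a uint32x4_t, so its halves have to be extracted with vget_low_u32/vget_high_u32; the _f32 variants take a float32x4_t and do not compile under strict NEON intrinsic typing.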