
Currently arm_neon.h emits C-style casts to perform vector type casts. These rely on implicit conversion between vector types being enabled, which is deprecated behaviour that will soon be removed. To ensure NEON code keeps working afterwards, this patch changes all of these vector type casts into bitcasts.

Co-authored-by: Momchil Velikov <momchil.velikov@arm.com>
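For illustration, here is a minimal sketch of the kind of change this implies in the emitted header code. This is a hypothetical example, not the actual arm_neon.h source (which is generated by Clang's NeonEmitter), and the helper names are made up: a C-style cast between distinct vector types only compiles while implicit (lax) vector conversions are available, whereas `__builtin_bit_cast` reinterprets the bits explicitly and keeps working once those implicit conversions are gone.

```c
#include <arm_neon.h>

// Before: a C-style cast between distinct vector types. This is the kind
// of cast the header used to emit; it relies on implicit vector
// conversions, which are deprecated.
static inline uint8x8_t cast_old(int8x8_t v) {
  return (uint8x8_t)v;
}

// After: an explicit bitcast. Same bit pattern, no reliance on the
// deprecated implicit conversions. (Sketch only; the real header code is
// generated by NeonEmitter.)
static inline uint8x8_t cast_new(int8x8_t v) {
  return __builtin_bit_cast(uint8x8_t, v);
}
```

The regenerated CHECK lines in the test below reflect this: the paired `bitcast` instructions (for example `<4 x i16>` to `<8 x i8>` and back) are the IR these header-level bitcasts produce before the optimizer folds them away.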
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
|
|
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
|
|
// RUN: -disable-O0-optnone -emit-llvm -o - %s \
|
|
// RUN: | opt -S -passes=mem2reg,sroa | FileCheck %s
|
|
|
|
// REQUIRES: aarch64-registered-target || arm-registered-target
|
|
|
|
#include <arm_neon.h>
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vceqz_s8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <8 x i8> [[A]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <8 x i1> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: ret <8 x i8> [[VCEQZ_I]]
|
|
//
|
|
uint8x8_t test_vceqz_s8(int8x8_t a) {
|
|
return vceqz_s8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i16> @test_vceqz_s16(
|
|
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i16> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i16>
|
|
// CHECK-NEXT: ret <4 x i16> [[VCEQZ_I]]
|
|
//
|
|
uint16x4_t test_vceqz_s16(int16x4_t a) {
|
|
return vceqz_s16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vceqz_s32(
|
|
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <2 x i32> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP2]] to <2 x i32>
|
|
// CHECK-NEXT: ret <2 x i32> [[VCEQZ_I]]
|
|
//
|
|
uint32x2_t test_vceqz_s32(int32x2_t a) {
|
|
return vceqz_s32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <1 x i64> @test_vceqz_s64(
|
|
// CHECK-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <1 x i64> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <1 x i1> [[TMP2]] to <1 x i64>
|
|
// CHECK-NEXT: ret <1 x i64> [[VCEQZ_I]]
|
|
//
|
|
uint64x1_t test_vceqz_s64(int64x1_t a) {
|
|
return vceqz_s64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <1 x i64> @test_vceqz_u64(
|
|
// CHECK-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <1 x i64> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <1 x i1> [[TMP2]] to <1 x i64>
|
|
// CHECK-NEXT: ret <1 x i64> [[VCEQZ_I]]
|
|
//
|
|
uint64x1_t test_vceqz_u64(uint64x1_t a) {
|
|
return vceqz_u64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <1 x i64> @test_vceqz_p64(
|
|
// CHECK-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <1 x i64> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <1 x i1> [[TMP2]] to <1 x i64>
|
|
// CHECK-NEXT: ret <1 x i64> [[VCEQZ_I]]
|
|
//
|
|
uint64x1_t test_vceqz_p64(poly64x1_t a) {
|
|
return vceqz_p64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vceqzq_s8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <16 x i8> [[A]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <16 x i1> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: ret <16 x i8> [[VCEQZ_I]]
|
|
//
|
|
uint8x16_t test_vceqzq_s8(int8x16_t a) {
|
|
return vceqzq_s8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vceqzq_s16(
|
|
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <8 x i16> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <8 x i1> [[TMP2]] to <8 x i16>
|
|
// CHECK-NEXT: ret <8 x i16> [[VCEQZ_I]]
|
|
//
|
|
uint16x8_t test_vceqzq_s16(int16x8_t a) {
|
|
return vceqzq_s16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vceqzq_s32(
|
|
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
|
|
// CHECK-NEXT: ret <4 x i32> [[VCEQZ_I]]
|
|
//
|
|
uint32x4_t test_vceqzq_s32(int32x4_t a) {
|
|
return vceqzq_s32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vceqzq_s64(
|
|
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <2 x i64> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP2]] to <2 x i64>
|
|
// CHECK-NEXT: ret <2 x i64> [[VCEQZ_I]]
|
|
//
|
|
uint64x2_t test_vceqzq_s64(int64x2_t a) {
|
|
return vceqzq_s64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vceqz_u8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <8 x i8> [[A]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <8 x i1> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: ret <8 x i8> [[VCEQZ_I]]
|
|
//
|
|
uint8x8_t test_vceqz_u8(uint8x8_t a) {
|
|
return vceqz_u8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i16> @test_vceqz_u16(
|
|
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i16> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i16>
|
|
// CHECK-NEXT: ret <4 x i16> [[VCEQZ_I]]
|
|
//
|
|
uint16x4_t test_vceqz_u16(uint16x4_t a) {
|
|
return vceqz_u16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vceqz_u32(
|
|
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <2 x i32> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP2]] to <2 x i32>
|
|
// CHECK-NEXT: ret <2 x i32> [[VCEQZ_I]]
|
|
//
|
|
uint32x2_t test_vceqz_u32(uint32x2_t a) {
|
|
return vceqz_u32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vceqzq_u8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <16 x i8> [[A]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <16 x i1> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: ret <16 x i8> [[VCEQZ_I]]
|
|
//
|
|
uint8x16_t test_vceqzq_u8(uint8x16_t a) {
|
|
return vceqzq_u8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vceqzq_u16(
|
|
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <8 x i16> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <8 x i1> [[TMP2]] to <8 x i16>
|
|
// CHECK-NEXT: ret <8 x i16> [[VCEQZ_I]]
|
|
//
|
|
uint16x8_t test_vceqzq_u16(uint16x8_t a) {
|
|
return vceqzq_u16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vceqzq_u32(
|
|
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
|
|
// CHECK-NEXT: ret <4 x i32> [[VCEQZ_I]]
|
|
//
|
|
uint32x4_t test_vceqzq_u32(uint32x4_t a) {
|
|
return vceqzq_u32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vceqzq_u64(
|
|
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <2 x i64> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP2]] to <2 x i64>
|
|
// CHECK-NEXT: ret <2 x i64> [[VCEQZ_I]]
|
|
//
|
|
uint64x2_t test_vceqzq_u64(uint64x2_t a) {
|
|
return vceqzq_u64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vceqz_f32(
|
|
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp oeq <2 x float> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i32>
|
|
// CHECK-NEXT: ret <2 x i32> [[VCEQZ_I]]
|
|
//
|
|
uint32x2_t test_vceqz_f32(float32x2_t a) {
|
|
return vceqz_f32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <1 x i64> @test_vceqz_f64(
|
|
// CHECK-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64
|
|
// CHECK-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp oeq <1 x double> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <1 x i1> [[TMP3]] to <1 x i64>
|
|
// CHECK-NEXT: ret <1 x i64> [[VCEQZ_I]]
|
|
//
|
|
uint64x1_t test_vceqz_f64(float64x1_t a) {
|
|
return vceqz_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vceqzq_f32(
|
|
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp oeq <4 x float> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
|
|
// CHECK-NEXT: ret <4 x i32> [[VCEQZ_I]]
|
|
//
|
|
uint32x4_t test_vceqzq_f32(float32x4_t a) {
|
|
return vceqzq_f32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vceqz_p8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <8 x i8> [[A]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <8 x i1> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: ret <8 x i8> [[VCEQZ_I]]
|
|
//
|
|
uint8x8_t test_vceqz_p8(poly8x8_t a) {
|
|
return vceqz_p8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vceqzq_p8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <16 x i8> [[A]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <16 x i1> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: ret <16 x i8> [[VCEQZ_I]]
|
|
//
|
|
uint8x16_t test_vceqzq_p8(poly8x16_t a) {
|
|
return vceqzq_p8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vceqzq_f64(
|
|
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp oeq <2 x double> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64>
|
|
// CHECK-NEXT: ret <2 x i64> [[VCEQZ_I]]
|
|
//
|
|
uint64x2_t test_vceqzq_f64(float64x2_t a) {
|
|
return vceqzq_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vceqzq_p64(
|
|
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp eq <2 x i64> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP2]] to <2 x i64>
|
|
// CHECK-NEXT: ret <2 x i64> [[VCEQZ_I]]
|
|
//
|
|
uint64x2_t test_vceqzq_p64(poly64x2_t a) {
|
|
return vceqzq_p64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vcgez_s8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sge <8 x i8> [[A]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGEZ_I:%.*]] = sext <8 x i1> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: ret <8 x i8> [[VCGEZ_I]]
|
|
//
|
|
uint8x8_t test_vcgez_s8(int8x8_t a) {
|
|
return vcgez_s8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i16> @test_vcgez_s16(
|
|
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <4 x i16> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGEZ_I:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i16>
|
|
// CHECK-NEXT: ret <4 x i16> [[VCGEZ_I]]
|
|
//
|
|
uint16x4_t test_vcgez_s16(int16x4_t a) {
|
|
return vcgez_s16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vcgez_s32(
|
|
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <2 x i32> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGEZ_I:%.*]] = sext <2 x i1> [[TMP2]] to <2 x i32>
|
|
// CHECK-NEXT: ret <2 x i32> [[VCGEZ_I]]
|
|
//
|
|
uint32x2_t test_vcgez_s32(int32x2_t a) {
|
|
return vcgez_s32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <1 x i64> @test_vcgez_s64(
|
|
// CHECK-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <1 x i64> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGEZ_I:%.*]] = sext <1 x i1> [[TMP2]] to <1 x i64>
|
|
// CHECK-NEXT: ret <1 x i64> [[VCGEZ_I]]
|
|
//
|
|
uint64x1_t test_vcgez_s64(int64x1_t a) {
|
|
return vcgez_s64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vcgezq_s8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sge <16 x i8> [[A]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGEZ_I:%.*]] = sext <16 x i1> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: ret <16 x i8> [[VCGEZ_I]]
|
|
//
|
|
uint8x16_t test_vcgezq_s8(int8x16_t a) {
|
|
return vcgezq_s8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vcgezq_s16(
|
|
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <8 x i16> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGEZ_I:%.*]] = sext <8 x i1> [[TMP2]] to <8 x i16>
|
|
// CHECK-NEXT: ret <8 x i16> [[VCGEZ_I]]
|
|
//
|
|
uint16x8_t test_vcgezq_s16(int16x8_t a) {
|
|
return vcgezq_s16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vcgezq_s32(
|
|
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <4 x i32> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGEZ_I:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
|
|
// CHECK-NEXT: ret <4 x i32> [[VCGEZ_I]]
|
|
//
|
|
uint32x4_t test_vcgezq_s32(int32x4_t a) {
|
|
return vcgezq_s32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vcgezq_s64(
|
|
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sge <2 x i64> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGEZ_I:%.*]] = sext <2 x i1> [[TMP2]] to <2 x i64>
|
|
// CHECK-NEXT: ret <2 x i64> [[VCGEZ_I]]
|
|
//
|
|
uint64x2_t test_vcgezq_s64(int64x2_t a) {
|
|
return vcgezq_s64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vcgez_f32(
|
|
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp oge <2 x float> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGEZ_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i32>
|
|
// CHECK-NEXT: ret <2 x i32> [[VCGEZ_I]]
|
|
//
|
|
uint32x2_t test_vcgez_f32(float32x2_t a) {
|
|
return vcgez_f32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <1 x i64> @test_vcgez_f64(
|
|
// CHECK-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64
|
|
// CHECK-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp oge <1 x double> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGEZ_I:%.*]] = sext <1 x i1> [[TMP3]] to <1 x i64>
|
|
// CHECK-NEXT: ret <1 x i64> [[VCGEZ_I]]
|
|
//
|
|
uint64x1_t test_vcgez_f64(float64x1_t a) {
|
|
return vcgez_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vcgezq_f32(
|
|
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp oge <4 x float> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGEZ_I:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
|
|
// CHECK-NEXT: ret <4 x i32> [[VCGEZ_I]]
|
|
//
|
|
uint32x4_t test_vcgezq_f32(float32x4_t a) {
|
|
return vcgezq_f32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vcgezq_f64(
|
|
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp oge <2 x double> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGEZ_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64>
|
|
// CHECK-NEXT: ret <2 x i64> [[VCGEZ_I]]
|
|
//
|
|
uint64x2_t test_vcgezq_f64(float64x2_t a) {
|
|
return vcgezq_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vclez_s8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sle <8 x i8> [[A]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLEZ_I:%.*]] = sext <8 x i1> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: ret <8 x i8> [[VCLEZ_I]]
|
|
//
|
|
uint8x8_t test_vclez_s8(int8x8_t a) {
|
|
return vclez_s8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i16> @test_vclez_s16(
|
|
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sle <4 x i16> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLEZ_I:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i16>
|
|
// CHECK-NEXT: ret <4 x i16> [[VCLEZ_I]]
|
|
//
|
|
uint16x4_t test_vclez_s16(int16x4_t a) {
|
|
return vclez_s16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vclez_s32(
|
|
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sle <2 x i32> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLEZ_I:%.*]] = sext <2 x i1> [[TMP2]] to <2 x i32>
|
|
// CHECK-NEXT: ret <2 x i32> [[VCLEZ_I]]
|
|
//
|
|
uint32x2_t test_vclez_s32(int32x2_t a) {
|
|
return vclez_s32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <1 x i64> @test_vclez_s64(
|
|
// CHECK-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sle <1 x i64> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLEZ_I:%.*]] = sext <1 x i1> [[TMP2]] to <1 x i64>
|
|
// CHECK-NEXT: ret <1 x i64> [[VCLEZ_I]]
|
|
//
|
|
uint64x1_t test_vclez_s64(int64x1_t a) {
|
|
return vclez_s64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vclezq_s8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sle <16 x i8> [[A]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLEZ_I:%.*]] = sext <16 x i1> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: ret <16 x i8> [[VCLEZ_I]]
|
|
//
|
|
uint8x16_t test_vclezq_s8(int8x16_t a) {
|
|
return vclezq_s8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vclezq_s16(
|
|
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sle <8 x i16> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLEZ_I:%.*]] = sext <8 x i1> [[TMP2]] to <8 x i16>
|
|
// CHECK-NEXT: ret <8 x i16> [[VCLEZ_I]]
|
|
//
|
|
uint16x8_t test_vclezq_s16(int16x8_t a) {
|
|
return vclezq_s16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vclezq_s32(
|
|
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sle <4 x i32> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLEZ_I:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
|
|
// CHECK-NEXT: ret <4 x i32> [[VCLEZ_I]]
|
|
//
|
|
uint32x4_t test_vclezq_s32(int32x4_t a) {
|
|
return vclezq_s32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vclezq_s64(
|
|
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sle <2 x i64> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLEZ_I:%.*]] = sext <2 x i1> [[TMP2]] to <2 x i64>
|
|
// CHECK-NEXT: ret <2 x i64> [[VCLEZ_I]]
|
|
//
|
|
uint64x2_t test_vclezq_s64(int64x2_t a) {
|
|
return vclezq_s64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vclez_f32(
|
|
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp ole <2 x float> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLEZ_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i32>
|
|
// CHECK-NEXT: ret <2 x i32> [[VCLEZ_I]]
|
|
//
|
|
uint32x2_t test_vclez_f32(float32x2_t a) {
|
|
return vclez_f32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <1 x i64> @test_vclez_f64(
|
|
// CHECK-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64
|
|
// CHECK-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp ole <1 x double> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLEZ_I:%.*]] = sext <1 x i1> [[TMP3]] to <1 x i64>
|
|
// CHECK-NEXT: ret <1 x i64> [[VCLEZ_I]]
|
|
//
|
|
uint64x1_t test_vclez_f64(float64x1_t a) {
|
|
return vclez_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vclezq_f32(
|
|
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp ole <4 x float> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLEZ_I:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
|
|
// CHECK-NEXT: ret <4 x i32> [[VCLEZ_I]]
|
|
//
|
|
uint32x4_t test_vclezq_f32(float32x4_t a) {
|
|
return vclezq_f32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vclezq_f64(
|
|
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp ole <2 x double> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLEZ_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64>
|
|
// CHECK-NEXT: ret <2 x i64> [[VCLEZ_I]]
|
|
//
|
|
uint64x2_t test_vclezq_f64(float64x2_t a) {
|
|
return vclezq_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vcgtz_s8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <8 x i8> [[A]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGTZ_I:%.*]] = sext <8 x i1> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: ret <8 x i8> [[VCGTZ_I]]
|
|
//
|
|
uint8x8_t test_vcgtz_s8(int8x8_t a) {
|
|
return vcgtz_s8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i16> @test_vcgtz_s16(
|
|
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <4 x i16> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGTZ_I:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i16>
|
|
// CHECK-NEXT: ret <4 x i16> [[VCGTZ_I]]
|
|
//
|
|
uint16x4_t test_vcgtz_s16(int16x4_t a) {
|
|
return vcgtz_s16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vcgtz_s32(
|
|
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <2 x i32> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGTZ_I:%.*]] = sext <2 x i1> [[TMP2]] to <2 x i32>
|
|
// CHECK-NEXT: ret <2 x i32> [[VCGTZ_I]]
|
|
//
|
|
uint32x2_t test_vcgtz_s32(int32x2_t a) {
|
|
return vcgtz_s32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <1 x i64> @test_vcgtz_s64(
|
|
// CHECK-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <1 x i64> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGTZ_I:%.*]] = sext <1 x i1> [[TMP2]] to <1 x i64>
|
|
// CHECK-NEXT: ret <1 x i64> [[VCGTZ_I]]
|
|
//
|
|
uint64x1_t test_vcgtz_s64(int64x1_t a) {
|
|
return vcgtz_s64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vcgtzq_s8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <16 x i8> [[A]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGTZ_I:%.*]] = sext <16 x i1> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: ret <16 x i8> [[VCGTZ_I]]
|
|
//
|
|
uint8x16_t test_vcgtzq_s8(int8x16_t a) {
|
|
return vcgtzq_s8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vcgtzq_s16(
|
|
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <8 x i16> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGTZ_I:%.*]] = sext <8 x i1> [[TMP2]] to <8 x i16>
|
|
// CHECK-NEXT: ret <8 x i16> [[VCGTZ_I]]
|
|
//
|
|
uint16x8_t test_vcgtzq_s16(int16x8_t a) {
|
|
return vcgtzq_s16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vcgtzq_s32(
|
|
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <4 x i32> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGTZ_I:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
|
|
// CHECK-NEXT: ret <4 x i32> [[VCGTZ_I]]
|
|
//
|
|
uint32x4_t test_vcgtzq_s32(int32x4_t a) {
|
|
return vcgtzq_s32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vcgtzq_s64(
|
|
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <2 x i64> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGTZ_I:%.*]] = sext <2 x i1> [[TMP2]] to <2 x i64>
|
|
// CHECK-NEXT: ret <2 x i64> [[VCGTZ_I]]
|
|
//
|
|
uint64x2_t test_vcgtzq_s64(int64x2_t a) {
|
|
return vcgtzq_s64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vcgtz_f32(
|
|
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp ogt <2 x float> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGTZ_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i32>
|
|
// CHECK-NEXT: ret <2 x i32> [[VCGTZ_I]]
|
|
//
|
|
uint32x2_t test_vcgtz_f32(float32x2_t a) {
|
|
return vcgtz_f32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <1 x i64> @test_vcgtz_f64(
|
|
// CHECK-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64
|
|
// CHECK-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp ogt <1 x double> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGTZ_I:%.*]] = sext <1 x i1> [[TMP3]] to <1 x i64>
|
|
// CHECK-NEXT: ret <1 x i64> [[VCGTZ_I]]
|
|
//
|
|
uint64x1_t test_vcgtz_f64(float64x1_t a) {
|
|
return vcgtz_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vcgtzq_f32(
|
|
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp ogt <4 x float> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGTZ_I:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
|
|
// CHECK-NEXT: ret <4 x i32> [[VCGTZ_I]]
|
|
//
|
|
uint32x4_t test_vcgtzq_f32(float32x4_t a) {
|
|
return vcgtzq_f32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vcgtzq_f64(
|
|
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp ogt <2 x double> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCGTZ_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64>
|
|
// CHECK-NEXT: ret <2 x i64> [[VCGTZ_I]]
|
|
//
|
|
uint64x2_t test_vcgtzq_f64(float64x2_t a) {
|
|
return vcgtzq_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vcltz_s8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp slt <8 x i8> [[A]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLTZ_I:%.*]] = sext <8 x i1> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: ret <8 x i8> [[VCLTZ_I]]
|
|
//
|
|
uint8x8_t test_vcltz_s8(int8x8_t a) {
|
|
return vcltz_s8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i16> @test_vcltz_s16(
|
|
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <4 x i16> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLTZ_I:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i16>
|
|
// CHECK-NEXT: ret <4 x i16> [[VCLTZ_I]]
|
|
//
|
|
uint16x4_t test_vcltz_s16(int16x4_t a) {
|
|
return vcltz_s16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vcltz_s32(
|
|
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <2 x i32> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLTZ_I:%.*]] = sext <2 x i1> [[TMP2]] to <2 x i32>
|
|
// CHECK-NEXT: ret <2 x i32> [[VCLTZ_I]]
|
|
//
|
|
uint32x2_t test_vcltz_s32(int32x2_t a) {
|
|
return vcltz_s32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <1 x i64> @test_vcltz_s64(
|
|
// CHECK-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <1 x i64> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLTZ_I:%.*]] = sext <1 x i1> [[TMP2]] to <1 x i64>
|
|
// CHECK-NEXT: ret <1 x i64> [[VCLTZ_I]]
|
|
//
|
|
uint64x1_t test_vcltz_s64(int64x1_t a) {
|
|
return vcltz_s64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vcltzq_s8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = icmp slt <16 x i8> [[A]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLTZ_I:%.*]] = sext <16 x i1> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: ret <16 x i8> [[VCLTZ_I]]
|
|
//
|
|
uint8x16_t test_vcltzq_s8(int8x16_t a) {
|
|
return vcltzq_s8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vcltzq_s16(
|
|
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <8 x i16> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLTZ_I:%.*]] = sext <8 x i1> [[TMP2]] to <8 x i16>
|
|
// CHECK-NEXT: ret <8 x i16> [[VCLTZ_I]]
|
|
//
|
|
uint16x8_t test_vcltzq_s16(int16x8_t a) {
|
|
return vcltzq_s16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vcltzq_s32(
|
|
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <4 x i32> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLTZ_I:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
|
|
// CHECK-NEXT: ret <4 x i32> [[VCLTZ_I]]
|
|
//
|
|
uint32x4_t test_vcltzq_s32(int32x4_t a) {
|
|
return vcltzq_s32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vcltzq_s64(
|
|
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = icmp slt <2 x i64> [[TMP1]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLTZ_I:%.*]] = sext <2 x i1> [[TMP2]] to <2 x i64>
|
|
// CHECK-NEXT: ret <2 x i64> [[VCLTZ_I]]
|
|
//
|
|
uint64x2_t test_vcltzq_s64(int64x2_t a) {
|
|
return vcltzq_s64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vcltz_f32(
|
|
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp olt <2 x float> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLTZ_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i32>
|
|
// CHECK-NEXT: ret <2 x i32> [[VCLTZ_I]]
|
|
//
|
|
uint32x2_t test_vcltz_f32(float32x2_t a) {
|
|
return vcltz_f32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <1 x i64> @test_vcltz_f64(
|
|
// CHECK-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64
|
|
// CHECK-NEXT: [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp olt <1 x double> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLTZ_I:%.*]] = sext <1 x i1> [[TMP3]] to <1 x i64>
|
|
// CHECK-NEXT: ret <1 x i64> [[VCLTZ_I]]
|
|
//
|
|
uint64x1_t test_vcltz_f64(float64x1_t a) {
|
|
return vcltz_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vcltzq_f32(
|
|
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp olt <4 x float> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLTZ_I:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
|
|
// CHECK-NEXT: ret <4 x i32> [[VCLTZ_I]]
|
|
//
|
|
uint32x4_t test_vcltzq_f32(float32x4_t a) {
|
|
return vcltzq_f32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vcltzq_f64(
|
|
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = fcmp olt <2 x double> [[TMP2]], zeroinitializer
|
|
// CHECK-NEXT: [[VCLTZ_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64>
|
|
// CHECK-NEXT: ret <2 x i64> [[VCLTZ_I]]
|
|
//
|
|
uint64x2_t test_vcltzq_f64(float64x2_t a) {
|
|
return vcltzq_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vrev16_s8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[A]], <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
|
|
// CHECK-NEXT: ret <8 x i8> [[SHUFFLE_I]]
|
|
//
|
|
int8x8_t test_vrev16_s8(int8x8_t a) {
|
|
return vrev16_s8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vrev16_u8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[A]], <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
|
|
// CHECK-NEXT: ret <8 x i8> [[SHUFFLE_I]]
|
|
//
|
|
uint8x8_t test_vrev16_u8(uint8x8_t a) {
|
|
return vrev16_u8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vrev16_p8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[A]], <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
|
|
// CHECK-NEXT: ret <8 x i8> [[SHUFFLE_I]]
|
|
//
|
|
poly8x8_t test_vrev16_p8(poly8x8_t a) {
|
|
return vrev16_p8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vrev16q_s8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[A]], <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
|
|
// CHECK-NEXT: ret <16 x i8> [[SHUFFLE_I]]
|
|
//
|
|
int8x16_t test_vrev16q_s8(int8x16_t a) {
|
|
return vrev16q_s8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vrev16q_u8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[A]], <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
|
|
// CHECK-NEXT: ret <16 x i8> [[SHUFFLE_I]]
|
|
//
|
|
uint8x16_t test_vrev16q_u8(uint8x16_t a) {
|
|
return vrev16q_u8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vrev16q_p8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[A]], <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
|
|
// CHECK-NEXT: ret <16 x i8> [[SHUFFLE_I]]
|
|
//
|
|
poly8x16_t test_vrev16q_p8(poly8x16_t a) {
|
|
return vrev16q_p8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vrev32_s8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[A]], <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
|
|
// CHECK-NEXT: ret <8 x i8> [[SHUFFLE_I]]
|
|
//
|
|
int8x8_t test_vrev32_s8(int8x8_t a) {
|
|
return vrev32_s8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i16> @test_vrev32_s16(
|
|
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[A]], <4 x i32> <i32 1, i32 0, i32 3, i32 2>
|
|
// CHECK-NEXT: ret <4 x i16> [[SHUFFLE_I]]
|
|
//
|
|
int16x4_t test_vrev32_s16(int16x4_t a) {
|
|
return vrev32_s16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vrev32_u8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[A]], <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
|
|
// CHECK-NEXT: ret <8 x i8> [[SHUFFLE_I]]
|
|
//
|
|
uint8x8_t test_vrev32_u8(uint8x8_t a) {
|
|
return vrev32_u8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i16> @test_vrev32_u16(
|
|
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[A]], <4 x i32> <i32 1, i32 0, i32 3, i32 2>
|
|
// CHECK-NEXT: ret <4 x i16> [[SHUFFLE_I]]
|
|
//
|
|
uint16x4_t test_vrev32_u16(uint16x4_t a) {
|
|
return vrev32_u16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vrev32_p8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[A]], <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
|
|
// CHECK-NEXT: ret <8 x i8> [[SHUFFLE_I]]
|
|
//
|
|
poly8x8_t test_vrev32_p8(poly8x8_t a) {
|
|
return vrev32_p8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i16> @test_vrev32_p16(
|
|
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[A]], <4 x i32> <i32 1, i32 0, i32 3, i32 2>
|
|
// CHECK-NEXT: ret <4 x i16> [[SHUFFLE_I]]
|
|
//
|
|
poly16x4_t test_vrev32_p16(poly16x4_t a) {
|
|
return vrev32_p16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vrev32q_s8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[A]], <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
|
|
// CHECK-NEXT: ret <16 x i8> [[SHUFFLE_I]]
|
|
//
|
|
int8x16_t test_vrev32q_s8(int8x16_t a) {
|
|
return vrev32q_s8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vrev32q_s16(
|
|
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> [[A]], <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
|
|
// CHECK-NEXT: ret <8 x i16> [[SHUFFLE_I]]
|
|
//
|
|
int16x8_t test_vrev32q_s16(int16x8_t a) {
|
|
return vrev32q_s16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vrev32q_u8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[A]], <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
|
|
// CHECK-NEXT: ret <16 x i8> [[SHUFFLE_I]]
|
|
//
|
|
uint8x16_t test_vrev32q_u8(uint8x16_t a) {
|
|
return vrev32q_u8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vrev32q_u16(
|
|
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> [[A]], <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
|
|
// CHECK-NEXT: ret <8 x i16> [[SHUFFLE_I]]
|
|
//
|
|
uint16x8_t test_vrev32q_u16(uint16x8_t a) {
|
|
return vrev32q_u16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vrev32q_p8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[A]], <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
|
|
// CHECK-NEXT: ret <16 x i8> [[SHUFFLE_I]]
|
|
//
|
|
poly8x16_t test_vrev32q_p8(poly8x16_t a) {
|
|
return vrev32q_p8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vrev32q_p16(
|
|
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> [[A]], <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
|
|
// CHECK-NEXT: ret <8 x i16> [[SHUFFLE_I]]
|
|
//
|
|
poly16x8_t test_vrev32q_p16(poly16x8_t a) {
|
|
return vrev32q_p16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vrev64_s8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[A]], <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
|
|
// CHECK-NEXT: ret <8 x i8> [[SHUFFLE_I]]
|
|
//
|
|
int8x8_t test_vrev64_s8(int8x8_t a) {
|
|
return vrev64_s8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i16> @test_vrev64_s16(
|
|
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[A]], <4 x i32> <i32 3, i32 2, i32 1, i32 0>
|
|
// CHECK-NEXT: ret <4 x i16> [[SHUFFLE_I]]
|
|
//
|
|
int16x4_t test_vrev64_s16(int16x4_t a) {
|
|
return vrev64_s16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vrev64_s32(
|
|
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> [[A]], <2 x i32> [[A]], <2 x i32> <i32 1, i32 0>
|
|
// CHECK-NEXT: ret <2 x i32> [[SHUFFLE_I]]
|
|
//
|
|
int32x2_t test_vrev64_s32(int32x2_t a) {
|
|
return vrev64_s32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vrev64_u8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[A]], <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
|
|
// CHECK-NEXT: ret <8 x i8> [[SHUFFLE_I]]
|
|
//
|
|
uint8x8_t test_vrev64_u8(uint8x8_t a) {
|
|
return vrev64_u8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i16> @test_vrev64_u16(
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[A]], <4 x i32> <i32 3, i32 2, i32 1, i32 0>
// CHECK-NEXT:    ret <4 x i16> [[SHUFFLE_I]]
//
uint16x4_t test_vrev64_u16(uint16x4_t a) {
  return vrev64_u16(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vrev64_u32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> [[A]], <2 x i32> [[A]], <2 x i32> <i32 1, i32 0>
// CHECK-NEXT:    ret <2 x i32> [[SHUFFLE_I]]
//
uint32x2_t test_vrev64_u32(uint32x2_t a) {
  return vrev64_u32(a);
}

// CHECK-LABEL: define dso_local <8 x i8> @test_vrev64_p8(
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[A]], <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
// CHECK-NEXT:    ret <8 x i8> [[SHUFFLE_I]]
//
poly8x8_t test_vrev64_p8(poly8x8_t a) {
  return vrev64_p8(a);
}

// CHECK-LABEL: define dso_local <4 x i16> @test_vrev64_p16(
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[A]], <4 x i32> <i32 3, i32 2, i32 1, i32 0>
// CHECK-NEXT:    ret <4 x i16> [[SHUFFLE_I]]
//
poly16x4_t test_vrev64_p16(poly16x4_t a) {
  return vrev64_p16(a);
}

// CHECK-LABEL: define dso_local <2 x float> @test_vrev64_f32(
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <2 x float> [[A]], <2 x float> [[A]], <2 x i32> <i32 1, i32 0>
// CHECK-NEXT:    ret <2 x float> [[SHUFFLE_I]]
//
float32x2_t test_vrev64_f32(float32x2_t a) {
  return vrev64_f32(a);
}

// CHECK-LABEL: define dso_local <16 x i8> @test_vrev64q_s8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[A]], <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
// CHECK-NEXT:    ret <16 x i8> [[SHUFFLE_I]]
//
int8x16_t test_vrev64q_s8(int8x16_t a) {
  return vrev64q_s8(a);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vrev64q_s16(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> [[A]], <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK-NEXT:    ret <8 x i16> [[SHUFFLE_I]]
//
int16x8_t test_vrev64q_s16(int16x8_t a) {
  return vrev64q_s16(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vrev64q_s32(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <4 x i32> [[A]], <4 x i32> [[A]], <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK-NEXT:    ret <4 x i32> [[SHUFFLE_I]]
//
int32x4_t test_vrev64q_s32(int32x4_t a) {
  return vrev64q_s32(a);
}

// CHECK-LABEL: define dso_local <16 x i8> @test_vrev64q_u8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[A]], <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
// CHECK-NEXT:    ret <16 x i8> [[SHUFFLE_I]]
//
uint8x16_t test_vrev64q_u8(uint8x16_t a) {
  return vrev64q_u8(a);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vrev64q_u16(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> [[A]], <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK-NEXT:    ret <8 x i16> [[SHUFFLE_I]]
//
uint16x8_t test_vrev64q_u16(uint16x8_t a) {
  return vrev64q_u16(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vrev64q_u32(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <4 x i32> [[A]], <4 x i32> [[A]], <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK-NEXT:    ret <4 x i32> [[SHUFFLE_I]]
//
uint32x4_t test_vrev64q_u32(uint32x4_t a) {
  return vrev64q_u32(a);
}

// CHECK-LABEL: define dso_local <16 x i8> @test_vrev64q_p8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[A]], <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
// CHECK-NEXT:    ret <16 x i8> [[SHUFFLE_I]]
//
poly8x16_t test_vrev64q_p8(poly8x16_t a) {
  return vrev64q_p8(a);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vrev64q_p16(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> [[A]], <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK-NEXT:    ret <8 x i16> [[SHUFFLE_I]]
//
poly16x8_t test_vrev64q_p16(poly16x8_t a) {
  return vrev64q_p16(a);
}

// CHECK-LABEL: define dso_local <4 x float> @test_vrev64q_f32(
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <4 x float> [[A]], <4 x float> [[A]], <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK-NEXT:    ret <4 x float> [[SHUFFLE_I]]
//
float32x4_t test_vrev64q_f32(float32x4_t a) {
  return vrev64q_f32(a);
}

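// vpaddl/vpaddlq ("pairwise add long") sum adjacent element pairs into a vector of half as many elements at twice the width. They lower to the @llvm.aarch64.neon.saddlp/uaddlp intrinsics; the non-byte element types round-trip through an <N x i8> bitcast first.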
// CHECK-LABEL: define dso_local <4 x i16> @test_vpaddl_s8(
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VPADDL_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> [[A]])
// CHECK-NEXT:    ret <4 x i16> [[VPADDL_I]]
//
int16x4_t test_vpaddl_s8(int8x8_t a) {
  return vpaddl_s8(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vpaddl_s16(
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VPADDL_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK-NEXT:    [[VPADDL1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16> [[VPADDL_I]])
// CHECK-NEXT:    ret <2 x i32> [[VPADDL1_I]]
//
int32x2_t test_vpaddl_s16(int16x4_t a) {
  return vpaddl_s16(a);
}

// CHECK-LABEL: define dso_local <1 x i64> @test_vpaddl_s32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VPADDL_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK-NEXT:    [[VPADDL1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.saddlp.v1i64.v2i32(<2 x i32> [[VPADDL_I]])
// CHECK-NEXT:    ret <1 x i64> [[VPADDL1_I]]
//
int64x1_t test_vpaddl_s32(int32x2_t a) {
  return vpaddl_s32(a);
}

// CHECK-LABEL: define dso_local <4 x i16> @test_vpaddl_u8(
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VPADDL_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> [[A]])
// CHECK-NEXT:    ret <4 x i16> [[VPADDL_I]]
//
uint16x4_t test_vpaddl_u8(uint8x8_t a) {
  return vpaddl_u8(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vpaddl_u16(
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VPADDL_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK-NEXT:    [[VPADDL1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> [[VPADDL_I]])
// CHECK-NEXT:    ret <2 x i32> [[VPADDL1_I]]
//
uint32x2_t test_vpaddl_u16(uint16x4_t a) {
  return vpaddl_u16(a);
}

// CHECK-LABEL: define dso_local <1 x i64> @test_vpaddl_u32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VPADDL_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK-NEXT:    [[VPADDL1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.uaddlp.v1i64.v2i32(<2 x i32> [[VPADDL_I]])
// CHECK-NEXT:    ret <1 x i64> [[VPADDL1_I]]
//
uint64x1_t test_vpaddl_u32(uint32x2_t a) {
  return vpaddl_u32(a);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vpaddlq_s8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VPADDL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8> [[A]])
// CHECK-NEXT:    ret <8 x i16> [[VPADDL_I]]
//
int16x8_t test_vpaddlq_s8(int8x16_t a) {
  return vpaddlq_s8(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vpaddlq_s16(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VPADDL_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK-NEXT:    [[VPADDL1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16> [[VPADDL_I]])
// CHECK-NEXT:    ret <4 x i32> [[VPADDL1_I]]
//
int32x4_t test_vpaddlq_s16(int16x8_t a) {
  return vpaddlq_s16(a);
}

// CHECK-LABEL: define dso_local <2 x i64> @test_vpaddlq_s32(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VPADDL_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-NEXT:    [[VPADDL1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> [[VPADDL_I]])
// CHECK-NEXT:    ret <2 x i64> [[VPADDL1_I]]
//
int64x2_t test_vpaddlq_s32(int32x4_t a) {
  return vpaddlq_s32(a);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vpaddlq_u8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VPADDL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> [[A]])
// CHECK-NEXT:    ret <8 x i16> [[VPADDL_I]]
//
uint16x8_t test_vpaddlq_u8(uint8x16_t a) {
  return vpaddlq_u8(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vpaddlq_u16(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VPADDL_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK-NEXT:    [[VPADDL1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> [[VPADDL_I]])
// CHECK-NEXT:    ret <4 x i32> [[VPADDL1_I]]
//
uint32x4_t test_vpaddlq_u16(uint16x8_t a) {
  return vpaddlq_u16(a);
}

// CHECK-LABEL: define dso_local <2 x i64> @test_vpaddlq_u32(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VPADDL_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-NEXT:    [[VPADDL1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> [[VPADDL_I]])
// CHECK-NEXT:    ret <2 x i64> [[VPADDL1_I]]
//
uint64x2_t test_vpaddlq_u32(uint32x4_t a) {
  return vpaddlq_u32(a);
}

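// vpadal/vpadalq ("pairwise add and accumulate long") apply the same widening pairwise add to the second operand and then accumulate into the wide first operand, which is why each check below ends in a plain IR add.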
// CHECK-LABEL: define dso_local <4 x i16> @test_vpadal_s8(
// CHECK-SAME: <4 x i16> noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VPADAL_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> [[B]])
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK-NEXT:    [[TMP2:%.*]] = add <4 x i16> [[VPADAL_I]], [[TMP1]]
// CHECK-NEXT:    ret <4 x i16> [[TMP2]]
//
int16x4_t test_vpadal_s8(int16x4_t a, int8x8_t b) {
  return vpadal_s8(a, b);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vpadal_s16(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]], <4 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8>
// CHECK-NEXT:    [[VPADAL_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
// CHECK-NEXT:    [[VPADAL1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16> [[VPADAL_I]])
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK-NEXT:    [[TMP3:%.*]] = add <2 x i32> [[VPADAL1_I]], [[TMP2]]
// CHECK-NEXT:    ret <2 x i32> [[TMP3]]
//
int32x2_t test_vpadal_s16(int32x2_t a, int16x4_t b) {
  return vpadal_s16(a, b);
}

// CHECK-LABEL: define dso_local <1 x i64> @test_vpadal_s32(
// CHECK-SAME: <1 x i64> noundef [[A:%.*]], <2 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
// CHECK-NEXT:    [[VPADAL_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
// CHECK-NEXT:    [[VPADAL1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.saddlp.v1i64.v2i32(<2 x i32> [[VPADAL_I]])
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
// CHECK-NEXT:    [[TMP3:%.*]] = add <1 x i64> [[VPADAL1_I]], [[TMP2]]
// CHECK-NEXT:    ret <1 x i64> [[TMP3]]
//
int64x1_t test_vpadal_s32(int64x1_t a, int32x2_t b) {
  return vpadal_s32(a, b);
}

// CHECK-LABEL: define dso_local <4 x i16> @test_vpadal_u8(
// CHECK-SAME: <4 x i16> noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VPADAL_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> [[B]])
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK-NEXT:    [[TMP2:%.*]] = add <4 x i16> [[VPADAL_I]], [[TMP1]]
// CHECK-NEXT:    ret <4 x i16> [[TMP2]]
//
uint16x4_t test_vpadal_u8(uint16x4_t a, uint8x8_t b) {
  return vpadal_u8(a, b);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vpadal_u16(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]], <4 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8>
// CHECK-NEXT:    [[VPADAL_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
// CHECK-NEXT:    [[VPADAL1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> [[VPADAL_I]])
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK-NEXT:    [[TMP3:%.*]] = add <2 x i32> [[VPADAL1_I]], [[TMP2]]
// CHECK-NEXT:    ret <2 x i32> [[TMP3]]
//
uint32x2_t test_vpadal_u16(uint32x2_t a, uint16x4_t b) {
  return vpadal_u16(a, b);
}

// CHECK-LABEL: define dso_local <1 x i64> @test_vpadal_u32(
// CHECK-SAME: <1 x i64> noundef [[A:%.*]], <2 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
// CHECK-NEXT:    [[VPADAL_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
// CHECK-NEXT:    [[VPADAL1_I:%.*]] = call <1 x i64> @llvm.aarch64.neon.uaddlp.v1i64.v2i32(<2 x i32> [[VPADAL_I]])
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
// CHECK-NEXT:    [[TMP3:%.*]] = add <1 x i64> [[VPADAL1_I]], [[TMP2]]
// CHECK-NEXT:    ret <1 x i64> [[TMP3]]
//
uint64x1_t test_vpadal_u32(uint64x1_t a, uint32x2_t b) {
  return vpadal_u32(a, b);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vpadalq_s8(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]], <16 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VPADAL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8> [[B]])
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK-NEXT:    [[TMP2:%.*]] = add <8 x i16> [[VPADAL_I]], [[TMP1]]
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
int16x8_t test_vpadalq_s8(int16x8_t a, int8x16_t b) {
  return vpadalq_s8(a, b);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vpadalq_s16(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
// CHECK-NEXT:    [[VPADAL_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
// CHECK-NEXT:    [[VPADAL1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16> [[VPADAL_I]])
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-NEXT:    [[TMP3:%.*]] = add <4 x i32> [[VPADAL1_I]], [[TMP2]]
// CHECK-NEXT:    ret <4 x i32> [[TMP3]]
//
int32x4_t test_vpadalq_s16(int32x4_t a, int16x8_t b) {
  return vpadalq_s16(a, b);
}

// CHECK-LABEL: define dso_local <2 x i64> @test_vpadalq_s32(
// CHECK-SAME: <2 x i64> noundef [[A:%.*]], <4 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
// CHECK-NEXT:    [[VPADAL_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-NEXT:    [[VPADAL1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> [[VPADAL_I]])
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK-NEXT:    [[TMP3:%.*]] = add <2 x i64> [[VPADAL1_I]], [[TMP2]]
// CHECK-NEXT:    ret <2 x i64> [[TMP3]]
//
int64x2_t test_vpadalq_s32(int64x2_t a, int32x4_t b) {
  return vpadalq_s32(a, b);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vpadalq_u8(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]], <16 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VPADAL_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> [[B]])
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK-NEXT:    [[TMP2:%.*]] = add <8 x i16> [[VPADAL_I]], [[TMP1]]
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vpadalq_u8(uint16x8_t a, uint8x16_t b) {
  return vpadalq_u8(a, b);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vpadalq_u16(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
// CHECK-NEXT:    [[VPADAL_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
// CHECK-NEXT:    [[VPADAL1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> [[VPADAL_I]])
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-NEXT:    [[TMP3:%.*]] = add <4 x i32> [[VPADAL1_I]], [[TMP2]]
// CHECK-NEXT:    ret <4 x i32> [[TMP3]]
//
uint32x4_t test_vpadalq_u16(uint32x4_t a, uint16x8_t b) {
  return vpadalq_u16(a, b);
}

// CHECK-LABEL: define dso_local <2 x i64> @test_vpadalq_u32(
// CHECK-SAME: <2 x i64> noundef [[A:%.*]], <4 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
// CHECK-NEXT:    [[VPADAL_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-NEXT:    [[VPADAL1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> [[VPADAL_I]])
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK-NEXT:    [[TMP3:%.*]] = add <2 x i64> [[VPADAL1_I]], [[TMP2]]
// CHECK-NEXT:    ret <2 x i64> [[TMP3]]
//
uint64x2_t test_vpadalq_u32(uint64x2_t a, uint32x4_t b) {
  return vpadalq_u32(a, b);
}

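// vqabs/vqabsq compute the saturating absolute value (SQABS): the most negative element value saturates to the most positive instead of wrapping.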
// CHECK-LABEL: define dso_local <8 x i8> @test_vqabs_s8(
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VQABS_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqabs.v8i8(<8 x i8> [[A]])
// CHECK-NEXT:    ret <8 x i8> [[VQABS_V_I]]
//
int8x8_t test_vqabs_s8(int8x8_t a) {
  return vqabs_s8(a);
}

// CHECK-LABEL: define dso_local <16 x i8> @test_vqabsq_s8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VQABSQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sqabs.v16i8(<16 x i8> [[A]])
// CHECK-NEXT:    ret <16 x i8> [[VQABSQ_V_I]]
//
int8x16_t test_vqabsq_s8(int8x16_t a) {
  return vqabsq_s8(a);
}

// CHECK-LABEL: define dso_local <4 x i16> @test_vqabs_s16(
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VQABS_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK-NEXT:    [[VQABS_V1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqabs.v4i16(<4 x i16> [[VQABS_V_I]])
// CHECK-NEXT:    [[VQABS_V2_I:%.*]] = bitcast <4 x i16> [[VQABS_V1_I]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[VQABS_V2_I]] to <4 x i16>
// CHECK-NEXT:    ret <4 x i16> [[TMP1]]
//
int16x4_t test_vqabs_s16(int16x4_t a) {
  return vqabs_s16(a);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vqabsq_s16(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VQABSQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK-NEXT:    [[VQABSQ_V1_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqabs.v8i16(<8 x i16> [[VQABSQ_V_I]])
// CHECK-NEXT:    [[VQABSQ_V2_I:%.*]] = bitcast <8 x i16> [[VQABSQ_V1_I]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[VQABSQ_V2_I]] to <8 x i16>
// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
//
int16x8_t test_vqabsq_s16(int16x8_t a) {
  return vqabsq_s16(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vqabs_s32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VQABS_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK-NEXT:    [[VQABS_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqabs.v2i32(<2 x i32> [[VQABS_V_I]])
// CHECK-NEXT:    [[VQABS_V2_I:%.*]] = bitcast <2 x i32> [[VQABS_V1_I]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[VQABS_V2_I]] to <2 x i32>
// CHECK-NEXT:    ret <2 x i32> [[TMP1]]
//
int32x2_t test_vqabs_s32(int32x2_t a) {
  return vqabs_s32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vqabsq_s32(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VQABSQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-NEXT:    [[VQABSQ_V1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqabs.v4i32(<4 x i32> [[VQABSQ_V_I]])
// CHECK-NEXT:    [[VQABSQ_V2_I:%.*]] = bitcast <4 x i32> [[VQABSQ_V1_I]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[VQABSQ_V2_I]] to <4 x i32>
// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
//
int32x4_t test_vqabsq_s32(int32x4_t a) {
  return vqabsq_s32(a);
}

// CHECK-LABEL: define dso_local <2 x i64> @test_vqabsq_s64(
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VQABSQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK-NEXT:    [[VQABSQ_V1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqabs.v2i64(<2 x i64> [[VQABSQ_V_I]])
// CHECK-NEXT:    [[VQABSQ_V2_I:%.*]] = bitcast <2 x i64> [[VQABSQ_V1_I]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[VQABSQ_V2_I]] to <2 x i64>
// CHECK-NEXT:    ret <2 x i64> [[TMP1]]
//
int64x2_t test_vqabsq_s64(int64x2_t a) {
  return vqabsq_s64(a);
}

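// vqneg/vqnegq compute the saturating negation (SQNEG); as with vqabs, the most negative input saturates rather than overflowing.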
// CHECK-LABEL: define dso_local <8 x i8> @test_vqneg_s8(
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VQNEG_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqneg.v8i8(<8 x i8> [[A]])
// CHECK-NEXT:    ret <8 x i8> [[VQNEG_V_I]]
//
int8x8_t test_vqneg_s8(int8x8_t a) {
  return vqneg_s8(a);
}

// CHECK-LABEL: define dso_local <16 x i8> @test_vqnegq_s8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VQNEGQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.sqneg.v16i8(<16 x i8> [[A]])
// CHECK-NEXT:    ret <16 x i8> [[VQNEGQ_V_I]]
//
int8x16_t test_vqnegq_s8(int8x16_t a) {
  return vqnegq_s8(a);
}

// CHECK-LABEL: define dso_local <4 x i16> @test_vqneg_s16(
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VQNEG_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK-NEXT:    [[VQNEG_V1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqneg.v4i16(<4 x i16> [[VQNEG_V_I]])
// CHECK-NEXT:    [[VQNEG_V2_I:%.*]] = bitcast <4 x i16> [[VQNEG_V1_I]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[VQNEG_V2_I]] to <4 x i16>
// CHECK-NEXT:    ret <4 x i16> [[TMP1]]
//
int16x4_t test_vqneg_s16(int16x4_t a) {
  return vqneg_s16(a);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vqnegq_s16(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VQNEGQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK-NEXT:    [[VQNEGQ_V1_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqneg.v8i16(<8 x i16> [[VQNEGQ_V_I]])
// CHECK-NEXT:    [[VQNEGQ_V2_I:%.*]] = bitcast <8 x i16> [[VQNEGQ_V1_I]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[VQNEGQ_V2_I]] to <8 x i16>
// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
//
int16x8_t test_vqnegq_s16(int16x8_t a) {
  return vqnegq_s16(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vqneg_s32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VQNEG_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK-NEXT:    [[VQNEG_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqneg.v2i32(<2 x i32> [[VQNEG_V_I]])
// CHECK-NEXT:    [[VQNEG_V2_I:%.*]] = bitcast <2 x i32> [[VQNEG_V1_I]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[VQNEG_V2_I]] to <2 x i32>
// CHECK-NEXT:    ret <2 x i32> [[TMP1]]
//
int32x2_t test_vqneg_s32(int32x2_t a) {
  return vqneg_s32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vqnegq_s32(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VQNEGQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-NEXT:    [[VQNEGQ_V1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqneg.v4i32(<4 x i32> [[VQNEGQ_V_I]])
// CHECK-NEXT:    [[VQNEGQ_V2_I:%.*]] = bitcast <4 x i32> [[VQNEGQ_V1_I]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[VQNEGQ_V2_I]] to <4 x i32>
// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
//
int32x4_t test_vqnegq_s32(int32x4_t a) {
  return vqnegq_s32(a);
}

// CHECK-LABEL: define dso_local <2 x i64> @test_vqnegq_s64(
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VQNEGQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK-NEXT:    [[VQNEGQ_V1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqneg.v2i64(<2 x i64> [[VQNEGQ_V_I]])
// CHECK-NEXT:    [[VQNEGQ_V2_I:%.*]] = bitcast <2 x i64> [[VQNEGQ_V1_I]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[VQNEGQ_V2_I]] to <2 x i64>
// CHECK-NEXT:    ret <2 x i64> [[TMP1]]
//
int64x2_t test_vqnegq_s64(int64x2_t a) {
  return vqnegq_s64(a);
}

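// vneg/vnegq are plain element-wise negations: the integer variants lower to a sub from zero, the floating-point variants to the IR fneg instruction.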
// CHECK-LABEL: define dso_local <8 x i8> @test_vneg_s8(
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SUB_I:%.*]] = sub <8 x i8> zeroinitializer, [[A]]
// CHECK-NEXT:    ret <8 x i8> [[SUB_I]]
//
int8x8_t test_vneg_s8(int8x8_t a) {
  return vneg_s8(a);
}

// CHECK-LABEL: define dso_local <16 x i8> @test_vnegq_s8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SUB_I:%.*]] = sub <16 x i8> zeroinitializer, [[A]]
// CHECK-NEXT:    ret <16 x i8> [[SUB_I]]
//
int8x16_t test_vnegq_s8(int8x16_t a) {
  return vnegq_s8(a);
}

// CHECK-LABEL: define dso_local <4 x i16> @test_vneg_s16(
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SUB_I:%.*]] = sub <4 x i16> zeroinitializer, [[A]]
// CHECK-NEXT:    ret <4 x i16> [[SUB_I]]
//
int16x4_t test_vneg_s16(int16x4_t a) {
  return vneg_s16(a);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vnegq_s16(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SUB_I:%.*]] = sub <8 x i16> zeroinitializer, [[A]]
// CHECK-NEXT:    ret <8 x i16> [[SUB_I]]
//
int16x8_t test_vnegq_s16(int16x8_t a) {
  return vnegq_s16(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vneg_s32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SUB_I:%.*]] = sub <2 x i32> zeroinitializer, [[A]]
// CHECK-NEXT:    ret <2 x i32> [[SUB_I]]
//
int32x2_t test_vneg_s32(int32x2_t a) {
  return vneg_s32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vnegq_s32(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SUB_I:%.*]] = sub <4 x i32> zeroinitializer, [[A]]
// CHECK-NEXT:    ret <4 x i32> [[SUB_I]]
//
int32x4_t test_vnegq_s32(int32x4_t a) {
  return vnegq_s32(a);
}

// CHECK-LABEL: define dso_local <2 x i64> @test_vnegq_s64(
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[SUB_I:%.*]] = sub <2 x i64> zeroinitializer, [[A]]
// CHECK-NEXT:    ret <2 x i64> [[SUB_I]]
//
int64x2_t test_vnegq_s64(int64x2_t a) {
  return vnegq_s64(a);
}

// CHECK-LABEL: define dso_local <2 x float> @test_vneg_f32(
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[FNEG_I:%.*]] = fneg <2 x float> [[A]]
// CHECK-NEXT:    ret <2 x float> [[FNEG_I]]
//
float32x2_t test_vneg_f32(float32x2_t a) {
  return vneg_f32(a);
}

// CHECK-LABEL: define dso_local <4 x float> @test_vnegq_f32(
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[FNEG_I:%.*]] = fneg <4 x float> [[A]]
// CHECK-NEXT:    ret <4 x float> [[FNEG_I]]
//
float32x4_t test_vnegq_f32(float32x4_t a) {
  return vnegq_f32(a);
}

// CHECK-LABEL: define dso_local <2 x double> @test_vnegq_f64(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[FNEG_I:%.*]] = fneg <2 x double> [[A]]
// CHECK-NEXT:    ret <2 x double> [[FNEG_I]]
//
float64x2_t test_vnegq_f64(float64x2_t a) {
  return vnegq_f64(a);
}

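// vabs/vabsq take the element-wise absolute value; the integer variants use @llvm.aarch64.neon.abs, the floating-point variants the generic @llvm.fabs intrinsic.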
// CHECK-LABEL: define dso_local <8 x i8> @test_vabs_s8(
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VABS_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.abs.v8i8(<8 x i8> [[A]])
// CHECK-NEXT:    ret <8 x i8> [[VABS_I]]
//
int8x8_t test_vabs_s8(int8x8_t a) {
  return vabs_s8(a);
}

// CHECK-LABEL: define dso_local <16 x i8> @test_vabsq_s8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VABS_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.abs.v16i8(<16 x i8> [[A]])
// CHECK-NEXT:    ret <16 x i8> [[VABS_I]]
//
int8x16_t test_vabsq_s8(int8x16_t a) {
  return vabsq_s8(a);
}

// CHECK-LABEL: define dso_local <4 x i16> @test_vabs_s16(
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VABS_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK-NEXT:    [[VABS1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.abs.v4i16(<4 x i16> [[VABS_I]])
// CHECK-NEXT:    ret <4 x i16> [[VABS1_I]]
//
int16x4_t test_vabs_s16(int16x4_t a) {
  return vabs_s16(a);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vabsq_s16(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VABS_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK-NEXT:    [[VABS1_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.abs.v8i16(<8 x i16> [[VABS_I]])
// CHECK-NEXT:    ret <8 x i16> [[VABS1_I]]
//
int16x8_t test_vabsq_s16(int16x8_t a) {
  return vabsq_s16(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vabs_s32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VABS_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK-NEXT:    [[VABS1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.abs.v2i32(<2 x i32> [[VABS_I]])
// CHECK-NEXT:    ret <2 x i32> [[VABS1_I]]
//
int32x2_t test_vabs_s32(int32x2_t a) {
  return vabs_s32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vabsq_s32(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VABS_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-NEXT:    [[VABS1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.abs.v4i32(<4 x i32> [[VABS_I]])
// CHECK-NEXT:    ret <4 x i32> [[VABS1_I]]
//
int32x4_t test_vabsq_s32(int32x4_t a) {
  return vabsq_s32(a);
}

// CHECK-LABEL: define dso_local <2 x i64> @test_vabsq_s64(
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VABS_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK-NEXT:    [[VABS1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.abs.v2i64(<2 x i64> [[VABS_I]])
// CHECK-NEXT:    ret <2 x i64> [[VABS1_I]]
//
int64x2_t test_vabsq_s64(int64x2_t a) {
  return vabsq_s64(a);
}

// CHECK-LABEL: define dso_local <2 x float> @test_vabs_f32(
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// CHECK-NEXT:    [[VABS_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
// CHECK-NEXT:    [[VABS1_I:%.*]] = call <2 x float> @llvm.fabs.v2f32(<2 x float> [[VABS_I]])
// CHECK-NEXT:    ret <2 x float> [[VABS1_I]]
//
float32x2_t test_vabs_f32(float32x2_t a) {
  return vabs_f32(a);
}

// CHECK-LABEL: define dso_local <4 x float> @test_vabsq_f32(
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-NEXT:    [[VABS_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK-NEXT:    [[VABS1_I:%.*]] = call <4 x float> @llvm.fabs.v4f32(<4 x float> [[VABS_I]])
// CHECK-NEXT:    ret <4 x float> [[VABS1_I]]
//
float32x4_t test_vabsq_f32(float32x4_t a) {
  return vabsq_f32(a);
}

// CHECK-LABEL: define dso_local <2 x double> @test_vabsq_f64(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-NEXT:    [[VABS_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT:    [[VABS1_I:%.*]] = call <2 x double> @llvm.fabs.v2f64(<2 x double> [[VABS_I]])
// CHECK-NEXT:    ret <2 x double> [[VABS1_I]]
//
float64x2_t test_vabsq_f64(float64x2_t a) {
  return vabsq_f64(a);
}

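// vuqadd/vuqaddq lower to SUQADD, the signed saturating add that accumulates an unsigned addend into a signed operand.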
// CHECK-LABEL: define dso_local <8 x i8> @test_vuqadd_s8(
// CHECK-SAME: <8 x i8> noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VUQADD_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.suqadd.v8i8(<8 x i8> [[A]], <8 x i8> [[B]])
// CHECK-NEXT:    ret <8 x i8> [[VUQADD_I]]
//
int8x8_t test_vuqadd_s8(int8x8_t a, int8x8_t b) {
  return vuqadd_s8(a, b);
}

// CHECK-LABEL: define dso_local <16 x i8> @test_vuqaddq_s8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]], <16 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VUQADD_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.suqadd.v16i8(<16 x i8> [[A]], <16 x i8> [[B]])
// CHECK-NEXT:    ret <16 x i8> [[VUQADD_I]]
//
int8x16_t test_vuqaddq_s8(int8x16_t a, int8x16_t b) {
  return vuqaddq_s8(a, b);
}

// CHECK-LABEL: define dso_local <4 x i16> @test_vuqadd_s16(
// CHECK-SAME: <4 x i16> noundef [[A:%.*]], <4 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8>
// CHECK-NEXT:    [[VUQADD_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK-NEXT:    [[VUQADD1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
// CHECK-NEXT:    [[VUQADD2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.suqadd.v4i16(<4 x i16> [[VUQADD_I]], <4 x i16> [[VUQADD1_I]])
// CHECK-NEXT:    ret <4 x i16> [[VUQADD2_I]]
//
int16x4_t test_vuqadd_s16(int16x4_t a, int16x4_t b) {
  return vuqadd_s16(a, b);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vuqaddq_s16(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
// CHECK-NEXT:    [[VUQADD_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK-NEXT:    [[VUQADD1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
// CHECK-NEXT:    [[VUQADD2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.suqadd.v8i16(<8 x i16> [[VUQADD_I]], <8 x i16> [[VUQADD1_I]])
// CHECK-NEXT:    ret <8 x i16> [[VUQADD2_I]]
//
int16x8_t test_vuqaddq_s16(int16x8_t a, int16x8_t b) {
  return vuqaddq_s16(a, b);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vuqadd_s32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]], <2 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
// CHECK-NEXT:    [[VUQADD_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK-NEXT:    [[VUQADD1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
// CHECK-NEXT:    [[VUQADD2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.suqadd.v2i32(<2 x i32> [[VUQADD_I]], <2 x i32> [[VUQADD1_I]])
// CHECK-NEXT:    ret <2 x i32> [[VUQADD2_I]]
//
int32x2_t test_vuqadd_s32(int32x2_t a, int32x2_t b) {
  return vuqadd_s32(a, b);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vuqaddq_s32(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]], <4 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
// CHECK-NEXT:    [[VUQADD_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-NEXT:    [[VUQADD1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-NEXT:    [[VUQADD2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.suqadd.v4i32(<4 x i32> [[VUQADD_I]], <4 x i32> [[VUQADD1_I]])
// CHECK-NEXT:    ret <4 x i32> [[VUQADD2_I]]
//
int32x4_t test_vuqaddq_s32(int32x4_t a, int32x4_t b) {
  return vuqaddq_s32(a, b);
}

// CHECK-LABEL: define dso_local <2 x i64> @test_vuqaddq_s64(
// CHECK-SAME: <2 x i64> noundef [[A:%.*]], <2 x i64> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64> [[B]] to <16 x i8>
// CHECK-NEXT:    [[VUQADD_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK-NEXT:    [[VUQADD1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-NEXT:    [[VUQADD2_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.suqadd.v2i64(<2 x i64> [[VUQADD_I]], <2 x i64> [[VUQADD1_I]])
// CHECK-NEXT:    ret <2 x i64> [[VUQADD2_I]]
//
int64x2_t test_vuqaddq_s64(int64x2_t a, int64x2_t b) {
  return vuqaddq_s64(a, b);
}

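// vcls/vclsq count leading sign bits (CLS), i.e. how many bits below the sign bit match it; the unsigned variants reuse the same signed intrinsic and return signed results.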
// CHECK-LABEL: define dso_local <8 x i8> @test_vcls_s8(
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VCLS_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.cls.v8i8(<8 x i8> [[A]])
// CHECK-NEXT:    ret <8 x i8> [[VCLS_V_I]]
//
int8x8_t test_vcls_s8(int8x8_t a) {
  return vcls_s8(a);
}

// CHECK-LABEL: define dso_local <8 x i8> @test_vcls_u8(
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VCLS_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.cls.v8i8(<8 x i8> [[A]])
// CHECK-NEXT:    ret <8 x i8> [[VCLS_V_I]]
//
int8x8_t test_vcls_u8(uint8x8_t a) {
  return vcls_u8(a);
}

// CHECK-LABEL: define dso_local <16 x i8> @test_vclsq_s8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VCLSQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.cls.v16i8(<16 x i8> [[A]])
// CHECK-NEXT:    ret <16 x i8> [[VCLSQ_V_I]]
//
int8x16_t test_vclsq_s8(int8x16_t a) {
  return vclsq_s8(a);
}

// CHECK-LABEL: define dso_local <16 x i8> @test_vclsq_u8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VCLSQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.cls.v16i8(<16 x i8> [[A]])
// CHECK-NEXT:    ret <16 x i8> [[VCLSQ_V_I]]
//
int8x16_t test_vclsq_u8(uint8x16_t a) {
  return vclsq_u8(a);
}

// CHECK-LABEL: define dso_local <4 x i16> @test_vcls_s16(
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VCLS_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK-NEXT:    [[VCLS_V1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.cls.v4i16(<4 x i16> [[VCLS_V_I]])
// CHECK-NEXT:    [[VCLS_V2_I:%.*]] = bitcast <4 x i16> [[VCLS_V1_I]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[VCLS_V2_I]] to <4 x i16>
// CHECK-NEXT:    ret <4 x i16> [[TMP1]]
//
int16x4_t test_vcls_s16(int16x4_t a) {
  return vcls_s16(a);
}

// CHECK-LABEL: define dso_local <4 x i16> @test_vcls_u16(
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VCLS_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK-NEXT:    [[VCLS_V1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.cls.v4i16(<4 x i16> [[VCLS_V_I]])
// CHECK-NEXT:    [[VCLS_V2_I:%.*]] = bitcast <4 x i16> [[VCLS_V1_I]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[VCLS_V2_I]] to <4 x i16>
// CHECK-NEXT:    ret <4 x i16> [[TMP1]]
//
int16x4_t test_vcls_u16(uint16x4_t a) {
  return vcls_u16(a);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vclsq_s16(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VCLSQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK-NEXT:    [[VCLSQ_V1_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.cls.v8i16(<8 x i16> [[VCLSQ_V_I]])
// CHECK-NEXT:    [[VCLSQ_V2_I:%.*]] = bitcast <8 x i16> [[VCLSQ_V1_I]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[VCLSQ_V2_I]] to <8 x i16>
// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
//
int16x8_t test_vclsq_s16(int16x8_t a) {
  return vclsq_s16(a);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vclsq_u16(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VCLSQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK-NEXT:    [[VCLSQ_V1_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.cls.v8i16(<8 x i16> [[VCLSQ_V_I]])
// CHECK-NEXT:    [[VCLSQ_V2_I:%.*]] = bitcast <8 x i16> [[VCLSQ_V1_I]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[VCLSQ_V2_I]] to <8 x i16>
// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
//
int16x8_t test_vclsq_u16(uint16x8_t a) {
  return vclsq_u16(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vcls_s32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VCLS_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK-NEXT:    [[VCLS_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.cls.v2i32(<2 x i32> [[VCLS_V_I]])
// CHECK-NEXT:    [[VCLS_V2_I:%.*]] = bitcast <2 x i32> [[VCLS_V1_I]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[VCLS_V2_I]] to <2 x i32>
// CHECK-NEXT:    ret <2 x i32> [[TMP1]]
//
int32x2_t test_vcls_s32(int32x2_t a) {
  return vcls_s32(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vcls_u32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VCLS_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK-NEXT:    [[VCLS_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.cls.v2i32(<2 x i32> [[VCLS_V_I]])
// CHECK-NEXT:    [[VCLS_V2_I:%.*]] = bitcast <2 x i32> [[VCLS_V1_I]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[VCLS_V2_I]] to <2 x i32>
// CHECK-NEXT:    ret <2 x i32> [[TMP1]]
//
int32x2_t test_vcls_u32(uint32x2_t a) {
  return vcls_u32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vclsq_s32(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VCLSQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-NEXT:    [[VCLSQ_V1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.cls.v4i32(<4 x i32> [[VCLSQ_V_I]])
// CHECK-NEXT:    [[VCLSQ_V2_I:%.*]] = bitcast <4 x i32> [[VCLSQ_V1_I]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[VCLSQ_V2_I]] to <4 x i32>
// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
//
int32x4_t test_vclsq_s32(int32x4_t a) {
  return vclsq_s32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vclsq_u32(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VCLSQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-NEXT:    [[VCLSQ_V1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.cls.v4i32(<4 x i32> [[VCLSQ_V_I]])
// CHECK-NEXT:    [[VCLSQ_V2_I:%.*]] = bitcast <4 x i32> [[VCLSQ_V1_I]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[VCLSQ_V2_I]] to <4 x i32>
// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
//
int32x4_t test_vclsq_u32(uint32x4_t a) {
  return vclsq_u32(a);
}

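// vclz/vclzq count leading zero bits via @llvm.ctlz with the is_zero_poison flag set to false, so a zero input yields the full element width.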
// CHECK-LABEL: define dso_local <8 x i8> @test_vclz_s8(
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VCLZ_V_I:%.*]] = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> [[A]], i1 false)
// CHECK-NEXT:    ret <8 x i8> [[VCLZ_V_I]]
//
int8x8_t test_vclz_s8(int8x8_t a) {
  return vclz_s8(a);
}

// CHECK-LABEL: define dso_local <16 x i8> @test_vclzq_s8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VCLZQ_V_I:%.*]] = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> [[A]], i1 false)
// CHECK-NEXT:    ret <16 x i8> [[VCLZQ_V_I]]
//
int8x16_t test_vclzq_s8(int8x16_t a) {
  return vclzq_s8(a);
}

// CHECK-LABEL: define dso_local <4 x i16> @test_vclz_s16(
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VCLZ_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK-NEXT:    [[VCLZ_V1_I:%.*]] = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> [[VCLZ_V_I]], i1 false)
// CHECK-NEXT:    [[VCLZ_V2_I:%.*]] = bitcast <4 x i16> [[VCLZ_V1_I]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[VCLZ_V2_I]] to <4 x i16>
// CHECK-NEXT:    ret <4 x i16> [[TMP1]]
//
int16x4_t test_vclz_s16(int16x4_t a) {
  return vclz_s16(a);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vclzq_s16(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VCLZQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK-NEXT:    [[VCLZQ_V1_I:%.*]] = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> [[VCLZQ_V_I]], i1 false)
// CHECK-NEXT:    [[VCLZQ_V2_I:%.*]] = bitcast <8 x i16> [[VCLZQ_V1_I]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[VCLZQ_V2_I]] to <8 x i16>
// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
//
int16x8_t test_vclzq_s16(int16x8_t a) {
  return vclzq_s16(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vclz_s32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VCLZ_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK-NEXT:    [[VCLZ_V1_I:%.*]] = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> [[VCLZ_V_I]], i1 false)
// CHECK-NEXT:    [[VCLZ_V2_I:%.*]] = bitcast <2 x i32> [[VCLZ_V1_I]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[VCLZ_V2_I]] to <2 x i32>
// CHECK-NEXT:    ret <2 x i32> [[TMP1]]
//
int32x2_t test_vclz_s32(int32x2_t a) {
  return vclz_s32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vclzq_s32(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VCLZQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-NEXT:    [[VCLZQ_V1_I:%.*]] = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> [[VCLZQ_V_I]], i1 false)
// CHECK-NEXT:    [[VCLZQ_V2_I:%.*]] = bitcast <4 x i32> [[VCLZQ_V1_I]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[VCLZQ_V2_I]] to <4 x i32>
// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
//
int32x4_t test_vclzq_s32(int32x4_t a) {
  return vclzq_s32(a);
}

// CHECK-LABEL: define dso_local <8 x i8> @test_vclz_u8(
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VCLZ_V_I:%.*]] = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> [[A]], i1 false)
// CHECK-NEXT:    ret <8 x i8> [[VCLZ_V_I]]
//
uint8x8_t test_vclz_u8(uint8x8_t a) {
  return vclz_u8(a);
}

// CHECK-LABEL: define dso_local <16 x i8> @test_vclzq_u8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VCLZQ_V_I:%.*]] = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> [[A]], i1 false)
// CHECK-NEXT:    ret <16 x i8> [[VCLZQ_V_I]]
//
uint8x16_t test_vclzq_u8(uint8x16_t a) {
  return vclzq_u8(a);
}

// CHECK-LABEL: define dso_local <4 x i16> @test_vclz_u16(
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VCLZ_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK-NEXT:    [[VCLZ_V1_I:%.*]] = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> [[VCLZ_V_I]], i1 false)
// CHECK-NEXT:    [[VCLZ_V2_I:%.*]] = bitcast <4 x i16> [[VCLZ_V1_I]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[VCLZ_V2_I]] to <4 x i16>
// CHECK-NEXT:    ret <4 x i16> [[TMP1]]
//
uint16x4_t test_vclz_u16(uint16x4_t a) {
  return vclz_u16(a);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vclzq_u16(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VCLZQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK-NEXT:    [[VCLZQ_V1_I:%.*]] = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> [[VCLZQ_V_I]], i1 false)
// CHECK-NEXT:    [[VCLZQ_V2_I:%.*]] = bitcast <8 x i16> [[VCLZQ_V1_I]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[VCLZQ_V2_I]] to <8 x i16>
// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
//
uint16x8_t test_vclzq_u16(uint16x8_t a) {
  return vclzq_u16(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vclz_u32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
// CHECK-NEXT:    [[VCLZ_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK-NEXT:    [[VCLZ_V1_I:%.*]] = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> [[VCLZ_V_I]], i1 false)
// CHECK-NEXT:    [[VCLZ_V2_I:%.*]] = bitcast <2 x i32> [[VCLZ_V1_I]] to <8 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[VCLZ_V2_I]] to <2 x i32>
// CHECK-NEXT:    ret <2 x i32> [[TMP1]]
//
uint32x2_t test_vclz_u32(uint32x2_t a) {
  return vclz_u32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vclzq_u32(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
// CHECK-NEXT:    [[VCLZQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-NEXT:    [[VCLZQ_V1_I:%.*]] = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> [[VCLZQ_V_I]], i1 false)
// CHECK-NEXT:    [[VCLZQ_V2_I:%.*]] = bitcast <4 x i32> [[VCLZQ_V1_I]] to <16 x i8>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[VCLZQ_V2_I]] to <4 x i32>
// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
//
uint32x4_t test_vclzq_u32(uint32x4_t a) {
  return vclzq_u32(a);
}

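// vcnt/vcntq are per-byte population counts lowering to @llvm.ctpop; the signed, unsigned and polynomial variants all emit identical IR.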
// CHECK-LABEL: define dso_local <8 x i8> @test_vcnt_s8(
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VCNT_V_I:%.*]] = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> [[A]])
// CHECK-NEXT:    ret <8 x i8> [[VCNT_V_I]]
//
int8x8_t test_vcnt_s8(int8x8_t a) {
  return vcnt_s8(a);
}

// CHECK-LABEL: define dso_local <16 x i8> @test_vcntq_s8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VCNTQ_V_I:%.*]] = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> [[A]])
// CHECK-NEXT:    ret <16 x i8> [[VCNTQ_V_I]]
//
int8x16_t test_vcntq_s8(int8x16_t a) {
  return vcntq_s8(a);
}

// CHECK-LABEL: define dso_local <8 x i8> @test_vcnt_u8(
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VCNT_V_I:%.*]] = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> [[A]])
// CHECK-NEXT:    ret <8 x i8> [[VCNT_V_I]]
//
uint8x8_t test_vcnt_u8(uint8x8_t a) {
  return vcnt_u8(a);
}

// CHECK-LABEL: define dso_local <16 x i8> @test_vcntq_u8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VCNTQ_V_I:%.*]] = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> [[A]])
// CHECK-NEXT:    ret <16 x i8> [[VCNTQ_V_I]]
//
uint8x16_t test_vcntq_u8(uint8x16_t a) {
  return vcntq_u8(a);
}

// CHECK-LABEL: define dso_local <8 x i8> @test_vcnt_p8(
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VCNT_V_I:%.*]] = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> [[A]])
// CHECK-NEXT:    ret <8 x i8> [[VCNT_V_I]]
//
poly8x8_t test_vcnt_p8(poly8x8_t a) {
  return vcnt_p8(a);
}

// CHECK-LABEL: define dso_local <16 x i8> @test_vcntq_p8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[VCNTQ_V_I:%.*]] = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> [[A]])
// CHECK-NEXT:    ret <16 x i8> [[VCNTQ_V_I]]
//
poly8x16_t test_vcntq_p8(poly8x16_t a) {
  return vcntq_p8(a);
}

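// vmvn/vmvnq are bitwise NOT, emitted as an xor against an all-ones splat.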
// CHECK-LABEL: define dso_local <8 x i8> @test_vmvn_s8(
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[NOT_I:%.*]] = xor <8 x i8> [[A]], splat (i8 -1)
// CHECK-NEXT:    ret <8 x i8> [[NOT_I]]
//
int8x8_t test_vmvn_s8(int8x8_t a) {
  return vmvn_s8(a);
}

// CHECK-LABEL: define dso_local <16 x i8> @test_vmvnq_s8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[NOT_I:%.*]] = xor <16 x i8> [[A]], splat (i8 -1)
// CHECK-NEXT:    ret <16 x i8> [[NOT_I]]
//
int8x16_t test_vmvnq_s8(int8x16_t a) {
  return vmvnq_s8(a);
}

// CHECK-LABEL: define dso_local <4 x i16> @test_vmvn_s16(
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[NOT_I:%.*]] = xor <4 x i16> [[A]], splat (i16 -1)
// CHECK-NEXT:    ret <4 x i16> [[NOT_I]]
//
int16x4_t test_vmvn_s16(int16x4_t a) {
  return vmvn_s16(a);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vmvnq_s16(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[NOT_I:%.*]] = xor <8 x i16> [[A]], splat (i16 -1)
// CHECK-NEXT:    ret <8 x i16> [[NOT_I]]
//
int16x8_t test_vmvnq_s16(int16x8_t a) {
  return vmvnq_s16(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vmvn_s32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[NOT_I:%.*]] = xor <2 x i32> [[A]], splat (i32 -1)
// CHECK-NEXT:    ret <2 x i32> [[NOT_I]]
//
int32x2_t test_vmvn_s32(int32x2_t a) {
  return vmvn_s32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vmvnq_s32(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[NOT_I:%.*]] = xor <4 x i32> [[A]], splat (i32 -1)
// CHECK-NEXT:    ret <4 x i32> [[NOT_I]]
//
int32x4_t test_vmvnq_s32(int32x4_t a) {
  return vmvnq_s32(a);
}

// CHECK-LABEL: define dso_local <8 x i8> @test_vmvn_u8(
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[NOT_I:%.*]] = xor <8 x i8> [[A]], splat (i8 -1)
// CHECK-NEXT:    ret <8 x i8> [[NOT_I]]
//
uint8x8_t test_vmvn_u8(uint8x8_t a) {
  return vmvn_u8(a);
}

// CHECK-LABEL: define dso_local <16 x i8> @test_vmvnq_u8(
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[NOT_I:%.*]] = xor <16 x i8> [[A]], splat (i8 -1)
// CHECK-NEXT:    ret <16 x i8> [[NOT_I]]
//
uint8x16_t test_vmvnq_u8(uint8x16_t a) {
  return vmvnq_u8(a);
}

// CHECK-LABEL: define dso_local <4 x i16> @test_vmvn_u16(
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[NOT_I:%.*]] = xor <4 x i16> [[A]], splat (i16 -1)
// CHECK-NEXT:    ret <4 x i16> [[NOT_I]]
//
uint16x4_t test_vmvn_u16(uint16x4_t a) {
  return vmvn_u16(a);
}

// CHECK-LABEL: define dso_local <8 x i16> @test_vmvnq_u16(
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[NOT_I:%.*]] = xor <8 x i16> [[A]], splat (i16 -1)
// CHECK-NEXT:    ret <8 x i16> [[NOT_I]]
//
uint16x8_t test_vmvnq_u16(uint16x8_t a) {
  return vmvnq_u16(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vmvn_u32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[NOT_I:%.*]] = xor <2 x i32> [[A]], splat (i32 -1)
|
|
// CHECK-NEXT: ret <2 x i32> [[NOT_I]]
|
|
//
|
|
uint32x2_t test_vmvn_u32(uint32x2_t a) {
|
|
return vmvn_u32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vmvnq_u32(
|
|
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[NOT_I:%.*]] = xor <4 x i32> [[A]], splat (i32 -1)
|
|
// CHECK-NEXT: ret <4 x i32> [[NOT_I]]
|
|
//
|
|
uint32x4_t test_vmvnq_u32(uint32x4_t a) {
|
|
return vmvnq_u32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vmvn_p8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[NOT_I:%.*]] = xor <8 x i8> [[A]], splat (i8 -1)
|
|
// CHECK-NEXT: ret <8 x i8> [[NOT_I]]
|
|
//
|
|
poly8x8_t test_vmvn_p8(poly8x8_t a) {
|
|
return vmvn_p8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vmvnq_p8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[NOT_I:%.*]] = xor <16 x i8> [[A]], splat (i8 -1)
|
|
// CHECK-NEXT: ret <16 x i8> [[NOT_I]]
|
|
//
|
|
poly8x16_t test_vmvnq_p8(poly8x16_t a) {
|
|
return vmvnq_p8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vrbit_s8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[VRBIT_I:%.*]] = call <8 x i8> @llvm.bitreverse.v8i8(<8 x i8> [[A]])
|
|
// CHECK-NEXT: ret <8 x i8> [[VRBIT_I]]
|
|
//
|
|
int8x8_t test_vrbit_s8(int8x8_t a) {
|
|
return vrbit_s8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vrbitq_s8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[VRBIT_I:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[A]])
|
|
// CHECK-NEXT: ret <16 x i8> [[VRBIT_I]]
|
|
//
|
|
int8x16_t test_vrbitq_s8(int8x16_t a) {
|
|
return vrbitq_s8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vrbit_u8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[VRBIT_I:%.*]] = call <8 x i8> @llvm.bitreverse.v8i8(<8 x i8> [[A]])
|
|
// CHECK-NEXT: ret <8 x i8> [[VRBIT_I]]
|
|
//
|
|
uint8x8_t test_vrbit_u8(uint8x8_t a) {
|
|
return vrbit_u8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vrbitq_u8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[VRBIT_I:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[A]])
|
|
// CHECK-NEXT: ret <16 x i8> [[VRBIT_I]]
|
|
//
|
|
uint8x16_t test_vrbitq_u8(uint8x16_t a) {
|
|
return vrbitq_u8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vrbit_p8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[VRBIT_I:%.*]] = call <8 x i8> @llvm.bitreverse.v8i8(<8 x i8> [[A]])
|
|
// CHECK-NEXT: ret <8 x i8> [[VRBIT_I]]
|
|
//
|
|
poly8x8_t test_vrbit_p8(poly8x8_t a) {
|
|
return vrbit_p8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vrbitq_p8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[VRBIT_I:%.*]] = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> [[A]])
|
|
// CHECK-NEXT: ret <16 x i8> [[VRBIT_I]]
|
|
//
|
|
poly8x16_t test_vrbitq_p8(poly8x16_t a) {
|
|
return vrbitq_p8(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vmovn_s16(
|
|
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
|
|
// CHECK-NEXT: [[VMOVN_I:%.*]] = trunc <8 x i16> [[TMP1]] to <8 x i8>
|
|
// CHECK-NEXT: ret <8 x i8> [[VMOVN_I]]
|
|
//
|
|
int8x8_t test_vmovn_s16(int16x8_t a) {
|
|
return vmovn_s16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i16> @test_vmovn_s32(
|
|
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
|
|
// CHECK-NEXT: [[VMOVN_I:%.*]] = trunc <4 x i32> [[TMP1]] to <4 x i16>
|
|
// CHECK-NEXT: ret <4 x i16> [[VMOVN_I]]
|
|
//
|
|
int16x4_t test_vmovn_s32(int32x4_t a) {
|
|
return vmovn_s32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vmovn_s64(
|
|
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
|
|
// CHECK-NEXT: [[VMOVN_I:%.*]] = trunc <2 x i64> [[TMP1]] to <2 x i32>
|
|
// CHECK-NEXT: ret <2 x i32> [[VMOVN_I]]
|
|
//
|
|
int32x2_t test_vmovn_s64(int64x2_t a) {
|
|
return vmovn_s64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vmovn_u16(
|
|
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
|
|
// CHECK-NEXT: [[VMOVN_I:%.*]] = trunc <8 x i16> [[TMP1]] to <8 x i8>
|
|
// CHECK-NEXT: ret <8 x i8> [[VMOVN_I]]
|
|
//
|
|
uint8x8_t test_vmovn_u16(uint16x8_t a) {
|
|
return vmovn_u16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i16> @test_vmovn_u32(
|
|
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
|
|
// CHECK-NEXT: [[VMOVN_I:%.*]] = trunc <4 x i32> [[TMP1]] to <4 x i16>
|
|
// CHECK-NEXT: ret <4 x i16> [[VMOVN_I]]
|
|
//
|
|
uint16x4_t test_vmovn_u32(uint32x4_t a) {
|
|
return vmovn_u32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vmovn_u64(
|
|
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
|
|
// CHECK-NEXT: [[VMOVN_I:%.*]] = trunc <2 x i64> [[TMP1]] to <2 x i32>
|
|
// CHECK-NEXT: ret <2 x i32> [[VMOVN_I]]
|
|
//
|
|
uint32x2_t test_vmovn_u64(uint64x2_t a) {
|
|
return vmovn_u64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vmovn_high_s16(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
|
|
// CHECK-NEXT: [[VMOVN_I_I:%.*]] = trunc <8 x i16> [[TMP1]] to <8 x i8>
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[VMOVN_I_I]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
|
// CHECK-NEXT: ret <16 x i8> [[SHUFFLE_I]]
|
|
//
|
|
int8x16_t test_vmovn_high_s16(int8x8_t a, int16x8_t b) {
|
|
return vmovn_high_s16(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vmovn_high_s32(
|
|
// CHECK-SAME: <4 x i16> noundef [[A:%.*]], <4 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
|
|
// CHECK-NEXT: [[VMOVN_I_I:%.*]] = trunc <4 x i32> [[TMP1]] to <4 x i16>
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[VMOVN_I_I]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
|
// CHECK-NEXT: ret <8 x i16> [[SHUFFLE_I]]
|
|
//
|
|
int16x8_t test_vmovn_high_s32(int16x4_t a, int32x4_t b) {
|
|
return vmovn_high_s32(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vmovn_high_s64(
|
|
// CHECK-SAME: <2 x i32> noundef [[A:%.*]], <2 x i64> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
|
|
// CHECK-NEXT: [[VMOVN_I_I:%.*]] = trunc <2 x i64> [[TMP1]] to <2 x i32>
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> [[A]], <2 x i32> [[VMOVN_I_I]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
// CHECK-NEXT: ret <4 x i32> [[SHUFFLE_I]]
|
|
//
|
|
int32x4_t test_vmovn_high_s64(int32x2_t a, int64x2_t b) {
|
|
return vmovn_high_s64(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vmovn_high_u16(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
|
|
// CHECK-NEXT: [[VMOVN_I_I:%.*]] = trunc <8 x i16> [[TMP1]] to <8 x i8>
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[VMOVN_I_I]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
|
// CHECK-NEXT: ret <16 x i8> [[SHUFFLE_I]]
|
|
//
|
|
int8x16_t test_vmovn_high_u16(int8x8_t a, int16x8_t b) {
|
|
return vmovn_high_u16(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vmovn_high_u32(
|
|
// CHECK-SAME: <4 x i16> noundef [[A:%.*]], <4 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
|
|
// CHECK-NEXT: [[VMOVN_I_I:%.*]] = trunc <4 x i32> [[TMP1]] to <4 x i16>
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[VMOVN_I_I]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
|
// CHECK-NEXT: ret <8 x i16> [[SHUFFLE_I]]
|
|
//
|
|
int16x8_t test_vmovn_high_u32(int16x4_t a, int32x4_t b) {
|
|
return vmovn_high_u32(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vmovn_high_u64(
|
|
// CHECK-SAME: <2 x i32> noundef [[A:%.*]], <2 x i64> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
|
|
// CHECK-NEXT: [[VMOVN_I_I:%.*]] = trunc <2 x i64> [[TMP1]] to <2 x i32>
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> [[A]], <2 x i32> [[VMOVN_I_I]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
// CHECK-NEXT: ret <4 x i32> [[SHUFFLE_I]]
|
|
//
|
|
int32x4_t test_vmovn_high_u64(int32x2_t a, int64x2_t b) {
|
|
return vmovn_high_u64(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vqmovun_s16(
|
|
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVUN_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
|
|
// CHECK-NEXT: [[VQMOVUN_V1_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqxtun.v8i8(<8 x i16> [[VQMOVUN_V_I]])
|
|
// CHECK-NEXT: ret <8 x i8> [[VQMOVUN_V1_I]]
|
|
//
|
|
int8x8_t test_vqmovun_s16(int16x8_t a) {
|
|
return vqmovun_s16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i16> @test_vqmovun_s32(
|
|
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVUN_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
|
|
// CHECK-NEXT: [[VQMOVUN_V1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqxtun.v4i16(<4 x i32> [[VQMOVUN_V_I]])
|
|
// CHECK-NEXT: [[VQMOVUN_V2_I:%.*]] = bitcast <4 x i16> [[VQMOVUN_V1_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[VQMOVUN_V2_I]] to <4 x i16>
|
|
// CHECK-NEXT: ret <4 x i16> [[TMP1]]
|
|
//
|
|
int16x4_t test_vqmovun_s32(int32x4_t a) {
|
|
return vqmovun_s32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vqmovun_s64(
|
|
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVUN_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
|
|
// CHECK-NEXT: [[VQMOVUN_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqxtun.v2i32(<2 x i64> [[VQMOVUN_V_I]])
|
|
// CHECK-NEXT: [[VQMOVUN_V2_I:%.*]] = bitcast <2 x i32> [[VQMOVUN_V1_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[VQMOVUN_V2_I]] to <2 x i32>
|
|
// CHECK-NEXT: ret <2 x i32> [[TMP1]]
|
|
//
|
|
int32x2_t test_vqmovun_s64(int64x2_t a) {
|
|
return vqmovun_s64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vqmovun_high_s16(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVUN_V_I_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
|
|
// CHECK-NEXT: [[VQMOVUN_V1_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqxtun.v8i8(<8 x i16> [[VQMOVUN_V_I_I]])
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[VQMOVUN_V1_I_I]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
|
// CHECK-NEXT: ret <16 x i8> [[SHUFFLE_I]]
|
|
//
|
|
uint8x16_t test_vqmovun_high_s16(uint8x8_t a, int16x8_t b) {
|
|
return vqmovun_high_s16(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vqmovun_high_s32(
|
|
// CHECK-SAME: <4 x i16> noundef [[A:%.*]], <4 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVUN_V_I_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
|
|
// CHECK-NEXT: [[VQMOVUN_V1_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqxtun.v4i16(<4 x i32> [[VQMOVUN_V_I_I]])
|
|
// CHECK-NEXT: [[VQMOVUN_V2_I_I:%.*]] = bitcast <4 x i16> [[VQMOVUN_V1_I_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[VQMOVUN_V2_I_I]] to <4 x i16>
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
|
// CHECK-NEXT: ret <8 x i16> [[SHUFFLE_I]]
|
|
//
|
|
uint16x8_t test_vqmovun_high_s32(uint16x4_t a, int32x4_t b) {
|
|
return vqmovun_high_s32(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vqmovun_high_s64(
|
|
// CHECK-SAME: <2 x i32> noundef [[A:%.*]], <2 x i64> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVUN_V_I_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
|
|
// CHECK-NEXT: [[VQMOVUN_V1_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqxtun.v2i32(<2 x i64> [[VQMOVUN_V_I_I]])
|
|
// CHECK-NEXT: [[VQMOVUN_V2_I_I:%.*]] = bitcast <2 x i32> [[VQMOVUN_V1_I_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[VQMOVUN_V2_I_I]] to <2 x i32>
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> [[A]], <2 x i32> [[TMP1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
// CHECK-NEXT: ret <4 x i32> [[SHUFFLE_I]]
|
|
//
|
|
uint32x4_t test_vqmovun_high_s64(uint32x2_t a, int64x2_t b) {
|
|
return vqmovun_high_s64(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vqmovn_s16(
|
|
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVN_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
|
|
// CHECK-NEXT: [[VQMOVN_V1_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqxtn.v8i8(<8 x i16> [[VQMOVN_V_I]])
|
|
// CHECK-NEXT: ret <8 x i8> [[VQMOVN_V1_I]]
|
|
//
|
|
int8x8_t test_vqmovn_s16(int16x8_t a) {
|
|
return vqmovn_s16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i16> @test_vqmovn_s32(
|
|
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVN_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
|
|
// CHECK-NEXT: [[VQMOVN_V1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> [[VQMOVN_V_I]])
|
|
// CHECK-NEXT: [[VQMOVN_V2_I:%.*]] = bitcast <4 x i16> [[VQMOVN_V1_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[VQMOVN_V2_I]] to <4 x i16>
|
|
// CHECK-NEXT: ret <4 x i16> [[TMP1]]
|
|
//
|
|
int16x4_t test_vqmovn_s32(int32x4_t a) {
|
|
return vqmovn_s32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vqmovn_s64(
|
|
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVN_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
|
|
// CHECK-NEXT: [[VQMOVN_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqxtn.v2i32(<2 x i64> [[VQMOVN_V_I]])
|
|
// CHECK-NEXT: [[VQMOVN_V2_I:%.*]] = bitcast <2 x i32> [[VQMOVN_V1_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[VQMOVN_V2_I]] to <2 x i32>
|
|
// CHECK-NEXT: ret <2 x i32> [[TMP1]]
|
|
//
|
|
int32x2_t test_vqmovn_s64(int64x2_t a) {
|
|
return vqmovn_s64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vqmovn_high_s16(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVN_V_I_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
|
|
// CHECK-NEXT: [[VQMOVN_V1_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqxtn.v8i8(<8 x i16> [[VQMOVN_V_I_I]])
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[VQMOVN_V1_I_I]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
|
// CHECK-NEXT: ret <16 x i8> [[SHUFFLE_I]]
|
|
//
|
|
int8x16_t test_vqmovn_high_s16(int8x8_t a, int16x8_t b) {
|
|
return vqmovn_high_s16(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vqmovn_high_s32(
|
|
// CHECK-SAME: <4 x i16> noundef [[A:%.*]], <4 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVN_V_I_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
|
|
// CHECK-NEXT: [[VQMOVN_V1_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> [[VQMOVN_V_I_I]])
|
|
// CHECK-NEXT: [[VQMOVN_V2_I_I:%.*]] = bitcast <4 x i16> [[VQMOVN_V1_I_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[VQMOVN_V2_I_I]] to <4 x i16>
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
|
// CHECK-NEXT: ret <8 x i16> [[SHUFFLE_I]]
|
|
//
|
|
int16x8_t test_vqmovn_high_s32(int16x4_t a, int32x4_t b) {
|
|
return vqmovn_high_s32(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vqmovn_high_s64(
|
|
// CHECK-SAME: <2 x i32> noundef [[A:%.*]], <2 x i64> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVN_V_I_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
|
|
// CHECK-NEXT: [[VQMOVN_V1_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqxtn.v2i32(<2 x i64> [[VQMOVN_V_I_I]])
|
|
// CHECK-NEXT: [[VQMOVN_V2_I_I:%.*]] = bitcast <2 x i32> [[VQMOVN_V1_I_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[VQMOVN_V2_I_I]] to <2 x i32>
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> [[A]], <2 x i32> [[TMP1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
// CHECK-NEXT: ret <4 x i32> [[SHUFFLE_I]]
|
|
//
|
|
int32x4_t test_vqmovn_high_s64(int32x2_t a, int64x2_t b) {
|
|
return vqmovn_high_s64(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i8> @test_vqmovn_u16(
|
|
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVN_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
|
|
// CHECK-NEXT: [[VQMOVN_V1_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqxtn.v8i8(<8 x i16> [[VQMOVN_V_I]])
|
|
// CHECK-NEXT: ret <8 x i8> [[VQMOVN_V1_I]]
|
|
//
|
|
uint8x8_t test_vqmovn_u16(uint16x8_t a) {
|
|
return vqmovn_u16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i16> @test_vqmovn_u32(
|
|
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVN_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
|
|
// CHECK-NEXT: [[VQMOVN_V1_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32> [[VQMOVN_V_I]])
|
|
// CHECK-NEXT: [[VQMOVN_V2_I:%.*]] = bitcast <4 x i16> [[VQMOVN_V1_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[VQMOVN_V2_I]] to <4 x i16>
|
|
// CHECK-NEXT: ret <4 x i16> [[TMP1]]
|
|
//
|
|
uint16x4_t test_vqmovn_u32(uint32x4_t a) {
|
|
return vqmovn_u32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vqmovn_u64(
|
|
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVN_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
|
|
// CHECK-NEXT: [[VQMOVN_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqxtn.v2i32(<2 x i64> [[VQMOVN_V_I]])
|
|
// CHECK-NEXT: [[VQMOVN_V2_I:%.*]] = bitcast <2 x i32> [[VQMOVN_V1_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[VQMOVN_V2_I]] to <2 x i32>
|
|
// CHECK-NEXT: ret <2 x i32> [[TMP1]]
|
|
//
|
|
uint32x2_t test_vqmovn_u64(uint64x2_t a) {
|
|
return vqmovn_u64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <16 x i8> @test_vqmovn_high_u16(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVN_V_I_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
|
|
// CHECK-NEXT: [[VQMOVN_V1_I_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqxtn.v8i8(<8 x i16> [[VQMOVN_V_I_I]])
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> [[VQMOVN_V1_I_I]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
|
// CHECK-NEXT: ret <16 x i8> [[SHUFFLE_I]]
|
|
//
|
|
uint8x16_t test_vqmovn_high_u16(uint8x8_t a, uint16x8_t b) {
|
|
return vqmovn_high_u16(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vqmovn_high_u32(
|
|
// CHECK-SAME: <4 x i16> noundef [[A:%.*]], <4 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVN_V_I_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
|
|
// CHECK-NEXT: [[VQMOVN_V1_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32> [[VQMOVN_V_I_I]])
|
|
// CHECK-NEXT: [[VQMOVN_V2_I_I:%.*]] = bitcast <4 x i16> [[VQMOVN_V1_I_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[VQMOVN_V2_I_I]] to <4 x i16>
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> [[A]], <4 x i16> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
|
// CHECK-NEXT: ret <8 x i16> [[SHUFFLE_I]]
|
|
//
|
|
uint16x8_t test_vqmovn_high_u32(uint16x4_t a, uint32x4_t b) {
|
|
return vqmovn_high_u32(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vqmovn_high_u64(
|
|
// CHECK-SAME: <2 x i32> noundef [[A:%.*]], <2 x i64> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B]] to <16 x i8>
|
|
// CHECK-NEXT: [[VQMOVN_V_I_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
|
|
// CHECK-NEXT: [[VQMOVN_V1_I_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqxtn.v2i32(<2 x i64> [[VQMOVN_V_I_I]])
|
|
// CHECK-NEXT: [[VQMOVN_V2_I_I:%.*]] = bitcast <2 x i32> [[VQMOVN_V1_I_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[VQMOVN_V2_I_I]] to <2 x i32>
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> [[A]], <2 x i32> [[TMP1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
// CHECK-NEXT: ret <4 x i32> [[SHUFFLE_I]]
|
|
//
|
|
uint32x4_t test_vqmovn_high_u64(uint32x2_t a, uint64x2_t b) {
|
|
return vqmovn_high_u64(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vshll_n_s8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = sext <8 x i8> [[A]] to <8 x i16>
|
|
// CHECK-NEXT: [[VSHLL_N:%.*]] = shl <8 x i16> [[TMP0]], splat (i16 8)
|
|
// CHECK-NEXT: ret <8 x i16> [[VSHLL_N]]
|
|
//
|
|
int16x8_t test_vshll_n_s8(int8x8_t a) {
|
|
return vshll_n_s8(a, 8);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vshll_n_s16(
|
|
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = sext <4 x i16> [[TMP1]] to <4 x i32>
|
|
// CHECK-NEXT: [[VSHLL_N:%.*]] = shl <4 x i32> [[TMP2]], splat (i32 16)
|
|
// CHECK-NEXT: ret <4 x i32> [[VSHLL_N]]
|
|
//
|
|
int32x4_t test_vshll_n_s16(int16x4_t a) {
|
|
return vshll_n_s16(a, 16);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vshll_n_s32(
|
|
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = sext <2 x i32> [[TMP1]] to <2 x i64>
|
|
// CHECK-NEXT: [[VSHLL_N:%.*]] = shl <2 x i64> [[TMP2]], splat (i64 32)
|
|
// CHECK-NEXT: ret <2 x i64> [[VSHLL_N]]
|
|
//
|
|
int64x2_t test_vshll_n_s32(int32x2_t a) {
|
|
return vshll_n_s32(a, 32);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vshll_n_u8(
|
|
// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext <8 x i8> [[A]] to <8 x i16>
|
|
// CHECK-NEXT: [[VSHLL_N:%.*]] = shl <8 x i16> [[TMP0]], splat (i16 8)
|
|
// CHECK-NEXT: ret <8 x i16> [[VSHLL_N]]
|
|
//
|
|
uint16x8_t test_vshll_n_u8(uint8x8_t a) {
|
|
return vshll_n_u8(a, 8);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vshll_n_u16(
|
|
// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32>
|
|
// CHECK-NEXT: [[VSHLL_N:%.*]] = shl <4 x i32> [[TMP2]], splat (i32 16)
|
|
// CHECK-NEXT: ret <4 x i32> [[VSHLL_N]]
|
|
//
|
|
uint32x4_t test_vshll_n_u16(uint16x4_t a) {
|
|
return vshll_n_u16(a, 16);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vshll_n_u32(
|
|
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
|
|
// CHECK-NEXT: [[VSHLL_N:%.*]] = shl <2 x i64> [[TMP2]], splat (i64 32)
|
|
// CHECK-NEXT: ret <2 x i64> [[VSHLL_N]]
|
|
//
|
|
uint64x2_t test_vshll_n_u32(uint32x2_t a) {
|
|
return vshll_n_u32(a, 32);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vshll_high_n_s8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[A]], <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
|
// CHECK-NEXT: [[TMP0:%.*]] = sext <8 x i8> [[SHUFFLE_I]] to <8 x i16>
|
|
// CHECK-NEXT: [[VSHLL_N:%.*]] = shl <8 x i16> [[TMP0]], splat (i16 8)
|
|
// CHECK-NEXT: ret <8 x i16> [[VSHLL_N]]
|
|
//
|
|
int16x8_t test_vshll_high_n_s8(int8x16_t a) {
|
|
return vshll_high_n_s8(a, 8);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vshll_high_n_s16(
|
|
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> [[A]], <4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = sext <4 x i16> [[TMP1]] to <4 x i32>
|
|
// CHECK-NEXT: [[VSHLL_N:%.*]] = shl <4 x i32> [[TMP2]], splat (i32 16)
|
|
// CHECK-NEXT: ret <4 x i32> [[VSHLL_N]]
|
|
//
|
|
int32x4_t test_vshll_high_n_s16(int16x8_t a) {
|
|
return vshll_high_n_s16(a, 16);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vshll_high_n_s32(
|
|
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x i32> [[A]], <4 x i32> [[A]], <2 x i32> <i32 2, i32 3>
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = sext <2 x i32> [[TMP1]] to <2 x i64>
|
|
// CHECK-NEXT: [[VSHLL_N:%.*]] = shl <2 x i64> [[TMP2]], splat (i64 32)
|
|
// CHECK-NEXT: ret <2 x i64> [[VSHLL_N]]
|
|
//
|
|
int64x2_t test_vshll_high_n_s32(int32x4_t a) {
|
|
return vshll_high_n_s32(a, 32);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x i16> @test_vshll_high_n_u8(
|
|
// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> [[A]], <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
|
|
// CHECK-NEXT: [[TMP0:%.*]] = zext <8 x i8> [[SHUFFLE_I]] to <8 x i16>
|
|
// CHECK-NEXT: [[VSHLL_N:%.*]] = shl <8 x i16> [[TMP0]], splat (i16 8)
|
|
// CHECK-NEXT: ret <8 x i16> [[VSHLL_N]]
|
|
//
|
|
uint16x8_t test_vshll_high_n_u8(uint8x16_t a) {
|
|
return vshll_high_n_u8(a, 8);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vshll_high_n_u16(
|
|
// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> [[A]], <4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[SHUFFLE_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32>
|
|
// CHECK-NEXT: [[VSHLL_N:%.*]] = shl <4 x i32> [[TMP2]], splat (i32 16)
|
|
// CHECK-NEXT: ret <4 x i32> [[VSHLL_N]]
|
|
//
|
|
uint32x4_t test_vshll_high_n_u16(uint16x8_t a) {
|
|
return vshll_high_n_u16(a, 16);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vshll_high_n_u32(
|
|
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x i32> [[A]], <4 x i32> [[A]], <2 x i32> <i32 2, i32 3>
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[SHUFFLE_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
|
|
// CHECK-NEXT: [[VSHLL_N:%.*]] = shl <2 x i64> [[TMP2]], splat (i64 32)
|
|
// CHECK-NEXT: ret <2 x i64> [[VSHLL_N]]
|
|
//
|
|
uint64x2_t test_vshll_high_n_u32(uint32x4_t a) {
|
|
return vshll_high_n_u32(a, 32);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x half> @test_vcvt_f16_f32(
|
|
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[VCVT_F16_F32_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
|
|
// CHECK-NEXT: [[VCVT_F16_F321_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.vcvtfp2hf(<4 x float> [[VCVT_F16_F32_I]])
|
|
// CHECK-NEXT: [[VCVT_F16_F322_I:%.*]] = bitcast <4 x i16> [[VCVT_F16_F321_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[VCVT_F16_F322_I]] to <4 x i16>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i16> [[TMP2]] to <4 x half>
|
|
// CHECK-NEXT: ret <4 x half> [[TMP3]]
|
|
//
|
|
float16x4_t test_vcvt_f16_f32(float32x4_t a) {
|
|
return vcvt_f16_f32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <8 x half> @test_vcvt_high_f16_f32(
|
|
// CHECK-SAME: <4 x half> noundef [[A:%.*]], <4 x float> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[B]] to <4 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[VCVT_F16_F32_I_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
|
|
// CHECK-NEXT: [[VCVT_F16_F321_I_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.vcvtfp2hf(<4 x float> [[VCVT_F16_F32_I_I]])
|
|
// CHECK-NEXT: [[VCVT_F16_F322_I_I:%.*]] = bitcast <4 x i16> [[VCVT_F16_F321_I_I]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[VCVT_F16_F322_I_I]] to <4 x i16>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i16> [[TMP2]] to <4 x half>
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x half> [[A]], <4 x half> [[TMP3]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
|
|
// CHECK-NEXT: ret <8 x half> [[SHUFFLE_I]]
|
|
//
|
|
float16x8_t test_vcvt_high_f16_f32(float16x4_t a, float32x4_t b) {
|
|
return vcvt_high_f16_f32(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x float> @test_vcvt_f32_f64(
|
|
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[VCVT_I:%.*]] = fptrunc <2 x double> [[TMP2]] to <2 x float>
|
|
// CHECK-NEXT: ret <2 x float> [[VCVT_I]]
|
|
//
|
|
float32x2_t test_vcvt_f32_f64(float64x2_t a) {
|
|
return vcvt_f32_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x float> @test_vcvt_high_f32_f64(
|
|
// CHECK-SAME: <2 x float> noundef [[A:%.*]], <2 x double> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[B]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[VCVT_I_I:%.*]] = fptrunc <2 x double> [[TMP2]] to <2 x float>
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <2 x float> [[A]], <2 x float> [[VCVT_I_I]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
// CHECK-NEXT: ret <4 x float> [[SHUFFLE_I]]
|
|
//
|
|
float32x4_t test_vcvt_high_f32_f64(float32x2_t a, float64x2_t b) {
|
|
return vcvt_high_f32_f64(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x float> @test_vcvtx_f32_f64(
|
|
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[VCVTX_F32_V_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[VCVTX_F32_V1_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fcvtxn.v2f32.v2f64(<2 x double> [[VCVTX_F32_V_I]])
|
|
// CHECK-NEXT: ret <2 x float> [[VCVTX_F32_V1_I]]
|
|
//
|
|
float32x2_t test_vcvtx_f32_f64(float64x2_t a) {
|
|
return vcvtx_f32_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x float> @test_vcvtx_high_f32_f64(
|
|
// CHECK-SAME: <2 x float> noundef [[A:%.*]], <2 x double> noundef [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[B]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[VCVTX_F32_V_I_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[VCVTX_F32_V1_I_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fcvtxn.v2f32.v2f64(<2 x double> [[VCVTX_F32_V_I_I]])
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <2 x float> [[A]], <2 x float> [[VCVTX_F32_V1_I_I]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
// CHECK-NEXT: ret <4 x float> [[SHUFFLE_I]]
|
|
//
|
|
float32x4_t test_vcvtx_high_f32_f64(float32x2_t a, float64x2_t b) {
|
|
return vcvtx_high_f32_f64(a, b);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x float> @test_vcvt_f32_f16(
|
|
// CHECK-SAME: <4 x half> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half> [[A]] to <4 x i16>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: [[VCVT_F32_F16_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
|
|
// CHECK-NEXT: [[VCVT_F32_F161_I:%.*]] = call <4 x float> @llvm.aarch64.neon.vcvthf2fp(<4 x i16> [[VCVT_F32_F16_I]])
|
|
// CHECK-NEXT: [[VCVT_F32_F162_I:%.*]] = bitcast <4 x float> [[VCVT_F32_F161_I]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[VCVT_F32_F162_I]] to <4 x i32>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to <4 x float>
|
|
// CHECK-NEXT: ret <4 x float> [[TMP3]]
|
|
//
|
|
float32x4_t test_vcvt_f32_f16(float16x4_t a) {
|
|
return vcvt_f32_f16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x float> @test_vcvt_high_f32_f16(
|
|
// CHECK-SAME: <8 x half> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x half> [[A]], <8 x half> [[A]], <4 x i32> <i32 4, i32 5, i32 6, i32 7>
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half> [[SHUFFLE_I]] to <4 x i16>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: [[VCVT_F32_F16_I_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
|
|
// CHECK-NEXT: [[VCVT_F32_F161_I_I:%.*]] = call <4 x float> @llvm.aarch64.neon.vcvthf2fp(<4 x i16> [[VCVT_F32_F16_I_I]])
|
|
// CHECK-NEXT: [[VCVT_F32_F162_I_I:%.*]] = bitcast <4 x float> [[VCVT_F32_F161_I_I]] to <16 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[VCVT_F32_F162_I_I]] to <4 x i32>
|
|
// CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to <4 x float>
|
|
// CHECK-NEXT: ret <4 x float> [[TMP3]]
|
|
//
|
|
float32x4_t test_vcvt_high_f32_f16(float16x8_t a) {
|
|
return vcvt_high_f32_f16(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x double> @test_vcvt_f64_f32(
|
|
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
|
|
// CHECK-NEXT: [[VCVT_I:%.*]] = fpext <2 x float> [[TMP2]] to <2 x double>
|
|
// CHECK-NEXT: ret <2 x double> [[VCVT_I]]
|
|
//
|
|
float64x2_t test_vcvt_f64_f32(float32x2_t a) {
|
|
return vcvt_f64_f32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x double> @test_vcvt_high_f64_f32(
|
|
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <4 x float> [[A]], <4 x float> [[A]], <2 x i32> <i32 2, i32 3>
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[SHUFFLE_I]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
|
|
// CHECK-NEXT: [[VCVT_I_I:%.*]] = fpext <2 x float> [[TMP2]] to <2 x double>
|
|
// CHECK-NEXT: ret <2 x double> [[VCVT_I_I]]
|
|
//
|
|
float64x2_t test_vcvt_high_f64_f32(float32x4_t a) {
|
|
return vcvt_high_f64_f32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x double> @test_vrndnq_f64(
|
|
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[VRNDN_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[VRNDN1_I:%.*]] = call <2 x double> @llvm.roundeven.v2f64(<2 x double> [[VRNDN_I]])
|
|
// CHECK-NEXT: ret <2 x double> [[VRNDN1_I]]
|
|
//
|
|
float64x2_t test_vrndnq_f64(float64x2_t a) {
|
|
return vrndnq_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x double> @test_vrndaq_f64(
|
|
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[VRNDA_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[VRNDA1_I:%.*]] = call <2 x double> @llvm.round.v2f64(<2 x double> [[VRNDA_I]])
|
|
// CHECK-NEXT: ret <2 x double> [[VRNDA1_I]]
|
|
//
|
|
float64x2_t test_vrndaq_f64(float64x2_t a) {
|
|
return vrndaq_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x double> @test_vrndpq_f64(
|
|
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[VRNDP_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[VRNDP1_I:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[VRNDP_I]])
|
|
// CHECK-NEXT: ret <2 x double> [[VRNDP1_I]]
|
|
//
|
|
float64x2_t test_vrndpq_f64(float64x2_t a) {
|
|
return vrndpq_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x double> @test_vrndmq_f64(
|
|
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[VRNDM_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[VRNDM1_I:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[VRNDM_I]])
|
|
// CHECK-NEXT: ret <2 x double> [[VRNDM1_I]]
|
|
//
|
|
float64x2_t test_vrndmq_f64(float64x2_t a) {
|
|
return vrndmq_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x double> @test_vrndxq_f64(
|
|
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[VRNDX_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[VRNDX1_I:%.*]] = call <2 x double> @llvm.rint.v2f64(<2 x double> [[VRNDX_I]])
|
|
// CHECK-NEXT: ret <2 x double> [[VRNDX1_I]]
|
|
//
|
|
float64x2_t test_vrndxq_f64(float64x2_t a) {
|
|
return vrndxq_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x double> @test_vrndq_f64(
|
|
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[VRNDZ_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[VRNDZ1_I:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[VRNDZ_I]])
|
|
// CHECK-NEXT: ret <2 x double> [[VRNDZ1_I]]
|
|
//
|
|
float64x2_t test_vrndq_f64(float64x2_t a) {
|
|
return vrndq_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x double> @test_vrndiq_f64(
|
|
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[VRNDIQ_V_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[VRNDIQ_V1_I:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[VRNDIQ_V_I]])
|
|
// CHECK-NEXT: ret <2 x double> [[VRNDIQ_V1_I]]
|
|
//
|
|
float64x2_t test_vrndiq_f64(float64x2_t a) {
|
|
return vrndiq_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vcvt_s32_f32(
|
|
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: [[VCVTZ_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
|
|
// CHECK-NEXT: [[VCVTZ1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtzs.v2i32.v2f32(<2 x float> [[VCVTZ_I]])
|
|
// CHECK-NEXT: ret <2 x i32> [[VCVTZ1_I]]
|
|
//
|
|
int32x2_t test_vcvt_s32_f32(float32x2_t a) {
|
|
return vcvt_s32_f32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vcvtq_s32_f32(
|
|
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[VCVTZ_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
|
|
// CHECK-NEXT: [[VCVTZ1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtzs.v4i32.v4f32(<4 x float> [[VCVTZ_I]])
|
|
// CHECK-NEXT: ret <4 x i32> [[VCVTZ1_I]]
|
|
//
|
|
int32x4_t test_vcvtq_s32_f32(float32x4_t a) {
|
|
return vcvtq_s32_f32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vcvtq_s64_f64(
|
|
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[VCVTZ_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[VCVTZ1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtzs.v2i64.v2f64(<2 x double> [[VCVTZ_I]])
|
|
// CHECK-NEXT: ret <2 x i64> [[VCVTZ1_I]]
|
|
//
|
|
int64x2_t test_vcvtq_s64_f64(float64x2_t a) {
|
|
return vcvtq_s64_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vcvt_u32_f32(
|
|
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
|
|
// CHECK-NEXT: [[VCVTZ_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
|
|
// CHECK-NEXT: [[VCVTZ1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtzu.v2i32.v2f32(<2 x float> [[VCVTZ_I]])
|
|
// CHECK-NEXT: ret <2 x i32> [[VCVTZ1_I]]
|
|
//
|
|
uint32x2_t test_vcvt_u32_f32(float32x2_t a) {
|
|
return vcvt_u32_f32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <4 x i32> @test_vcvtq_u32_f32(
|
|
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[VCVTZ_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
|
|
// CHECK-NEXT: [[VCVTZ1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtzu.v4i32.v4f32(<4 x float> [[VCVTZ_I]])
|
|
// CHECK-NEXT: ret <4 x i32> [[VCVTZ1_I]]
|
|
//
|
|
uint32x4_t test_vcvtq_u32_f32(float32x4_t a) {
|
|
return vcvtq_u32_f32(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i64> @test_vcvtq_u64_f64(
|
|
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
|
|
// CHECK-NEXT: [[VCVTZ_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
|
|
// CHECK-NEXT: [[VCVTZ1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtzu.v2i64.v2f64(<2 x double> [[VCVTZ_I]])
|
|
// CHECK-NEXT: ret <2 x i64> [[VCVTZ1_I]]
|
|
//
|
|
uint64x2_t test_vcvtq_u64_f64(float64x2_t a) {
|
|
return vcvtq_u64_f64(a);
|
|
}
|
|
|
|
// CHECK-LABEL: define dso_local <2 x i32> @test_vcvtn_s32_f32(
|
|
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK-NEXT: [[ENTRY:.*:]]
|
|
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// CHECK-NEXT: [[VCVTN_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
// CHECK-NEXT: [[VCVTN1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtns.v2i32.v2f32(<2 x float> [[VCVTN_I]])
// CHECK-NEXT: ret <2 x i32> [[VCVTN1_I]]
//
int32x2_t test_vcvtn_s32_f32(float32x2_t a) {
  return vcvtn_s32_f32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vcvtnq_s32_f32(
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VCVTN_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK-NEXT: [[VCVTN1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtns.v4i32.v4f32(<4 x float> [[VCVTN_I]])
// CHECK-NEXT: ret <4 x i32> [[VCVTN1_I]]
//
int32x4_t test_vcvtnq_s32_f32(float32x4_t a) {
  return vcvtnq_s32_f32(a);
}

// CHECK-LABEL: define dso_local <2 x i64> @test_vcvtnq_s64_f64(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VCVTN_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT: [[VCVTN1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtns.v2i64.v2f64(<2 x double> [[VCVTN_I]])
// CHECK-NEXT: ret <2 x i64> [[VCVTN1_I]]
//
int64x2_t test_vcvtnq_s64_f64(float64x2_t a) {
  return vcvtnq_s64_f64(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vcvtn_u32_f32(
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// CHECK-NEXT: [[VCVTN_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
// CHECK-NEXT: [[VCVTN1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtnu.v2i32.v2f32(<2 x float> [[VCVTN_I]])
// CHECK-NEXT: ret <2 x i32> [[VCVTN1_I]]
//
uint32x2_t test_vcvtn_u32_f32(float32x2_t a) {
  return vcvtn_u32_f32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vcvtnq_u32_f32(
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VCVTN_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK-NEXT: [[VCVTN1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtnu.v4i32.v4f32(<4 x float> [[VCVTN_I]])
// CHECK-NEXT: ret <4 x i32> [[VCVTN1_I]]
//
uint32x4_t test_vcvtnq_u32_f32(float32x4_t a) {
  return vcvtnq_u32_f32(a);
}

// CHECK-LABEL: define dso_local <2 x i64> @test_vcvtnq_u64_f64(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VCVTN_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT: [[VCVTN1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtnu.v2i64.v2f64(<2 x double> [[VCVTN_I]])
// CHECK-NEXT: ret <2 x i64> [[VCVTN1_I]]
//
uint64x2_t test_vcvtnq_u64_f64(float64x2_t a) {
  return vcvtnq_u64_f64(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vcvtp_s32_f32(
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// CHECK-NEXT: [[VCVTP_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
// CHECK-NEXT: [[VCVTP1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtps.v2i32.v2f32(<2 x float> [[VCVTP_I]])
// CHECK-NEXT: ret <2 x i32> [[VCVTP1_I]]
//
int32x2_t test_vcvtp_s32_f32(float32x2_t a) {
  return vcvtp_s32_f32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vcvtpq_s32_f32(
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VCVTP_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK-NEXT: [[VCVTP1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtps.v4i32.v4f32(<4 x float> [[VCVTP_I]])
// CHECK-NEXT: ret <4 x i32> [[VCVTP1_I]]
//
int32x4_t test_vcvtpq_s32_f32(float32x4_t a) {
  return vcvtpq_s32_f32(a);
}

// CHECK-LABEL: define dso_local <2 x i64> @test_vcvtpq_s64_f64(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VCVTP_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT: [[VCVTP1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double> [[VCVTP_I]])
// CHECK-NEXT: ret <2 x i64> [[VCVTP1_I]]
//
int64x2_t test_vcvtpq_s64_f64(float64x2_t a) {
  return vcvtpq_s64_f64(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vcvtp_u32_f32(
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// CHECK-NEXT: [[VCVTP_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
// CHECK-NEXT: [[VCVTP1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtpu.v2i32.v2f32(<2 x float> [[VCVTP_I]])
// CHECK-NEXT: ret <2 x i32> [[VCVTP1_I]]
//
uint32x2_t test_vcvtp_u32_f32(float32x2_t a) {
  return vcvtp_u32_f32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vcvtpq_u32_f32(
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VCVTP_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK-NEXT: [[VCVTP1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtpu.v4i32.v4f32(<4 x float> [[VCVTP_I]])
// CHECK-NEXT: ret <4 x i32> [[VCVTP1_I]]
//
uint32x4_t test_vcvtpq_u32_f32(float32x4_t a) {
  return vcvtpq_u32_f32(a);
}

// CHECK-LABEL: define dso_local <2 x i64> @test_vcvtpq_u64_f64(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VCVTP_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT: [[VCVTP1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtpu.v2i64.v2f64(<2 x double> [[VCVTP_I]])
// CHECK-NEXT: ret <2 x i64> [[VCVTP1_I]]
//
uint64x2_t test_vcvtpq_u64_f64(float64x2_t a) {
  return vcvtpq_u64_f64(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vcvtm_s32_f32(
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// CHECK-NEXT: [[VCVTM_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
// CHECK-NEXT: [[VCVTM1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtms.v2i32.v2f32(<2 x float> [[VCVTM_I]])
// CHECK-NEXT: ret <2 x i32> [[VCVTM1_I]]
//
int32x2_t test_vcvtm_s32_f32(float32x2_t a) {
  return vcvtm_s32_f32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vcvtmq_s32_f32(
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VCVTM_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK-NEXT: [[VCVTM1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtms.v4i32.v4f32(<4 x float> [[VCVTM_I]])
// CHECK-NEXT: ret <4 x i32> [[VCVTM1_I]]
//
int32x4_t test_vcvtmq_s32_f32(float32x4_t a) {
  return vcvtmq_s32_f32(a);
}

// CHECK-LABEL: define dso_local <2 x i64> @test_vcvtmq_s64_f64(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VCVTM_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT: [[VCVTM1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double> [[VCVTM_I]])
// CHECK-NEXT: ret <2 x i64> [[VCVTM1_I]]
//
int64x2_t test_vcvtmq_s64_f64(float64x2_t a) {
  return vcvtmq_s64_f64(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vcvtm_u32_f32(
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// CHECK-NEXT: [[VCVTM_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
// CHECK-NEXT: [[VCVTM1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtmu.v2i32.v2f32(<2 x float> [[VCVTM_I]])
// CHECK-NEXT: ret <2 x i32> [[VCVTM1_I]]
//
uint32x2_t test_vcvtm_u32_f32(float32x2_t a) {
  return vcvtm_u32_f32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vcvtmq_u32_f32(
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VCVTM_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK-NEXT: [[VCVTM1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtmu.v4i32.v4f32(<4 x float> [[VCVTM_I]])
// CHECK-NEXT: ret <4 x i32> [[VCVTM1_I]]
//
uint32x4_t test_vcvtmq_u32_f32(float32x4_t a) {
  return vcvtmq_u32_f32(a);
}

// CHECK-LABEL: define dso_local <2 x i64> @test_vcvtmq_u64_f64(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VCVTM_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT: [[VCVTM1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double> [[VCVTM_I]])
// CHECK-NEXT: ret <2 x i64> [[VCVTM1_I]]
//
uint64x2_t test_vcvtmq_u64_f64(float64x2_t a) {
  return vcvtmq_u64_f64(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vcvta_s32_f32(
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// CHECK-NEXT: [[VCVTA_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
// CHECK-NEXT: [[VCVTA1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtas.v2i32.v2f32(<2 x float> [[VCVTA_I]])
// CHECK-NEXT: ret <2 x i32> [[VCVTA1_I]]
//
int32x2_t test_vcvta_s32_f32(float32x2_t a) {
  return vcvta_s32_f32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vcvtaq_s32_f32(
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VCVTA_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK-NEXT: [[VCVTA1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtas.v4i32.v4f32(<4 x float> [[VCVTA_I]])
// CHECK-NEXT: ret <4 x i32> [[VCVTA1_I]]
//
int32x4_t test_vcvtaq_s32_f32(float32x4_t a) {
  return vcvtaq_s32_f32(a);
}

// CHECK-LABEL: define dso_local <2 x i64> @test_vcvtaq_s64_f64(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VCVTA_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT: [[VCVTA1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double> [[VCVTA_I]])
// CHECK-NEXT: ret <2 x i64> [[VCVTA1_I]]
//
int64x2_t test_vcvtaq_s64_f64(float64x2_t a) {
  return vcvtaq_s64_f64(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vcvta_u32_f32(
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// CHECK-NEXT: [[VCVTA_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
// CHECK-NEXT: [[VCVTA1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.fcvtau.v2i32.v2f32(<2 x float> [[VCVTA_I]])
// CHECK-NEXT: ret <2 x i32> [[VCVTA1_I]]
//
uint32x2_t test_vcvta_u32_f32(float32x2_t a) {
  return vcvta_u32_f32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vcvtaq_u32_f32(
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VCVTA_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK-NEXT: [[VCVTA1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.fcvtau.v4i32.v4f32(<4 x float> [[VCVTA_I]])
// CHECK-NEXT: ret <4 x i32> [[VCVTA1_I]]
//
uint32x4_t test_vcvtaq_u32_f32(float32x4_t a) {
  return vcvtaq_u32_f32(a);
}

// CHECK-LABEL: define dso_local <2 x i64> @test_vcvtaq_u64_f64(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VCVTA_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT: [[VCVTA1_I:%.*]] = call <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double> [[VCVTA_I]])
// CHECK-NEXT: ret <2 x i64> [[VCVTA1_I]]
//
uint64x2_t test_vcvtaq_u64_f64(float64x2_t a) {
  return vcvtaq_u64_f64(a);
}

// CHECK-LABEL: define dso_local <2 x float> @test_vrsqrte_f32(
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// CHECK-NEXT: [[VRSQRTE_V_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
// CHECK-NEXT: [[VRSQRTE_V1_I:%.*]] = call <2 x float> @llvm.aarch64.neon.frsqrte.v2f32(<2 x float> [[VRSQRTE_V_I]])
// CHECK-NEXT: ret <2 x float> [[VRSQRTE_V1_I]]
//
float32x2_t test_vrsqrte_f32(float32x2_t a) {
  return vrsqrte_f32(a);
}

// CHECK-LABEL: define dso_local <4 x float> @test_vrsqrteq_f32(
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VRSQRTEQ_V_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK-NEXT: [[VRSQRTEQ_V1_I:%.*]] = call <4 x float> @llvm.aarch64.neon.frsqrte.v4f32(<4 x float> [[VRSQRTEQ_V_I]])
// CHECK-NEXT: ret <4 x float> [[VRSQRTEQ_V1_I]]
//
float32x4_t test_vrsqrteq_f32(float32x4_t a) {
  return vrsqrteq_f32(a);
}

// CHECK-LABEL: define dso_local <2 x double> @test_vrsqrteq_f64(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VRSQRTEQ_V_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT: [[VRSQRTEQ_V1_I:%.*]] = call <2 x double> @llvm.aarch64.neon.frsqrte.v2f64(<2 x double> [[VRSQRTEQ_V_I]])
// CHECK-NEXT: ret <2 x double> [[VRSQRTEQ_V1_I]]
//
float64x2_t test_vrsqrteq_f64(float64x2_t a) {
  return vrsqrteq_f64(a);
}

// CHECK-LABEL: define dso_local <2 x float> @test_vrecpe_f32(
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// CHECK-NEXT: [[VRECPE_V_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
// CHECK-NEXT: [[VRECPE_V1_I:%.*]] = call <2 x float> @llvm.aarch64.neon.frecpe.v2f32(<2 x float> [[VRECPE_V_I]])
// CHECK-NEXT: ret <2 x float> [[VRECPE_V1_I]]
//
float32x2_t test_vrecpe_f32(float32x2_t a) {
  return vrecpe_f32(a);
}

// CHECK-LABEL: define dso_local <4 x float> @test_vrecpeq_f32(
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VRECPEQ_V_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK-NEXT: [[VRECPEQ_V1_I:%.*]] = call <4 x float> @llvm.aarch64.neon.frecpe.v4f32(<4 x float> [[VRECPEQ_V_I]])
// CHECK-NEXT: ret <4 x float> [[VRECPEQ_V1_I]]
//
float32x4_t test_vrecpeq_f32(float32x4_t a) {
  return vrecpeq_f32(a);
}

// CHECK-LABEL: define dso_local <2 x double> @test_vrecpeq_f64(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[VRECPEQ_V_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT: [[VRECPEQ_V1_I:%.*]] = call <2 x double> @llvm.aarch64.neon.frecpe.v2f64(<2 x double> [[VRECPEQ_V_I]])
// CHECK-NEXT: ret <2 x double> [[VRECPEQ_V1_I]]
//
float64x2_t test_vrecpeq_f64(float64x2_t a) {
  return vrecpeq_f64(a);
}

// CHECK-LABEL: define dso_local <2 x i32> @test_vrecpe_u32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
// CHECK-NEXT: [[VRECPE_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK-NEXT: [[VRECPE_V1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.urecpe.v2i32(<2 x i32> [[VRECPE_V_I]])
// CHECK-NEXT: ret <2 x i32> [[VRECPE_V1_I]]
//
uint32x2_t test_vrecpe_u32(uint32x2_t a) {
  return vrecpe_u32(a);
}

// CHECK-LABEL: define dso_local <4 x i32> @test_vrecpeq_u32(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
// CHECK-NEXT: [[VRECPEQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-NEXT: [[VRECPEQ_V1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.urecpe.v4i32(<4 x i32> [[VRECPEQ_V_I]])
// CHECK-NEXT: ret <4 x i32> [[VRECPEQ_V1_I]]
//
uint32x4_t test_vrecpeq_u32(uint32x4_t a) {
  return vrecpeq_u32(a);
}

// CHECK-LABEL: define dso_local <2 x float> @test_vsqrt_f32(
// CHECK-SAME: <2 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
// CHECK-NEXT: [[VSQRT_I:%.*]] = call <2 x float> @llvm.sqrt.v2f32(<2 x float> [[TMP2]])
// CHECK-NEXT: ret <2 x float> [[VSQRT_I]]
//
float32x2_t test_vsqrt_f32(float32x2_t a) {
  return vsqrt_f32(a);
}

// CHECK-LABEL: define dso_local <4 x float> @test_vsqrtq_f32(
// CHECK-SAME: <4 x float> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK-NEXT: [[VSQRT_I:%.*]] = call <4 x float> @llvm.sqrt.v4f32(<4 x float> [[TMP2]])
// CHECK-NEXT: ret <4 x float> [[VSQRT_I]]
//
float32x4_t test_vsqrtq_f32(float32x4_t a) {
  return vsqrtq_f32(a);
}

// CHECK-LABEL: define dso_local <2 x double> @test_vsqrtq_f64(
// CHECK-SAME: <2 x double> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK-NEXT: [[VSQRT_I:%.*]] = call <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP2]])
// CHECK-NEXT: ret <2 x double> [[VSQRT_I]]
//
float64x2_t test_vsqrtq_f64(float64x2_t a) {
  return vsqrtq_f64(a);
}

// CHECK-LABEL: define dso_local <2 x float> @test_vcvt_f32_s32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK-NEXT: [[VCVT_I:%.*]] = sitofp <2 x i32> [[TMP1]] to <2 x float>
// CHECK-NEXT: ret <2 x float> [[VCVT_I]]
//
float32x2_t test_vcvt_f32_s32(int32x2_t a) {
  return vcvt_f32_s32(a);
}

// CHECK-LABEL: define dso_local <2 x float> @test_vcvt_f32_u32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK-NEXT: [[VCVT_I:%.*]] = uitofp <2 x i32> [[TMP1]] to <2 x float>
// CHECK-NEXT: ret <2 x float> [[VCVT_I]]
//
float32x2_t test_vcvt_f32_u32(uint32x2_t a) {
  return vcvt_f32_u32(a);
}

// CHECK-LABEL: define dso_local <4 x float> @test_vcvtq_f32_s32(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-NEXT: [[VCVT_I:%.*]] = sitofp <4 x i32> [[TMP1]] to <4 x float>
// CHECK-NEXT: ret <4 x float> [[VCVT_I]]
//
float32x4_t test_vcvtq_f32_s32(int32x4_t a) {
  return vcvtq_f32_s32(a);
}

// CHECK-LABEL: define dso_local <4 x float> @test_vcvtq_f32_u32(
// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-NEXT: [[VCVT_I:%.*]] = uitofp <4 x i32> [[TMP1]] to <4 x float>
// CHECK-NEXT: ret <4 x float> [[VCVT_I]]
//
float32x4_t test_vcvtq_f32_u32(uint32x4_t a) {
  return vcvtq_f32_u32(a);
}

// CHECK-LABEL: define dso_local <2 x double> @test_vcvtq_f64_s64(
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK-NEXT: [[VCVT_I:%.*]] = sitofp <2 x i64> [[TMP1]] to <2 x double>
// CHECK-NEXT: ret <2 x double> [[VCVT_I]]
//
float64x2_t test_vcvtq_f64_s64(int64x2_t a) {
  return vcvtq_f64_s64(a);
}

// CHECK-LABEL: define dso_local <2 x double> @test_vcvtq_f64_u64(
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK-NEXT: [[VCVT_I:%.*]] = uitofp <2 x i64> [[TMP1]] to <2 x double>
// CHECK-NEXT: ret <2 x double> [[VCVT_I]]
//
float64x2_t test_vcvtq_f64_u64(uint64x2_t a) {
  return vcvtq_f64_u64(a);
}