
WG14 adopted the _ExtInt feature from Clang for C23, but renamed the type to _BitInt. This patch does the vast majority of the work to rename _ExtInt to _BitInt, which accounts for most of its size. The new type is exposed in older C modes and in all C++ modes as a conforming extension. However, there are functional changes worth calling out:

* Deprecates _ExtInt with a fix-it to help users migrate to _BitInt.
* Updates the mangling for the type.
* Updates the documentation and adds a release note so users know what is changing.
* Adds new diagnostics for use of _BitInt to call out when it's used as a Clang extension or as a pre-C23 compatibility concern.
* Adds new tests for the new diagnostic behaviors.

I want to call out the ABI break specifically. We do not believe this break will cause a significant imposition for early adopters of the feature, so it is being done as a full break. If it turns out there are critical uses where recompilation is not an option for some reason, we can consider using ABI tags to ease the transition.
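For context, a minimal sketch of what the spelling change looks like in user code (the width 31 is only illustrative, chosen to match the tests below; this snippet is not part of the patch):

    // C23 spelling: a bit-precise integer type with exactly 31 bits.
    _BitInt(31) a = 100;
    unsigned _BitInt(31) b = 100u;

    // Old Clang spelling, now deprecated; per this patch, Clang warns and
    // offers a fix-it suggesting the _BitInt spelling instead.
    _ExtInt(31) c = 100;
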
// Test CodeGen for Security Check Overflow Builtins.
// rdar://13421498

// RUN: %clang_cc1 -triple "i686-unknown-unknown" -emit-llvm -x c %s -o - | FileCheck -DLONG_TYPE=i32 -DLONG_MAX=2147483647 %s
// RUN: %clang_cc1 -triple "x86_64-unknown-unknown" -emit-llvm -x c %s -o - | FileCheck -DLONG_TYPE=i64 -DLONG_MAX=9223372036854775807 %s
// RUN: %clang_cc1 -triple "x86_64-mingw32" -emit-llvm -x c %s -o - | FileCheck -DLONG_TYPE=i32 -DLONG_MAX=2147483647 %s

extern unsigned UnsignedErrorCode;
extern unsigned long UnsignedLongErrorCode;
extern unsigned long long UnsignedLongLongErrorCode;
extern int IntErrorCode;
extern long LongErrorCode;
extern long long LongLongErrorCode;
void overflowed(void);

unsigned test_add_overflow_uint_uint_uint(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_uint_uint_uint
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store i32 [[Q]], i32*
  // CHECK: br i1 [[C]]
  unsigned r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_add_overflow_int_int_int(int x, int y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_int_int_int
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK: store i32 [[Q]], i32*
  // CHECK: br i1 [[C]]
  int r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_add_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_xint31_xint31_xint31({{.+}})
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i31, i1 } @llvm.sadd.with.overflow.i31(i31 %{{.+}}, i31 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i31, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i31, i1 } [[S]], 0
  // CHECK: store i31 [[Q]], i31*
  // CHECK: br i1 [[C]]
  _BitInt(31) r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

unsigned test_sub_overflow_uint_uint_uint(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_sub_overflow_uint_uint_uint
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store i32 [[Q]], i32*
  // CHECK: br i1 [[C]]
  unsigned r;
  if (__builtin_sub_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_sub_overflow_int_int_int(int x, int y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_sub_overflow_int_int_int
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK: store i32 [[Q]], i32*
  // CHECK: br i1 [[C]]
  int r;
  if (__builtin_sub_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_sub_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_sub_overflow_xint31_xint31_xint31({{.+}})
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i31, i1 } @llvm.ssub.with.overflow.i31(i31 %{{.+}}, i31 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i31, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i31, i1 } [[S]], 0
  // CHECK: store i31 [[Q]], i31*
  // CHECK: br i1 [[C]]
  _BitInt(31) r;
  if (__builtin_sub_overflow(x, y, &r))
    overflowed();
  return r;
}

unsigned test_mul_overflow_uint_uint_uint(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_uint_uint_uint
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store i32 [[Q]], i32*
  // CHECK: br i1 [[C]]
  unsigned r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_mul_overflow_uint_uint_int(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_uint_uint_int
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: [[C1:%.+]] = icmp ugt i32 [[Q]], 2147483647
  // CHECK: [[C2:%.+]] = or i1 [[C]], [[C1]]
  // CHECK: store i32 [[Q]], i32*
  // CHECK: br i1 [[C2]]
  int r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_mul_overflow_uint_uint_int_volatile(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_uint_uint_int_volatile
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: [[C1:%.+]] = icmp ugt i32 [[Q]], 2147483647
  // CHECK: [[C2:%.+]] = or i1 [[C]], [[C1]]
  // CHECK: store volatile i32 [[Q]], i32*
  // CHECK: br i1 [[C2]]
  volatile int r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

long test_mul_overflow_ulong_ulong_long(unsigned long x, unsigned long y) {
  // CHECK-LABEL: @test_mul_overflow_ulong_ulong_long
  // CHECK: [[S:%.+]] = call { [[LONG_TYPE]], i1 } @llvm.umul.with.overflow.[[LONG_TYPE]]([[LONG_TYPE]] %{{.+}}, [[LONG_TYPE]] %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { [[LONG_TYPE]], i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { [[LONG_TYPE]], i1 } [[S]], 1
  // CHECK: [[C1:%.+]] = icmp ugt [[LONG_TYPE]] [[Q]], [[LONG_MAX]]
  // CHECK: [[C2:%.+]] = or i1 [[C]], [[C1]]
  // LONG64: store [[LONG_TYPE]] [[Q]], [[LONG_TYPE]]*
  // LONG64: br i1 [[C2]]
  long r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_mul_overflow_int_int_int(int x, int y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_int_int_int
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK: store i32 [[Q]], i32*
  // CHECK: br i1 [[C]]
  int r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_mul_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_xint31_xint31_xint31({{.+}})
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i31, i1 } @llvm.smul.with.overflow.i31(i31 %{{.+}}, i31 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i31, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i31, i1 } [[S]], 0
  // CHECK: store i31 [[Q]], i31*
  // CHECK: br i1 [[C]]
  _BitInt(31) r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_mul_overflow_xint127_xint127_xint127(_BitInt(127) x, _BitInt(127) y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_xint127_xint127_xint127({{.+}})
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i127, i1 } @llvm.smul.with.overflow.i127(i127 %{{.+}}, i127 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i127, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i127, i1 } [[S]], 0
  // CHECK: store i127 [[Q]], i127*
  // CHECK: br i1 [[C]]
  _BitInt(127) r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_mul_overflow_xint128_xint128_xint128(_BitInt(128) x, _BitInt(128) y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_xint128_xint128_xint128({{.+}})
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %{{.+}}, i128 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i128, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i128, i1 } [[S]], 0
  // CHECK: store i128 [[Q]], i128*
  // CHECK: br i1 [[C]]
  _BitInt(128) r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_add_overflow_uint_int_int(unsigned x, int y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_uint_int_int
  // CHECK: [[XE:%.+]] = zext i32 %{{.+}} to i33
  // CHECK: [[YE:%.+]] = sext i32 %{{.+}} to i33
  // CHECK: [[S:%.+]] = call { i33, i1 } @llvm.sadd.with.overflow.i33(i33 [[XE]], i33 [[YE]])
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i33, i1 } [[S]], 0
  // CHECK-DAG: [[C1:%.+]] = extractvalue { i33, i1 } [[S]], 1
  // CHECK: [[QT:%.+]] = trunc i33 [[Q]] to i32
  // CHECK: [[QTE:%.+]] = sext i32 [[QT]] to i33
  // CHECK: [[C2:%.+]] = icmp ne i33 [[Q]], [[QTE]]
  // CHECK: [[C3:%.+]] = or i1 [[C1]], [[C2]]
  // CHECK: store i32 [[QT]], i32*
  // CHECK: br i1 [[C3]]
  int r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

_Bool test_add_overflow_uint_uint_bool(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{.*}} i1 @test_add_overflow_uint_uint_bool
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C1:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: [[QT:%.+]] = trunc i32 [[Q]] to i1
  // CHECK: [[QTE:%.+]] = zext i1 [[QT]] to i32
  // CHECK: [[C2:%.+]] = icmp ne i32 [[Q]], [[QTE]]
  // CHECK: [[C3:%.+]] = or i1 [[C1]], [[C2]]
  // CHECK: [[QT2:%.+]] = zext i1 [[QT]] to i8
  // CHECK: store i8 [[QT2]], i8*
  // CHECK: br i1 [[C3]]
  _Bool r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

unsigned test_add_overflow_bool_bool_uint(_Bool x, _Bool y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_bool_bool_uint
  // CHECK: [[XE:%.+]] = zext i1 %{{.+}} to i32
  // CHECK: [[YE:%.+]] = zext i1 %{{.+}} to i32
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[XE]], i32 [[YE]])
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store i32 [[Q]], i32*
  // CHECK: br i1 [[C]]
  unsigned r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

_Bool test_add_overflow_bool_bool_bool(_Bool x, _Bool y) {
  // CHECK-LABEL: define {{.*}} i1 @test_add_overflow_bool_bool_bool
  // CHECK: [[S:%.+]] = call { i1, i1 } @llvm.uadd.with.overflow.i1(i1 %{{.+}}, i1 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i1, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i1, i1 } [[S]], 1
  // CHECK: [[QT2:%.+]] = zext i1 [[Q]] to i8
  // CHECK: store i8 [[QT2]], i8*
  // CHECK: br i1 [[C]]
  _Bool r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_add_overflow_volatile(int x, int y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_volatile
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store volatile i32 [[Q]], i32*
  // CHECK: br i1 [[C]]
  volatile int result;
  if (__builtin_add_overflow(x, y, &result))
    overflowed();
  return result;
}

unsigned test_uadd_overflow(unsigned x, unsigned y) {
  // CHECK: @test_uadd_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  unsigned result;
  if (__builtin_uadd_overflow(x, y, &result))
    return UnsignedErrorCode;
  return result;
}

unsigned long test_uaddl_overflow(unsigned long x, unsigned long y) {
  // CHECK: @test_uaddl_overflow([[UL:i32|i64]] %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.uadd.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  unsigned long result;
  if (__builtin_uaddl_overflow(x, y, &result))
    return UnsignedLongErrorCode;
  return result;
}

unsigned long long test_uaddll_overflow(unsigned long long x, unsigned long long y) {
  // CHECK: @test_uaddll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  unsigned long long result;
  if (__builtin_uaddll_overflow(x, y, &result))
    return UnsignedLongLongErrorCode;
  return result;
}

unsigned test_usub_overflow(unsigned x, unsigned y) {
  // CHECK: @test_usub_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  unsigned result;
  if (__builtin_usub_overflow(x, y, &result))
    return UnsignedErrorCode;
  return result;
}

unsigned long test_usubl_overflow(unsigned long x, unsigned long y) {
  // CHECK: @test_usubl_overflow([[UL:i32|i64]] %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.usub.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  unsigned long result;
  if (__builtin_usubl_overflow(x, y, &result))
    return UnsignedLongErrorCode;
  return result;
}

unsigned long long test_usubll_overflow(unsigned long long x, unsigned long long y) {
  // CHECK: @test_usubll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  unsigned long long result;
  if (__builtin_usubll_overflow(x, y, &result))
    return UnsignedLongLongErrorCode;
  return result;
}

unsigned test_umul_overflow(unsigned x, unsigned y) {
  // CHECK: @test_umul_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  unsigned result;
  if (__builtin_umul_overflow(x, y, &result))
    return UnsignedErrorCode;
  return result;
}

unsigned long test_umull_overflow(unsigned long x, unsigned long y) {
  // CHECK: @test_umull_overflow([[UL:i32|i64]] %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.umul.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  unsigned long result;
  if (__builtin_umull_overflow(x, y, &result))
    return UnsignedLongErrorCode;
  return result;
}

unsigned long long test_umulll_overflow(unsigned long long x, unsigned long long y) {
  // CHECK: @test_umulll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  unsigned long long result;
  if (__builtin_umulll_overflow(x, y, &result))
    return UnsignedLongLongErrorCode;
  return result;
}

int test_sadd_overflow(int x, int y) {
  // CHECK: @test_sadd_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  int result;
  if (__builtin_sadd_overflow(x, y, &result))
    return IntErrorCode;
  return result;
}

long test_saddl_overflow(long x, long y) {
  // CHECK: @test_saddl_overflow([[UL:i32|i64]] %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.sadd.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  long result;
  if (__builtin_saddl_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

long long test_saddll_overflow(long long x, long long y) {
  // CHECK: @test_saddll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  long long result;
  if (__builtin_saddll_overflow(x, y, &result))
    return LongLongErrorCode;
  return result;
}

int test_ssub_overflow(int x, int y) {
  // CHECK: @test_ssub_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  int result;
  if (__builtin_ssub_overflow(x, y, &result))
    return IntErrorCode;
  return result;
}

long test_ssubl_overflow(long x, long y) {
  // CHECK: @test_ssubl_overflow([[UL:i32|i64]] %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.ssub.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  long result;
  if (__builtin_ssubl_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

long long test_ssubll_overflow(long long x, long long y) {
  // CHECK: @test_ssubll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  long long result;
  if (__builtin_ssubll_overflow(x, y, &result))
    return LongLongErrorCode;
  return result;
}

int test_smul_overflow(int x, int y) {
  // CHECK: @test_smul_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  int result;
  if (__builtin_smul_overflow(x, y, &result))
    return IntErrorCode;
  return result;
}

long test_smull_overflow(long x, long y) {
  // CHECK: @test_smull_overflow([[UL:i32|i64]] %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.smul.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  long result;
  if (__builtin_smull_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

long long test_smulll_overflow(long long x, long long y) {
  // CHECK: @test_smulll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  long long result;
  if (__builtin_smulll_overflow(x, y, &result))
    return LongLongErrorCode;
  return result;
}

int test_mixed_sign_mul_overflow_sext_signed_op(int x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mul_overflow_sext_signed_op
  // CHECK: [[SignedOp:%.*]] = sext i32 %0 to i64
  // CHECK: [[IsNeg:%.*]] = icmp slt i64 [[SignedOp]], 0
  int result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

int test_mixed_sign_mul_overflow_zext_unsigned_op(long long x, unsigned y) {
  // CHECK: @test_mixed_sign_mul_overflow_zext_unsigned_op
  // CHECK: [[UnsignedOp:%.*]] = zext i32 %1 to i64
  // CHECK: [[IsNeg:%.*]] = icmp slt i64 %0, 0
  // CHECK: @llvm.umul.with.overflow.i64({{.*}}, i64 [[UnsignedOp]])
  int result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

int test_mixed_sign_mull_overflow(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mull_overflow
  // CHECK: [[IsNeg:%.*]] = icmp slt i32 [[Op1:%.*]], 0
  // CHECK-NEXT: [[Signed:%.*]] = sub i32 0, [[Op1]]
  // CHECK-NEXT: [[AbsSigned:%.*]] = select i1 [[IsNeg]], i32 [[Signed]], i32 [[Op1]]
  // CHECK-NEXT: call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[AbsSigned]], i32 %{{.*}})
  // CHECK-NEXT: [[UnsignedOFlow:%.*]] = extractvalue { i32, i1 } %{{.*}}, 1
  // CHECK-NEXT: [[UnsignedResult:%.*]] = extractvalue { i32, i1 } %{{.*}}, 0
  // CHECK-NEXT: [[IsNegZext:%.*]] = zext i1 [[IsNeg]] to i32
  // CHECK-NEXT: [[MaxResult:%.*]] = add i32 2147483647, [[IsNegZext]]
  // CHECK-NEXT: [[SignedOFlow:%.*]] = icmp ugt i32 [[UnsignedResult]], [[MaxResult]]
  // CHECK-NEXT: [[OFlow:%.*]] = or i1 [[UnsignedOFlow]], [[SignedOFlow]]
  // CHECK-NEXT: [[NegativeResult:%.*]] = sub i32 0, [[UnsignedResult]]
  // CHECK-NEXT: [[Result:%.*]] = select i1 [[IsNeg]], i32 [[NegativeResult]], i32 [[UnsignedResult]]
  // CHECK-NEXT: store i32 [[Result]], i32* %{{.*}}, align 4
  // CHECK: br i1 [[OFlow]]

  int result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

int test_mixed_sign_mull_overflow_unsigned(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mull_overflow_unsigned
  // CHECK: [[IsNeg:%.*]] = icmp slt i32 [[Op1:%.*]], 0
  // CHECK-NEXT: [[Signed:%.*]] = sub i32 0, [[Op1]]
  // CHECK-NEXT: [[AbsSigned:%.*]] = select i1 [[IsNeg]], i32 [[Signed]], i32 [[Op1]]
  // CHECK-NEXT: call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[AbsSigned]], i32 %{{.*}})
  // CHECK-NEXT: [[UnsignedOFlow:%.*]] = extractvalue { i32, i1 } %{{.*}}, 1
  // CHECK-NEXT: [[UnsignedResult:%.*]] = extractvalue { i32, i1 } %{{.*}}, 0
  // CHECK-NEXT: [[NotNull:%.*]] = icmp ne i32 [[UnsignedResult]], 0
  // CHECK-NEXT: [[Underflow:%.*]] = and i1 [[IsNeg]], [[NotNull]]
  // CHECK-NEXT: [[OFlow:%.*]] = or i1 [[UnsignedOFlow]], [[Underflow]]
  // CHECK-NEXT: [[NegatedResult:%.*]] = sub i32 0, [[UnsignedResult]]
  // CHECK-NEXT: [[Result:%.*]] = select i1 [[IsNeg]], i32 [[NegatedResult]], i32 [[UnsignedResult]]
  // CHECK-NEXT: store i32 [[Result]], i32* %{{.*}}, align 4
  // CHECK: br i1 [[OFlow]]

  unsigned result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

int test_mixed_sign_mull_overflow_swapped(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mull_overflow_swapped
  // CHECK: call { i32, i1 } @llvm.umul.with.overflow.i32
  // CHECK: add i32 2147483647
  int result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongErrorCode;
  return result;
}

long long test_mixed_sign_mulll_overflow(long long x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mulll_overflow
  // CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
  // CHECK: add i64 92233720368547
  long long result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongLongErrorCode;
  return result;
}

long long test_mixed_sign_mulll_overflow_swapped(long long x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mulll_overflow_swapped
  // CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
  // CHECK: add i64 92233720368547
  long long result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}

long long test_mixed_sign_mulll_overflow_trunc_signed(long long x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mulll_overflow_trunc_signed
  // CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
  // CHECK: add i64 2147483647
  // CHECK: trunc
  // CHECK: store
  int result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}

long long test_mixed_sign_mulll_overflow_trunc_unsigned(long long x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mulll_overflow_trunc_unsigned
  // CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
  // CHECK: [[NON_ZERO:%.*]] = icmp ne i64 [[UNSIGNED_RESULT:%.*]], 0
  // CHECK-NEXT: [[UNDERFLOW:%.*]] = and i1 {{.*}}, [[NON_ZERO]]
  // CHECK-NEXT: [[OVERFLOW_PRE_TRUNC:%.*]] = or i1 {{.*}}, [[UNDERFLOW]]
  // CHECK-NEXT: [[TRUNC_OVERFLOW:%.*]] = icmp ugt i64 [[UNSIGNED_RESULT]], 4294967295
  // CHECK-NEXT: [[OVERFLOW:%.*]] = or i1 [[OVERFLOW_PRE_TRUNC]], [[TRUNC_OVERFLOW]]
  // CHECK-NEXT: [[NEGATED:%.*]] = sub i64 0, [[UNSIGNED_RESULT]]
  // CHECK-NEXT: [[RESULT:%.*]] = select i1 {{.*}}, i64 [[NEGATED]], i64 [[UNSIGNED_RESULT]]
  // CHECK-NEXT: trunc i64 [[RESULT]] to i32
  // CHECK-NEXT: store
  unsigned result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}

long long test_mixed_sign_mul_overflow_extend_signed(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mul_overflow_extend_signed
  // CHECK: call { i64, i1 } @llvm.smul.with.overflow.i64
  long long result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}

long long test_mixed_sign_mul_overflow_extend_unsigned(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mul_overflow_extend_unsigned
  // CHECK: call { i65, i1 } @llvm.smul.with.overflow.i65
  unsigned long long result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}