
// RUN: %clang_cc1 < %s -triple armv5e-none-linux-gnueabi -emit-llvm -O1 | FileCheck %s

// FIXME: This file should not be checking -O1 output.
// I.e., it is testing many IR optimizer passes as part of front-end verification.

enum memory_order {
  memory_order_relaxed, memory_order_consume, memory_order_acquire,
  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
};
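// Note, the trailing i32 argument in the libcall checks below is the memory
// order passed through to the runtime (0 = memory_order_relaxed in @fp2a,
// 5 = memory_order_seq_cst everywhere else).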

int *test_c11_atomic_fetch_add_int_ptr(_Atomic(int *) *p) {
  // CHECK: test_c11_atomic_fetch_add_int_ptr
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 12, i32 5)
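  // Note, the C11 builtin scales the operand for atomic pointers, so adding 3
  // ints becomes an addend of 3 * sizeof(int) == 12 bytes in the libcall.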
  return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}

int *test_c11_atomic_fetch_sub_int_ptr(_Atomic(int *) *p) {
  // CHECK: test_c11_atomic_fetch_sub_int_ptr
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 20, i32 5)
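  // Note, likewise the subtrahend 5 is scaled to 5 * sizeof(int) == 20 bytes.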
  return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}

int test_c11_atomic_fetch_add_int(_Atomic(int) *p) {
  // CHECK: test_c11_atomic_fetch_add_int
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 3, i32 5)
  return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}

int test_c11_atomic_fetch_sub_int(_Atomic(int) *p) {
  // CHECK: test_c11_atomic_fetch_sub_int
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 5, i32 5)
  return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}

int *fp2a(int **p) {
  // CHECK: @fp2a
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 4, i32 0)
  // Note, the GNU builtins do not multiply by sizeof(T)!
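  // Note also the trailing argument is i32 0 (memory_order_relaxed) here,
  // not the i32 5 (memory_order_seq_cst) used in the tests above.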
  return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}

int test_atomic_fetch_add(int *p) {
  // CHECK: test_atomic_fetch_add
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_add(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_sub(int *p) {
  // CHECK: test_atomic_fetch_sub
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_sub(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_and(int *p) {
  // CHECK: test_atomic_fetch_and
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_and(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_or(int *p) {
  // CHECK: test_atomic_fetch_or
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_or(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_xor(int *p) {
  // CHECK: test_atomic_fetch_xor
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_xor(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_nand(int *p) {
  // CHECK: test_atomic_fetch_nand
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_nand(p, 55, memory_order_seq_cst);
}

int test_atomic_add_fetch(int *p) {
  // CHECK: test_atomic_add_fetch
  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = add i32 [[CALL]], 55
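  // Note, the libcall returns the old value, so the __atomic_*_fetch forms
  // re-apply the operation in IR to produce the new value checked here.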
  return __atomic_add_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_sub_fetch(int *p) {
  // CHECK: test_atomic_sub_fetch
  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = add i32 [[CALL]], -55
  return __atomic_sub_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_and_fetch(int *p) {
  // CHECK: test_atomic_and_fetch
  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = and i32 [[CALL]], 55
  return __atomic_and_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_or_fetch(int *p) {
  // CHECK: test_atomic_or_fetch
  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = or i32 [[CALL]], 55
  return __atomic_or_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_xor_fetch(int *p) {
  // CHECK: test_atomic_xor_fetch
  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = xor i32 [[CALL]], 55
  return __atomic_xor_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_nand_fetch(int *p) {
  // CHECK: test_atomic_nand_fetch
  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // FIXME: We should not be checking optimized IR. It changes independently of clang.
  // FIXME-CHECK: [[AND:%[^ ]*]] = and i32 [[CALL]], 55
  // FIXME-CHECK: {{%[^ ]*}} = xor i32 [[AND]], -1
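  // Note, nand_fetch computes ~(old & val), hence the and followed by xor -1.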
  return __atomic_nand_fetch(p, 55, memory_order_seq_cst);
}