[AMDGPU] Allow relaxed/consume memory order for atomic inc/dec

Reviewed by: Jon Chesterfield

Differential Revision: https://reviews.llvm.org/D100144
Author: Yaxun (Sam) Liu
Date: 2021-04-08 17:44:29 -04:00
Commit: 25942d7c49 (parent: acf3279a03)
4 changed files with 46 additions and 20 deletions
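In user code, the effect is that the AMDGCN atomic inc/dec builtins now accept the two weakest C11/C++11 orderings instead of diagnosing them. A minimal device-side sketch of what becomes legal (the function name and variable are illustrative, not from this patch):

__attribute__((device)) void relaxed_inc_example(__UINT32_TYPE__ *counter) {
  // Previously rejected with "memory order argument to atomic operation
  // is invalid"; now lowered as monotonic (relaxed) and acquire (consume).
  __builtin_amdgcn_atomic_inc32(counter, 42u, __ATOMIC_RELAXED, "workgroup");
  __builtin_amdgcn_atomic_inc32(counter, 42u, __ATOMIC_CONSUME, "workgroup");
}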


@@ -15469,8 +15469,10 @@ bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
// Map C11/C++11 memory ordering to LLVM memory ordering
assert(llvm::isValidAtomicOrderingCABI(ord));
switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
case llvm::AtomicOrderingCABI::acquire:
case llvm::AtomicOrderingCABI::consume:
AO = llvm::AtomicOrdering::Acquire;
break;
case llvm::AtomicOrderingCABI::release:
@@ -15482,8 +15484,8 @@ bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
case llvm::AtomicOrderingCABI::seq_cst:
AO = llvm::AtomicOrdering::SequentiallyConsistent;
break;
case llvm::AtomicOrderingCABI::consume:
case llvm::AtomicOrderingCABI::relaxed:
AO = llvm::AtomicOrdering::Monotonic;
break;
}
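
Distilled from the two hunks above, the complete C ABI to LLVM ordering mapping now looks like this (a sketch as a hypothetical free function; in the real code the result is written to the AO out-parameter of ProcessOrderScopeAMDGCN):

static llvm::AtomicOrdering mapToLLVMOrdering(llvm::AtomicOrderingCABI Ord) {
  switch (Ord) {
  case llvm::AtomicOrderingCABI::consume: // consume is promoted to acquire
  case llvm::AtomicOrderingCABI::acquire:
    return llvm::AtomicOrdering::Acquire;
  case llvm::AtomicOrderingCABI::release:
    return llvm::AtomicOrdering::Release;
  case llvm::AtomicOrderingCABI::acq_rel:
    return llvm::AtomicOrdering::AcquireRelease;
  case llvm::AtomicOrderingCABI::seq_cst:
    return llvm::AtomicOrdering::SequentiallyConsistent;
  case llvm::AtomicOrderingCABI::relaxed: // newly reachable after this patch
    return llvm::AtomicOrdering::Monotonic;
  }
  llvm_unreachable("covered switch; validity was asserted above");
}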


@@ -3384,20 +3384,28 @@ bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
<< ArgExpr->getType();
int ord = ArgResult.Val.getInt().getZExtValue();
auto Ord = ArgResult.Val.getInt().getZExtValue();
// Check validity of memory ordering as per C11 / C++11's memory model.
switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
// Only fence needs a check; atomic dec/inc allow all memory orders.
auto DiagInvalidMemOrder = [&](auto *ArgExpr) {
return Diag(ArgExpr->getBeginLoc(),
diag::warn_atomic_op_has_invalid_memory_order)
<< ArgExpr->getSourceRange();
};
if (!llvm::isValidAtomicOrderingCABI(Ord))
return DiagInvalidMemOrder(ArgExpr);
switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
case llvm::AtomicOrderingCABI::relaxed:
case llvm::AtomicOrderingCABI::consume:
if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
return DiagInvalidMemOrder(ArgExpr);
break;
case llvm::AtomicOrderingCABI::acquire:
case llvm::AtomicOrderingCABI::release:
case llvm::AtomicOrderingCABI::acq_rel:
case llvm::AtomicOrderingCABI::seq_cst:
break;
default: {
return Diag(ArgExpr->getBeginLoc(),
diag::warn_atomic_op_has_invalid_memory_order)
<< ArgExpr->getSourceRange();
}
}
Arg = TheCall->getArg(ScopeIndex);
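
With the check reshaped this way, the relaxed/consume warning becomes specific to the fence builtin, while anything outside the valid C ABI range still warns for every builtin. Roughly (an illustrative snippet, not part of the patch):

void sema_behavior(unsigned int v) {
  __builtin_amdgcn_fence(__ATOMIC_RELAXED, "workgroup");  // still warns: invalid memory order
  __builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "workgroup");  // ok
  v = __builtin_amdgcn_atomic_inc32(&v, v, __ATOMIC_RELAXED, "workgroup");     // now accepted
  v = __builtin_amdgcn_atomic_inc32(&v, v, __ATOMIC_SEQ_CST + 1, "workgroup"); // still warns
}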


@@ -188,16 +188,22 @@ __attribute__((device)) void test_order32() {
// CHECK-LABEL: test_order32
__attribute__((shared)) __UINT32_TYPE__ val;
// CHECK: %1 = call i32 @llvm.amdgcn.atomic.inc.i32.p0i32(i32* addrspacecast (i32 addrspace(3)* @_ZZ12test_order32vE3val to i32*), i32 %0, i32 4, i32 2, i1 false)
// CHECK: call i32 @llvm.amdgcn.atomic.inc.i32.p0i32(i32* addrspacecast (i32 addrspace(3)* @_ZZ12test_order32vE3val to i32*), i32 {{.*}}, i32 2, i32 2, i1 false)
val = __builtin_amdgcn_atomic_inc32(&val, val, __ATOMIC_RELAXED, "workgroup");
// CHECK: call i32 @llvm.amdgcn.atomic.inc.i32.p0i32(i32* addrspacecast (i32 addrspace(3)* @_ZZ12test_order32vE3val to i32*), i32 {{.*}}, i32 4, i32 2, i1 false)
val = __builtin_amdgcn_atomic_inc32(&val, val, __ATOMIC_CONSUME, "workgroup");
// CHECK: call i32 @llvm.amdgcn.atomic.inc.i32.p0i32(i32* addrspacecast (i32 addrspace(3)* @_ZZ12test_order32vE3val to i32*), i32 {{.*}}, i32 4, i32 2, i1 false)
val = __builtin_amdgcn_atomic_inc32(&val, val, __ATOMIC_ACQUIRE, "workgroup");
// CHECK: %3 = call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* addrspacecast (i32 addrspace(3)* @_ZZ12test_order32vE3val to i32*), i32 %2, i32 5, i32 2, i1 false)
// CHECK: call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* addrspacecast (i32 addrspace(3)* @_ZZ12test_order32vE3val to i32*), i32 {{.*}}, i32 5, i32 2, i1 false)
val = __builtin_amdgcn_atomic_dec32(&val, val, __ATOMIC_RELEASE, "workgroup");
// CHECK: %5 = call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* addrspacecast (i32 addrspace(3)* @_ZZ12test_order32vE3val to i32*), i32 %4, i32 6, i32 2, i1 false)
// CHECK: call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* addrspacecast (i32 addrspace(3)* @_ZZ12test_order32vE3val to i32*), i32 {{.*}}, i32 6, i32 2, i1 false)
val = __builtin_amdgcn_atomic_dec32(&val, val, __ATOMIC_ACQ_REL, "workgroup");
// CHECK: %7 = call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* addrspacecast (i32 addrspace(3)* @_ZZ12test_order32vE3val to i32*), i32 %6, i32 7, i32 2, i1 false)
// CHECK: call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* addrspacecast (i32 addrspace(3)* @_ZZ12test_order32vE3val to i32*), i32 {{.*}}, i32 7, i32 2, i1 false)
val = __builtin_amdgcn_atomic_dec32(&val, val, __ATOMIC_SEQ_CST, "workgroup");
}
@@ -205,16 +211,22 @@ __attribute__((device)) void test_order64() {
// CHECK-LABEL: test_order64
__attribute__((shared)) __UINT64_TYPE__ val;
// CHECK: %1 = call i64 @llvm.amdgcn.atomic.inc.i64.p0i64(i64* addrspacecast (i64 addrspace(3)* @_ZZ12test_order64vE3val to i64*), i64 %0, i32 4, i32 2, i1 false)
// CHECK: call i64 @llvm.amdgcn.atomic.inc.i64.p0i64(i64* addrspacecast (i64 addrspace(3)* @_ZZ12test_order64vE3val to i64*), i64 {{.*}}, i32 2, i32 2, i1 false)
val = __builtin_amdgcn_atomic_inc64(&val, val, __ATOMIC_RELAXED, "workgroup");
// CHECK: call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* addrspacecast (i64 addrspace(3)* @_ZZ12test_order64vE3val to i64*), i64 {{.*}}, i32 4, i32 2, i1 false)
val = __builtin_amdgcn_atomic_dec64(&val, val, __ATOMIC_CONSUME, "workgroup");
// CHECK: call i64 @llvm.amdgcn.atomic.inc.i64.p0i64(i64* addrspacecast (i64 addrspace(3)* @_ZZ12test_order64vE3val to i64*), i64 {{.*}}, i32 4, i32 2, i1 false)
val = __builtin_amdgcn_atomic_inc64(&val, val, __ATOMIC_ACQUIRE, "workgroup");
// CHECK: %3 = call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* addrspacecast (i64 addrspace(3)* @_ZZ12test_order64vE3val to i64*), i64 %2, i32 5, i32 2, i1 false)
// CHECK: call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* addrspacecast (i64 addrspace(3)* @_ZZ12test_order64vE3val to i64*), i64 {{.*}}, i32 5, i32 2, i1 false)
val = __builtin_amdgcn_atomic_dec64(&val, val, __ATOMIC_RELEASE, "workgroup");
// CHECK: %5 = call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* addrspacecast (i64 addrspace(3)* @_ZZ12test_order64vE3val to i64*), i64 %4, i32 6, i32 2, i1 false)
// CHECK: call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* addrspacecast (i64 addrspace(3)* @_ZZ12test_order64vE3val to i64*), i64 {{.*}}, i32 6, i32 2, i1 false)
val = __builtin_amdgcn_atomic_dec64(&val, val, __ATOMIC_ACQ_REL, "workgroup");
// CHECK: %7 = call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* addrspacecast (i64 addrspace(3)* @_ZZ12test_order64vE3val to i64*), i64 %6, i32 7, i32 2, i1 false)
// CHECK: call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* addrspacecast (i64 addrspace(3)* @_ZZ12test_order64vE3val to i64*), i64 {{.*}}, i32 7, i32 2, i1 false)
val = __builtin_amdgcn_atomic_dec64(&val, val, __ATOMIC_SEQ_CST, "workgroup");
}
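
The third i32 operand in these CHECK lines is the numeric value of llvm::AtomicOrdering, which is why relaxed now emits 2 while consume and acquire both emit 4. For reference, the LLVM enumerators encode as:

// llvm::AtomicOrdering values as they appear in the intrinsic's
// ordering operand (Consume never appears: it is emitted as Acquire).
enum class AtomicOrdering {
  NotAtomic = 0,
  Unordered = 1,
  Monotonic = 2, // C11 relaxed
  Consume = 3,
  Acquire = 4,
  Release = 5,
  AcquireRelease = 6,
  SequentiallyConsistent = 7
};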


@@ -148,7 +148,8 @@ void test_s_setreg(int x, int y) {
void test_atomic_inc32() {
uint val = 17;
val = __builtin_amdgcn_atomic_inc32(&val, val, __ATOMIC_SEQ_CST + 1, "workgroup"); // expected-warning {{memory order argument to atomic operation is invalid}}
val = __builtin_amdgcn_atomic_inc32(&val, val, __ATOMIC_ACQUIRE - 1, "workgroup"); // expected-warning {{memory order argument to atomic operation is invalid}}
val = __builtin_amdgcn_atomic_inc32(&val, val, __ATOMIC_RELAXED, "workgroup");
val = __builtin_amdgcn_atomic_inc32(&val, val, __ATOMIC_CONSUME, "workgroup");
val = __builtin_amdgcn_atomic_inc32(4); // expected-error {{too few arguments to function call, expected 4}}
val = __builtin_amdgcn_atomic_inc32(&val, val, 4, 4, 4, 4); // expected-error {{too many arguments to function call, expected 4}}
val = __builtin_amdgcn_atomic_inc32(&val, val, 3.14, ""); // expected-warning {{implicit conversion from 'double' to 'unsigned int' changes value from 3.14 to 3}}
@@ -162,7 +163,8 @@ void test_atomic_inc32() {
void test_atomic_inc64() {
__UINT64_TYPE__ val = 17;
val = __builtin_amdgcn_atomic_inc64(&val, val, __ATOMIC_SEQ_CST + 1, "workgroup"); // expected-warning {{memory order argument to atomic operation is invalid}}
val = __builtin_amdgcn_atomic_inc64(&val, val, __ATOMIC_ACQUIRE - 1, "workgroup"); // expected-warning {{memory order argument to atomic operation is invalid}}
val = __builtin_amdgcn_atomic_inc64(&val, val, __ATOMIC_RELAXED, "workgroup");
val = __builtin_amdgcn_atomic_inc64(&val, val, __ATOMIC_CONSUME, "workgroup");
val = __builtin_amdgcn_atomic_inc64(4); // expected-error {{too few arguments to function call, expected 4}}
val = __builtin_amdgcn_atomic_inc64(&val, val, 4, 4, 4, 4); // expected-error {{too many arguments to function call, expected 4}}
val = __builtin_amdgcn_atomic_inc64(&val, val, 3.14, ""); // expected-warning {{implicit conversion from 'double' to 'unsigned int' changes value from 3.14 to 3}}
@@ -176,7 +178,8 @@ void test_atomic_inc64() {
void test_atomic_dec32() {
uint val = 17;
val = __builtin_amdgcn_atomic_dec32(&val, val, __ATOMIC_SEQ_CST + 1, "workgroup"); // expected-warning {{memory order argument to atomic operation is invalid}}
val = __builtin_amdgcn_atomic_dec32(&val, val, __ATOMIC_ACQUIRE - 1, "workgroup"); // expected-warning {{memory order argument to atomic operation is invalid}}
val = __builtin_amdgcn_atomic_dec32(&val, val, __ATOMIC_RELAXED, "workgroup");
val = __builtin_amdgcn_atomic_dec32(&val, val, __ATOMIC_CONSUME, "workgroup");
val = __builtin_amdgcn_atomic_dec32(4); // expected-error {{too few arguments to function call, expected 4}}
val = __builtin_amdgcn_atomic_dec32(&val, val, 4, 4, 4, 4); // expected-error {{too many arguments to function call, expected 4}}
val = __builtin_amdgcn_atomic_dec32(&val, val, 3.14, ""); // expected-warning {{implicit conversion from 'double' to 'unsigned int' changes value from 3.14 to 3}}
@@ -190,7 +193,8 @@ void test_atomic_dec32() {
void test_atomic_dec64() {
__UINT64_TYPE__ val = 17;
val = __builtin_amdgcn_atomic_dec64(&val, val, __ATOMIC_SEQ_CST + 1, "workgroup"); // expected-warning {{memory order argument to atomic operation is invalid}}
val = __builtin_amdgcn_atomic_dec64(&val, val, __ATOMIC_ACQUIRE - 1, "workgroup"); // expected-warning {{memory order argument to atomic operation is invalid}}
val = __builtin_amdgcn_atomic_dec64(&val, val, __ATOMIC_RELAXED, "workgroup");
val = __builtin_amdgcn_atomic_dec64(&val, val, __ATOMIC_CONSUME, "workgroup");
val = __builtin_amdgcn_atomic_dec64(4); // expected-error {{too few arguments to function call, expected 4}}
val = __builtin_amdgcn_atomic_dec64(&val, val, 4, 4, 4, 4); // expected-error {{too many arguments to function call, expected 4}}
val = __builtin_amdgcn_atomic_dec64(&val, val, 3.14, ""); // expected-warning {{implicit conversion from 'double' to 'unsigned int' changes value from 3.14 to 3}}