
; Similar to 806761a7629df268c8aed49657aeccffa6bca449: -mtriple= specifies the
; full target triple while -march= merely sets the architecture part of the
; default target triple (e.g. Windows, macOS), leaving a target triple which may
; not make sense. Therefore, -march= is error-prone and not recommended for
; tests without a target triple. The issue has been benign as we recognize
; bpf*-apple-darwin as ELF instead of rejecting it outright.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=bpfel -mcpu=v1 -filetype=asm < %s | FileCheck %s
;
; Source:
; $ cat atomics_mem_order_v1.c
; #include <stdatomic.h>
;
; void test_fetch_add_32_noret(int _Atomic *i) {
; (void)__c11_atomic_fetch_add(i, 10, memory_order_relaxed);
; (void)__c11_atomic_fetch_add(i, 10, memory_order_acquire);
; (void)__c11_atomic_fetch_add(i, 10, memory_order_release);
; (void)__c11_atomic_fetch_add(i, 10, memory_order_acq_rel);
; (void)__c11_atomic_fetch_add(i, 10, memory_order_seq_cst);
; }
;
; void test_fetch_add_64_noret(long _Atomic *i) {
; (void)__c11_atomic_fetch_add(i, 10, memory_order_relaxed);
; (void)__c11_atomic_fetch_add(i, 10, memory_order_acquire);
; (void)__c11_atomic_fetch_add(i, 10, memory_order_release);
; (void)__c11_atomic_fetch_add(i, 10, memory_order_acq_rel);
; (void)__c11_atomic_fetch_add(i, 10, memory_order_seq_cst);
; }
;
; void test_fetch_sub_64_noret(long _Atomic *i) {
; (void)__c11_atomic_fetch_sub(i, 10, memory_order_relaxed);
; (void)__c11_atomic_fetch_sub(i, 10, memory_order_acquire);
; (void)__c11_atomic_fetch_sub(i, 10, memory_order_release);
; (void)__c11_atomic_fetch_sub(i, 10, memory_order_acq_rel);
; (void)__c11_atomic_fetch_sub(i, 10, memory_order_seq_cst);
; }
;
; long test_fetch_sub_64_ret(long _Atomic *i) {
; return __c11_atomic_fetch_sub(i, 10, memory_order_acquire) +
; __c11_atomic_fetch_sub(i, 10, memory_order_release) +
; __c11_atomic_fetch_sub(i, 10, memory_order_acq_rel) +
; __c11_atomic_fetch_sub(i, 10, memory_order_seq_cst);
; }
;
; void test_fetch_and_64_noret(long _Atomic *i) {
; (void)__c11_atomic_fetch_and(i, 10, memory_order_relaxed);
; (void)__c11_atomic_fetch_and(i, 10, memory_order_acquire);
; (void)__c11_atomic_fetch_and(i, 10, memory_order_release);
; (void)__c11_atomic_fetch_and(i, 10, memory_order_acq_rel);
; (void)__c11_atomic_fetch_and(i, 10, memory_order_seq_cst);
; }
;
; long test_fetch_and_64_ret(long _Atomic *i) {
; return __c11_atomic_fetch_and(i, 10, memory_order_relaxed) +
; __c11_atomic_fetch_and(i, 10, memory_order_acquire) +
; __c11_atomic_fetch_and(i, 10, memory_order_release) +
; __c11_atomic_fetch_and(i, 10, memory_order_acq_rel) +
; __c11_atomic_fetch_and(i, 10, memory_order_seq_cst);
; }
;
; void test_fetch_or_64_noret(long _Atomic *i) {
; (void)__c11_atomic_fetch_or(i, 10, memory_order_relaxed);
; (void)__c11_atomic_fetch_or(i, 10, memory_order_acquire);
; (void)__c11_atomic_fetch_or(i, 10, memory_order_release);
; (void)__c11_atomic_fetch_or(i, 10, memory_order_acq_rel);
; (void)__c11_atomic_fetch_or(i, 10, memory_order_seq_cst);
; }
;
; long test_fetch_or_64_ret(long _Atomic *i) {
; return __c11_atomic_fetch_or(i, 10, memory_order_relaxed) +
; __c11_atomic_fetch_or(i, 10, memory_order_acquire) +
; __c11_atomic_fetch_or(i, 10, memory_order_release) +
; __c11_atomic_fetch_or(i, 10, memory_order_acq_rel) +
; __c11_atomic_fetch_or(i, 10, memory_order_seq_cst);
; }
;
; void test_fetch_xor_64_noret(long _Atomic *i) {
; (void)__c11_atomic_fetch_xor(i, 10, memory_order_relaxed);
; (void)__c11_atomic_fetch_xor(i, 10, memory_order_acquire);
; (void)__c11_atomic_fetch_xor(i, 10, memory_order_release);
; (void)__c11_atomic_fetch_xor(i, 10, memory_order_acq_rel);
; (void)__c11_atomic_fetch_xor(i, 10, memory_order_seq_cst);
; }
;
; long test_fetch_xor_64_ret(long _Atomic *i) {
; return __c11_atomic_fetch_xor(i, 10, memory_order_relaxed) +
; __c11_atomic_fetch_xor(i, 10, memory_order_acquire) +
; __c11_atomic_fetch_xor(i, 10, memory_order_release) +
; __c11_atomic_fetch_xor(i, 10, memory_order_acq_rel) +
; __c11_atomic_fetch_xor(i, 10, memory_order_seq_cst);
; }

target triple = "bpf"
; Checks BPF v1 lowering of 32-bit atomic fetch_add with a discarded result:
; every C11 memory order lowers to the XADD form "lock *(u32 *)(r1 + 0) += rN".
; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
define dso_local void @test_fetch_add_32_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_fetch_add_32_noret:
; CHECK: .Ltest_fetch_add_32_noret$local:
; CHECK-NEXT: .type .Ltest_fetch_add_32_noret$local,@function
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: r2 = 10
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: lock *(u32 *)(r1 + 0) += r3
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: lock *(u32 *)(r1 + 0) += r3
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: lock *(u32 *)(r1 + 0) += r3
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: lock *(u32 *)(r1 + 0) += r3
; CHECK-NEXT: lock *(u32 *)(r1 + 0) += r2
; CHECK-NEXT: exit
entry:
  %0 = atomicrmw add ptr %i, i32 10 monotonic, align 4
  %1 = atomicrmw add ptr %i, i32 10 acquire, align 4
  %2 = atomicrmw add ptr %i, i32 10 release, align 4
  %3 = atomicrmw add ptr %i, i32 10 acq_rel, align 4
  %4 = atomicrmw add ptr %i, i32 10 seq_cst, align 4
  ret void
}
; Checks BPF v1 lowering of 64-bit atomic fetch_add with a discarded result:
; every C11 memory order lowers to "lock *(u64 *)(r1 + 0) += rN".
; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
define dso_local void @test_fetch_add_64_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_fetch_add_64_noret:
; CHECK: .Ltest_fetch_add_64_noret$local:
; CHECK-NEXT: .type .Ltest_fetch_add_64_noret$local,@function
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: r2 = 10
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: lock *(u64 *)(r1 + 0) += r3
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: lock *(u64 *)(r1 + 0) += r3
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: lock *(u64 *)(r1 + 0) += r3
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: lock *(u64 *)(r1 + 0) += r3
; CHECK-NEXT: lock *(u64 *)(r1 + 0) += r2
; CHECK-NEXT: exit
entry:
  %0 = atomicrmw add ptr %i, i64 10 monotonic, align 8
  %1 = atomicrmw add ptr %i, i64 10 acquire, align 8
  %2 = atomicrmw add ptr %i, i64 10 release, align 8
  %3 = atomicrmw add ptr %i, i64 10 acq_rel, align 8
  %4 = atomicrmw add ptr %i, i64 10 seq_cst, align 8
  ret void
}
; Checks BPF v1 lowering of 64-bit atomic fetch_sub with a discarded result:
; sub is lowered as add of the negated operand; monotonic uses the XADD form,
; while stronger orders use atomic_fetch_add.
; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
define dso_local void @test_fetch_sub_64_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_fetch_sub_64_noret:
; CHECK: .Ltest_fetch_sub_64_noret$local:
; CHECK-NEXT: .type .Ltest_fetch_sub_64_noret$local,@function
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: r2 = 10
; CHECK-NEXT: r2 = -r2
; CHECK-NEXT: r3 = r2
; CHECK-NEXT: lock *(u64 *)(r1 + 0) += r3
; CHECK-NEXT: r3 = r2
; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r3 = r2
; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r3 = r2
; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r2 = atomic_fetch_add((u64 *)(r1 + 0), r2)
; CHECK-NEXT: exit
entry:
  %0 = atomicrmw sub ptr %i, i64 10 monotonic, align 8
  %1 = atomicrmw sub ptr %i, i64 10 acquire, align 8
  %2 = atomicrmw sub ptr %i, i64 10 release, align 8
  %3 = atomicrmw sub ptr %i, i64 10 acq_rel, align 8
  %4 = atomicrmw sub ptr %i, i64 10 seq_cst, align 8
  ret void
}
; Checks BPF v1 lowering of 64-bit atomic fetch_sub whose results are used:
; each op becomes atomic_fetch_add of the negated operand, accumulated in r0.
; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
define dso_local i64 @test_fetch_sub_64_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_fetch_sub_64_ret:
; CHECK: .Ltest_fetch_sub_64_ret$local:
; CHECK-NEXT: .type .Ltest_fetch_sub_64_ret$local,@function
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: r2 = 10
; CHECK-NEXT: r2 = -r2
; CHECK-NEXT: r3 = r2
; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r0 = r2
; CHECK-NEXT: r0 = atomic_fetch_add((u64 *)(r1 + 0), r0)
; CHECK-NEXT: r0 += r3
; CHECK-NEXT: r3 = r2
; CHECK-NEXT: r3 = atomic_fetch_add((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r0 += r3
; CHECK-NEXT: r2 = atomic_fetch_add((u64 *)(r1 + 0), r2)
; CHECK-NEXT: r0 += r2
; CHECK-NEXT: exit
entry:
  %0 = atomicrmw sub ptr %i, i64 10 acquire, align 8
  %1 = atomicrmw sub ptr %i, i64 10 release, align 8
  %add = add nsw i64 %1, %0
  %2 = atomicrmw sub ptr %i, i64 10 acq_rel, align 8
  %add5 = add nsw i64 %add, %2
  %3 = atomicrmw sub ptr %i, i64 10 seq_cst, align 8
  %add8 = add nsw i64 %add5, %3
  ret i64 %add8
}
; Checks BPF v1 lowering of 64-bit atomic fetch_and with a discarded result:
; monotonic uses the "lock ... &= rN" form; stronger orders use atomic_fetch_and.
; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
define dso_local void @test_fetch_and_64_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_fetch_and_64_noret:
; CHECK: .Ltest_fetch_and_64_noret$local:
; CHECK-NEXT: .type .Ltest_fetch_and_64_noret$local,@function
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: r2 = 10
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: lock *(u64 *)(r1 + 0) &= r3
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r2 = atomic_fetch_and((u64 *)(r1 + 0), r2)
; CHECK-NEXT: exit
entry:
  %0 = atomicrmw and ptr %i, i64 10 monotonic, align 8
  %1 = atomicrmw and ptr %i, i64 10 acquire, align 8
  %2 = atomicrmw and ptr %i, i64 10 release, align 8
  %3 = atomicrmw and ptr %i, i64 10 acq_rel, align 8
  %4 = atomicrmw and ptr %i, i64 10 seq_cst, align 8
  ret void
}
; Checks BPF v1 lowering of 64-bit atomic fetch_and whose results are used:
; every memory order (including monotonic) uses atomic_fetch_and, summed in r0.
; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
define dso_local i64 @test_fetch_and_64_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_fetch_and_64_ret:
; CHECK: .Ltest_fetch_and_64_ret$local:
; CHECK-NEXT: .type .Ltest_fetch_and_64_ret$local,@function
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: r2 = 10
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r0 = 10
; CHECK-NEXT: r0 = atomic_fetch_and((u64 *)(r1 + 0), r0)
; CHECK-NEXT: r0 += r3
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r0 += r3
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_and((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r0 += r3
; CHECK-NEXT: r2 = atomic_fetch_and((u64 *)(r1 + 0), r2)
; CHECK-NEXT: r0 += r2
; CHECK-NEXT: exit
entry:
  %0 = atomicrmw and ptr %i, i64 10 monotonic, align 8
  %1 = atomicrmw and ptr %i, i64 10 acquire, align 8
  %add = add nsw i64 %1, %0
  %2 = atomicrmw and ptr %i, i64 10 release, align 8
  %add5 = add nsw i64 %add, %2
  %3 = atomicrmw and ptr %i, i64 10 acq_rel, align 8
  %add8 = add nsw i64 %add5, %3
  %4 = atomicrmw and ptr %i, i64 10 seq_cst, align 8
  %add11 = add nsw i64 %add8, %4
  ret i64 %add11
}
; Checks BPF v1 lowering of 64-bit atomic fetch_or with a discarded result:
; monotonic uses the "lock ... |= rN" form; stronger orders use atomic_fetch_or.
; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
define dso_local void @test_fetch_or_64_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_fetch_or_64_noret:
; CHECK: .Ltest_fetch_or_64_noret$local:
; CHECK-NEXT: .type .Ltest_fetch_or_64_noret$local,@function
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: r2 = 10
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: lock *(u64 *)(r1 + 0) |= r3
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r2 = atomic_fetch_or((u64 *)(r1 + 0), r2)
; CHECK-NEXT: exit
entry:
  %0 = atomicrmw or ptr %i, i64 10 monotonic, align 8
  %1 = atomicrmw or ptr %i, i64 10 acquire, align 8
  %2 = atomicrmw or ptr %i, i64 10 release, align 8
  %3 = atomicrmw or ptr %i, i64 10 acq_rel, align 8
  %4 = atomicrmw or ptr %i, i64 10 seq_cst, align 8
  ret void
}
; Checks BPF v1 lowering of 64-bit atomic fetch_or whose results are used:
; every memory order (including monotonic) uses atomic_fetch_or, summed in r0.
; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
define dso_local i64 @test_fetch_or_64_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_fetch_or_64_ret:
; CHECK: .Ltest_fetch_or_64_ret$local:
; CHECK-NEXT: .type .Ltest_fetch_or_64_ret$local,@function
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: r2 = 10
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r0 = 10
; CHECK-NEXT: r0 = atomic_fetch_or((u64 *)(r1 + 0), r0)
; CHECK-NEXT: r0 += r3
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r0 += r3
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_or((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r0 += r3
; CHECK-NEXT: r2 = atomic_fetch_or((u64 *)(r1 + 0), r2)
; CHECK-NEXT: r0 += r2
; CHECK-NEXT: exit
entry:
  %0 = atomicrmw or ptr %i, i64 10 monotonic, align 8
  %1 = atomicrmw or ptr %i, i64 10 acquire, align 8
  %add = add nsw i64 %1, %0
  %2 = atomicrmw or ptr %i, i64 10 release, align 8
  %add5 = add nsw i64 %add, %2
  %3 = atomicrmw or ptr %i, i64 10 acq_rel, align 8
  %add8 = add nsw i64 %add5, %3
  %4 = atomicrmw or ptr %i, i64 10 seq_cst, align 8
  %add11 = add nsw i64 %add8, %4
  ret i64 %add11
}
; Checks BPF v1 lowering of 64-bit atomic fetch_xor with a discarded result:
; monotonic uses the "lock ... ^= rN" form; stronger orders use atomic_fetch_xor.
; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
define dso_local void @test_fetch_xor_64_noret(ptr nocapture noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_fetch_xor_64_noret:
; CHECK: .Ltest_fetch_xor_64_noret$local:
; CHECK-NEXT: .type .Ltest_fetch_xor_64_noret$local,@function
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: r2 = 10
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: lock *(u64 *)(r1 + 0) ^= r3
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r2 = atomic_fetch_xor((u64 *)(r1 + 0), r2)
; CHECK-NEXT: exit
entry:
  %0 = atomicrmw xor ptr %i, i64 10 monotonic, align 8
  %1 = atomicrmw xor ptr %i, i64 10 acquire, align 8
  %2 = atomicrmw xor ptr %i, i64 10 release, align 8
  %3 = atomicrmw xor ptr %i, i64 10 acq_rel, align 8
  %4 = atomicrmw xor ptr %i, i64 10 seq_cst, align 8
  ret void
}
; Checks BPF v1 lowering of 64-bit atomic fetch_xor whose results are used:
; every memory order (including monotonic) uses atomic_fetch_xor, summed in r0.
; Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
define dso_local i64 @test_fetch_xor_64_ret(ptr nocapture noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_fetch_xor_64_ret:
; CHECK: .Ltest_fetch_xor_64_ret$local:
; CHECK-NEXT: .type .Ltest_fetch_xor_64_ret$local,@function
; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: r2 = 10
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r0 = 10
; CHECK-NEXT: r0 = atomic_fetch_xor((u64 *)(r1 + 0), r0)
; CHECK-NEXT: r0 += r3
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r0 += r3
; CHECK-NEXT: r3 = 10
; CHECK-NEXT: r3 = atomic_fetch_xor((u64 *)(r1 + 0), r3)
; CHECK-NEXT: r0 += r3
; CHECK-NEXT: r2 = atomic_fetch_xor((u64 *)(r1 + 0), r2)
; CHECK-NEXT: r0 += r2
; CHECK-NEXT: exit
entry:
  %0 = atomicrmw xor ptr %i, i64 10 monotonic, align 8
  %1 = atomicrmw xor ptr %i, i64 10 acquire, align 8
  %add = add nsw i64 %1, %0
  %2 = atomicrmw xor ptr %i, i64 10 release, align 8
  %add5 = add nsw i64 %add, %2
  %3 = atomicrmw xor ptr %i, i64 10 acq_rel, align 8
  %add8 = add nsw i64 %add5, %3
  %4 = atomicrmw xor ptr %i, i64 10 seq_cst, align 8
  %add11 = add nsw i64 %add8, %4
  ret i64 %add11
}
; Shared function attributes and module metadata emitted by clang for the
; C source above; "target-cpu"="v1" pins the BPF CPU the CHECK lines assume.
attributes #0 = { mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite) "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="v1" }

!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}

!0 = !{i32 1, !"wchar_size", i32 4}
!1 = !{i32 7, !"frame-pointer", i32 2}
!2 = !{!"clang version 20.0.0git (git@github.com:yonghong-song/llvm-project.git 6f71e34e194dab5a52cb2211af575c6067e9e504)"}