; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv32 -mattr=+zilsd -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=CHECK,SLOW %s
; RUN: llc -mtriple=riscv32 -mattr=+zilsd,+unaligned-scalar-mem -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=CHECK,FAST %s
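; Zilsd provides paired 64-bit loads and stores (ld/sd) on RV32. Check that an
; aligned i64 load becomes a single ld into the even/odd pair a2/a3, and that
; the volatile load whose result is unused is still emitted (with zero as the
; destination) rather than deleted.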
define i64 @load(ptr %a) nounwind {
; CHECK-LABEL: load:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld a2, 80(a0)
; CHECK-NEXT:    ld zero, 0(a0)
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    mv a1, a3
; CHECK-NEXT:    ret
  %1 = getelementptr i64, ptr %a, i32 10
  %2 = load i64, ptr %1
  %3 = load volatile i64, ptr %a
  ret i64 %2
}
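; Check that each aligned i64 store becomes a single sd; the incoming value in
; a1/a2 is first moved into the even/odd pair a2/a3.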
define void @store(ptr %a, i64 %b) nounwind {
; CHECK-LABEL: store:
; CHECK:       # %bb.0:
; CHECK-NEXT:    mv a3, a2
; CHECK-NEXT:    mv a2, a1
; CHECK-NEXT:    sd a2, 0(a0)
; CHECK-NEXT:    sd a2, 88(a0)
; CHECK-NEXT:    ret
  store i64 %b, ptr %a
  %1 = getelementptr i64, ptr %a, i32 11
  store i64 %b, ptr %1
  ret void
}
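; An i64 load with align 1: without +unaligned-scalar-mem (SLOW) it is expanded
; into byte loads and shifts; with it (FAST) a single ld is used.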
define i64 @load_unaligned(ptr %p) {
; SLOW-LABEL: load_unaligned:
; SLOW:       # %bb.0:
; SLOW-NEXT:    lbu a1, 1(a0)
; SLOW-NEXT:    lbu a2, 2(a0)
; SLOW-NEXT:    lbu a3, 3(a0)
; SLOW-NEXT:    lbu a4, 0(a0)
; SLOW-NEXT:    slli a1, a1, 8
; SLOW-NEXT:    slli a2, a2, 16
; SLOW-NEXT:    slli a3, a3, 24
; SLOW-NEXT:    or a1, a1, a4
; SLOW-NEXT:    or a2, a3, a2
; SLOW-NEXT:    lbu a3, 5(a0)
; SLOW-NEXT:    lbu a4, 4(a0)
; SLOW-NEXT:    lbu a5, 6(a0)
; SLOW-NEXT:    lbu a0, 7(a0)
; SLOW-NEXT:    slli a3, a3, 8
; SLOW-NEXT:    or a3, a3, a4
; SLOW-NEXT:    slli a5, a5, 16
; SLOW-NEXT:    slli a0, a0, 24
; SLOW-NEXT:    or a5, a0, a5
; SLOW-NEXT:    or a0, a2, a1
; SLOW-NEXT:    or a1, a5, a3
; SLOW-NEXT:    ret
;
; FAST-LABEL: load_unaligned:
; FAST:       # %bb.0:
; FAST-NEXT:    ld a0, 0(a0)
; FAST-NEXT:    ret
  %res = load i64, ptr %p, align 1
  ret i64 %res
}
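; The align-1 i64 store mirrors the load above: byte stores under SLOW, a
; single sd under FAST.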
define void @store_unaligned(ptr %p, i64 %v) {
; SLOW-LABEL: store_unaligned:
; SLOW:       # %bb.0:
; SLOW-NEXT:    srli a3, a2, 24
; SLOW-NEXT:    srli a4, a2, 16
; SLOW-NEXT:    srli a5, a2, 8
; SLOW-NEXT:    srli a6, a1, 24
; SLOW-NEXT:    srli a7, a1, 16
; SLOW-NEXT:    sb a2, 4(a0)
; SLOW-NEXT:    sb a5, 5(a0)
; SLOW-NEXT:    sb a4, 6(a0)
; SLOW-NEXT:    sb a3, 7(a0)
; SLOW-NEXT:    srli a2, a1, 8
; SLOW-NEXT:    sb a1, 0(a0)
; SLOW-NEXT:    sb a2, 1(a0)
; SLOW-NEXT:    sb a7, 2(a0)
; SLOW-NEXT:    sb a6, 3(a0)
; SLOW-NEXT:    ret
;
; FAST-LABEL: store_unaligned:
; FAST:       # %bb.0:
; FAST-NEXT:    mv a3, a2
; FAST-NEXT:    mv a2, a1
; FAST-NEXT:    sd a2, 0(a0)
; FAST-NEXT:    ret
  store i64 %v, ptr %p, align 1
  ret void
}
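; Check that the %lo part of a global's address folds into the ld/sd
; addressing mode.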
@g = dso_local global i64 0, align 8
define i64 @load_g() nounwind {
; CHECK-LABEL: load_g:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lui a0, %hi(g)
; CHECK-NEXT:    ld a0, %lo(g)(a0)
; CHECK-NEXT:    ret
entry:
  %0 = load i64, ptr @g
  ret i64 %0
}
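; Storing the constant 0 uses zero as the sd data register.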
define void @store_g() nounwind {
; CHECK-LABEL: store_g:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lui a0, %hi(g)
; CHECK-NEXT:    sd zero, %lo(g)(a0)
; CHECK-NEXT:    ret
entry:
  store i64 0, ptr @g
  ret void
}
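; The byte offset is 2000 * 8 = 16000, which does not fit in a 12-bit
; immediate, so it is materialized as lui 4 (16384) with a -384 offset folded
; into the ld and sd.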
define void @large_offset(ptr nocapture %p, i64 %d) nounwind {
; CHECK-LABEL: large_offset:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lui a1, 4
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    ld a2, -384(a0)
; CHECK-NEXT:    addi a2, a2, 1
; CHECK-NEXT:    seqz a1, a2
; CHECK-NEXT:    add a3, a3, a1
; CHECK-NEXT:    sd a2, -384(a0)
; CHECK-NEXT:    ret
entry:
  %add.ptr = getelementptr inbounds i64, ptr %p, i64 2000
  %a = load i64, ptr %add.ptr, align 8
  %b = add i64 %a, 1
  store i64 %b, ptr %add.ptr, align 8
  ret void
}