
This reverts commit 9cc8442a2b438962883bbbfd8ff62ad4b1a2b95d. This reverts commit 859c871184bdfdebb47b5c7ec5e59348e0534e0b. A performance regression was reported on the original review; there appears to be an unexpected interaction here. Reverting while that is investigated.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 \
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32
; RUN: llc < %s -mtriple=riscv64 \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64
; RUN: llc < %s -mtriple=riscv32 -mattr=+unaligned-scalar-mem \
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
; RUN: llc < %s -mtriple=riscv64 -mattr=+unaligned-scalar-mem \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST

; ----------------------------------------------------------------------
; Fully unaligned cases

define void @unaligned_memmove0(ptr nocapture %dest, ptr %src) nounwind {
; RV32-BOTH-LABEL: unaligned_memmove0:
; RV32-BOTH: # %bb.0: # %entry
; RV32-BOTH-NEXT: ret
;
; RV64-BOTH-LABEL: unaligned_memmove0:
; RV64-BOTH: # %bb.0: # %entry
; RV64-BOTH-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 0, i1 false)
ret void
}

define void @unaligned_memmove1(ptr nocapture %dest, ptr %src) nounwind {
; RV32-BOTH-LABEL: unaligned_memmove1:
; RV32-BOTH: # %bb.0: # %entry
; RV32-BOTH-NEXT: lbu a1, 0(a1)
; RV32-BOTH-NEXT: sb a1, 0(a0)
; RV32-BOTH-NEXT: ret
;
; RV64-BOTH-LABEL: unaligned_memmove1:
; RV64-BOTH: # %bb.0: # %entry
; RV64-BOTH-NEXT: lbu a1, 0(a1)
; RV64-BOTH-NEXT: sb a1, 0(a0)
; RV64-BOTH-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 1, i1 false)
ret void
}

define void @unaligned_memmove2(ptr nocapture %dest, ptr %src) nounwind {
; RV32-LABEL: unaligned_memmove2:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lbu a2, 0(a1)
; RV32-NEXT: lbu a1, 1(a1)
; RV32-NEXT: sb a2, 0(a0)
; RV32-NEXT: sb a1, 1(a0)
; RV32-NEXT: ret
;
; RV64-LABEL: unaligned_memmove2:
; RV64: # %bb.0: # %entry
; RV64-NEXT: lbu a2, 0(a1)
; RV64-NEXT: lbu a1, 1(a1)
; RV64-NEXT: sb a2, 0(a0)
; RV64-NEXT: sb a1, 1(a0)
; RV64-NEXT: ret
;
; RV32-FAST-LABEL: unaligned_memmove2:
; RV32-FAST: # %bb.0: # %entry
; RV32-FAST-NEXT: lh a1, 0(a1)
; RV32-FAST-NEXT: sh a1, 0(a0)
; RV32-FAST-NEXT: ret
;
; RV64-FAST-LABEL: unaligned_memmove2:
; RV64-FAST: # %bb.0: # %entry
; RV64-FAST-NEXT: lh a1, 0(a1)
; RV64-FAST-NEXT: sh a1, 0(a0)
; RV64-FAST-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 2, i1 false)
ret void
}

define void @unaligned_memmove3(ptr nocapture %dest, ptr %src) nounwind {
; RV32-LABEL: unaligned_memmove3:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lbu a2, 0(a1)
; RV32-NEXT: lbu a3, 1(a1)
; RV32-NEXT: lbu a1, 2(a1)
; RV32-NEXT: sb a2, 0(a0)
; RV32-NEXT: sb a3, 1(a0)
; RV32-NEXT: sb a1, 2(a0)
; RV32-NEXT: ret
;
; RV64-LABEL: unaligned_memmove3:
; RV64: # %bb.0: # %entry
; RV64-NEXT: lbu a2, 0(a1)
; RV64-NEXT: lbu a3, 1(a1)
; RV64-NEXT: lbu a1, 2(a1)
; RV64-NEXT: sb a2, 0(a0)
; RV64-NEXT: sb a3, 1(a0)
; RV64-NEXT: sb a1, 2(a0)
; RV64-NEXT: ret
;
; RV32-FAST-LABEL: unaligned_memmove3:
; RV32-FAST: # %bb.0: # %entry
; RV32-FAST-NEXT: lh a2, 0(a1)
; RV32-FAST-NEXT: lbu a1, 2(a1)
; RV32-FAST-NEXT: sh a2, 0(a0)
; RV32-FAST-NEXT: sb a1, 2(a0)
; RV32-FAST-NEXT: ret
;
; RV64-FAST-LABEL: unaligned_memmove3:
; RV64-FAST: # %bb.0: # %entry
; RV64-FAST-NEXT: lh a2, 0(a1)
; RV64-FAST-NEXT: lbu a1, 2(a1)
; RV64-FAST-NEXT: sh a2, 0(a0)
; RV64-FAST-NEXT: sb a1, 2(a0)
; RV64-FAST-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 3, i1 false)
ret void
}

define void @unaligned_memmove4(ptr nocapture %dest, ptr %src) nounwind {
; RV32-LABEL: unaligned_memmove4:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lbu a2, 0(a1)
; RV32-NEXT: lbu a3, 1(a1)
; RV32-NEXT: lbu a4, 2(a1)
; RV32-NEXT: lbu a1, 3(a1)
; RV32-NEXT: sb a2, 0(a0)
; RV32-NEXT: sb a3, 1(a0)
; RV32-NEXT: sb a4, 2(a0)
; RV32-NEXT: sb a1, 3(a0)
; RV32-NEXT: ret
;
; RV64-LABEL: unaligned_memmove4:
; RV64: # %bb.0: # %entry
; RV64-NEXT: lbu a2, 0(a1)
; RV64-NEXT: lbu a3, 1(a1)
; RV64-NEXT: lbu a4, 2(a1)
; RV64-NEXT: lbu a1, 3(a1)
; RV64-NEXT: sb a2, 0(a0)
; RV64-NEXT: sb a3, 1(a0)
; RV64-NEXT: sb a4, 2(a0)
; RV64-NEXT: sb a1, 3(a0)
; RV64-NEXT: ret
;
; RV32-FAST-LABEL: unaligned_memmove4:
; RV32-FAST: # %bb.0: # %entry
; RV32-FAST-NEXT: lw a1, 0(a1)
; RV32-FAST-NEXT: sw a1, 0(a0)
; RV32-FAST-NEXT: ret
;
; RV64-FAST-LABEL: unaligned_memmove4:
; RV64-FAST: # %bb.0: # %entry
; RV64-FAST-NEXT: lw a1, 0(a1)
; RV64-FAST-NEXT: sw a1, 0(a0)
; RV64-FAST-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 4, i1 false)
ret void
}

define void @unaligned_memmove7(ptr nocapture %dest, ptr %src) nounwind {
; RV32-LABEL: unaligned_memmove7:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lbu a2, 4(a1)
; RV32-NEXT: lbu a3, 5(a1)
; RV32-NEXT: lbu a4, 6(a1)
; RV32-NEXT: lbu a5, 0(a1)
; RV32-NEXT: lbu a6, 1(a1)
; RV32-NEXT: lbu a7, 2(a1)
; RV32-NEXT: lbu a1, 3(a1)
; RV32-NEXT: sb a2, 4(a0)
; RV32-NEXT: sb a3, 5(a0)
; RV32-NEXT: sb a4, 6(a0)
; RV32-NEXT: sb a5, 0(a0)
; RV32-NEXT: sb a6, 1(a0)
; RV32-NEXT: sb a7, 2(a0)
; RV32-NEXT: sb a1, 3(a0)
; RV32-NEXT: ret
;
; RV64-LABEL: unaligned_memmove7:
; RV64: # %bb.0: # %entry
; RV64-NEXT: lbu a2, 4(a1)
; RV64-NEXT: lbu a3, 5(a1)
; RV64-NEXT: lbu a4, 6(a1)
; RV64-NEXT: lbu a5, 0(a1)
; RV64-NEXT: lbu a6, 1(a1)
; RV64-NEXT: lbu a7, 2(a1)
; RV64-NEXT: lbu a1, 3(a1)
; RV64-NEXT: sb a2, 4(a0)
; RV64-NEXT: sb a3, 5(a0)
; RV64-NEXT: sb a4, 6(a0)
; RV64-NEXT: sb a5, 0(a0)
; RV64-NEXT: sb a6, 1(a0)
; RV64-NEXT: sb a7, 2(a0)
; RV64-NEXT: sb a1, 3(a0)
; RV64-NEXT: ret
;
; RV32-FAST-LABEL: unaligned_memmove7:
; RV32-FAST: # %bb.0: # %entry
; RV32-FAST-NEXT: lw a2, 0(a1)
; RV32-FAST-NEXT: lh a3, 4(a1)
; RV32-FAST-NEXT: lbu a1, 6(a1)
; RV32-FAST-NEXT: sw a2, 0(a0)
; RV32-FAST-NEXT: sh a3, 4(a0)
; RV32-FAST-NEXT: sb a1, 6(a0)
; RV32-FAST-NEXT: ret
;
; RV64-FAST-LABEL: unaligned_memmove7:
; RV64-FAST: # %bb.0: # %entry
; RV64-FAST-NEXT: lw a2, 0(a1)
; RV64-FAST-NEXT: lh a3, 4(a1)
; RV64-FAST-NEXT: lbu a1, 6(a1)
; RV64-FAST-NEXT: sw a2, 0(a0)
; RV64-FAST-NEXT: sh a3, 4(a0)
; RV64-FAST-NEXT: sb a1, 6(a0)
; RV64-FAST-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 7, i1 false)
ret void
}

define void @unaligned_memmove8(ptr nocapture %dest, ptr %src) nounwind {
; RV32-LABEL: unaligned_memmove8:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lbu a2, 0(a1)
; RV32-NEXT: lbu a3, 1(a1)
; RV32-NEXT: lbu a4, 2(a1)
; RV32-NEXT: lbu a5, 3(a1)
; RV32-NEXT: lbu a6, 4(a1)
; RV32-NEXT: lbu a7, 5(a1)
; RV32-NEXT: lbu t0, 6(a1)
; RV32-NEXT: lbu a1, 7(a1)
; RV32-NEXT: sb a6, 4(a0)
; RV32-NEXT: sb a7, 5(a0)
; RV32-NEXT: sb t0, 6(a0)
; RV32-NEXT: sb a1, 7(a0)
; RV32-NEXT: sb a2, 0(a0)
; RV32-NEXT: sb a3, 1(a0)
; RV32-NEXT: sb a4, 2(a0)
; RV32-NEXT: sb a5, 3(a0)
; RV32-NEXT: ret
;
; RV64-LABEL: unaligned_memmove8:
; RV64: # %bb.0: # %entry
; RV64-NEXT: lbu a2, 0(a1)
; RV64-NEXT: lbu a3, 1(a1)
; RV64-NEXT: lbu a4, 2(a1)
; RV64-NEXT: lbu a5, 3(a1)
; RV64-NEXT: lbu a6, 4(a1)
; RV64-NEXT: lbu a7, 5(a1)
; RV64-NEXT: lbu t0, 6(a1)
; RV64-NEXT: lbu a1, 7(a1)
; RV64-NEXT: sb a6, 4(a0)
; RV64-NEXT: sb a7, 5(a0)
; RV64-NEXT: sb t0, 6(a0)
; RV64-NEXT: sb a1, 7(a0)
; RV64-NEXT: sb a2, 0(a0)
; RV64-NEXT: sb a3, 1(a0)
; RV64-NEXT: sb a4, 2(a0)
; RV64-NEXT: sb a5, 3(a0)
; RV64-NEXT: ret
;
; RV32-FAST-LABEL: unaligned_memmove8:
; RV32-FAST: # %bb.0: # %entry
; RV32-FAST-NEXT: lw a2, 0(a1)
; RV32-FAST-NEXT: lw a1, 4(a1)
; RV32-FAST-NEXT: sw a2, 0(a0)
; RV32-FAST-NEXT: sw a1, 4(a0)
; RV32-FAST-NEXT: ret
;
; RV64-FAST-LABEL: unaligned_memmove8:
; RV64-FAST: # %bb.0: # %entry
; RV64-FAST-NEXT: ld a1, 0(a1)
; RV64-FAST-NEXT: sd a1, 0(a0)
; RV64-FAST-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 8, i1 false)
ret void
}

define void @unaligned_memmove15(ptr nocapture %dest, ptr %src) nounwind {
; RV32-LABEL: unaligned_memmove15:
; RV32: # %bb.0: # %entry
; RV32-NEXT: li a2, 15
; RV32-NEXT: tail memmove
;
; RV64-LABEL: unaligned_memmove15:
; RV64: # %bb.0: # %entry
; RV64-NEXT: li a2, 15
; RV64-NEXT: tail memmove
;
; RV32-FAST-LABEL: unaligned_memmove15:
; RV32-FAST: # %bb.0: # %entry
; RV32-FAST-NEXT: lbu a2, 14(a1)
; RV32-FAST-NEXT: lw a3, 0(a1)
; RV32-FAST-NEXT: lw a4, 4(a1)
; RV32-FAST-NEXT: lw a5, 8(a1)
; RV32-FAST-NEXT: lh a1, 12(a1)
; RV32-FAST-NEXT: sb a2, 14(a0)
; RV32-FAST-NEXT: sw a3, 0(a0)
; RV32-FAST-NEXT: sw a4, 4(a0)
; RV32-FAST-NEXT: sw a5, 8(a0)
; RV32-FAST-NEXT: sh a1, 12(a0)
; RV32-FAST-NEXT: ret
;
; RV64-FAST-LABEL: unaligned_memmove15:
; RV64-FAST: # %bb.0: # %entry
; RV64-FAST-NEXT: ld a2, 0(a1)
; RV64-FAST-NEXT: lw a3, 8(a1)
; RV64-FAST-NEXT: lh a4, 12(a1)
; RV64-FAST-NEXT: lbu a1, 14(a1)
; RV64-FAST-NEXT: sd a2, 0(a0)
; RV64-FAST-NEXT: sw a3, 8(a0)
; RV64-FAST-NEXT: sh a4, 12(a0)
; RV64-FAST-NEXT: sb a1, 14(a0)
; RV64-FAST-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 15, i1 false)
ret void
}

define void @unaligned_memmove16(ptr nocapture %dest, ptr %src) nounwind {
; RV32-LABEL: unaligned_memmove16:
; RV32: # %bb.0: # %entry
; RV32-NEXT: li a2, 16
; RV32-NEXT: tail memmove
;
; RV64-LABEL: unaligned_memmove16:
; RV64: # %bb.0: # %entry
; RV64-NEXT: li a2, 16
; RV64-NEXT: tail memmove
;
; RV32-FAST-LABEL: unaligned_memmove16:
; RV32-FAST: # %bb.0: # %entry
; RV32-FAST-NEXT: lw a2, 0(a1)
; RV32-FAST-NEXT: lw a3, 4(a1)
; RV32-FAST-NEXT: lw a4, 8(a1)
; RV32-FAST-NEXT: lw a1, 12(a1)
; RV32-FAST-NEXT: sw a2, 0(a0)
; RV32-FAST-NEXT: sw a3, 4(a0)
; RV32-FAST-NEXT: sw a4, 8(a0)
; RV32-FAST-NEXT: sw a1, 12(a0)
; RV32-FAST-NEXT: ret
;
; RV64-FAST-LABEL: unaligned_memmove16:
; RV64-FAST: # %bb.0: # %entry
; RV64-FAST-NEXT: ld a2, 0(a1)
; RV64-FAST-NEXT: ld a1, 8(a1)
; RV64-FAST-NEXT: sd a2, 0(a0)
; RV64-FAST-NEXT: sd a1, 8(a0)
; RV64-FAST-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 16, i1 false)
ret void
}

define void @unaligned_memmove31(ptr nocapture %dest, ptr %src) nounwind {
; RV32-BOTH-LABEL: unaligned_memmove31:
; RV32-BOTH: # %bb.0: # %entry
; RV32-BOTH-NEXT: li a2, 31
; RV32-BOTH-NEXT: tail memmove
;
; RV64-LABEL: unaligned_memmove31:
; RV64: # %bb.0: # %entry
; RV64-NEXT: li a2, 31
; RV64-NEXT: tail memmove
;
; RV64-FAST-LABEL: unaligned_memmove31:
; RV64-FAST: # %bb.0: # %entry
; RV64-FAST-NEXT: lh a2, 28(a1)
; RV64-FAST-NEXT: lbu a3, 30(a1)
; RV64-FAST-NEXT: ld a4, 0(a1)
; RV64-FAST-NEXT: ld a5, 8(a1)
; RV64-FAST-NEXT: ld a6, 16(a1)
; RV64-FAST-NEXT: lw a1, 24(a1)
; RV64-FAST-NEXT: sh a2, 28(a0)
; RV64-FAST-NEXT: sb a3, 30(a0)
; RV64-FAST-NEXT: sd a4, 0(a0)
; RV64-FAST-NEXT: sd a5, 8(a0)
; RV64-FAST-NEXT: sd a6, 16(a0)
; RV64-FAST-NEXT: sw a1, 24(a0)
; RV64-FAST-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 31, i1 false)
ret void
}

; ----------------------------------------------------------------------
; Fully aligned cases

define void @aligned_memmove0(ptr nocapture %dest, ptr %src) nounwind {
; RV32-BOTH-LABEL: aligned_memmove0:
; RV32-BOTH: # %bb.0: # %entry
; RV32-BOTH-NEXT: ret
;
; RV64-BOTH-LABEL: aligned_memmove0:
; RV64-BOTH: # %bb.0: # %entry
; RV64-BOTH-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 0, i1 false)
ret void
}

define void @aligned_memmove1(ptr nocapture %dest, ptr %src) nounwind {
; RV32-BOTH-LABEL: aligned_memmove1:
; RV32-BOTH: # %bb.0: # %entry
; RV32-BOTH-NEXT: lbu a1, 0(a1)
; RV32-BOTH-NEXT: sb a1, 0(a0)
; RV32-BOTH-NEXT: ret
;
; RV64-BOTH-LABEL: aligned_memmove1:
; RV64-BOTH: # %bb.0: # %entry
; RV64-BOTH-NEXT: lbu a1, 0(a1)
; RV64-BOTH-NEXT: sb a1, 0(a0)
; RV64-BOTH-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 1, i1 false)
ret void
}

define void @aligned_memmove2(ptr nocapture %dest, ptr %src) nounwind {
; RV32-BOTH-LABEL: aligned_memmove2:
; RV32-BOTH: # %bb.0: # %entry
; RV32-BOTH-NEXT: lh a1, 0(a1)
; RV32-BOTH-NEXT: sh a1, 0(a0)
; RV32-BOTH-NEXT: ret
;
; RV64-BOTH-LABEL: aligned_memmove2:
; RV64-BOTH: # %bb.0: # %entry
; RV64-BOTH-NEXT: lh a1, 0(a1)
; RV64-BOTH-NEXT: sh a1, 0(a0)
; RV64-BOTH-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 2, i1 false)
ret void
}

define void @aligned_memmove3(ptr nocapture %dest, ptr %src) nounwind {
; RV32-BOTH-LABEL: aligned_memmove3:
; RV32-BOTH: # %bb.0: # %entry
; RV32-BOTH-NEXT: lh a2, 0(a1)
; RV32-BOTH-NEXT: lbu a1, 2(a1)
; RV32-BOTH-NEXT: sh a2, 0(a0)
; RV32-BOTH-NEXT: sb a1, 2(a0)
; RV32-BOTH-NEXT: ret
;
; RV64-BOTH-LABEL: aligned_memmove3:
; RV64-BOTH: # %bb.0: # %entry
; RV64-BOTH-NEXT: lh a2, 0(a1)
; RV64-BOTH-NEXT: lbu a1, 2(a1)
; RV64-BOTH-NEXT: sh a2, 0(a0)
; RV64-BOTH-NEXT: sb a1, 2(a0)
; RV64-BOTH-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 3, i1 false)
ret void
}

define void @aligned_memmove4(ptr nocapture %dest, ptr %src) nounwind {
; RV32-BOTH-LABEL: aligned_memmove4:
; RV32-BOTH: # %bb.0: # %entry
; RV32-BOTH-NEXT: lw a1, 0(a1)
; RV32-BOTH-NEXT: sw a1, 0(a0)
; RV32-BOTH-NEXT: ret
;
; RV64-BOTH-LABEL: aligned_memmove4:
; RV64-BOTH: # %bb.0: # %entry
; RV64-BOTH-NEXT: lw a1, 0(a1)
; RV64-BOTH-NEXT: sw a1, 0(a0)
; RV64-BOTH-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 4, i1 false)
ret void
}

define void @aligned_memmove7(ptr nocapture %dest, ptr %src) nounwind {
; RV32-BOTH-LABEL: aligned_memmove7:
; RV32-BOTH: # %bb.0: # %entry
; RV32-BOTH-NEXT: lw a2, 0(a1)
; RV32-BOTH-NEXT: lh a3, 4(a1)
; RV32-BOTH-NEXT: lbu a1, 6(a1)
; RV32-BOTH-NEXT: sw a2, 0(a0)
; RV32-BOTH-NEXT: sh a3, 4(a0)
; RV32-BOTH-NEXT: sb a1, 6(a0)
; RV32-BOTH-NEXT: ret
;
; RV64-BOTH-LABEL: aligned_memmove7:
; RV64-BOTH: # %bb.0: # %entry
; RV64-BOTH-NEXT: lw a2, 0(a1)
; RV64-BOTH-NEXT: lh a3, 4(a1)
; RV64-BOTH-NEXT: lbu a1, 6(a1)
; RV64-BOTH-NEXT: sw a2, 0(a0)
; RV64-BOTH-NEXT: sh a3, 4(a0)
; RV64-BOTH-NEXT: sb a1, 6(a0)
; RV64-BOTH-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 7, i1 false)
ret void
}

define void @aligned_memmove8(ptr nocapture %dest, ptr %src) nounwind {
; RV32-BOTH-LABEL: aligned_memmove8:
; RV32-BOTH: # %bb.0: # %entry
; RV32-BOTH-NEXT: lw a2, 0(a1)
; RV32-BOTH-NEXT: sw a2, 0(a0)
; RV32-BOTH-NEXT: lw a1, 4(a1)
; RV32-BOTH-NEXT: sw a1, 4(a0)
; RV32-BOTH-NEXT: ret
;
; RV64-BOTH-LABEL: aligned_memmove8:
; RV64-BOTH: # %bb.0: # %entry
; RV64-BOTH-NEXT: ld a1, 0(a1)
; RV64-BOTH-NEXT: sd a1, 0(a0)
; RV64-BOTH-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 8, i1 false)
ret void
}

define void @aligned_memmove15(ptr nocapture %dest, ptr %src) nounwind {
; RV32-BOTH-LABEL: aligned_memmove15:
; RV32-BOTH: # %bb.0: # %entry
; RV32-BOTH-NEXT: lw a2, 0(a1)
; RV32-BOTH-NEXT: lw a3, 8(a1)
; RV32-BOTH-NEXT: lh a4, 12(a1)
; RV32-BOTH-NEXT: lbu a5, 14(a1)
; RV32-BOTH-NEXT: sw a2, 0(a0)
; RV32-BOTH-NEXT: lw a1, 4(a1)
; RV32-BOTH-NEXT: sw a1, 4(a0)
; RV32-BOTH-NEXT: sw a3, 8(a0)
; RV32-BOTH-NEXT: sh a4, 12(a0)
; RV32-BOTH-NEXT: sb a5, 14(a0)
; RV32-BOTH-NEXT: ret
;
; RV64-BOTH-LABEL: aligned_memmove15:
; RV64-BOTH: # %bb.0: # %entry
; RV64-BOTH-NEXT: ld a2, 0(a1)
; RV64-BOTH-NEXT: lw a3, 8(a1)
; RV64-BOTH-NEXT: lh a4, 12(a1)
; RV64-BOTH-NEXT: lbu a1, 14(a1)
; RV64-BOTH-NEXT: sd a2, 0(a0)
; RV64-BOTH-NEXT: sw a3, 8(a0)
; RV64-BOTH-NEXT: sh a4, 12(a0)
; RV64-BOTH-NEXT: sb a1, 14(a0)
; RV64-BOTH-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 15, i1 false)
ret void
}

define void @aligned_memmove16(ptr nocapture %dest, ptr %src) nounwind {
; RV32-BOTH-LABEL: aligned_memmove16:
; RV32-BOTH: # %bb.0: # %entry
; RV32-BOTH-NEXT: lw a2, 0(a1)
; RV32-BOTH-NEXT: lw a3, 8(a1)
; RV32-BOTH-NEXT: sw a2, 0(a0)
; RV32-BOTH-NEXT: lw a2, 4(a1)
; RV32-BOTH-NEXT: lw a1, 12(a1)
; RV32-BOTH-NEXT: sw a2, 4(a0)
; RV32-BOTH-NEXT: sw a3, 8(a0)
; RV32-BOTH-NEXT: sw a1, 12(a0)
; RV32-BOTH-NEXT: ret
;
; RV64-BOTH-LABEL: aligned_memmove16:
; RV64-BOTH: # %bb.0: # %entry
; RV64-BOTH-NEXT: ld a2, 0(a1)
; RV64-BOTH-NEXT: ld a1, 8(a1)
; RV64-BOTH-NEXT: sd a2, 0(a0)
; RV64-BOTH-NEXT: sd a1, 8(a0)
; RV64-BOTH-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 16, i1 false)
ret void
}

define void @aligned_memmove31(ptr nocapture %dest, ptr %src) nounwind {
; RV32-BOTH-LABEL: aligned_memmove31:
; RV32-BOTH: # %bb.0: # %entry
; RV32-BOTH-NEXT: li a2, 31
; RV32-BOTH-NEXT: tail memmove
;
; RV64-BOTH-LABEL: aligned_memmove31:
; RV64-BOTH: # %bb.0: # %entry
; RV64-BOTH-NEXT: lh a2, 28(a1)
; RV64-BOTH-NEXT: lbu a3, 30(a1)
; RV64-BOTH-NEXT: ld a4, 0(a1)
; RV64-BOTH-NEXT: ld a5, 8(a1)
; RV64-BOTH-NEXT: ld a6, 16(a1)
; RV64-BOTH-NEXT: lw a1, 24(a1)
; RV64-BOTH-NEXT: sh a2, 28(a0)
; RV64-BOTH-NEXT: sb a3, 30(a0)
; RV64-BOTH-NEXT: sd a4, 0(a0)
; RV64-BOTH-NEXT: sd a5, 8(a0)
; RV64-BOTH-NEXT: sd a6, 16(a0)
; RV64-BOTH-NEXT: sw a1, 24(a0)
; RV64-BOTH-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 31, i1 false)
ret void
}

; ------------------------------------------------------------------------
; A few partially aligned cases


define void @memmove16_align4(ptr nocapture %dest, ptr nocapture %src) nounwind {
; RV32-BOTH-LABEL: memmove16_align4:
; RV32-BOTH: # %bb.0: # %entry
; RV32-BOTH-NEXT: lw a2, 0(a1)
; RV32-BOTH-NEXT: lw a3, 4(a1)
; RV32-BOTH-NEXT: lw a4, 8(a1)
; RV32-BOTH-NEXT: lw a1, 12(a1)
; RV32-BOTH-NEXT: sw a2, 0(a0)
; RV32-BOTH-NEXT: sw a3, 4(a0)
; RV32-BOTH-NEXT: sw a4, 8(a0)
; RV32-BOTH-NEXT: sw a1, 12(a0)
; RV32-BOTH-NEXT: ret
;
; RV64-LABEL: memmove16_align4:
; RV64: # %bb.0: # %entry
; RV64-NEXT: lw a2, 0(a1)
; RV64-NEXT: lw a3, 4(a1)
; RV64-NEXT: lw a4, 8(a1)
; RV64-NEXT: lw a1, 12(a1)
; RV64-NEXT: sw a2, 0(a0)
; RV64-NEXT: sw a3, 4(a0)
; RV64-NEXT: sw a4, 8(a0)
; RV64-NEXT: sw a1, 12(a0)
; RV64-NEXT: ret
;
; RV64-FAST-LABEL: memmove16_align4:
; RV64-FAST: # %bb.0: # %entry
; RV64-FAST-NEXT: ld a2, 0(a1)
; RV64-FAST-NEXT: ld a1, 8(a1)
; RV64-FAST-NEXT: sd a2, 0(a0)
; RV64-FAST-NEXT: sd a1, 8(a0)
; RV64-FAST-NEXT: ret
entry:
tail call void @llvm.memmove.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 16, i1 false)
ret void
}

define i32 @memmove11_align8(ptr nocapture %dest, ptr %src) {
; RV32-BOTH-LABEL: memmove11_align8:
; RV32-BOTH: # %bb.0: # %entry
; RV32-BOTH-NEXT: lw a2, 0(a1)
; RV32-BOTH-NEXT: lh a3, 8(a1)
; RV32-BOTH-NEXT: lbu a4, 10(a1)
; RV32-BOTH-NEXT: sw a2, 0(a0)
; RV32-BOTH-NEXT: lw a1, 4(a1)
; RV32-BOTH-NEXT: sw a1, 4(a0)
; RV32-BOTH-NEXT: sh a3, 8(a0)
; RV32-BOTH-NEXT: sb a4, 10(a0)
; RV32-BOTH-NEXT: li a0, 0
; RV32-BOTH-NEXT: ret
;
; RV64-BOTH-LABEL: memmove11_align8:
; RV64-BOTH: # %bb.0: # %entry
; RV64-BOTH-NEXT: ld a2, 0(a1)
; RV64-BOTH-NEXT: lh a3, 8(a1)
; RV64-BOTH-NEXT: lbu a1, 10(a1)
; RV64-BOTH-NEXT: sd a2, 0(a0)
; RV64-BOTH-NEXT: sh a3, 8(a0)
; RV64-BOTH-NEXT: sb a1, 10(a0)
; RV64-BOTH-NEXT: li a0, 0
; RV64-BOTH-NEXT: ret
entry:
call void @llvm.memmove.p0.p0.i64(ptr align 8 %dest, ptr align 8 %src, i64 11, i1 false)
ret i32 0
}

declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind