
; Tests memcpy-to-memcpy forwarding in cases where the second memcpy is
; larger, but the overread is known to be undef, allowing memcpyopt to
; shrink the memcpy size. Refs https://github.com/llvm/llvm-project/pull/140954
; which laid some of the groundwork for this.
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=memcpyopt -S -verify-memoryssa | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; Same dynamic size on both memcpys: the second memcpy's source is forwarded
; from %tmp to %src without any size change.
define void @test(ptr %src, i64 %size) {
; CHECK-LABEL: @test(
; CHECK-NEXT:    [[TMP:%.*]] = alloca i8, i64 [[SIZE:%.*]], align 1
; CHECK-NEXT:    [[DST:%.*]] = alloca i8, i64 [[SIZE]], align 1
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP]], ptr align 8 [[SRC:%.*]], i64 [[SIZE]], i1 false)
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DST]], ptr align 8 [[SRC]], i64 [[SIZE]], i1 false)
; CHECK-NEXT:    ret void
;
  %tmp = alloca i8, i64 %size
  %dst = alloca i8, i64 %size
  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp, ptr align 8 %src, i64 %size, i1 false)
  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %tmp, i64 %size, i1 false)
  ret void
}
; Second memcpy (32 bytes) overreads the 31 bytes written into %tmp; the
; extra byte is undef, so the forwarded memcpy is shrunk to 31 bytes.
define void @dynalloca_test(ptr %src, i64 %size1) {
; CHECK-LABEL: @dynalloca_test(
; CHECK-NEXT:    [[TMP:%.*]] = alloca i8, i64 [[SIZE1:%.*]], align 1
; CHECK-NEXT:    [[DST:%.*]] = alloca i8, i64 [[SIZE1]], align 1
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP]], ptr align 8 [[SRC:%.*]], i64 31, i1 false)
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DST]], ptr align 8 [[SRC]], i64 31, i1 false)
; CHECK-NEXT:    ret void
;
  %tmp = alloca i8, i64 %size1
  %dst = alloca i8, i64 %size1
  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp, ptr align 8 %src, i64 31, i1 false)
  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %tmp, i64 32, i1 false)
  ret void
}
; Second memcpy reads %tmp at offset 1: only 30 defined bytes remain of the
; 31 written, so the forwarded memcpy reads %src at offset 1 with size 30.
define void @dynalloca_offset_test(ptr %src, i64 %size1) {
; CHECK-LABEL: @dynalloca_offset_test(
; CHECK-NEXT:    [[TMP:%.*]] = alloca i8, i64 [[SIZE1:%.*]], align 1
; CHECK-NEXT:    [[DST:%.*]] = alloca i8, i64 [[SIZE1]], align 1
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP]], ptr align 8 [[SRC:%.*]], i64 31, i1 false)
; CHECK-NEXT:    [[TMP_OFFSET:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 1
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 1
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DST]], ptr align 1 [[TMP1]], i64 30, i1 false)
; CHECK-NEXT:    ret void
;
  %tmp = alloca i8, i64 %size1
  %dst = alloca i8, i64 %size1
  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp, ptr align 8 %src, i64 31, i1 false)
  %tmp_offset = getelementptr inbounds i8, ptr %tmp, i64 1
  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %tmp_offset, i64 31, i1 false)
  ret void
}
; Dynamic sizes, so left as it is: with unrelated %size1/%size2 the pass
; cannot prove the second memcpy doesn't read past what the first wrote.
define void @negative_test(ptr %src, i64 %size1, i64 %size2) {
; CHECK-LABEL: @negative_test(
; CHECK-NEXT:    [[TMP:%.*]] = alloca i8, i64 [[SIZE1:%.*]], align 1
; CHECK-NEXT:    [[DST:%.*]] = alloca i8, i64 [[SIZE2:%.*]], align 1
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP]], ptr align 8 [[SRC:%.*]], i64 [[SIZE1]], i1 false)
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DST]], ptr align 8 [[TMP]], i64 [[SIZE2]], i1 false)
; CHECK-NEXT:    ret void
;
  %tmp = alloca i8, i64 %size1
  %dst = alloca i8, i64 %size2
  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp, ptr align 8 %src, i64 %size1, i1 false)
  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %tmp, i64 %size2, i1 false)
  ret void
}
declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)