
The motivation is that differences in unrolling were noticed when trying to switch from the libcall to the intrinsic. There are likely also differences not yet noticed in other cost-based decisions, such as inlining and possibly vectorization. Neither cost is particularly well considered, but for the moment let's make them equal to simplify migration. We can come back and refine this once the intrinsic is exercised by default.
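For context, those heuristics all ultimately reduce to per-instruction TargetTransformInfo queries, so the libcall form and the intrinsic form of the same operation should report matching costs if we don't want decisions to flip during migration. A rough sketch of the kind of query involved (estimateFunctionCost is a hypothetical helper shown for illustration only, not part of this patch; the throughput cost kind is used here as an example):

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/InstructionCost.h"

using namespace llvm;

// Hypothetical helper: sum the cost-model estimate for every instruction in a
// function. This is the same style of per-instruction TTI query that
// cost-based transforms make; if the libcall and the intrinsic report
// different costs, decisions such as unrolling can change when switching
// between the two forms.
static InstructionCost estimateFunctionCost(Function &F,
                                            const TargetTransformInfo &TTI) {
  InstructionCost Total = 0;
  for (Instruction &I : instructions(F))
    Total +=
        TTI.getInstructionCost(&I, TargetTransformInfo::TCK_RecipThroughput);
  return Total;
}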
The added test (LLVM IR, 41 lines, 2.6 KiB):
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -mtriple=x86_64-apple-darwin10.0.0 -passes="print<cost-model>" 2>&1 -disable-output | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"

target triple = "x86_64-apple-darwin10.0.0"

@.memset_pattern = private unnamed_addr constant [4 x i32] [i32 2, i32 2, i32 2, i32 2], align 16

define void @via_libcall(ptr %p) nounwind ssp {
; CHECK-LABEL: 'via_libcall'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @memset_pattern4(ptr %p, ptr @.memset_pattern, i64 200)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @memset_pattern8(ptr %p, ptr @.memset_pattern, i64 200)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @memset_pattern16(ptr %p, ptr @.memset_pattern, i64 200)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call void @memset_pattern4(ptr %p, ptr @.memset_pattern, i64 200)
  call void @memset_pattern8(ptr %p, ptr @.memset_pattern, i64 200)
  call void @memset_pattern16(ptr %p, ptr @.memset_pattern, i64 200)
  ret void
}

declare void @memset_pattern4(ptr, ptr, i64)
declare void @memset_pattern8(ptr, ptr, i64)
declare void @memset_pattern16(ptr, ptr, i64)

define void @via_intrinsic(ptr %p) {
; CHECK-LABEL: 'via_intrinsic'
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.experimental.memset.pattern.p0.i16.i64(ptr align 4 %p, i16 2, i64 100, i1 false)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.experimental.memset.pattern.p0.i32.i64(ptr align 4 %p, i32 2, i64 50, i1 false)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.experimental.memset.pattern.p0.i64.i64(ptr align 4 %p, i64 2, i64 25, i1 false)
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.experimental.memset.pattern.p0.i128.i64(ptr align 4 %p, i128 2, i64 12, i1 false)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call void @llvm.experimental.memset.pattern(ptr align 4 %p, i16 2, i64 100, i1 false)
  call void @llvm.experimental.memset.pattern(ptr align 4 %p, i32 2, i64 50, i1 false)
  call void @llvm.experimental.memset.pattern(ptr align 4 %p, i64 2, i64 25, i1 false)
  call void @llvm.experimental.memset.pattern(ptr align 4 %p, i128 2, i64 12, i1 false)
  ret void
}