
Currently, opaque pointers are supported in two forms: the -force-opaque-pointers mode, where all pointers are opaque and typed pointers do not exist, and a plain ptr type that can coexist with typed pointers.

This patch removes support for the mixed mode. You either get typed pointers or opaque pointers, but not both. In the (current) default mode, using ptr is forbidden. In -opaque-pointers mode, all pointers are opaque.

The motivation is that the mixed mode introduces additional issues that don't exist in the fully opaque mode. D105155 is an example of a design problem. Looking at D109259, it would probably need additional work to support mixed mode (e.g. to generate GEPs with a typed base but an opaque result). Mixed mode would also end up inserting many casts between i8* and ptr, which would require significant additional work to consistently avoid.

I don't think the mixed mode is particularly valuable, as it doesn't align with our end goal. The only thing I've found it moderately useful for is interleaving opaque pointer tests with typed pointer tests, but we can live without that.

Differential Revision: https://reviews.llvm.org/D109290
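For illustration only (this snippet is not part of the patch or of the test below, and the function names are made up for the example), here is the same load written in each of the two remaining modes; after this change a module uses one form or the other, never a mix:

; Typed-pointer mode (the current default; the ptr type is forbidden here):
define i32 @typed_load(i32* %a) {
  %i = load i32, i32* %a
  ret i32 %i
}

; -opaque-pointers mode (all pointers are opaque; pointee types are gone):
define i32 @opaque_load(ptr %a) {
  %i = load i32, ptr %a
  ret i32 %i
}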
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=verify -opaque-pointers -S < %s | FileCheck %s

define i32 @load(ptr %a) {
; CHECK-LABEL: @load(
; CHECK-NEXT:    [[I:%.*]] = load i32, ptr [[A:%.*]], align 4
; CHECK-NEXT:    ret i32 [[I]]
;
  %i = load i32, ptr %a
  ret i32 %i
}

define void @store(ptr %a, i32 %i) {
; CHECK-LABEL: @store(
; CHECK-NEXT:    store i32 [[I:%.*]], ptr [[A:%.*]], align 4
; CHECK-NEXT:    ret void
;
  store i32 %i, ptr %a
  ret void
}

define void @cmpxchg(ptr %p, i32 %a, i32 %b) {
; CHECK-LABEL: @cmpxchg(
; CHECK-NEXT:    [[VAL_SUCCESS:%.*]] = cmpxchg ptr [[P:%.*]], i32 [[A:%.*]], i32 [[B:%.*]] acq_rel monotonic, align 4
; CHECK-NEXT:    ret void
;
  %val_success = cmpxchg ptr %p, i32 %a, i32 %b acq_rel monotonic
  ret void
}

define void @atomicrmw(ptr %a, i32 %i) {
; CHECK-LABEL: @atomicrmw(
; CHECK-NEXT:    [[B:%.*]] = atomicrmw add ptr [[A:%.*]], i32 [[I:%.*]] acquire, align 4
; CHECK-NEXT:    ret void
;
  %b = atomicrmw add ptr %a, i32 %i acquire
  ret void
}

define void @opaque_mangle(ptr %a) {
; CHECK-LABEL: @opaque_mangle(
; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 8, ptr [[A:%.*]])
; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 8, ptr [[A]])
; CHECK-NEXT:    ret void
;
  call void @llvm.lifetime.start.p0(i64 8, ptr %a)
  call void @llvm.lifetime.end.p0(i64 8, ptr %a)
  ret void
}

define void @intrinsic_calls(ptr %a) {
; CHECK-LABEL: @intrinsic_calls(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.masked.load.v2i32.p0(ptr [[A:%.*]], i32 4, <2 x i1> zeroinitializer, <2 x i32> zeroinitializer)
; CHECK-NEXT:    call void @llvm.masked.store.v2i32.p0(<2 x i32> zeroinitializer, ptr [[A]], i32 4, <2 x i1> zeroinitializer)
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr> zeroinitializer, i32 4, <2 x i1> zeroinitializer, <2 x i64> zeroinitializer)
; CHECK-NEXT:    [[TMP3:%.*]] = call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype(i32) null, i32 0, i32 0)
; CHECK-NEXT:    ret void
;
  call <2 x i32> @llvm.masked.load.v2i32.p0(ptr %a, i32 4, <2 x i1> zeroinitializer, <2 x i32> zeroinitializer)
  call void @llvm.masked.store.v2i32.p0(<2 x i32> zeroinitializer, ptr %a, i32 4, <2 x i1> zeroinitializer)
  call <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr> zeroinitializer, i32 4, <2 x i1> zeroinitializer, <2 x i64> zeroinitializer)
  call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype(i32) null, i32 0, i32 0)
  ret void
}

; CHECK: @llvm.lifetime.start.p0
; CHECK: @llvm.lifetime.end.p0
declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
declare void @llvm.lifetime.end.p0(i64, ptr nocapture)

declare <2 x i32> @llvm.masked.load.v2i32.p0(ptr, i32, <2 x i1>, <2 x i32>)
declare void @llvm.masked.store.v2i32.p0(<2 x i32>, ptr, i32, <2 x i1>)
declare <2 x i64> @llvm.masked.gather.v2i64.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i64>)
declare ptr @llvm.preserve.array.access.index.p0.p0(ptr, i32, i32)