
Attempting to pass a `ptr addrspace(7)` to functions that take `ptr` arguments produces undesirable `addrspacecast(addrspacecast(p8 x to p7) to p0) => addrspacecast(p8 x to p0)` folds. This results in illegal GEP operations on buffer resources, which can't be GEP'd. (However, note that, while unimplemented, addrspacecast from ptr addrspace(7) to ptr is legal - it's just an effective address computation.) To resolve this problem, and thus prevent illegal `getelementptr T, ptr addrspace(8) %x, ...`s from being produced, this commit extends amdgcn.make.buffer.rsrc to also be variadic in its result type, auto-upgrading old manglings. The logic for handling a make.buffer.rsrc in instruction selection remains untouched and expects the output type to be a ptr addrspace(8), as does the Clang lowering for its builtin (the pointer-to-pointer version might want a different name in clang). LowerBufferFatPointers has been updated to lower amdgcn.make.buffer.rsrc.p7.p* to amdgcn.make.buffer.rsrc.p8.p*. This'll also make exposing buffer fat pointers in Clang easier, since you don't have to cast between a `__amdgcn_rsrc_t` and a pointer.
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu verde -emit-llvm -disable-llvm-optzns -fcuda-is-device -o - %s | FileCheck %s

#define __device__ __attribute__((device))
// All-variable operands: the builtin should lower to a call to
// llvm.amdgcn.make.buffer.rsrc.p8.p0 returning ptr addrspace(8).
// CHECK-LABEL: define dso_local ptr addrspace(8) @_Z31test_amdgcn_make_buffer_rsrc_p0Pvsii(
// CHECK-SAME: ptr noundef [[P:%.*]], i16 noundef signext [[STRIDE:%.*]], i32 noundef [[NUM:%.*]], i32 noundef [[FLAGS:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[RETVAL:%.*]] = alloca ptr addrspace(8), align 16, addrspace(5)
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[STRIDE_ADDR:%.*]] = alloca i16, align 2, addrspace(5)
// CHECK-NEXT: [[NUM_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[FLAGS_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// CHECK-NEXT: [[P_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[P_ADDR]] to ptr
// CHECK-NEXT: [[STRIDE_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[STRIDE_ADDR]] to ptr
// CHECK-NEXT: [[NUM_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[NUM_ADDR]] to ptr
// CHECK-NEXT: [[FLAGS_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[FLAGS_ADDR]] to ptr
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i16 [[STRIDE]], ptr [[STRIDE_ADDR_ASCAST]], align 2
// CHECK-NEXT: store i32 [[NUM]], ptr [[NUM_ADDR_ASCAST]], align 4
// CHECK-NEXT: store i32 [[FLAGS]], ptr [[FLAGS_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[STRIDE_ADDR_ASCAST]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[NUM_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[FLAGS_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]])
// CHECK-NEXT: ret ptr addrspace(8) [[TMP4]]
//
__device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0(void *p, short stride, int num, int flags) {
  return __builtin_amdgcn_make_buffer_rsrc(p, stride, num, flags);
}
// Constant stride: the literal 4 should appear as an immediate i16 operand
// of the intrinsic call rather than being loaded from a temporary.
// CHECK-LABEL: define dso_local ptr addrspace(8) @_Z47test_amdgcn_make_buffer_rsrc_p0_stride_constantPvii(
// CHECK-SAME: ptr noundef [[P:%.*]], i32 noundef [[NUM:%.*]], i32 noundef [[FLAGS:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[RETVAL:%.*]] = alloca ptr addrspace(8), align 16, addrspace(5)
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[NUM_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[FLAGS_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// CHECK-NEXT: [[P_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[P_ADDR]] to ptr
// CHECK-NEXT: [[NUM_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[NUM_ADDR]] to ptr
// CHECK-NEXT: [[FLAGS_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[FLAGS_ADDR]] to ptr
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i32 [[NUM]], ptr [[NUM_ADDR_ASCAST]], align 4
// CHECK-NEXT: store i32 [[FLAGS]], ptr [[FLAGS_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[NUM_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[FLAGS_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 4, i32 [[TMP1]], i32 [[TMP2]])
// CHECK-NEXT: ret ptr addrspace(8) [[TMP3]]
//
__device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_stride_constant(void *p, int num, int flags) {
  return __builtin_amdgcn_make_buffer_rsrc(p, /*stride=*/4, num, flags);
}
// Constant num-records: the literal 1234 should appear as an immediate i32
// operand of the intrinsic call rather than being loaded from a temporary.
// CHECK-LABEL: define dso_local ptr addrspace(8) @_Z44test_amdgcn_make_buffer_rsrc_p0_num_constantPvsi(
// CHECK-SAME: ptr noundef [[P:%.*]], i16 noundef signext [[STRIDE:%.*]], i32 noundef [[FLAGS:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[RETVAL:%.*]] = alloca ptr addrspace(8), align 16, addrspace(5)
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[STRIDE_ADDR:%.*]] = alloca i16, align 2, addrspace(5)
// CHECK-NEXT: [[FLAGS_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// CHECK-NEXT: [[P_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[P_ADDR]] to ptr
// CHECK-NEXT: [[STRIDE_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[STRIDE_ADDR]] to ptr
// CHECK-NEXT: [[FLAGS_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[FLAGS_ADDR]] to ptr
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i16 [[STRIDE]], ptr [[STRIDE_ADDR_ASCAST]], align 2
// CHECK-NEXT: store i32 [[FLAGS]], ptr [[FLAGS_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[STRIDE_ADDR_ASCAST]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[FLAGS_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i32 1234, i32 [[TMP2]])
// CHECK-NEXT: ret ptr addrspace(8) [[TMP3]]
//
__device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_num_constant(void *p, short stride, int flags) {
  return __builtin_amdgcn_make_buffer_rsrc(p, stride, /*num=*/1234, flags);
}
// Constant flags: the literal 5678 should appear as an immediate i32
// operand of the intrinsic call rather than being loaded from a temporary.
// CHECK-LABEL: define dso_local ptr addrspace(8) @_Z46test_amdgcn_make_buffer_rsrc_p0_flags_constantPvsi(
// CHECK-SAME: ptr noundef [[P:%.*]], i16 noundef signext [[STRIDE:%.*]], i32 noundef [[NUM:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[RETVAL:%.*]] = alloca ptr addrspace(8), align 16, addrspace(5)
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[STRIDE_ADDR:%.*]] = alloca i16, align 2, addrspace(5)
// CHECK-NEXT: [[NUM_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// CHECK-NEXT: [[P_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[P_ADDR]] to ptr
// CHECK-NEXT: [[STRIDE_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[STRIDE_ADDR]] to ptr
// CHECK-NEXT: [[NUM_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[NUM_ADDR]] to ptr
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i16 [[STRIDE]], ptr [[STRIDE_ADDR_ASCAST]], align 2
// CHECK-NEXT: store i32 [[NUM]], ptr [[NUM_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[STRIDE_ADDR_ASCAST]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[NUM_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i32 [[TMP2]], i32 5678)
// CHECK-NEXT: ret ptr addrspace(8) [[TMP3]]
//
__device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_flags_constant(void *p, short stride, int num) {
  return __builtin_amdgcn_make_buffer_rsrc(p, stride, num, /*flags=*/5678);
}