
This patch migrates the lowering of the non-tensor TMA intrinsics to be table-gen based. It also uses ADDR nodes for the pointer operands wherever applicable.

Signed-off-by: Durgadoss R <durgadossr@nvidia.com>
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80| FileCheck --check-prefixes=CHECK,CHECK-PTX64 %s
; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80 --nvptx-short-ptr| FileCheck --check-prefixes=CHECK,CHECK-PTX-SHARED32 %s
; RUN: %if ptxas-12.3 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80| %ptxas-verify -arch=sm_90 %}
; RUN: %if ptxas-12.3 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80 --nvptx-short-ptr| %ptxas-verify -arch=sm_90 %}

target triple = "nvptx64-nvidia-cuda"

; Non-tensor TMA (bulk async copy / prefetch) intrinsics under test.
declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7), ptr addrspace(3), ptr addrspace(1), i32, i16, i64, i1, i1)
declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.global(ptr addrspace(1), ptr addrspace(3), i32, i64, i1)
declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7), ptr addrspace(3), ptr addrspace(3), i32)
declare void @llvm.nvvm.cp.async.bulk.prefetch.L2(ptr addrspace(1), i32, i64, i1)
; Global -> shared::cluster bulk copy: exercises all four combinations of the
; trailing i1 flags (multicast, L2 cache-hint) of the g2s intrinsic.
define void @cp_async_bulk_g2s(ptr addrspace(1) %src, ptr addrspace(3) %bar, ptr addrspace(7) %dst, i32 %size, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_g2s(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
; CHECK-PTX64-NEXT: .reg .b32 %r<2>;
; CHECK-PTX64-NEXT: .reg .b64 %rd<5>;
; CHECK-PTX64-EMPTY:
; CHECK-PTX64-NEXT: // %bb.0:
; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [cp_async_bulk_g2s_param_0];
; CHECK-PTX64-NEXT: ld.param.b64 %rd2, [cp_async_bulk_g2s_param_1];
; CHECK-PTX64-NEXT: ld.param.b64 %rd3, [cp_async_bulk_g2s_param_2];
; CHECK-PTX64-NEXT: ld.param.b32 %r1, [cp_async_bulk_g2s_param_3];
; CHECK-PTX64-NEXT: ld.param.b16 %rs1, [cp_async_bulk_g2s_param_4];
; CHECK-PTX64-NEXT: ld.param.b64 %rd4, [cp_async_bulk_g2s_param_5];
; CHECK-PTX64-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes [%rd3], [%rd1], %r1, [%rd2];
; CHECK-PTX64-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.L2::cache_hint [%rd3], [%rd1], %r1, [%rd2], %rd4;
; CHECK-PTX64-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [%rd3], [%rd1], %r1, [%rd2], %rs1;
; CHECK-PTX64-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%rd3], [%rd1], %r1, [%rd2], %rs1, %rd4;
; CHECK-PTX64-NEXT: ret;
;
; CHECK-PTX-SHARED32-LABEL: cp_async_bulk_g2s(
; CHECK-PTX-SHARED32: {
; CHECK-PTX-SHARED32-NEXT: .reg .b16 %rs<2>;
; CHECK-PTX-SHARED32-NEXT: .reg .b32 %r<4>;
; CHECK-PTX-SHARED32-NEXT: .reg .b64 %rd<3>;
; CHECK-PTX-SHARED32-EMPTY:
; CHECK-PTX-SHARED32-NEXT: // %bb.0:
; CHECK-PTX-SHARED32-NEXT: ld.param.b64 %rd1, [cp_async_bulk_g2s_param_0];
; CHECK-PTX-SHARED32-NEXT: ld.param.b32 %r1, [cp_async_bulk_g2s_param_1];
; CHECK-PTX-SHARED32-NEXT: ld.param.b32 %r2, [cp_async_bulk_g2s_param_2];
; CHECK-PTX-SHARED32-NEXT: ld.param.b32 %r3, [cp_async_bulk_g2s_param_3];
; CHECK-PTX-SHARED32-NEXT: ld.param.b16 %rs1, [cp_async_bulk_g2s_param_4];
; CHECK-PTX-SHARED32-NEXT: ld.param.b64 %rd2, [cp_async_bulk_g2s_param_5];
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes [%r2], [%rd1], %r3, [%r1];
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.L2::cache_hint [%r2], [%rd1], %r3, [%r1], %rd2;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [%r2], [%rd1], %r3, [%r1], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r2], [%rd1], %r3, [%r1], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
  tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 0, i1 0)
  tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 0, i1 1)
  tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 1, i1 0)
  tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 1, i1 1)
  ret void
}
; Tests to verify that the immediate values for "mc" and "ch" land correctly in PTX.
; The values of 16 and 64 are arbitrary and do not have any significance.
; Same g2s intrinsic, but with immediate multicast/cache-hint operands to
; check that the constants are materialized and forwarded correctly.
define void @cp_async_bulk_g2s_imm_mc_ch(ptr addrspace(1) %src, ptr addrspace(3) %bar, ptr addrspace(7) %dst, i32 %size, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_g2s_imm_mc_ch(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<3>;
; CHECK-PTX64-NEXT: .reg .b32 %r<2>;
; CHECK-PTX64-NEXT: .reg .b64 %rd<6>;
; CHECK-PTX64-EMPTY:
; CHECK-PTX64-NEXT: // %bb.0:
; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [cp_async_bulk_g2s_imm_mc_ch_param_0];
; CHECK-PTX64-NEXT: ld.param.b64 %rd2, [cp_async_bulk_g2s_imm_mc_ch_param_1];
; CHECK-PTX64-NEXT: ld.param.b64 %rd3, [cp_async_bulk_g2s_imm_mc_ch_param_2];
; CHECK-PTX64-NEXT: ld.param.b32 %r1, [cp_async_bulk_g2s_imm_mc_ch_param_3];
; CHECK-PTX64-NEXT: mov.b64 %rd4, 64;
; CHECK-PTX64-NEXT: mov.b16 %rs1, 16;
; CHECK-PTX64-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%rd3], [%rd1], %r1, [%rd2], %rs1, %rd4;
; CHECK-PTX64-NEXT: ld.param.b64 %rd5, [cp_async_bulk_g2s_imm_mc_ch_param_5];
; CHECK-PTX64-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [%rd3], [%rd1], %r1, [%rd2], %rs1;
; CHECK-PTX64-NEXT: ld.param.b16 %rs2, [cp_async_bulk_g2s_imm_mc_ch_param_4];
; CHECK-PTX64-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.L2::cache_hint [%rd3], [%rd1], %r1, [%rd2], %rd4;
; CHECK-PTX64-NEXT: ret;
;
; CHECK-PTX-SHARED32-LABEL: cp_async_bulk_g2s_imm_mc_ch(
; CHECK-PTX-SHARED32: {
; CHECK-PTX-SHARED32-NEXT: .reg .b16 %rs<3>;
; CHECK-PTX-SHARED32-NEXT: .reg .b32 %r<4>;
; CHECK-PTX-SHARED32-NEXT: .reg .b64 %rd<4>;
; CHECK-PTX-SHARED32-EMPTY:
; CHECK-PTX-SHARED32-NEXT: // %bb.0:
; CHECK-PTX-SHARED32-NEXT: ld.param.b64 %rd1, [cp_async_bulk_g2s_imm_mc_ch_param_0];
; CHECK-PTX-SHARED32-NEXT: ld.param.b32 %r1, [cp_async_bulk_g2s_imm_mc_ch_param_1];
; CHECK-PTX-SHARED32-NEXT: ld.param.b32 %r2, [cp_async_bulk_g2s_imm_mc_ch_param_2];
; CHECK-PTX-SHARED32-NEXT: ld.param.b32 %r3, [cp_async_bulk_g2s_imm_mc_ch_param_3];
; CHECK-PTX-SHARED32-NEXT: mov.b64 %rd2, 64;
; CHECK-PTX-SHARED32-NEXT: mov.b16 %rs1, 16;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r2], [%rd1], %r3, [%r1], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ld.param.b64 %rd3, [cp_async_bulk_g2s_imm_mc_ch_param_5];
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [%r2], [%rd1], %r3, [%r1], %rs1;
; CHECK-PTX-SHARED32-NEXT: ld.param.b16 %rs2, [cp_async_bulk_g2s_imm_mc_ch_param_4];
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.L2::cache_hint [%r2], [%rd1], %r3, [%r1], %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
  tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 16, i64 64, i1 1, i1 1)
  tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 16, i64 %ch, i1 1, i1 0)
  tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 64, i1 0, i1 1)
  ret void
}
; shared::cta -> global bulk copy: immediate cache-hint, register cache-hint,
; and the no-hint form of the s2g intrinsic.
define void @cp_async_bulk_s2g(ptr addrspace(3) %src, ptr addrspace(1) %dst, i32 %size, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_s2g(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b32 %r<2>;
; CHECK-PTX64-NEXT: .reg .b64 %rd<5>;
; CHECK-PTX64-EMPTY:
; CHECK-PTX64-NEXT: // %bb.0:
; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [cp_async_bulk_s2g_param_0];
; CHECK-PTX64-NEXT: ld.param.b64 %rd2, [cp_async_bulk_s2g_param_1];
; CHECK-PTX64-NEXT: ld.param.b32 %r1, [cp_async_bulk_s2g_param_2];
; CHECK-PTX64-NEXT: mov.b64 %rd3, 64;
; CHECK-PTX64-NEXT: cp.async.bulk.global.shared::cta.bulk_group.L2::cache_hint [%rd2], [%rd1], %r1, %rd3;
; CHECK-PTX64-NEXT: ld.param.b64 %rd4, [cp_async_bulk_s2g_param_3];
; CHECK-PTX64-NEXT: cp.async.bulk.global.shared::cta.bulk_group [%rd2], [%rd1], %r1;
; CHECK-PTX64-NEXT: cp.async.bulk.global.shared::cta.bulk_group.L2::cache_hint [%rd2], [%rd1], %r1, %rd4;
; CHECK-PTX64-NEXT: ret;
;
; CHECK-PTX-SHARED32-LABEL: cp_async_bulk_s2g(
; CHECK-PTX-SHARED32: {
; CHECK-PTX-SHARED32-NEXT: .reg .b32 %r<3>;
; CHECK-PTX-SHARED32-NEXT: .reg .b64 %rd<4>;
; CHECK-PTX-SHARED32-EMPTY:
; CHECK-PTX-SHARED32-NEXT: // %bb.0:
; CHECK-PTX-SHARED32-NEXT: ld.param.b32 %r1, [cp_async_bulk_s2g_param_0];
; CHECK-PTX-SHARED32-NEXT: ld.param.b64 %rd1, [cp_async_bulk_s2g_param_1];
; CHECK-PTX-SHARED32-NEXT: ld.param.b32 %r2, [cp_async_bulk_s2g_param_2];
; CHECK-PTX-SHARED32-NEXT: mov.b64 %rd2, 64;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.global.shared::cta.bulk_group.L2::cache_hint [%rd1], [%r1], %r2, %rd2;
; CHECK-PTX-SHARED32-NEXT: ld.param.b64 %rd3, [cp_async_bulk_s2g_param_3];
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.global.shared::cta.bulk_group [%rd1], [%r1], %r2;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.global.shared::cta.bulk_group.L2::cache_hint [%rd1], [%r1], %r2, %rd3;
; CHECK-PTX-SHARED32-NEXT: ret;
  tail call void @llvm.nvvm.cp.async.bulk.shared.cta.to.global(ptr addrspace(1) %dst, ptr addrspace(3) %src, i32 %size, i64 64, i1 1)
  tail call void @llvm.nvvm.cp.async.bulk.shared.cta.to.global(ptr addrspace(1) %dst, ptr addrspace(3) %src, i32 %size, i64 %ch, i1 0)
  tail call void @llvm.nvvm.cp.async.bulk.shared.cta.to.global(ptr addrspace(1) %dst, ptr addrspace(3) %src, i32 %size, i64 %ch, i1 1)
  ret void
}
; shared::cta -> shared::cluster bulk copy (single form, no optional flags).
define void @cp_async_bulk_cta_to_cluster(ptr addrspace(3) %src, ptr addrspace(3) %bar, ptr addrspace(7) %dst, i32 %size) {
; CHECK-PTX64-LABEL: cp_async_bulk_cta_to_cluster(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b32 %r<2>;
; CHECK-PTX64-NEXT: .reg .b64 %rd<4>;
; CHECK-PTX64-EMPTY:
; CHECK-PTX64-NEXT: // %bb.0:
; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [cp_async_bulk_cta_to_cluster_param_0];
; CHECK-PTX64-NEXT: ld.param.b64 %rd2, [cp_async_bulk_cta_to_cluster_param_1];
; CHECK-PTX64-NEXT: ld.param.b64 %rd3, [cp_async_bulk_cta_to_cluster_param_2];
; CHECK-PTX64-NEXT: ld.param.b32 %r1, [cp_async_bulk_cta_to_cluster_param_3];
; CHECK-PTX64-NEXT: cp.async.bulk.shared::cluster.shared::cta.mbarrier::complete_tx::bytes [%rd3], [%rd1], %r1, [%rd2];
; CHECK-PTX64-NEXT: ret;
;
; CHECK-PTX-SHARED32-LABEL: cp_async_bulk_cta_to_cluster(
; CHECK-PTX-SHARED32: {
; CHECK-PTX-SHARED32-NEXT: .reg .b32 %r<5>;
; CHECK-PTX-SHARED32-EMPTY:
; CHECK-PTX-SHARED32-NEXT: // %bb.0:
; CHECK-PTX-SHARED32-NEXT: ld.param.b32 %r1, [cp_async_bulk_cta_to_cluster_param_0];
; CHECK-PTX-SHARED32-NEXT: ld.param.b32 %r2, [cp_async_bulk_cta_to_cluster_param_1];
; CHECK-PTX-SHARED32-NEXT: ld.param.b32 %r3, [cp_async_bulk_cta_to_cluster_param_2];
; CHECK-PTX-SHARED32-NEXT: ld.param.b32 %r4, [cp_async_bulk_cta_to_cluster_param_3];
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.shared::cta.mbarrier::complete_tx::bytes [%r3], [%r1], %r4, [%r2];
; CHECK-PTX-SHARED32-NEXT: ret;
  tail call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src, i32 %size)
  ret void
}
; Bulk L2 prefetch from global memory: immediate cache-hint, register
; cache-hint, and the no-hint form. Output is identical for both pointer
; modes, so only the shared CHECK prefix is used here.
define void @cp_async_bulk_prefetch(ptr addrspace(1) %src, i32 %size, i64 %ch) {
; CHECK-LABEL: cp_async_bulk_prefetch(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<4>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.b64 %rd1, [cp_async_bulk_prefetch_param_0];
; CHECK-NEXT: ld.param.b32 %r1, [cp_async_bulk_prefetch_param_1];
; CHECK-NEXT: mov.b64 %rd2, 64;
; CHECK-NEXT: cp.async.bulk.prefetch.L2.global.L2::cache_hint [%rd1], %r1, %rd2;
; CHECK-NEXT: ld.param.b64 %rd3, [cp_async_bulk_prefetch_param_2];
; CHECK-NEXT: cp.async.bulk.prefetch.L2.global.L2::cache_hint [%rd1], %r1, %rd3;
; CHECK-NEXT: cp.async.bulk.prefetch.L2.global [%rd1], %r1;
; CHECK-NEXT: ret;
  tail call void @llvm.nvvm.cp.async.bulk.prefetch.L2(ptr addrspace(1) %src, i32 %size, i64 64, i1 1)
  tail call void @llvm.nvvm.cp.async.bulk.prefetch.L2(ptr addrspace(1) %src, i32 %size, i64 %ch, i1 1)
  tail call void @llvm.nvvm.cp.async.bulk.prefetch.L2(ptr addrspace(1) %src, i32 %size, i64 %ch, i1 0)
  ret void
}