; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_20 | FileCheck %s
; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64 -mcpu=sm_20 | %ptxas-verify %}

; Even though general vector types are not supported in PTX, we can still
; optimize loads/stores with pseudo-vector instructions of the form:
;
; ld.v2.f32 {%r0, %r1}, [%r0]
;
; which will load two floats at once into scalar registers.

define void @foo(ptr %a) {
; CHECK-LABEL: foo(
; CHECK:       {
; CHECK-NEXT:    .reg .b32 %r<5>;
; CHECK-NEXT:    .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT:  // %bb.0:
; CHECK-NEXT:    ld.param.b64 %rd1, [foo_param_0];
; CHECK-NEXT:    ld.v2.b32 {%r1, %r2}, [%rd1];
; CHECK-NEXT:    mul.rn.f32 %r3, %r2, %r2;
; CHECK-NEXT:    mul.rn.f32 %r4, %r1, %r1;
; CHECK-NEXT:    st.v2.b32 [%rd1], {%r4, %r3};
; CHECK-NEXT:    ret;
  %t1 = load <2 x float>, ptr %a
  %t2 = fmul <2 x float> %t1, %t1
  store <2 x float> %t2, ptr %a
  ret void
}

define void @foo2(ptr %a) {
; CHECK-LABEL: foo2(
; CHECK:       {
; CHECK-NEXT:    .reg .b32 %r<9>;
; CHECK-NEXT:    .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT:  // %bb.0:
; CHECK-NEXT:    ld.param.b64 %rd1, [foo2_param_0];
; CHECK-NEXT:    ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
; CHECK-NEXT:    mul.rn.f32 %r5, %r4, %r4;
; CHECK-NEXT:    mul.rn.f32 %r6, %r3, %r3;
; CHECK-NEXT:    mul.rn.f32 %r7, %r2, %r2;
; CHECK-NEXT:    mul.rn.f32 %r8, %r1, %r1;
; CHECK-NEXT:    st.v4.b32 [%rd1], {%r8, %r7, %r6, %r5};
; CHECK-NEXT:    ret;
  %t1 = load <4 x float>, ptr %a
  %t2 = fmul <4 x float> %t1, %t1
  store <4 x float> %t2, ptr %a
  ret void
}

define void @foo3(ptr %a) {
; CHECK-LABEL: foo3(
; CHECK:       {
; CHECK-NEXT:    .reg .b32 %r<17>;
; CHECK-NEXT:    .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT:  // %bb.0:
; CHECK-NEXT:    ld.param.b64 %rd1, [foo3_param_0];
; CHECK-NEXT:    ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1+16];
; CHECK-NEXT:    ld.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1];
; CHECK-NEXT:    mul.rn.f32 %r9, %r8, %r8;
; CHECK-NEXT:    mul.rn.f32 %r10, %r7, %r7;
; CHECK-NEXT:    mul.rn.f32 %r11, %r6, %r6;
; CHECK-NEXT:    mul.rn.f32 %r12, %r5, %r5;
; CHECK-NEXT:    mul.rn.f32 %r13, %r4, %r4;
; CHECK-NEXT:    mul.rn.f32 %r14, %r3, %r3;
; CHECK-NEXT:    mul.rn.f32 %r15, %r2, %r2;
; CHECK-NEXT:    mul.rn.f32 %r16, %r1, %r1;
; CHECK-NEXT:    st.v4.b32 [%rd1+16], {%r16, %r15, %r14, %r13};
; CHECK-NEXT:    st.v4.b32 [%rd1], {%r12, %r11, %r10, %r9};
; CHECK-NEXT:    ret;
  %t1 = load <8 x float>, ptr %a
  %t2 = fmul <8 x float> %t1, %t1
  store <8 x float> %t2, ptr %a
  ret void
}

define void @foo4(ptr %a) {
; CHECK-LABEL: foo4(
; CHECK:       {
; CHECK-NEXT:    .reg .b32 %r<5>;
; CHECK-NEXT:    .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT:  // %bb.0:
; CHECK-NEXT:    ld.param.b64 %rd1, [foo4_param_0];
; CHECK-NEXT:    ld.v2.b32 {%r1, %r2}, [%rd1];
; CHECK-NEXT:    mul.lo.s32 %r3, %r2, %r2;
; CHECK-NEXT:    mul.lo.s32 %r4, %r1, %r1;
; CHECK-NEXT:    st.v2.b32 [%rd1], {%r4, %r3};
; CHECK-NEXT:    ret;
  %t1 = load <2 x i32>, ptr %a
  %t2 = mul <2 x i32> %t1, %t1
  store <2 x i32> %t2, ptr %a
  ret void
}

define void @foo5(ptr %a) {
; CHECK-LABEL: foo5(
; CHECK:       {
; CHECK-NEXT:    .reg .b32 %r<9>;
; CHECK-NEXT:    .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT:  // %bb.0:
; CHECK-NEXT:    ld.param.b64 %rd1, [foo5_param_0];
; CHECK-NEXT:    ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
; CHECK-NEXT:    mul.lo.s32 %r5, %r4, %r4;
; CHECK-NEXT:    mul.lo.s32 %r6, %r3, %r3;
; CHECK-NEXT:    mul.lo.s32 %r7, %r2, %r2;
; CHECK-NEXT:    mul.lo.s32 %r8, %r1, %r1;
; CHECK-NEXT:    st.v4.b32 [%rd1], {%r8, %r7, %r6, %r5};
; CHECK-NEXT:    ret;
  %t1 = load <4 x i32>, ptr %a
  %t2 = mul <4 x i32> %t1, %t1
  store <4 x i32> %t2, ptr %a
  ret void
}

define void @foo6(ptr %a) {
; CHECK-LABEL: foo6(
; CHECK:       {
; CHECK-NEXT:    .reg .b32 %r<17>;
; CHECK-NEXT:    .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT:  // %bb.0:
; CHECK-NEXT:    ld.param.b64 %rd1, [foo6_param_0];
; CHECK-NEXT:    ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1+16];
; CHECK-NEXT:    ld.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1];
; CHECK-NEXT:    mul.lo.s32 %r9, %r8, %r8;
; CHECK-NEXT:    mul.lo.s32 %r10, %r7, %r7;
; CHECK-NEXT:    mul.lo.s32 %r11, %r6, %r6;
; CHECK-NEXT:    mul.lo.s32 %r12, %r5, %r5;
; CHECK-NEXT:    mul.lo.s32 %r13, %r4, %r4;
; CHECK-NEXT:    mul.lo.s32 %r14, %r3, %r3;
; CHECK-NEXT:    mul.lo.s32 %r15, %r2, %r2;
; CHECK-NEXT:    mul.lo.s32 %r16, %r1, %r1;
; CHECK-NEXT:    st.v4.b32 [%rd1+16], {%r16, %r15, %r14, %r13};
; CHECK-NEXT:    st.v4.b32 [%rd1], {%r12, %r11, %r10, %r9};
; CHECK-NEXT:    ret;
  %t1 = load <8 x i32>, ptr %a
  %t2 = mul <8 x i32> %t1, %t1
  store <8 x i32> %t2, ptr %a
  ret void
}

; The following test used to fail because the address computation was
; still too complex by the time the LoadStoreVectorizer (LSV) was called.
declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.x() #0
declare i32 @llvm.nvvm.read.ptx.sreg.tid.x() #0

define void @foo_complex(ptr nocapture readonly align 16 dereferenceable(134217728) %alloc0) {
; CHECK-LABEL: foo_complex(
; CHECK:       {
; CHECK-NEXT:    .reg .b16 %rs<4>;
; CHECK-NEXT:    .reg .b32 %r<8>;
; CHECK-NEXT:    .reg .b64 %rd<5>;
; CHECK-EMPTY:
; CHECK-NEXT:  // %bb.0:
; CHECK-NEXT:    ld.param.b64 %rd1, [foo_complex_param_0];
; CHECK-NEXT:    mov.u32 %r1, %tid.x;
; CHECK-NEXT:    mov.u32 %r2, %ctaid.x;
; CHECK-NEXT:    shr.u32 %r3, %r2, 8;
; CHECK-NEXT:    shl.b32 %r4, %r2, 9;
; CHECK-NEXT:    and.b32 %r5, %r4, 130560;
; CHECK-NEXT:    shl.b32 %r6, %r1, 1;
; CHECK-NEXT:    or.b32 %r7, %r5, %r6;
; CHECK-NEXT:    cvt.u64.u32 %rd2, %r7;
; CHECK-NEXT:    mad.wide.u32 %rd3, %r3, 131072, %rd1;
; CHECK-NEXT:    add.s64 %rd4, %rd3, %rd2;
; CHECK-NEXT:    ld.v2.b8 {%rs1, %rs2}, [%rd4+128];
; CHECK-NEXT:    max.u16 %rs3, %rs1, %rs2;
; CHECK-NEXT:    st.b8 [%rd4+129], %rs3;
; CHECK-NEXT:    ret;
  %t0 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x(), !range !1
  %t1 = tail call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
  %t2 = lshr i32 %t1, 8
  %t3 = shl nuw nsw i32 %t1, 9
  %ttile_origin.2 = and i32 %t3, 130560
  %tstart_offset_x_mul = shl nuw nsw i32 %t0, 1
  %t4 = or disjoint i32 %ttile_origin.2, %tstart_offset_x_mul
  %t6 = or disjoint i32 %t4, 1
  %t8 = or disjoint i32 %t4, 128
  %t9 = zext i32 %t8 to i64
  %t10 = or disjoint i32 %t4, 129
  %t11 = zext i32 %t10 to i64
  %t20 = zext i32 %t2 to i64
  %t27 = getelementptr inbounds [1024 x [131072 x i8]], ptr %alloc0, i64 0, i64 %t20, i64 %t9
  %t28 = load i8, ptr %t27, align 2
  %t31 = getelementptr inbounds [1024 x [131072 x i8]], ptr %alloc0, i64 0, i64 %t20, i64 %t11
  %t32 = load i8, ptr %t31, align 1
  %t33 = icmp ult i8 %t28, %t32
  %t34 = select i1 %t33, i8 %t32, i8 %t28
  store i8 %t34, ptr %t31
  ret void
}

define void @extv8f16_global_a16(ptr addrspace(1) noalias readonly align 16 %dst, ptr addrspace(1) noalias readonly align 16 %src) #0 {
; CHECK-LABEL: extv8f16_global_a16(
; CHECK:       {
; CHECK-NEXT:    .reg .b16 %rs<9>;
; CHECK-NEXT:    .reg .b32 %r<13>;
; CHECK-NEXT:    .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT:  // %bb.0:
; CHECK-NEXT:    ld.param.b64 %rd1, [extv8f16_global_a16_param_0];
; CHECK-NEXT:    ld.param.b64 %rd2, [extv8f16_global_a16_param_1];
; CHECK-NEXT:    ld.global.v4.b32 {%r1, %r2, %r3, %r4}, [%rd2];
; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
; CHECK-NEXT:    cvt.f32.f16 %r5, %rs2;
; CHECK-NEXT:    cvt.f32.f16 %r6, %rs1;
; CHECK-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
; CHECK-NEXT:    cvt.f32.f16 %r7, %rs4;
; CHECK-NEXT:    cvt.f32.f16 %r8, %rs3;
; CHECK-NEXT:    mov.b32 {%rs5, %rs6}, %r4;
; CHECK-NEXT:    cvt.f32.f16 %r9, %rs6;
; CHECK-NEXT:    cvt.f32.f16 %r10, %rs5;
; CHECK-NEXT:    mov.b32 {%rs7, %rs8}, %r3;
; CHECK-NEXT:    cvt.f32.f16 %r11, %rs8;
; CHECK-NEXT:    cvt.f32.f16 %r12, %rs7;
; CHECK-NEXT:    st.global.v4.b32 [%rd1+16], {%r12, %r11, %r10, %r9};
; CHECK-NEXT:    st.global.v4.b32 [%rd1], {%r8, %r7, %r6, %r5};
; CHECK-NEXT:    ret;
  %v = load <8 x half>, ptr addrspace(1) %src, align 16
  %ext = fpext <8 x half> %v to <8 x float>
  store <8 x float> %ext, ptr addrspace(1) %dst, align 16
  ret void
}

define void @extv8f16_global_a4(ptr addrspace(1) noalias readonly align 16 %dst, ptr addrspace(1) noalias readonly align 16 %src) #0 {
; CHECK-LABEL: extv8f16_global_a4(
; CHECK:       {
; CHECK-NEXT:    .reg .b16 %rs<9>;
; CHECK-NEXT:    .reg .b32 %r<9>;
; CHECK-NEXT:    .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT:  // %bb.0:
; CHECK-NEXT:    ld.param.b64 %rd1, [extv8f16_global_a4_param_0];
; CHECK-NEXT:    ld.param.b64 %rd2, [extv8f16_global_a4_param_1];
; CHECK-NEXT:    ld.global.v2.b16 {%rs1, %rs2}, [%rd2+8];
; CHECK-NEXT:    ld.global.v2.b16 {%rs3, %rs4}, [%rd2+12];
; CHECK-NEXT:    ld.global.v2.b16 {%rs5, %rs6}, [%rd2];
; CHECK-NEXT:    ld.global.v2.b16 {%rs7, %rs8}, [%rd2+4];
; CHECK-NEXT:    cvt.f32.f16 %r1, %rs8;
; CHECK-NEXT:    cvt.f32.f16 %r2, %rs7;
; CHECK-NEXT:    cvt.f32.f16 %r3, %rs6;
; CHECK-NEXT:    cvt.f32.f16 %r4, %rs5;
; CHECK-NEXT:    cvt.f32.f16 %r5, %rs4;
; CHECK-NEXT:    cvt.f32.f16 %r6, %rs3;
; CHECK-NEXT:    cvt.f32.f16 %r7, %rs2;
; CHECK-NEXT:    cvt.f32.f16 %r8, %rs1;
; CHECK-NEXT:    st.global.v4.b32 [%rd1+16], {%r8, %r7, %r6, %r5};
; CHECK-NEXT:    st.global.v4.b32 [%rd1], {%r4, %r3, %r2, %r1};
; CHECK-NEXT:    ret;
  %v = load <8 x half>, ptr addrspace(1) %src, align 4
  %ext = fpext <8 x half> %v to <8 x float>
  store <8 x float> %ext, ptr addrspace(1) %dst, align 16
  ret void
}

define void @extv8f16_generic_a16(ptr noalias readonly align 16 %dst, ptr noalias readonly align 16 %src) #0 {
; CHECK-LABEL: extv8f16_generic_a16(
; CHECK:       {
; CHECK-NEXT:    .reg .b16 %rs<9>;
; CHECK-NEXT:    .reg .b32 %r<13>;
; CHECK-NEXT:    .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT:  // %bb.0:
; CHECK-NEXT:    ld.param.b64 %rd1, [extv8f16_generic_a16_param_0];
; CHECK-NEXT:    ld.param.b64 %rd2, [extv8f16_generic_a16_param_1];
; CHECK-NEXT:    ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd2];
; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
; CHECK-NEXT:    cvt.f32.f16 %r5, %rs2;
; CHECK-NEXT:    cvt.f32.f16 %r6, %rs1;
; CHECK-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
; CHECK-NEXT:    cvt.f32.f16 %r7, %rs4;
; CHECK-NEXT:    cvt.f32.f16 %r8, %rs3;
; CHECK-NEXT:    mov.b32 {%rs5, %rs6}, %r4;
; CHECK-NEXT:    cvt.f32.f16 %r9, %rs6;
; CHECK-NEXT:    cvt.f32.f16 %r10, %rs5;
; CHECK-NEXT:    mov.b32 {%rs7, %rs8}, %r3;
; CHECK-NEXT:    cvt.f32.f16 %r11, %rs8;
; CHECK-NEXT:    cvt.f32.f16 %r12, %rs7;
; CHECK-NEXT:    st.v4.b32 [%rd1+16], {%r12, %r11, %r10, %r9};
; CHECK-NEXT:    st.v4.b32 [%rd1], {%r8, %r7, %r6, %r5};
; CHECK-NEXT:    ret;
  %v = load <8 x half>, ptr %src, align 16
  %ext = fpext <8 x half> %v to <8 x float>
  store <8 x float> %ext, ptr %dst, align 16
  ret void
}

define void @extv8f16_generic_a4(ptr noalias readonly align 16 %dst, ptr noalias readonly align 16 %src) #0 {
; CHECK-LABEL: extv8f16_generic_a4(
; CHECK:       {
; CHECK-NEXT:    .reg .b16 %rs<9>;
; CHECK-NEXT:    .reg .b32 %r<9>;
; CHECK-NEXT:    .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT:  // %bb.0:
; CHECK-NEXT:    ld.param.b64 %rd1, [extv8f16_generic_a4_param_0];
; CHECK-NEXT:    ld.param.b64 %rd2, [extv8f16_generic_a4_param_1];
; CHECK-NEXT:    ld.v2.b16 {%rs1, %rs2}, [%rd2+8];
; CHECK-NEXT:    ld.v2.b16 {%rs3, %rs4}, [%rd2+12];
; CHECK-NEXT:    ld.v2.b16 {%rs5, %rs6}, [%rd2];
; CHECK-NEXT:    ld.v2.b16 {%rs7, %rs8}, [%rd2+4];
; CHECK-NEXT:    cvt.f32.f16 %r1, %rs8;
; CHECK-NEXT:    cvt.f32.f16 %r2, %rs7;
; CHECK-NEXT:    cvt.f32.f16 %r3, %rs6;
; CHECK-NEXT:    cvt.f32.f16 %r4, %rs5;
; CHECK-NEXT:    cvt.f32.f16 %r5, %rs4;
; CHECK-NEXT:    cvt.f32.f16 %r6, %rs3;
; CHECK-NEXT:    cvt.f32.f16 %r7, %rs2;
; CHECK-NEXT:    cvt.f32.f16 %r8, %rs1;
; CHECK-NEXT:    st.v4.b32 [%rd1+16], {%r8, %r7, %r6, %r5};
; CHECK-NEXT:    st.v4.b32 [%rd1], {%r4, %r3, %r2, %r1};
; CHECK-NEXT:    ret;
  %v = load <8 x half>, ptr %src, align 4
  %ext = fpext <8 x half> %v to <8 x float>
  store <8 x float> %ext, ptr %dst, align 16
  ret void
}

!1 = !{i32 0, i32 64}

define dso_local void @bf16_v4_align_load_store(ptr noundef %0, ptr noundef %1) #0 {
; CHECK-LABEL: bf16_v4_align_load_store(
; CHECK:       {
; CHECK-NEXT:    .reg .b32 %r<3>;
; CHECK-NEXT:    .reg .b64 %rd<3>;
; CHECK-EMPTY:
; CHECK-NEXT:  // %bb.0:
; CHECK-NEXT:    ld.param.b64 %rd1, [bf16_v4_align_load_store_param_0];
; CHECK-NEXT:    ld.param.b64 %rd2, [bf16_v4_align_load_store_param_1];
; CHECK-NEXT:    ld.v2.b32 {%r1, %r2}, [%rd2];
; CHECK-NEXT:    st.v2.b32 [%rd1], {%r1, %r2};
; CHECK-NEXT:    ret;
  %3 = load <4 x bfloat>, ptr %1, align 8
  store <4 x bfloat> %3, ptr %0, align 8
  ret void
}

; Attribute group #0 is referenced above but its definition was missing;
; { nounwind } is assumed here so the module parses.
attributes #0 = { nounwind }
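
; A minimal additional sketch, not part of the original autogenerated test:
; the same vectorization is expected to apply to 64-bit element types, e.g.
; a <2 x double> load/store lowering to ld.v2.b64/st.v2.b64. The function
; name @foo_v2f64 is hypothetical; CHECK lines are deliberately omitted and
; would need to be regenerated with utils/update_llc_test_checks.py.
define void @foo_v2f64(ptr %a) {
  %t1 = load <2 x double>, ptr %a
  %t2 = fmul <2 x double> %t1, %t1
  store <2 x double> %t2, ptr %a
  ret void
}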