; llvm-project/llvm/test/CodeGen/X86/amx_transpose_intrinsics.ll
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+amx-tile,+amx-bf16,+amx-int8,+amx-transpose | FileCheck %s
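
; @test_amx checks that the plain AMX-TRANSPOSE intrinsics lower 1:1 to the
; t2rpntlvwz0[t1], t2rpntlvwz1[t1], and ttransposed instructions, with the i8
; operands selecting the tile register operands.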
define void @test_amx(i32 %rv32, i64 %stride, i64 %rvalue, i8* %addr1, <4 x float> %xmm) #0 {
; CHECK-LABEL: test_amx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    t2rpntlvwz0 (%rcx,%rsi), %tmm0
; CHECK-NEXT:    t2rpntlvwz0t1 (%rcx,%rsi), %tmm2
; CHECK-NEXT:    t2rpntlvwz1 (%rcx,%rsi), %tmm0
; CHECK-NEXT:    t2rpntlvwz1t1 (%rcx,%rsi), %tmm2
; CHECK-NEXT:    ttransposed %tmm3, %tmm1
; CHECK-NEXT:    retq
  call void @llvm.x86.t2rpntlvwz0(i8 1, i8* %addr1, i64 %stride)
  call void @llvm.x86.t2rpntlvwz0t1(i8 2, i8* %addr1, i64 %stride)
  call void @llvm.x86.t2rpntlvwz1(i8 1, i8* %addr1, i64 %stride)
  call void @llvm.x86.t2rpntlvwz1t1(i8 2, i8* %addr1, i64 %stride)
  call void @llvm.x86.ttransposed(i8 1, i8 3)
  ret void
}
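
; Plain intrinsic declarations: each pair load takes an i8 tile operand, a
; base pointer, and a stride; ttransposed takes destination and source tile
; numbers.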
declare void @llvm.x86.t2rpntlvwz0(i8 %tile1, i8* %addr1, i64 %stride)
declare void @llvm.x86.t2rpntlvwz0t1(i8 %tile1, i8* %addr1, i64 %stride)
declare void @llvm.x86.t2rpntlvwz1(i8 %tile1, i8* %addr1, i64 %stride)
declare void @llvm.x86.t2rpntlvwz1t1(i8 %tile1, i8* %addr1, i64 %stride)
declare void @llvm.x86.ttransposed(i8 %tile0, i8 %tile1)
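
; @test_amx3 exercises the shape-aware *.internal variants: the i16 shape
; operands become a tile configuration that is built on the stack and loaded
; with ldtilecfg before the first tile instruction, and the transposed tile is
; written back through tilestored.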
define void @test_amx3(i8* %pointer, i8* %base, i64 %stride) #0 {
; CHECK-LABEL: test_amx3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovups %zmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    ldtilecfg -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    movw $8, %cx
; CHECK-NEXT:    t2rpntlvwz0 (%rsi,%rdx), %tmm4
; CHECK-NEXT:    t2rpntlvwz0t1 (%rsi,%rdx), %tmm4
; CHECK-NEXT:    t2rpntlvwz1 (%rsi,%rdx), %tmm4
; CHECK-NEXT:    t2rpntlvwz1t1 (%rsi,%rdx), %tmm4
; CHECK-NEXT:    ttransposed %tmm4, %tmm0
; CHECK-NEXT:    tilestored %tmm0, (%rdi,%rdx)
; CHECK-NEXT:    tilerelease
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %1 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0.internal(i16 8, i16 8, i16 0, i8* %base, i64 %stride)
  %2 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0t1.internal(i16 8, i16 8, i16 0, i8* %base, i64 %stride)
  %3 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz1.internal(i16 8, i16 8, i16 0, i8* %base, i64 %stride)
  %4 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz1t1.internal(i16 8, i16 8, i16 0, i8* %base, i64 %stride)
  %5 = extractvalue { x86_amx, x86_amx } %4, 0
  %6 = call x86_amx @llvm.x86.ttransposed.internal(i16 8, i16 8, x86_amx %5)
  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %pointer, i64 %stride, x86_amx %6)
  ret void
}
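
; @test_amx_spill keeps the results of five pair loads (plus one tileloadd)
; live across each other, more tiles than the eight tmm registers can hold, so
; the allocator spills tile pairs to 1024-byte stack slots and reloads them
; before the final stores.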
define void @test_amx_spill(i8* %pointer, i8* %base, i64 %stride) #0 {
; CHECK-LABEL: test_amx_spill:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $6088, %rsp # imm = 0x17C8
; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovups %zmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    ldtilecfg -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, %ax
; CHECK-NEXT:    tileloadd (%rsi,%rdx), %tmm0
; CHECK-NEXT:    t2rpntlvwz0 (%rsi,%rdx), %tmm4
; CHECK-NEXT:    t2rpntlvwz0t1 (%rsi,%rdx), %tmm6
; CHECK-NEXT:    tilestored %tmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 1024-byte Folded Spill
; CHECK-NEXT:    tilestored %tmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 1024-byte Folded Spill
; CHECK-NEXT:    t2rpntlvwz1 (%rsi,%rdx), %tmm6
; CHECK-NEXT:    tilestored %tmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 1024-byte Folded Spill
; CHECK-NEXT:    tilestored %tmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 1024-byte Folded Spill
; CHECK-NEXT:    t2rpntlvwz1t1 (%rsi,%rdx), %tmm6
; CHECK-NEXT:    tilestored %tmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 1024-byte Folded Spill
; CHECK-NEXT:    tilestored %tmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 1024-byte Folded Spill
; CHECK-NEXT:    t2rpntlvwz0 (%rsi,%rdx), %tmm6
; CHECK-NEXT:    tilestored %tmm4, (%rsi,%rdx)
; CHECK-NEXT:    tilestored %tmm5, (%rsi,%rdx)
; CHECK-NEXT:    movabsq $64, %rcx
; CHECK-NEXT:    tileloadd 4032(%rsp,%rcx), %tmm4 # 1024-byte Folded Reload
; CHECK-NEXT:    tileloadd 5056(%rsp,%rcx), %tmm5 # 1024-byte Folded Reload
; CHECK-NEXT:    tilestored %tmm4, (%rsi,%rdx)
; CHECK-NEXT:    tilestored %tmm5, (%rsi,%rdx)
; CHECK-NEXT:    tileloadd 1984(%rsp,%rcx), %tmm4 # 1024-byte Folded Reload
; CHECK-NEXT:    tileloadd 3008(%rsp,%rcx), %tmm5 # 1024-byte Folded Reload
; CHECK-NEXT:    tilestored %tmm4, (%rsi,%rdx)
; CHECK-NEXT:    tilestored %tmm5, (%rsi,%rdx)
; CHECK-NEXT:    tileloadd -64(%rsp,%rcx), %tmm4 # 1024-byte Folded Reload
; CHECK-NEXT:    tileloadd 960(%rsp,%rcx), %tmm5 # 1024-byte Folded Reload
; CHECK-NEXT:    tilestored %tmm4, (%rsi,%rdx)
; CHECK-NEXT:    tilestored %tmm5, (%rsi,%rdx)
; CHECK-NEXT:    tilestored %tmm6, (%rsi,%rdx)
; CHECK-NEXT:    tilestored %tmm7, (%rsi,%rdx)
; CHECK-NEXT:    addq $6088, %rsp # imm = 0x17C8
; CHECK-NEXT:    tilerelease
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %a = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 8, i8* %base, i64 %stride)
  %b1 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0.internal(i16 8, i16 8, i16 8, i8* %base, i64 %stride)
  %b2 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0t1.internal(i16 8, i16 8, i16 8, i8* %base, i64 %stride)
  %b3 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz1.internal(i16 8, i16 8, i16 8, i8* %base, i64 %stride)
  %b4 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz1t1.internal(i16 8, i16 8, i16 8, i8* %base, i64 %stride)
  %b5 = call { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0.internal(i16 8, i16 8, i16 8, i8* %base, i64 %stride)
  %e11 = extractvalue { x86_amx, x86_amx } %b1, 0
  %e12 = extractvalue { x86_amx, x86_amx } %b1, 1
  %e21 = extractvalue { x86_amx, x86_amx } %b2, 0
  %e22 = extractvalue { x86_amx, x86_amx } %b2, 1
  %e31 = extractvalue { x86_amx, x86_amx } %b3, 0
  %e32 = extractvalue { x86_amx, x86_amx } %b3, 1
  %e41 = extractvalue { x86_amx, x86_amx } %b4, 0
  %e42 = extractvalue { x86_amx, x86_amx } %b4, 1
  %e51 = extractvalue { x86_amx, x86_amx } %b5, 0
  %e52 = extractvalue { x86_amx, x86_amx } %b5, 1
  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e11)
  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e12)
  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e21)
  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e22)
  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e31)
  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e32)
  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e41)
  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e42)
  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e51)
  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %base, i64 %stride, x86_amx %e52)
  ret void
}
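
; Shape-aware (*.internal) declarations: the pair loads take three i16 shape
; operands plus pointer and stride, and return both destination tiles as
; { x86_amx, x86_amx }.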
declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64)
declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx)
declare { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0.internal(i16, i16, i16, i8*, i64)
declare { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz0t1.internal(i16, i16, i16, i8*, i64)
declare { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz1.internal(i16, i16, i16, i8*, i64)
declare { x86_amx, x86_amx } @llvm.x86.t2rpntlvwz1t1.internal(i16, i16, i16, i8*, i64)
declare x86_amx @llvm.x86.ttransposed.internal(i16, i16, x86_amx)

attributes #0 = { nounwind }