llvm-project/llvm/test/CodeGen/AMDGPU/scalar_to_vector.v8i16.ll
Jeffrey Byrnes 6118a254ff
[AMDGPU] Allocate AVRegClass last (#146606)
This changes the RC priorities such that AVRegClass is the least
prioritized. These registers are less constrained than the VRegClass and
ARegClass as they can be either agpr or vgpr. Thus, assigning them last
removes unnecessary constraints from VRegClass and ARegClass
assignments, and allows the RA to make smarter decisions about whether
to use vgpr / agpr for AVRegClass.

We only have 5 bits for RC priorities, and we still want to prioritize
larger RCs over smaller ones. Since this new prioritization uses the 5th
bit for AVRegClass vs ARegClass / VRegClass, we only have 4 bits to
encode the size priorities. Previously, each RC with a distinct size,
had a distinct priority. However, this PR groups together multiple sizes
to the same priority. Currently, this will have no effect on
prioritization in practice because we only have one actually defined RC
per group per vector register type.

For example, a register class with 15 or 16 32-bit registers will have
the same size priority (14). However, we only have VReg_512 (VReg_480
doesn't exist), so only one actual RC in VRegClass has this priority.
Similarly, we give register classes with 17 to 32 (or more) 32-bit
registers a size priority of 15, but we only have VReg_1024.

The effect of this PR is to prioritize first the vector register type
(VReg & Areg have top priority, then AVReg), with the size of the
register class having second priority.

Passes PSDB.

---------

Co-authored-by: Matt Arsenault <Matthew.Arsenault@amd.com>
2025-07-25 13:46:41 -07:00

185 lines
7.9 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX900 %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 < %s | FileCheck -check-prefixes=GFX906 %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 < %s | FileCheck -check-prefixes=GFX908 %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a < %s | FileCheck -check-prefixes=GFX90A %s
; Check lowering of a shuffle-of-bitcasts pattern that builds a <8 x i16>
; from a <2 x i32> kernel argument and stores it with one dwordx4.
; The final vector is <in0, in1, in0, in0> when viewed as dwords, which is
; why three of the four stored VGPRs are copies of s0 below.
; All CHECK lines are autogenerated by update_llc_test_checks.py; regenerate
; rather than hand-editing them. GFX90A output differs from the other
; targets in register assignment (even-aligned VGPR pairs for the flat
; address) and keeps an explicit v_and_b32 masking of the workitem id —
; NOTE(review): presumably due to gfx90a register-alignment requirements;
; confirm against the backend if this matters.
define amdgpu_kernel void @scalar_to_vector_v8i16(<2 x i32> %in, ptr %out) #0 {
; GFX900-LABEL: scalar_to_vector_v8i16:
; GFX900: ; %bb.0: ; %entry
; GFX900-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; GFX900-NEXT: v_lshlrev_b32_e32 v4, 4, v0
; GFX900-NEXT: s_add_u32 flat_scratch_lo, s12, s17
; GFX900-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
; GFX900-NEXT: s_waitcnt lgkmcnt(0)
; GFX900-NEXT: v_mov_b32_e32 v5, s3
; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, s2, v4
; GFX900-NEXT: v_mov_b32_e32 v0, s0
; GFX900-NEXT: v_mov_b32_e32 v1, s1
; GFX900-NEXT: v_mov_b32_e32 v2, s0
; GFX900-NEXT: v_mov_b32_e32 v3, s0
; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX900-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX900-NEXT: s_endpgm
;
; GFX906-LABEL: scalar_to_vector_v8i16:
; GFX906: ; %bb.0: ; %entry
; GFX906-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; GFX906-NEXT: v_lshlrev_b32_e32 v4, 4, v0
; GFX906-NEXT: s_add_u32 flat_scratch_lo, s12, s17
; GFX906-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
; GFX906-NEXT: v_mov_b32_e32 v5, s3
; GFX906-NEXT: v_add_co_u32_e32 v4, vcc, s2, v4
; GFX906-NEXT: v_mov_b32_e32 v0, s0
; GFX906-NEXT: v_mov_b32_e32 v1, s1
; GFX906-NEXT: v_mov_b32_e32 v2, s0
; GFX906-NEXT: v_mov_b32_e32 v3, s0
; GFX906-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX906-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX906-NEXT: s_endpgm
;
; GFX908-LABEL: scalar_to_vector_v8i16:
; GFX908: ; %bb.0: ; %entry
; GFX908-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; GFX908-NEXT: v_lshlrev_b32_e32 v4, 4, v0
; GFX908-NEXT: s_add_u32 flat_scratch_lo, s12, s17
; GFX908-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v5, s3
; GFX908-NEXT: v_add_co_u32_e32 v4, vcc, s2, v4
; GFX908-NEXT: v_mov_b32_e32 v0, s0
; GFX908-NEXT: v_mov_b32_e32 v1, s1
; GFX908-NEXT: v_mov_b32_e32 v2, s0
; GFX908-NEXT: v_mov_b32_e32 v3, s0
; GFX908-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX908-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX908-NEXT: s_endpgm
;
; GFX90A-LABEL: scalar_to_vector_v8i16:
; GFX90A: ; %bb.0: ; %entry
; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; GFX90A-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX90A-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX90A-NEXT: s_add_u32 flat_scratch_lo, s12, s17
; GFX90A-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s3
; GFX90A-NEXT: v_add_co_u32_e32 v0, vcc, s2, v0
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: v_mov_b32_e32 v3, s1
; GFX90A-NEXT: v_mov_b32_e32 v4, s0
; GFX90A-NEXT: v_mov_b32_e32 v5, s0
; GFX90A-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX90A-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
; GFX90A-NEXT: s_endpgm
entry:
; Lane 0 of %in, reinterpreted as two i16 values (call them a, b).
%val.1.i32 = extractelement <2 x i32> %in, i64 0
%val.2.vec2.i16 = bitcast i32 %val.1.i32 to <2 x i16>
; Both shuffle operands are the same <2 x i16>, so indices 0-3 of the
; concatenation select lanes (a,b,a,b); the full mask yields <a,b,a,b,a,b,a,b>.
%val.3.vec8.i16 = shufflevector <2 x i16> %val.2.vec2.i16, <2 x i16> %val.2.vec2.i16, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
; Self-concatenate %in to <in0,in1,in0,in1>, then view it as eight i16 lanes.
%val.4.vec4.i32 = shufflevector <2 x i32> %in, <2 x i32> %in, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%val.5.vec8.i16 = bitcast <4 x i32> %val.4.vec4.i32 to <8 x i16>
; Lanes 0-5 come from %val.5 (= in0,in1,in0 as dwords); lanes 8-9 of the
; concatenation are lanes 0-1 of %val.3 (= in0). Net result: <in0,in1,in0,in0>.
%val.6.vec8.i16 = shufflevector <8 x i16> %val.5.vec8.i16, <8 x i16> %val.3.vec8.i16, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
; Per-workitem output slot: out + tid * 16 bytes (one <8 x i16> each).
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%out.gep = getelementptr inbounds <8 x i16>, ptr %out, i64 %tid.ext
store <8 x i16> %val.6.vec8.i16, ptr %out.gep, align 16
ret void
}
; Half-precision float twin of scalar_to_vector_v8i16: the same shuffle of
; bitcasts builds a <8 x half> (dword view <in0, in1, in0, in0>) from a
; <2 x float> argument and stores it with one dwordx4. Since half/float and
; i16/i32 are bit-identical here, the generated code should match the i16
; test modulo the autogenerated move ordering (v3 before v2 below).
; All CHECK lines are autogenerated by update_llc_test_checks.py; regenerate
; rather than hand-editing them.
define amdgpu_kernel void @scalar_to_vector_v8f16(<2 x float> %in, ptr %out) #0 {
; GFX900-LABEL: scalar_to_vector_v8f16:
; GFX900: ; %bb.0: ; %entry
; GFX900-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; GFX900-NEXT: v_lshlrev_b32_e32 v4, 4, v0
; GFX900-NEXT: s_add_u32 flat_scratch_lo, s12, s17
; GFX900-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
; GFX900-NEXT: s_waitcnt lgkmcnt(0)
; GFX900-NEXT: v_mov_b32_e32 v5, s3
; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, s2, v4
; GFX900-NEXT: v_mov_b32_e32 v0, s0
; GFX900-NEXT: v_mov_b32_e32 v1, s1
; GFX900-NEXT: v_mov_b32_e32 v3, s0
; GFX900-NEXT: v_mov_b32_e32 v2, s0
; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX900-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX900-NEXT: s_endpgm
;
; GFX906-LABEL: scalar_to_vector_v8f16:
; GFX906: ; %bb.0: ; %entry
; GFX906-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; GFX906-NEXT: v_lshlrev_b32_e32 v4, 4, v0
; GFX906-NEXT: s_add_u32 flat_scratch_lo, s12, s17
; GFX906-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
; GFX906-NEXT: s_waitcnt lgkmcnt(0)
; GFX906-NEXT: v_mov_b32_e32 v5, s3
; GFX906-NEXT: v_add_co_u32_e32 v4, vcc, s2, v4
; GFX906-NEXT: v_mov_b32_e32 v0, s0
; GFX906-NEXT: v_mov_b32_e32 v1, s1
; GFX906-NEXT: v_mov_b32_e32 v3, s0
; GFX906-NEXT: v_mov_b32_e32 v2, s0
; GFX906-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX906-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX906-NEXT: s_endpgm
;
; GFX908-LABEL: scalar_to_vector_v8f16:
; GFX908: ; %bb.0: ; %entry
; GFX908-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; GFX908-NEXT: v_lshlrev_b32_e32 v4, 4, v0
; GFX908-NEXT: s_add_u32 flat_scratch_lo, s12, s17
; GFX908-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v5, s3
; GFX908-NEXT: v_add_co_u32_e32 v4, vcc, s2, v4
; GFX908-NEXT: v_mov_b32_e32 v0, s0
; GFX908-NEXT: v_mov_b32_e32 v1, s1
; GFX908-NEXT: v_mov_b32_e32 v3, s0
; GFX908-NEXT: v_mov_b32_e32 v2, s0
; GFX908-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX908-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX908-NEXT: s_endpgm
;
; GFX90A-LABEL: scalar_to_vector_v8f16:
; GFX90A: ; %bb.0: ; %entry
; GFX90A-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; GFX90A-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX90A-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX90A-NEXT: s_add_u32 flat_scratch_lo, s12, s17
; GFX90A-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s3
; GFX90A-NEXT: v_add_co_u32_e32 v0, vcc, s2, v0
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: v_mov_b32_e32 v3, s1
; GFX90A-NEXT: v_mov_b32_e32 v5, s0
; GFX90A-NEXT: v_mov_b32_e32 v4, s0
; GFX90A-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX90A-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
; GFX90A-NEXT: s_endpgm
entry:
; Lane 0 of %in, reinterpreted as two half values (call them a, b).
%val.1.float = extractelement <2 x float> %in, i64 0
%val.2.vec2.half = bitcast float %val.1.float to <2 x half>
; Both shuffle operands are the same <2 x half>, so the mask produces
; <a,b,a,b,a,b,a,b>.
%val.3.vec8.half = shufflevector <2 x half> %val.2.vec2.half, <2 x half> %val.2.vec2.half, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
; Self-concatenate %in to <in0,in1,in0,in1>, then view it as eight half lanes.
%val.4.vec4.float = shufflevector <2 x float> %in, <2 x float> %in, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%val.5.vec8.half = bitcast <4 x float> %val.4.vec4.float to <8 x half>
; Lanes 0-5 from %val.5 plus lanes 8-9 (= lanes 0-1 of %val.3) give a final
; dword view of <in0,in1,in0,in0>.
%val.6.vec8.half = shufflevector <8 x half> %val.5.vec8.half, <8 x half> %val.3.vec8.half, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
; Per-workitem output slot: out + tid * 16 bytes (one <8 x half> each).
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%tid.ext = sext i32 %tid to i64
%out.gep = getelementptr inbounds <8 x half>, ptr %out, i64 %tid.ext
store <8 x half> %val.6.vec8.half, ptr %out.gep, align 16
ret void
}
; Intrinsic supplying the per-lane workitem id used to compute store addresses.
declare i32 @llvm.amdgcn.workitem.id.x() #1
attributes #0 = { nounwind }
; readnone marks the workitem-id query as side-effect free, so calls to it
; can be freely hoisted or combined.
attributes #1 = { nounwind readnone }