llvm-project/llvm/test/CodeGen/AMDGPU/extract-load-i1.ll
Björn Pettersson 445973cace
[LegalizeTypes] Handle non byte-sized elt types when splitting INSERT/EXTRACT_VECTOR_ELT (#93357)
DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT and
DAGTypeLegalizer::SplitVecRes_EXTRACT_VECTOR_ELT did not handle
non-byte-sized elements properly. In fact, they only dealt with
elements smaller than 8 bits (as well as byte-sized elements).

This patch generalizes the support for non-byte-sized elements by
always widening the vector elements to the next "round integer type"
(a power-of-2 bit size). This makes sure that a single element can be
accessed via a simple byte-addressed scalar load/store.
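
For illustration, here is a minimal standalone sketch of the rounding
rule described above (a hypothetical helper, not the actual
LegalizeTypes code; the "at least 8 bits" floor is an assumption made
so that the widened element is byte-addressable):

  #include <algorithm>
  #include <bit>
  #include <cstdio>

  // Round a non-byte-sized element width up to the next power of two,
  // and to at least 8 bits, so that a single element can be accessed
  // with a plain byte-addressed scalar load/store.
  static unsigned roundIntegerBits(unsigned Bits) {
    return std::bit_ceil(std::max(Bits, 8u)); // e.g. i1 -> i8, i17 -> i32
  }

  int main() {
    for (unsigned Bits : {1u, 7u, 9u, 17u, 33u})
      std::printf("i%u widens to i%u\n", Bits, roundIntegerBits(Bits));
  }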

The patch also removes a suspicious CustomLowerNode call from
SplitVecRes_INSERT_VECTOR_ELT. Since that call did not reset the Lo/Hi
out arguments before returning, it could have fooled
DAGTypeLegalizer::SplitVectorResult into registering the input vector
as the result. This should not have caused any problems in practice,
though, because DAGTypeLegalizer::SplitVectorResult performs the same
CustomLowerNode call, which makes the code removed by this patch
redundant.
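
As a rough illustration of the hazard (generic C++ with hypothetical
names, not the removed LLVM code): when a callee reports success
without writing its out parameters, the caller ends up registering
whatever those variables already held.

  #include <cstdio>
  #include <string>

  // Pretend the "custom lowering" handled the node but never wrote Lo/Hi.
  static bool customLowerNode(std::string & /*Lo*/, std::string & /*Hi*/) {
    return true; // success reported, out parameters left untouched
  }

  int main() {
    std::string Lo = "<stale value>", Hi = "<stale value>";
    if (customLowerNode(Lo, Hi))
      std::printf("registered Lo=%s Hi=%s\n", Lo.c_str(), Hi.c_str());
  }
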
2024-06-13 11:09:18 +02:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa < %s | FileCheck %s
; FIXME: Inefficient codegen which skips an optimization of load +
; extractelement when the vector element type is not byte-sized.
define i1 @extractloadi1(ptr %ptr, i32 %idx) {
; CHECK-LABEL: extractloadi1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_load_ubyte v0, v[0:1]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_lshrrev_b32_e32 v1, 2, v0
; CHECK-NEXT: v_lshlrev_b32_e32 v3, 5, v0
; CHECK-NEXT: v_and_b32_e32 v4, 2, v0
; CHECK-NEXT: v_lshrrev_b32_e32 v5, 6, v0
; CHECK-NEXT: v_lshrrev_b32_e32 v6, 4, v0
; CHECK-NEXT: v_lshlrev_b32_e32 v7, 3, v0
; CHECK-NEXT: v_lshlrev_b32_e32 v8, 1, v0
; CHECK-NEXT: v_or_b32_e32 v1, v1, v3
; CHECK-NEXT: v_and_b32_e32 v3, 0x100, v7
; CHECK-NEXT: v_and_b32_e32 v7, 0x100, v8
; CHECK-NEXT: v_lshlrev_b32_e32 v4, 7, v4
; CHECK-NEXT: v_or_b32_e32 v3, v6, v3
; CHECK-NEXT: v_or_b32_e32 v5, v5, v7
; CHECK-NEXT: v_or_b32_e32 v0, v0, v4
; CHECK-NEXT: v_and_b32_e32 v1, 0x103, v1
; CHECK-NEXT: v_lshlrev_b32_e32 v4, 16, v5
; CHECK-NEXT: v_lshlrev_b32_e32 v5, 16, v1
; CHECK-NEXT: v_or_b32_e32 v1, v3, v4
; CHECK-NEXT: v_or_b32_e32 v0, v0, v5
; CHECK-NEXT: v_lshlrev_b32_e32 v2, 3, v2
; CHECK-NEXT: v_lshr_b64 v[0:1], v[0:1], v2
; CHECK-NEXT: s_setpc_b64 s[30:31]
%val = load <8 x i1>, ptr %ptr
%ret = extractelement <8 x i1> %val, i32 %idx
ret i1 %ret
}