llvm-project/llvm/test/CodeGen/AArch64/sve-load-compare-store.ll
Benjamin Maxwell 34d88cf6cf [DAG] Allow folding AND of anyext masked_load with >1 user to zext version
This now allows folding an AND of an anyext masked_load to a
zext_masked_load even if the masked load has multiple users.  Doing this
eliminates some redundant ANDs/MOVs for certain AArch64 SVE code.
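
For example, given IR along these lines (a minimal sketch with made-up
names, not this patch's test case), the zext of the masked load can be
lowered as an AND of an any-extending masked load, and the store is a
second user of %ld, which previously blocked the fold:

  define <vscale x 4 x i32> @sketch(ptr %p, ptr %q, <vscale x 4 x i1> %pg) {
    ; The loaded value has two users: the zext and the masked store.
    %ld = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr %p, i32 1, <vscale x 4 x i1> %pg, <vscale x 4 x i16> zeroinitializer)
    ; This zext can appear in the DAG as an AND (0xffff splat) of an anyext
    ; masked_load; that AND may now be folded into the load, turning it
    ; into a zext_masked_load despite the second user below.
    %ext = zext <vscale x 4 x i16> %ld to <vscale x 4 x i32>
    call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> %ld, ptr %q, i32 1, <vscale x 4 x i1> %pg)
    ret <vscale x 4 x i32> %ext
  }

  declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i16>)
  declare void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16>, ptr, i32 immarg, <vscale x 4 x i1>)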

I'm not sure if there are any cases where doing this could negatively
affect the other users of the masked_load.  Looking at other
optimizations of masked loads, most don't apply if the load is used more
than once, so it doesn't look like this would interfere.

Reviewed By: c-rhodes

Differential Revision: https://reviews.llvm.org/D137844
2022-11-18 10:38:09 +00:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64 -mattr=+sve < %s | FileCheck %s
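
; The masked load %2 has two users: the zext feeding the compare and the
; truncating store. LD1H into .s elements is itself a zero-extending load,
; so no separate AND/MOV should be emitted for the zext.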
define void @sve_load_compare_store(ptr noalias nocapture noundef readonly %a, ptr noalias nocapture noundef %b) {
; CHECK-LABEL: sve_load_compare_store:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT:    cmphs p0.s, p0/z, z0.s, #0
; CHECK-NEXT:    st1b { z0.s }, p0, [x1]
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %0)
  %2 = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr %a, i32 1, <vscale x 4 x i1> %1, <vscale x 4 x i16> zeroinitializer)
  %3 = zext <vscale x 4 x i16> %2 to <vscale x 4 x i32>
  %4 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.nxv4i32(<vscale x 4 x i1> %1, <vscale x 4 x i32> %3, <vscale x 4 x i32> zeroinitializer)
  %5 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %4)
  %6 = trunc <vscale x 4 x i16> %2 to <vscale x 4 x i8>
  tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> %6, ptr %b, i32 1, <vscale x 4 x i1> %4)
  ret void
}

declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 immarg)
declare <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
declare i1 @llvm.aarch64.sve.ptest.any.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i16>)
declare void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8>, ptr, i32 immarg, <vscale x 4 x i1>)