[RISCV][GISEL] Legalize G_ZEXT, G_SEXT, and G_ANYEXT, G_SPLAT_VECTOR, and G_ICMP for scalable vector types

This patch legalizes G_ZEXT, G_SEXT, and G_ANYEXT. If the type is a
legal mask type, then the instruction is legalized as the element-wise
select, where the condition on the select is the mask typed source
operand, and the true and false values are 1 or -1 (for
zero/any-extension and sign extension) and zero. If the type is a legal integer
or vector integer type, then the instruction is marked as legal.

The legalization of the extends may introduce a G_SPLAT_VECTOR, which
needs to be legalized in this patch for the extend test cases to pass.

A G_SPLAT_VECTOR is legal if the vector type is a legal integer or
floating point vector type and the source operand is sXLen type. This is
because the SelectionDAG patterns only support sXLen typed
ISD::SPLAT_VECTORS, and we'd like to reuse those patterns. A
G_SPLAT_VECTOR is custom legalized if it has a legal s1 element vector
type and s1 scalar operand. It is legalized to G_VMSET_VL or G_VMCLR_VL
if the splat is all ones or all zeros respectively. In the case of a
non-constant mask splat, we legalize by promoting the scalar value to
s8.

In order to get the s8 element vector back into s1 vector, we use a
G_ICMP. In order for the splat vector and extend tests to pass, we also
need to legalize G_ICMP in this patch.

A G_ICMP is legal if the destination type is a legal bool vector and the LHS and
RHS are legal integer vector types.
This commit is contained in:
Michael Maitland 2024-03-07 13:40:30 -08:00
parent 029e1d7515
commit 8aa3a77eaf
14 changed files with 7436 additions and 16 deletions

View File

@ -3006,6 +3006,15 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
Observer.changedInstr(MI); Observer.changedInstr(MI);
return Legalized; return Legalized;
} }
case TargetOpcode::G_SPLAT_VECTOR: {
if (TypeIdx != 1)
return UnableToLegalize;
Observer.changingInstr(MI);
widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
Observer.changedInstr(MI);
return Legalized;
}
} }
} }

View File

@ -1278,7 +1278,7 @@ MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
return DstTy.isScalar(); return DstTy.isScalar();
else else
return DstTy.isVector() && return DstTy.isVector() &&
DstTy.getNumElements() == Op0Ty.getNumElements(); DstTy.getElementCount() == Op0Ty.getElementCount();
}() && "Type Mismatch"); }() && "Type Mismatch");
break; break;
} }

View File

@ -139,20 +139,21 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
.clampScalar(0, s32, sXLen) .clampScalar(0, s32, sXLen)
.minScalarSameAs(1, 0); .minScalarSameAs(1, 0);
auto &ExtActions =
getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
.legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)));
if (ST.is64Bit()) { if (ST.is64Bit()) {
getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT}) ExtActions.legalFor({{sXLen, s32}});
.legalFor({{sXLen, s32}})
.maxScalar(0, sXLen);
getActionDefinitionsBuilder(G_SEXT_INREG) getActionDefinitionsBuilder(G_SEXT_INREG)
.customFor({sXLen}) .customFor({sXLen})
.maxScalar(0, sXLen) .maxScalar(0, sXLen)
.lower(); .lower();
} else { } else {
getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT}).maxScalar(0, sXLen);
getActionDefinitionsBuilder(G_SEXT_INREG).maxScalar(0, sXLen).lower(); getActionDefinitionsBuilder(G_SEXT_INREG).maxScalar(0, sXLen).lower();
} }
ExtActions.customIf(typeIsLegalBoolVec(1, BoolVecTys, ST))
.maxScalar(0, sXLen);
// Merge/Unmerge // Merge/Unmerge
for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) { for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
@ -235,7 +236,9 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
getActionDefinitionsBuilder(G_ICMP) getActionDefinitionsBuilder(G_ICMP)
.legalFor({{sXLen, sXLen}, {sXLen, p0}}) .legalFor({{sXLen, sXLen}, {sXLen, p0}})
.widenScalarToNextPow2(1) .legalIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST),
typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)))
.widenScalarOrEltToNextPow2OrMinSize(1, 8)
.clampScalar(1, sXLen, sXLen) .clampScalar(1, sXLen, sXLen)
.clampScalar(0, sXLen, sXLen); .clampScalar(0, sXLen, sXLen);
@ -418,6 +421,29 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
.clampScalar(0, sXLen, sXLen) .clampScalar(0, sXLen, sXLen)
.customFor({sXLen}); .customFor({sXLen});
auto &SplatActions =
getActionDefinitionsBuilder(G_SPLAT_VECTOR)
.legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
typeIs(1, sXLen)))
.customIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST), typeIs(1, s1)));
// Handle case of s64 element vectors on RV32. If the subtarget does not have
// f64, then try to lower it to G_SPLAT_VECTOR_SPLIT_64_VL. If the subtarget
// does have f64, then we don't know whether the type is an f64 or an i64,
// so mark the G_SPLAT_VECTOR as legal and decide later what to do with it,
// depending on how the instructions it consumes are legalized. They are not
// legalized yet since legalization is in reverse postorder, so we cannot
// make the decision at this moment.
if (XLen == 32) {
if (ST.hasVInstructionsF64() && ST.hasStdExtD())
SplatActions.legalIf(all(
typeInSet(0, {nxv1s64, nxv2s64, nxv4s64, nxv8s64}), typeIs(1, s64)));
else if (ST.hasVInstructionsI64())
SplatActions.customIf(all(
typeInSet(0, {nxv1s64, nxv2s64, nxv4s64, nxv8s64}), typeIs(1, s64)));
}
SplatActions.clampScalar(1, sXLen, sXLen);
getLegacyLegalizerInfo().computeTables(); getLegacyLegalizerInfo().computeTables();
} }
@ -576,7 +602,145 @@ bool RISCVLegalizerInfo::legalizeVScale(MachineInstr &MI,
auto VScale = MIB.buildLShr(XLenTy, VLENB, MIB.buildConstant(XLenTy, 3)); auto VScale = MIB.buildLShr(XLenTy, VLENB, MIB.buildConstant(XLenTy, 3));
MIB.buildMul(Dst, VScale, MIB.buildConstant(XLenTy, Val)); MIB.buildMul(Dst, VScale, MIB.buildConstant(XLenTy, Val));
} }
MI.eraseFromParent();
return true;
}
// Custom-lower extensions from mask vectors via an element-wise select:
//   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
// Sign-extension splats -1 into true lanes; zero- and any-extension splat 1
// (any-extension is lowered identically to zero-extension).
bool RISCVLegalizerInfo::legalizeExt(MachineInstr &MI,
                                     MachineIRBuilder &MIB) const {
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_ZEXT || Opc == TargetOpcode::G_SEXT ||
         Opc == TargetOpcode::G_ANYEXT);

  MachineRegisterInfo &MRI = *MIB.getMRI();
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  LLT DstTy = MRI.getType(Dst);
  LLT EltTy = DstTy.getElementType();
  // The value a "true" mask lane extends to: all-ones for sext, 1 otherwise.
  const int64_t TrueVal = (Opc == TargetOpcode::G_SEXT) ? -1 : 1;

  // Build the zero splat first, then the true-value splat, matching the
  // order the resulting MIR is expected in.
  auto ZeroSplat = MIB.buildSplatVector(DstTy, MIB.buildConstant(EltTy, 0));
  auto TrueSplat =
      MIB.buildSplatVector(DstTy, MIB.buildConstant(EltTy, TrueVal));
  MIB.buildSelect(Dst, Src, TrueSplat, ZeroSplat);
  MI.eraseFromParent();
  return true;
}
/// Return the mask type suitable for masking the provided vector type:
/// an i1-element vector with the same (possibly scalable) element count.
static LLT getMaskTypeFor(LLT VecTy) {
  assert(VecTy.isVector());
  return LLT::vector(VecTy.getElementCount(), LLT::scalar(1));
}
/// Creates an all-ones mask suitable for masking a vector of type VecTy with
/// vector length VL, by emitting a G_VMSET_VL of the matching mask type.
/// \p MRI is unused; it is kept so the signature matches the sibling helpers.
static MachineInstrBuilder buildAllOnesMask(LLT VecTy, const SrcOp &VL,
                                            MachineIRBuilder &MIB,
                                            MachineRegisterInfo &MRI) {
  return MIB.buildInstr(RISCV::G_VMSET_VL, {getMaskTypeFor(VecTy)}, {VL});
}
/// Gets the two common "VL" operands: an all-ones mask and the vector length.
/// \p Dst must have a scalable vector type.
static std::pair<MachineInstrBuilder, Register>
buildDefaultVLOps(const DstOp &Dst, MachineIRBuilder &MIB,
                  MachineRegisterInfo &MRI) {
  LLT Ty = Dst.getLLTTy(MRI);
  assert(Ty.isScalableVector() && "Expecting scalable container type");
  // X0 as the AVL operand encodes VLMAX.
  Register VL(RISCV::X0);
  return {buildAllOnesMask(Ty, VL, MIB, MRI), VL};
}
/// Build a G_SPLAT_VECTOR_SPLIT_I64_VL splatting an s64 value given as its
/// two s32 halves \p Lo and \p Hi into \p Dst with vector length \p VL.
/// \p MRI is currently unused here; kept for signature parity with the
/// sibling helpers.
static MachineInstrBuilder
buildSplatPartsS64WithVL(const DstOp &Dst, const SrcOp &Passthru, Register Lo,
                         Register Hi, Register VL, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI) {
  // TODO: If the Hi bits of the splat are undefined, then it's fine to just
  // splat Lo even if it might be sign extended. I don't think we have
  // introduced a case where we're build a s64 where the upper bits are undef
  // yet.

  // Fall back to a stack store and stride x0 vector load.
  // TODO: need to lower G_SPLAT_VECTOR_SPLIT_I64. This is done in
  // preprocessDAG in SDAG.
  return MIB.buildInstr(RISCV::G_SPLAT_VECTOR_SPLIT_I64_VL, {Dst},
                        {Passthru, Lo, Hi, VL});
}
/// Splat an s64 scalar by unmerging it into two s32 halves and forwarding
/// them to buildSplatPartsS64WithVL. \p Scalar must have type s64.
static MachineInstrBuilder
buildSplatSplitS64WithVL(const DstOp &Dst, const SrcOp &Passthru,
                         const SrcOp &Scalar, Register VL,
                         MachineIRBuilder &MIB, MachineRegisterInfo &MRI) {
  assert(Scalar.getLLTTy(MRI) == LLT::scalar(64) && "Unexpected VecTy!");
  auto Parts = MIB.buildUnmerge(LLT::scalar(32), Scalar);
  return buildSplatPartsS64WithVL(Dst, Passthru, Parts.getReg(0),
                                  Parts.getReg(1), VL, MIB, MRI);
}
// Lower splats of s1 types to G_ICMP. For each mask vector type, we have a
// legal equivalently-sized i8 type, so we can use that as a go-between.
// Splats of s1 types that have constant value can be legalized as VMSET_VL or
// VMCLR_VL.
//
// NOTE(fix): the rendered diff duplicated the trailing
// eraseFromParent/return/brace lines; restored to a single epilogue.
bool RISCVLegalizerInfo::legalizeSplatVector(MachineInstr &MI,
                                             MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_SPLAT_VECTOR);

  MachineRegisterInfo &MRI = *MIB.getMRI();
  Register Dst = MI.getOperand(0).getReg();
  Register SplatVal = MI.getOperand(1).getReg();

  LLT VecTy = MRI.getType(Dst);
  LLT XLenTy(STI.getXLenVT());

  // Handle case of s64 element vectors on rv32: split the scalar into two
  // s32 halves and emit a G_SPLAT_VECTOR_SPLIT_I64_VL.
  if (XLenTy.getSizeInBits() == 32 &&
      VecTy.getElementType().getSizeInBits() == 64) {
    auto [_, VL] = buildDefaultVLOps(Dst, MIB, MRI);
    buildSplatSplitS64WithVL(Dst, MIB.buildUndef(VecTy), SplatVal, VL, MIB,
                             MRI);
    MI.eraseFromParent();
    return true;
  }

  // All-zeros or all-ones splats are handled specially.
  MachineInstr &SplatValMI = *MRI.getVRegDef(SplatVal);
  if (isAllOnesOrAllOnesSplat(SplatValMI, MRI)) {
    auto VL = buildDefaultVLOps(VecTy, MIB, MRI).second;
    MIB.buildInstr(RISCV::G_VMSET_VL, {Dst}, {VL});
    MI.eraseFromParent();
    return true;
  }
  if (isNullOrNullSplat(SplatValMI, MRI)) {
    auto VL = buildDefaultVLOps(VecTy, MIB, MRI).second;
    MIB.buildInstr(RISCV::G_VMCLR_VL, {Dst}, {VL});
    MI.eraseFromParent();
    return true;
  }

  // Handle non-constant mask splat (i.e. not sure if it's all zeros or all
  // ones) by promoting it to an s8 splat: mask the scalar to its low bit,
  // splat it as s8, and compare the splat against zero to get back an s1
  // vector.
  LLT InterEltTy = LLT::scalar(8);
  LLT InterTy = VecTy.changeElementType(InterEltTy);
  auto ZExtSplatVal = MIB.buildZExt(InterEltTy, SplatVal);
  auto And =
      MIB.buildAnd(InterEltTy, ZExtSplatVal, MIB.buildConstant(InterEltTy, 1));
  auto LHS = MIB.buildSplatVector(InterTy, And);
  auto ZeroSplat =
      MIB.buildSplatVector(InterTy, MIB.buildConstant(InterEltTy, 0));
  MIB.buildICmp(CmpInst::Predicate::ICMP_NE, Dst, LHS, ZeroSplat);
  MI.eraseFromParent();
  return true;
}
@ -640,6 +804,12 @@ bool RISCVLegalizerInfo::legalizeCustom(
return legalizeVAStart(MI, MIRBuilder); return legalizeVAStart(MI, MIRBuilder);
case TargetOpcode::G_VSCALE: case TargetOpcode::G_VSCALE:
return legalizeVScale(MI, MIRBuilder); return legalizeVScale(MI, MIRBuilder);
case TargetOpcode::G_ZEXT:
case TargetOpcode::G_SEXT:
case TargetOpcode::G_ANYEXT:
return legalizeExt(MI, MIRBuilder);
case TargetOpcode::G_SPLAT_VECTOR:
return legalizeSplatVector(MI, MIRBuilder);
} }
llvm_unreachable("expected switch to return"); llvm_unreachable("expected switch to return");

View File

@ -43,6 +43,8 @@ private:
bool legalizeVAStart(MachineInstr &MI, MachineIRBuilder &MIRBuilder) const; bool legalizeVAStart(MachineInstr &MI, MachineIRBuilder &MIRBuilder) const;
bool legalizeVScale(MachineInstr &MI, MachineIRBuilder &MIB) const; bool legalizeVScale(MachineInstr &MI, MachineIRBuilder &MIB) const;
bool legalizeExt(MachineInstr &MI, MachineIRBuilder &MIRBuilder) const;
bool legalizeSplatVector(MachineInstr &MI, MachineIRBuilder &MIB) const;
}; };
} // end namespace llvm } // end namespace llvm
#endif #endif

View File

@ -32,3 +32,28 @@ def G_READ_VLENB : RISCVGenericInstruction {
let hasSideEffects = false; let hasSideEffects = false;
} }
def : GINodeEquiv<G_READ_VLENB, riscv_read_vlenb>; def : GINodeEquiv<G_READ_VLENB, riscv_read_vlenb>;
// Pseudo equivalent to a RISCVISD::VMCLR_VL
// Produces an all-zeros mask register; type1:$vl is the vector length.
def G_VMCLR_VL : RISCVGenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$vl);
  let hasSideEffects = false;
}
def : GINodeEquiv<G_VMCLR_VL, riscv_vmclr_vl>;

// Pseudo equivalent to a RISCVISD::VMSET_VL
// Produces an all-ones mask register; type1:$vl is the vector length.
def G_VMSET_VL : RISCVGenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$vl);
  let hasSideEffects = false;
}
def : GINodeEquiv<G_VMSET_VL, riscv_vmset_vl>;

// Pseudo equivalent to a RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL. There is no
// record to mark as equivalent to using GINodeEquiv because it gets lowered
// before instruction selection.
def G_SPLAT_VECTOR_SPLIT_I64_VL : RISCVGenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$passthru, type1:$hi, type1:$lo, type2:$vl);
  let hasSideEffects = false;
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,810 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck --check-prefix=RV32 %s
# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck --check-prefix=RV64 %s
---
name: icmp_nxv1i1
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv1i1
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
; RV32-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 1 x s8>), [[SELECT1]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv1i1
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
; RV64-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT3]](s64)
; RV64-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 1 x s8>), [[SELECT1]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
%1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 1 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv2i1
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv2i1
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
; RV32-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 2 x s8>), [[SELECT1]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv2i1
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
; RV64-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT3]](s64)
; RV64-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 2 x s8>), [[SELECT1]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
%1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 2 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv4i1
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv4i1
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
; RV32-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 4 x s8>), [[SELECT1]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv4i1
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
; RV64-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT3]](s64)
; RV64-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 4 x s8>), [[SELECT1]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
%1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 4 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv8i1
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv8i1
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
; RV32-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 8 x s8>), [[SELECT1]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv8i1
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
; RV64-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT3]](s64)
; RV64-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 8 x s8>), [[SELECT1]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
%1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 8 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv16i1
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv16i1
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
; RV32-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 16 x s8>), [[SELECT1]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv16i1
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
; RV64-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT3]](s64)
; RV64-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 16 x s8>), [[SELECT1]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
%1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 16 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv32i1
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv32i1
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
; RV32-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 32 x s8>), [[SELECT1]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv32i1
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
; RV64-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT3]](s64)
; RV64-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 32 x s8>), [[SELECT1]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
%1:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 32 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv64i1
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv64i1
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
; RV32-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 64 x s8>), [[SELECT1]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv64i1
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
; RV64-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT3]](s64)
; RV64-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 64 x s8>), [[SELECT1]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
%1:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 64 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv1i8
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv1i8
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s8>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv1i8
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s8>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
%1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 1 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv2i8
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv2i8
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s8>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv2i8
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s8>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
%1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 2 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv4i8
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv4i8
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s8>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv4i8
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s8>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
%1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 4 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv8i8
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv8i8
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s8>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv8i8
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s8>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
%1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 8 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv16i8
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv16i8
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s8>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv16i8
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s8>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
%1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 16 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv32i8
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv32i8
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s8>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv32i8
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s8>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
%1:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 32 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv64i8
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv64i8
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 64 x s8>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv64i8
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 64 x s8>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
%1:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 64 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv1i16
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv1i16
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s16>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv1i16
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s16>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
%1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 1 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv2i16
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv2i16
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s16>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv2i16
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s16>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
%1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 2 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv4i16
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv4i16
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s16>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv4i16
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s16>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
%1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 4 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv8i16
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv8i16
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s16>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv8i16
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s16>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
%1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 8 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv16i16
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv16i16
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s16>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv16i16
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s16>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
%1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 16 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv32i16
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv32i16
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s16>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv32i16
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s16>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
%1:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 32 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv1i32
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv1i32
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s32>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv1i32
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s32>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
%1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 1 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv2i32
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv2i32
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s32>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv2i32
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s32>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
%1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 2 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv4i32
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv4i32
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s32>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv4i32
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s32>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
%1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 4 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv8i32
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv8i32
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s32>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv8i32
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s32>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
%1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 8 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv16i32
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv16i32
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s32>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv16i32
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s32>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
%1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 16 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv1i64
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv1i64
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s64>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv1i64
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s64>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
%1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 1 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv2i64
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv2i64
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s64>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv2i64
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s64>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
%1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 2 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv4i64
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv4i64
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s64>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv4i64
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s64>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
%1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 4 x s1>)
PseudoRET implicit $v8
...
---
name: icmp_nxv8i64
legalized: true
tracksRegLiveness: true
body: |
bb.0.entry:
; RV32-LABEL: name: icmp_nxv8i64
; RV32: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s64>), [[DEF]]
; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: icmp_nxv8i64
; RV64: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s64>), [[DEF]]
; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
; RV64-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
%1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0, %0
$v8 = COPY %1(<vscale x 8 x s1>)
PseudoRET implicit $v8
...

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,694 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
---
name: splatvector_nxv1i1_0
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv1i1_0
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMCLR_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 1 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 0
%1:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 1 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv1i1_1
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv1i1_1
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 1 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 1
%1:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 1 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv1i1_2
legalized: false
tracksRegLiveness: true
body: |
bb.1:
liveins: $x10
; CHECK-LABEL: name: splatvector_nxv1i1_2
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[AND1]](s32)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 1 x s8>), [[SPLAT_VECTOR1]]
; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 1 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s32) = COPY $x10
%1:_(s1) = G_TRUNC %0(s32)
%2:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %1(s1)
$v0 = COPY %2(<vscale x 1 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv2i1_0
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv2i1_0
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMCLR_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 2 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 0
%1:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 2 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv2i1_1
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv2i1_1
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 2 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 1
%1:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 2 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv2i1_2
legalized: false
tracksRegLiveness: true
body: |
bb.1:
liveins: $x10
; CHECK-LABEL: name: splatvector_nxv2i1_2
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[AND1]](s32)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR1]]
; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 2 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s32) = COPY $x10
%1:_(s1) = G_TRUNC %0(s32)
%2:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %1(s1)
$v0 = COPY %2(<vscale x 2 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv4i1_0
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv4i1_0
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMCLR_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 4 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 0
%1:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 4 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv4i1_1
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv4i1_1
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 4 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 1
%1:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 4 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv4i1_2
legalized: false
tracksRegLiveness: true
body: |
bb.1:
liveins: $x10
; CHECK-LABEL: name: splatvector_nxv4i1_2
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[AND1]](s32)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 4 x s8>), [[SPLAT_VECTOR1]]
; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 4 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s32) = COPY $x10
%1:_(s1) = G_TRUNC %0(s32)
%2:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %1(s1)
$v0 = COPY %2(<vscale x 4 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv8i1_0
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv8i1_0
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMCLR_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 8 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 0
%1:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 8 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv8i1_1
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv8i1_1
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 8 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 1
%1:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 8 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv8i1_2
legalized: false
tracksRegLiveness: true
body: |
bb.1:
liveins: $x10
; CHECK-LABEL: name: splatvector_nxv8i1_2
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[AND1]](s32)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 8 x s8>), [[SPLAT_VECTOR1]]
; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 8 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s32) = COPY $x10
%1:_(s1) = G_TRUNC %0(s32)
%2:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %1(s1)
$v0 = COPY %2(<vscale x 8 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv16i1_0
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv16i1_0
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMCLR_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 16 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 0
%1:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 16 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv16i1_1
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv16i1_1
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 16 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 1
%1:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 16 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv16i1_2
legalized: false
tracksRegLiveness: true
body: |
bb.1:
liveins: $x10
; CHECK-LABEL: name: splatvector_nxv16i1_2
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[AND1]](s32)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 16 x s8>), [[SPLAT_VECTOR1]]
; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 16 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s32) = COPY $x10
%1:_(s1) = G_TRUNC %0(s32)
%2:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %1(s1)
$v0 = COPY %2(<vscale x 16 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv32i1_0
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv32i1_0
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMCLR_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 32 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 0
%1:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 32 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv32i1_1
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv32i1_1
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 32 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 1
%1:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 32 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv32i1_2
legalized: false
tracksRegLiveness: true
body: |
bb.1:
liveins: $x10
; CHECK-LABEL: name: splatvector_nxv32i1_2
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[AND1]](s32)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 32 x s8>), [[SPLAT_VECTOR1]]
; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 32 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s32) = COPY $x10
%1:_(s1) = G_TRUNC %0(s32)
%2:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %1(s1)
$v0 = COPY %2(<vscale x 32 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv64i1_0
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv64i1_0
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMCLR_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 64 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 0
%1:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 64 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv64i1_1
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv64i1_1
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 64 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 1
%1:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 64 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv64i1_2
legalized: false
tracksRegLiveness: true
body: |
bb.1:
liveins: $x10
; CHECK-LABEL: name: splatvector_nxv64i1_2
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[AND1]](s32)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 64 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 64 x s8>), [[SPLAT_VECTOR1]]
; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 64 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s32) = COPY $x10
%1:_(s1) = G_TRUNC %0(s32)
%2:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %1(s1)
$v0 = COPY %2(<vscale x 64 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv1i8
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv1i8
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8
%1:_(s8) = G_CONSTANT i8 0
%2:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR %1(s8)
$v8 = COPY %2(<vscale x 1 x s8>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv2i8
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv2i8
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8
%1:_(s8) = G_CONSTANT i8 0
%2:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR %1(s8)
$v8 = COPY %2(<vscale x 2 x s8>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv4i8
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv4i8
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8
%1:_(s8) = G_CONSTANT i8 0
%2:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR %1(s8)
$v8 = COPY %2(<vscale x 4 x s8>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv8i8
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv8i8
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8
%1:_(s8) = G_CONSTANT i8 0
%2:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR %1(s8)
$v8 = COPY %2(<vscale x 8 x s8>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv16i8
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv16i8
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%1:_(s8) = G_CONSTANT i8 0
%2:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR %1(s8)
$v8m2 = COPY %2(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
...
---
name: splatvector_nxv1i16
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv1i16
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%1:_(s16) = G_CONSTANT i16 0
%2:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR %1(s16)
$v8 = COPY %2(<vscale x 1 x s16>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv2i16
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv2i16
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%1:_(s16) = G_CONSTANT i16 0
%2:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR %1(s16)
$v8 = COPY %2(<vscale x 2 x s16>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv4i16
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv4i16
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%1:_(s16) = G_CONSTANT i16 0
%2:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR %1(s16)
$v8 = COPY %2(<vscale x 4 x s16>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv8i16
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv8i16
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%1:_(s16) = G_CONSTANT i16 0
%2:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR %1(s16)
$v8m2 = COPY %2(<vscale x 8 x s16>)
PseudoRET implicit $v8m2
...
---
name: splatvector_nxv16i16
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv16i16
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8m4
%1:_(s16) = G_CONSTANT i16 0
%2:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR %1(s16)
$v8m4 = COPY %2(<vscale x 16 x s16>)
PseudoRET implicit $v8m4
...
---
name: splatvector_nxv1i32
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv1i32
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8
%1:_(s32) = G_CONSTANT i32 0
%2:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8 = COPY %2(<vscale x 1 x s32>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv2i32
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv2i32
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8
%1:_(s32) = G_CONSTANT i32 0
%2:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8 = COPY %2(<vscale x 2 x s32>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv4i32
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv4i32
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%1:_(s32) = G_CONSTANT i32 0
%2:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8m2 = COPY %2(<vscale x 4 x s32>)
PseudoRET implicit $v8m2
...
---
name: splatvector_nxv8i32
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv8i32
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8m4
%1:_(s32) = G_CONSTANT i32 0
%2:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8m4 = COPY %2(<vscale x 8 x s32>)
PseudoRET implicit $v8m4
...
---
name: splatvector_nxv16i32
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv16i32
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8m8
%1:_(s32) = G_CONSTANT i32 0
%2:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8m8 = COPY %2(<vscale x 16 x s32>)
PseudoRET implicit $v8m8
...

View File

@ -0,0 +1,817 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
---
name: splatvector_nxv1i1_0
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv1i1_0
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMCLR_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 1 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 0
%1:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 1 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv1i1_1
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv1i1_1
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 1 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 1
%1:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 1 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv1i1_2
legalized: false
tracksRegLiveness: true
body: |
bb.1:
liveins: $x10
; CHECK-LABEL: name: splatvector_nxv1i1_2
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[AND1]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 1 x s8>), [[SPLAT_VECTOR1]]
; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 1 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s64) = COPY $x10
%1:_(s1) = G_TRUNC %0(s64)
%2:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %1(s1)
$v0 = COPY %2(<vscale x 1 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv2i1_0
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv2i1_0
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMCLR_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 2 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 0
%1:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 2 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv2i1_1
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv2i1_1
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 2 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 1
%1:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 2 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv2i1_2
legalized: false
tracksRegLiveness: true
body: |
bb.1:
liveins: $x10
; CHECK-LABEL: name: splatvector_nxv2i1_2
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[AND1]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR1]]
; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 2 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s64) = COPY $x10
%1:_(s1) = G_TRUNC %0(s64)
%2:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %1(s1)
$v0 = COPY %2(<vscale x 2 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv4i1_0
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv4i1_0
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMCLR_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 4 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 0
%1:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 4 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv4i1_1
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv4i1_1
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 4 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 1
%1:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 4 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv4i1_2
legalized: false
tracksRegLiveness: true
body: |
bb.1:
liveins: $x10
; CHECK-LABEL: name: splatvector_nxv4i1_2
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[AND1]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 4 x s8>), [[SPLAT_VECTOR1]]
; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 4 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s64) = COPY $x10
%1:_(s1) = G_TRUNC %0(s64)
%2:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %1(s1)
$v0 = COPY %2(<vscale x 4 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv8i1_0
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv8i1_0
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMCLR_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 8 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 0
%1:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 8 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv8i1_1
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv8i1_1
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 8 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 1
%1:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 8 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv8i1_2
legalized: false
tracksRegLiveness: true
body: |
bb.1:
liveins: $x10
; CHECK-LABEL: name: splatvector_nxv8i1_2
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[AND1]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 8 x s8>), [[SPLAT_VECTOR1]]
; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 8 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s64) = COPY $x10
%1:_(s1) = G_TRUNC %0(s64)
%2:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %1(s1)
$v0 = COPY %2(<vscale x 8 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv16i1_0
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv16i1_0
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMCLR_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 16 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 0
%1:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 16 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv16i1_1
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv16i1_1
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 16 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 1
%1:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 16 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv16i1_2
legalized: false
tracksRegLiveness: true
body: |
bb.1:
liveins: $x10
; CHECK-LABEL: name: splatvector_nxv16i1_2
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[AND1]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 16 x s8>), [[SPLAT_VECTOR1]]
; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 16 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s64) = COPY $x10
%1:_(s1) = G_TRUNC %0(s64)
%2:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %1(s1)
$v0 = COPY %2(<vscale x 16 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv32i1_0
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv32i1_0
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMCLR_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 32 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 0
%1:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 32 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv32i1_1
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv32i1_1
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 32 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 1
%1:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 32 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv32i1_2
legalized: false
tracksRegLiveness: true
body: |
bb.1:
liveins: $x10
; CHECK-LABEL: name: splatvector_nxv32i1_2
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[AND1]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 32 x s8>), [[SPLAT_VECTOR1]]
; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 32 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s64) = COPY $x10
%1:_(s1) = G_TRUNC %0(s64)
%2:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %1(s1)
$v0 = COPY %2(<vscale x 32 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv64i1_0
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv64i1_0
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMCLR_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 64 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 0
%1:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 64 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv64i1_1
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv64i1_1
; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 64 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s1) = G_CONSTANT i1 1
%1:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %0(s1)
$v0 = COPY %1(<vscale x 64 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv64i1_2
legalized: false
tracksRegLiveness: true
body: |
bb.1:
liveins: $x10
; CHECK-LABEL: name: splatvector_nxv64i1_2
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[AND1]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 64 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 64 x s8>), [[SPLAT_VECTOR1]]
; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 64 x s1>)
; CHECK-NEXT: PseudoRET implicit $v0
%0:_(s64) = COPY $x10
%1:_(s1) = G_TRUNC %0(s64)
%2:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %1(s1)
$v0 = COPY %2(<vscale x 64 x s1>)
PseudoRET implicit $v0
...
---
name: splatvector_nxv1i8
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv1i8
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
%1:_(s8) = G_CONSTANT i8 0
%2:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR %1(s8)
$v8 = COPY %2(<vscale x 1 x s8>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv2i8
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv2i8
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
%1:_(s8) = G_CONSTANT i8 0
%2:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR %1(s8)
$v8 = COPY %2(<vscale x 2 x s8>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv4i8
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv4i8
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
%1:_(s8) = G_CONSTANT i8 0
%2:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR %1(s8)
$v8 = COPY %2(<vscale x 4 x s8>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv8i8
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv8i8
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
%1:_(s8) = G_CONSTANT i8 0
%2:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR %1(s8)
$v8 = COPY %2(<vscale x 8 x s8>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv16i8
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv16i8
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
%1:_(s8) = G_CONSTANT i8 0
%2:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR %1(s8)
$v8m2 = COPY %2(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
...
---
name: splatvector_nxv1i16
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv1i16
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
%1:_(s16) = G_CONSTANT i16 0
%2:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR %1(s16)
$v8 = COPY %2(<vscale x 1 x s16>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv2i16
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv2i16
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
%1:_(s16) = G_CONSTANT i16 0
%2:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR %1(s16)
$v8 = COPY %2(<vscale x 2 x s16>)
PseudoRET implicit $v8
...
# --- Splat of a zero constant into scalable i16/i32 vectors (RV64) ---
# For element types narrower than XLen, the legalizer widens the scalar splat
# operand to s64 with G_ANYEXT so the sXLen-typed SelectionDAG splat patterns
# can be reused. The register class grows with LMUL: v8 / v8m2 / v8m4 / v8m8.
---
name: splatvector_nxv4i16
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv4i16
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
%1:_(s16) = G_CONSTANT i16 0
%2:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR %1(s16)
$v8 = COPY %2(<vscale x 4 x s16>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv8i16
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv8i16
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
%1:_(s16) = G_CONSTANT i16 0
%2:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR %1(s16)
$v8m2 = COPY %2(<vscale x 8 x s16>)
PseudoRET implicit $v8m2
...
---
name: splatvector_nxv16i16
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv16i16
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8m4
%0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
%1:_(s16) = G_CONSTANT i16 0
%2:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR %1(s16)
$v8m4 = COPY %2(<vscale x 16 x s16>)
PseudoRET implicit $v8m4
...
# s32 elements: still narrower than XLen on RV64, so the s32 splat operand is
# likewise anyext'ed to s64.
---
name: splatvector_nxv1i32
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv1i32
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
%1:_(s32) = G_CONSTANT i32 0
%2:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8 = COPY %2(<vscale x 1 x s32>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv2i32
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv2i32
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
%1:_(s32) = G_CONSTANT i32 0
%2:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8 = COPY %2(<vscale x 2 x s32>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv4i32
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv4i32
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
%1:_(s32) = G_CONSTANT i32 0
%2:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8m2 = COPY %2(<vscale x 4 x s32>)
PseudoRET implicit $v8m2
...
---
name: splatvector_nxv8i32
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv8i32
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8m4
%0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
%1:_(s32) = G_CONSTANT i32 0
%2:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8m4 = COPY %2(<vscale x 8 x s32>)
PseudoRET implicit $v8m4
...
---
name: splatvector_nxv16i32
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv16i32
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8m8
%0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
%1:_(s32) = G_CONSTANT i32 0
%2:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8m8 = COPY %2(<vscale x 16 x s32>)
PseudoRET implicit $v8m8
...
# s64 elements: the splat operand is already sXLen wide on RV64, so no
# widening is required and the G_SPLAT_VECTOR is legal as-is.
---
name: splatvector_nxv1i64
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv1i64
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8
%0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
%1:_(s64) = G_CONSTANT i64 0
%2:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR %1(s64)
$v8 = COPY %2(<vscale x 1 x s64>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv2i64
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv2i64
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
%1:_(s64) = G_CONSTANT i64 0
%2:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR %1(s64)
$v8m2 = COPY %2(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
...
---
name: splatvector_nxv4i64
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv4i64
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m4
%0:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
%1:_(s64) = G_CONSTANT i64 0
%2:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR %1(s64)
$v8m4 = COPY %2(<vscale x 4 x s64>)
PseudoRET implicit $v8m4
...
---
name: splatvector_nxv8i64
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; CHECK-LABEL: name: splatvector_nxv8i64
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m8
%0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
%1:_(s64) = G_CONSTANT i64 0
%2:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR %1(s64)
$v8m8 = COPY %2(<vscale x 8 x s64>)
PseudoRET implicit $v8m8
...

View File

@ -0,0 +1,116 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck --check-prefix=HasF64 %s
# RUN: llc -mtriple=riscv32 -mattr=+Zve64x -run-pass=legalizer %s -o - | FileCheck --check-prefix=NoF64 %s
# Splat of an s64 scalar on RV32, where the scalar is wider than XLen and must
# be split into two s32 halves. The checks show two lowerings:
#   HasF64 (+v): the halves are re-merged with G_MERGE_VALUES and a plain
#     G_SPLAT_VECTOR of the s64 value is kept.
#   NoF64 (+Zve64x): the splat is lowered to G_SPLAT_VECTOR_SPLIT_I64_VL,
#     which consumes the two s32 halves separately with a $x0 (VLMAX) VL
#     operand; a G_VMSET_VL all-ones mask is also materialized.
# NOTE(review): the HasF64/NoF64 split presumably hinges on double-precision
# FP support for moving a full i64 — confirm against the legalizer rules.
---
name: splatvector_nxv1i64
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; HasF64-LABEL: name: splatvector_nxv1i64
; HasF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; HasF64-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; HasF64-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[DEF]](s32), [[DEF1]](s32)
; HasF64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
; HasF64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
; HasF64-NEXT: PseudoRET implicit $v8
;
; NoF64-LABEL: name: splatvector_nxv1i64
; NoF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; NoF64-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; NoF64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
; NoF64-NEXT: [[DEF2:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
; NoF64-NEXT: [[SPLAT_VECTOR_SPLIT_I64_VL:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR_SPLIT_I64_VL [[DEF2]], [[DEF]](s32), [[DEF1]], $x0
; NoF64-NEXT: $v8 = COPY [[SPLAT_VECTOR_SPLIT_I64_VL]](<vscale x 1 x s64>)
; NoF64-NEXT: PseudoRET implicit $v8
%0:_(s64) = G_IMPLICIT_DEF
%1:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR %0(s64)
$v8 = COPY %1(<vscale x 1 x s64>)
PseudoRET implicit $v8
...
---
name: splatvector_nxv2i64
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; HasF64-LABEL: name: splatvector_nxv2i64
; HasF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; HasF64-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; HasF64-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[DEF]](s32), [[DEF1]](s32)
; HasF64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
; HasF64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
; HasF64-NEXT: PseudoRET implicit $v8m2
;
; NoF64-LABEL: name: splatvector_nxv2i64
; NoF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; NoF64-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; NoF64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
; NoF64-NEXT: [[DEF2:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
; NoF64-NEXT: [[SPLAT_VECTOR_SPLIT_I64_VL:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR_SPLIT_I64_VL [[DEF2]], [[DEF]](s32), [[DEF1]], $x0
; NoF64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR_SPLIT_I64_VL]](<vscale x 2 x s64>)
; NoF64-NEXT: PseudoRET implicit $v8m2
%0:_(s64) = G_IMPLICIT_DEF
%1:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR %0(s64)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
...
---
name: splatvector_nxv4i64
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; HasF64-LABEL: name: splatvector_nxv4i64
; HasF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; HasF64-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; HasF64-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[DEF]](s32), [[DEF1]](s32)
; HasF64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
; HasF64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
; HasF64-NEXT: PseudoRET implicit $v8m4
;
; NoF64-LABEL: name: splatvector_nxv4i64
; NoF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; NoF64-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; NoF64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
; NoF64-NEXT: [[DEF2:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
; NoF64-NEXT: [[SPLAT_VECTOR_SPLIT_I64_VL:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR_SPLIT_I64_VL [[DEF2]], [[DEF]](s32), [[DEF1]], $x0
; NoF64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR_SPLIT_I64_VL]](<vscale x 4 x s64>)
; NoF64-NEXT: PseudoRET implicit $v8m4
%0:_(s64) = G_IMPLICIT_DEF
%1:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR %0(s64)
$v8m4 = COPY %1(<vscale x 4 x s64>)
PseudoRET implicit $v8m4
...
---
name: splatvector_nxv8i64
legalized: false
tracksRegLiveness: true
body: |
bb.1:
; HasF64-LABEL: name: splatvector_nxv8i64
; HasF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; HasF64-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; HasF64-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[DEF]](s32), [[DEF1]](s32)
; HasF64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
; HasF64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
; HasF64-NEXT: PseudoRET implicit $v8m8
;
; NoF64-LABEL: name: splatvector_nxv8i64
; NoF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; NoF64-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; NoF64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
; NoF64-NEXT: [[DEF2:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
; NoF64-NEXT: [[SPLAT_VECTOR_SPLIT_I64_VL:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR_SPLIT_I64_VL [[DEF2]], [[DEF]](s32), [[DEF1]], $x0
; NoF64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR_SPLIT_I64_VL]](<vscale x 8 x s64>)
; NoF64-NEXT: PseudoRET implicit $v8m8
%0:_(s64) = G_IMPLICIT_DEF
%1:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR %0(s64)
$v8m8 = COPY %1(<vscale x 8 x s64>)
PseudoRET implicit $v8m8
...

File diff suppressed because it is too large Load Diff

View File

@@ -24,17 +24,22 @@ body: |
     %4:_(<2 x s32>) = G_IMPLICIT_DEF
     %5:_(s1) = G_FCMP floatpred(oeq), %3, %4
-    ; mismatched element count
+    ; mismatched fixed element count
     ; CHECK: Bad machine code: Generic vector icmp/fcmp must preserve number of
     %6:_(<2 x s32>) = G_IMPLICIT_DEF
     %7:_(<2 x s32>) = G_IMPLICIT_DEF
     %8:_(<4 x s1>) = G_FCMP floatpred(oeq), %6, %7
+    ; mismatched scalable element count
+    ; CHECK: Bad machine code: Generic vector icmp/fcmp must preserve number of
+    %9:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    %10:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    %11:_(<vscale x 4 x s1>) = G_FCMP floatpred(oeq), %9, %10
     ; mismatched scalar element type
     ; CHECK: *** Bad machine code: Type mismatch in generic instruction ***
-    %9:_(s32) = G_FCONSTANT float 0.0
-    %10:_(s64) = G_FCONSTANT float 1.0
-    %11:_(s1) = G_FCMP floatpred(oeq), %9, %10
+    %12:_(s32) = G_FCONSTANT float 0.0
+    %13:_(s64) = G_FCONSTANT float 1.0
+    %14:_(s1) = G_FCMP floatpred(oeq), %12, %13
 ...

View File

@@ -24,17 +24,22 @@ body: |
     %4:_(<2 x s32>) = G_IMPLICIT_DEF
     %5:_(s1) = G_ICMP intpred(eq), %3, %4
-    ; mismatched element count
+    ; mismatched fixed element count
     ; CHECK: Bad machine code: Generic vector icmp/fcmp must preserve number of
     %6:_(<2 x s32>) = G_IMPLICIT_DEF
     %7:_(<2 x s32>) = G_IMPLICIT_DEF
     %8:_(<4 x s1>) = G_ICMP intpred(eq), %6, %7
+    ; mismatched scalable element count
+    ; CHECK: Bad machine code: Generic vector icmp/fcmp must preserve number of
+    %9:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    %10:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+    %11:_(<vscale x 4 x s1>) = G_ICMP intpred(eq), %9, %10
     ; mismatched scalar element type
     ; CHECK: *** Bad machine code: Type mismatch in generic instruction ***
-    %9:_(s32) = G_CONSTANT i32 0
-    %10:_(s64) = G_CONSTANT i32 1
-    %11:_(s1) = G_ICMP intpred(eq), %9, %10
+    %12:_(s32) = G_CONSTANT i32 0
+    %13:_(s64) = G_CONSTANT i32 1
+    %14:_(s1) = G_ICMP intpred(eq), %12, %13
 ...