[LoongArch] Don't crash on instruction prefetch intrinsics (#135760)

Instead of failing to select during isel, drop the intrinsic in
lowering.

Similar to the corresponding X86 change; see https://reviews.llvm.org/D151050.

Fixes #134624
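
For reference, a minimal reproducer, mirroring the new test added below: an instruction prefetch is an @llvm.prefetch call whose last argument (the cache-type flag) is 0. Before this patch, llc for LoongArch failed to select such a call; with it, the call is simply dropped. The function name here is illustrative only:

declare void @llvm.prefetch(ptr, i32, i32, i32)

define void @f(ptr %p) {
  ; rw = 0 (read), locality = 3, cache type = 0 (instruction prefetch)
  call void @llvm.prefetch(ptr %p, i32 0, i32 3, i32 0)
  ret void
}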

(cherry picked from commit dfb5b6e27ca3f8b79ebd3346d11b3088c1600b81)
leecheechen, 2025-04-16 14:12:00 +08:00 (committed by Tom Stellard)
parent 89adc2d4f9
commit 581772ed07
3 changed files with 49 additions and 1 deletion


@@ -99,7 +99,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
-  setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
+  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
 
   // Expand bitreverse.i16 with native-width bitrev and shift for now, before
   // we get to know which of sll and revb.2h is faster.
@@ -459,10 +459,24 @@ SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
     return lowerBITREVERSE(Op, DAG);
   case ISD::SCALAR_TO_VECTOR:
     return lowerSCALAR_TO_VECTOR(Op, DAG);
+  case ISD::PREFETCH:
+    return lowerPREFETCH(Op, DAG);
   }
   return SDValue();
 }
 
+SDValue LoongArchTargetLowering::lowerPREFETCH(SDValue Op,
+                                               SelectionDAG &DAG) const {
+  // Operand 4 of an ISD::PREFETCH node is the is-data flag of the
+  // @llvm.prefetch call (0 = instruction prefetch, 1 = data prefetch).
+  unsigned IsData = Op.getConstantOperandVal(4);
+
+  // We don't support non-data prefetch.
+  // Just preserve the chain.
+  if (!IsData)
+    return Op.getOperand(0);
+
+  return Op;
+}
+
 SDValue
 LoongArchTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
                                                SelectionDAG &DAG) const {


@@ -337,6 +337,7 @@ private:
   SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerBITREVERSE(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
 
   bool isFPImmLegal(const APFloat &Imm, EVT VT,
                     bool ForCodeSize) const override;


@@ -0,0 +1,33 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+declare void @llvm.prefetch(ptr, i32, i32, i32) nounwind
+
+define dso_local void @prefetch_no_offset(ptr %ptr) nounwind {
+; LA32-LABEL: prefetch_no_offset:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: prefetch_no_offset:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ret
+entry:
+  tail call void @llvm.prefetch(ptr %ptr, i32 0, i32 3, i32 0)
+  ret void
+}
+
+define dso_local void @prefetch_with_offset(ptr %ptr) nounwind {
+; LA32-LABEL: prefetch_with_offset:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: prefetch_with_offset:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ret
+entry:
+  %addr = getelementptr i8, ptr %ptr, i64 200
+  tail call void @llvm.prefetch(ptr %addr, i32 0, i32 3, i32 0)
+  ret void
+}
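
Note that the test above only exercises instruction prefetches, which are now dropped. For contrast, a data prefetch (last argument 1) takes the "return Op" path in lowerPREFETCH and is lowered as before. A hypothetical extra case, not part of this commit, would look like:

declare void @llvm.prefetch(ptr, i32, i32, i32) nounwind

define dso_local void @prefetch_data(ptr %ptr) nounwind {
entry:
  ; is-data = 1: lowerPREFETCH returns the node unchanged.
  tail call void @llvm.prefetch(ptr %ptr, i32 0, i32 3, i32 1)
  ret void
}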