Fix typo "instrinsic" (#112899)
parent 62e2c7fb2d
commit 922992a22f
@@ -169,7 +169,7 @@ static VectorTypeModifier getTupleVTM(unsigned NF) {
 
 static unsigned getIndexedLoadStorePtrIdx(const RVVIntrinsic *RVVI) {
   // We need a special rule for segment load/store since the data width is not
-  // encoded in the instrinsic name itself.
+  // encoded in the intrinsic name itself.
   const StringRef IRName = RVVI->getIRName();
   constexpr unsigned RVV_VTA = 0x1;
   constexpr unsigned RVV_VMA = 0x2;
@@ -192,7 +192,7 @@ static unsigned getIndexedLoadStorePtrIdx(const RVVIntrinsic *RVVI) {
 static unsigned getSegInstLog2SEW(StringRef InstName) {
   // clang-format off
   // We need a special rule for indexed segment load/store since the data width
-  // is not encoded in the instrinsic name itself.
+  // is not encoded in the intrinsic name itself.
   if (InstName.starts_with("vloxseg") || InstName.starts_with("vluxseg") ||
       InstName.starts_with("vsoxseg") || InstName.starts_with("vsuxseg"))
     return (unsigned)-1;
@@ -53,7 +53,7 @@ eN
 </td>
 <td>fdec,
 <p>
-fall-instrinsics
+fall-intrinsics
 </td>
 <td><a href="https://www-01.ibm.com/support/docview.wss?uid=swg27024803&aid=1#page=297">qxlf77</a>,
 <p>
@@ -107,7 +107,7 @@ The denorm value is a nonstandard extension.
 
 #if 0
 ieee_round_type values
-The values are those of the llvm.get.rounding instrinsic, which is assumed by
+The values are those of the llvm.get.rounding intrinsic, which is assumed by
 ieee_arithmetic module rounding procedures.
 #endif
 #define _FORTRAN_RUNTIME_IEEE_TO_ZERO 0
@@ -1690,7 +1690,7 @@ std::optional<SpecificCall> IntrinsicInterface::Match(
   // MAX and MIN (and others that map to them) allow their last argument to
   // be repeated indefinitely. The actualForDummy vector is sized
   // and null-initialized to the non-repeated dummy argument count
-  // for other instrinsics.
+  // for other intrinsics.
   bool isMaxMin{dummyArgPatterns > 0 &&
       dummy[dummyArgPatterns - 1].optionality == Optionality::repeats};
   std::vector<ActualArgument *> actualForDummy(
@@ -284,7 +284,7 @@ struct ForcedSpacing16 {
   }
 };
 
-/// Generate call to Exponent instrinsic runtime routine.
+/// Generate call to Exponent intrinsic runtime routine.
 mlir::Value fir::runtime::genExponent(fir::FirOpBuilder &builder,
                                       mlir::Location loc, mlir::Type resultType,
                                       mlir::Value x) {
@@ -320,7 +320,7 @@ mlir::Value fir::runtime::genExponent(fir::FirOpBuilder &builder,
   return builder.create<fir::CallOp>(loc, func, args).getResult(0);
 }
 
-/// Generate call to Fraction instrinsic runtime routine.
+/// Generate call to Fraction intrinsic runtime routine.
 mlir::Value fir::runtime::genFraction(fir::FirOpBuilder &builder,
                                       mlir::Location loc, mlir::Value x) {
   mlir::func::FuncOp func;
@@ -596,7 +596,7 @@ mlir::Value fir::runtime::genSelectedRealKind(fir::FirOpBuilder &builder,
   return builder.create<fir::CallOp>(loc, func, args).getResult(0);
 }
 
-/// Generate call to Set_exponent instrinsic runtime routine.
+/// Generate call to Set_exponent intrinsic runtime routine.
 mlir::Value fir::runtime::genSetExponent(fir::FirOpBuilder &builder,
                                          mlir::Location loc, mlir::Value x,
                                          mlir::Value i) {
@@ -1513,7 +1513,7 @@ mlir::Value fir::runtime::genSum(fir::FirOpBuilder &builder, mlir::Location loc,
 
 // The IAll, IAny and IParity intrinsics have essentially the same
 // implementation. This macro will generate the function body given the
-// instrinsic name.
+// intrinsic name.
 #define GEN_IALL_IANY_IPARITY(F) \
   mlir::Value fir::runtime::JOIN2(gen, F)( \
       fir::FirOpBuilder & builder, mlir::Location loc, mlir::Value arrayBox, \
@@ -97,7 +97,7 @@ if (LLDB_ENABLE_PYTHON OR LLDB_ENABLE_LUA)
   add_subdirectory(bindings)
 endif ()
 
-# We need the headers generated by instrinsics_gen before we can compile
+# We need the headers generated by intrinsics_gen before we can compile
 # any source file in LLDB as the imported Clang modules might include
 # some of these generated headers. This approach is copied from Clang's main
 # CMakeLists.txt, so it should kept in sync the code in Clang which was added
@@ -1115,7 +1115,7 @@ def int_amdgcn_s_buffer_load : DefaultAttrsIntrinsic <
 // it is const 0. A struct intrinsic with constant 0 index is different to the
 // corresponding raw intrinsic on gfx9+ because the behavior of bound checking
 // and swizzling changes depending on whether idxen is set in the instruction.
-// These instrinsics also keep the offset and soffset arguments separate as
+// These intrinsics also keep the offset and soffset arguments separate as
 // they behave differently in bounds checking and swizzling.
 
 // The versions of these intrinsics that take <4 x i32> arguments are deprecated
@@ -118,7 +118,7 @@ public:
 
   /// Rewrite debug value intrinsics to conform to a new SSA form.
   ///
-  /// This will scout out all the debug value instrinsics associated with
+  /// This will scout out all the debug value intrinsics associated with
   /// the instruction. Anything outside of its block will have its
   /// value set to the new SSA value if available, and undef if not.
   void UpdateDebugValues(Instruction *I);
@@ -580,7 +580,7 @@ std::pair<Value *, Value *> AMDGPUAtomicOptimizerImpl::buildScanIteratively(
   auto *ActiveBits = B.CreatePHI(WaveTy, 2, "ActiveBits");
   ActiveBits->addIncoming(Ballot, EntryBB);
 
-  // Use llvm.cttz instrinsic to find the lowest remaining active lane.
+  // Use llvm.cttz intrinsic to find the lowest remaining active lane.
   auto *FF1 =
       B.CreateIntrinsic(Intrinsic::cttz, WaveTy, {ActiveBits, B.getTrue()});
 
@@ -50,7 +50,7 @@
 /// each of the preceding fields which are relevant for a given instruction
 /// in the opcode space.
 ///
-/// Currently, the policy is represented via the following instrinsic families:
+/// Currently, the policy is represented via the following intrinsic families:
 /// * _MASK - Can represent all three policy states for both tail and mask. If
 ///   passthrough is IMPLICIT_DEF (or NoReg), then represents "undefined".
 ///   Otherwise, policy operand and tablegen flags drive the interpretation.
@@ -708,7 +708,7 @@ defm "" : ReplaceLane<I64x2, 30>;
 defm "" : ReplaceLane<F32x4, 32>;
 defm "" : ReplaceLane<F64x2, 34>;
 
-// For now use an instrinsic for f16x8.replace_lane instead of ReplaceLane above
+// For now use an intrinsic for f16x8.replace_lane instead of ReplaceLane above
 // since LLVM IR generated with half type arguments is not well supported and
 // creates conversions from f16->f32.
 defm REPLACE_LANE_F16x8 :
@@ -54,7 +54,7 @@ define <vscale x 32 x i8> @ld2.nxv32i8_no_eltty(<vscale x 16 x i1> %Pg, i8 *%bas
   ret <vscale x 32 x i8> %res
 }
 
-; ldN instrinsic name with only output type
+; ldN intrinsic name with only output type
 define <vscale x 32 x i8> @ld2.nxv32i8_no_predty_pty(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
 ; CHECK-LABEL: @ld2.nxv32i8_no_predty_pty
 ; CHECK: %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; Test vector add reduction instrinsic
+; Test vector add reduction intrinsic
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z16 | FileCheck %s
 
@@ -50,7 +50,7 @@ exit: ; preds = %bb.f4, %bb.f3, %bb.
   ret void, !dbg !29
 }
 
-; This is testing for debug value instrinsics outside of the threaded block pointing to a value
+; This is testing for debug value intrinsics outside of the threaded block pointing to a value
 ; inside to correctly take any new definitions.
 define void @test2(i32 %cond1, i32 %cond2) !dbg !5 {
 ; CHECK: bb.f3
@@ -1,5 +1,5 @@
 ; RUN: opt -S -passes=sroa %s | FileCheck %s
-; With fake use instrinsics generated for small aggregates, check that when
+; With fake use intrinsics generated for small aggregates, check that when
 ; SROA slices the aggregate, we generate individual fake use intrinsics for
 ; the individual values.
 
@@ -487,7 +487,7 @@ TEST(RandomIRBuilderTest, findSourceAndSink) {
     ASSERT_TRUE(DT.dominates(Insts[IP - 1], Sink));
   }
 }
-TEST(RandomIRBuilderTest, sinkToInstrinsic) {
+TEST(RandomIRBuilderTest, sinkToIntrinsic) {
   const char *Source = "\n\
 declare double @llvm.sqrt.f64(double %Val) \n\
 declare void @llvm.ubsantrap(i8 immarg) cold noreturn nounwind \n\
@@ -678,7 +678,7 @@ lowerReductionWithStartValue(ConversionPatternRewriter &rewriter, Location loc,
                                           vectorOperand, fmf);
 }
 
-/// Overloaded methods to lower a *predicated* reduction to an llvm instrinsic
+/// Overloaded methods to lower a *predicated* reduction to an llvm intrinsic
 /// that requires a start value. This start value format spans across fp
 /// reductions without mask and all the masked reduction intrinsics.
 template <class LLVMVPRedIntrinOp, class ReductionNeutral>
@@ -2180,7 +2180,7 @@ ModuleImport::processDebugIntrinsic(llvm::DbgVariableIntrinsic *dbgIntr,
     return emitError(loc) << "failed to convert a debug intrinsic operand: "
                           << diag(*dbgIntr);
 
-  // Ensure that the debug instrinsic is inserted right after its operand is
+  // Ensure that the debug intrinsic is inserted right after its operand is
   // defined. Otherwise, the operand might not necessarily dominate the
   // intrinsic. If the defining operation is a terminator, insert the intrinsic
   // into a dominated block.