[LLVM] Make more use of IRBuilder::CreateIntrinsic. NFC. (#112706)

Convert many instances of:
  Fn = Intrinsic::getOrInsertDeclaration(...);
  CreateCall(Fn, ...)
to the equivalent CreateIntrinsic call.
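
For illustration, a minimal before/after sketch of the pattern (hypothetical
helper names; assumes the IRBuilderBase::CreateIntrinsic overload taking an
intrinsic ID, a list of overload types, and the call arguments):

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Intrinsics.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  // Before: fetch or insert the declaration, then emit a plain call.
  static Value *emitCtpopOld(IRBuilder<> &B, Module *M, Value *X) {
    Function *Fn =
        Intrinsic::getOrInsertDeclaration(M, Intrinsic::ctpop, X->getType());
    return B.CreateCall(Fn, {X});
  }

  // After: one call; the builder materializes the declaration itself.
  static Value *emitCtpopNew(IRBuilder<> &B, Value *X) {
    return B.CreateIntrinsic(Intrinsic::ctpop, {X->getType()}, {X});
  }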
Jay Foad 2024-10-17 16:20:43 +01:00 committed by GitHub
parent 8c7f80f775
commit 85c17e4092
43 changed files with 251 additions and 453 deletions


@@ -235,13 +235,12 @@ Value *CachingVPExpander::convertEVLToMask(IRBuilder<> &Builder,
   // TODO add caching
   // Scalable vector %evl conversion.
   if (ElemCount.isScalable()) {
-    auto *M = Builder.GetInsertBlock()->getModule();
     Type *BoolVecTy = VectorType::get(Builder.getInt1Ty(), ElemCount);
-    Function *ActiveMaskFunc = Intrinsic::getOrInsertDeclaration(
-        M, Intrinsic::get_active_lane_mask, {BoolVecTy, EVLParam->getType()});
     // `get_active_lane_mask` performs an implicit less-than comparison.
     Value *ConstZero = Builder.getInt32(0);
-    return Builder.CreateCall(ActiveMaskFunc, {ConstZero, EVLParam});
+    return Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
+                                   {BoolVecTy, EVLParam->getType()},
+                                   {ConstZero, EVLParam});
   }
   // Fixed vector %evl conversion.
@@ -299,18 +298,18 @@ Value *CachingVPExpander::expandPredicationToIntCall(
   case Intrinsic::umin: {
     Value *Op0 = VPI.getOperand(0);
     Value *Op1 = VPI.getOperand(1);
-    Function *Fn = Intrinsic::getOrInsertDeclaration(
-        VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
-    Value *NewOp = Builder.CreateCall(Fn, {Op0, Op1}, VPI.getName());
+    Value *NewOp = Builder.CreateIntrinsic(
+        UnpredicatedIntrinsicID, {VPI.getType()}, {Op0, Op1},
+        /*FMFSource=*/nullptr, VPI.getName());
     replaceOperation(*NewOp, VPI);
     return NewOp;
   }
   case Intrinsic::bswap:
   case Intrinsic::bitreverse: {
     Value *Op = VPI.getOperand(0);
-    Function *Fn = Intrinsic::getOrInsertDeclaration(
-        VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
-    Value *NewOp = Builder.CreateCall(Fn, {Op}, VPI.getName());
+    Value *NewOp =
+        Builder.CreateIntrinsic(UnpredicatedIntrinsicID, {VPI.getType()}, {Op},
+                                /*FMFSource=*/nullptr, VPI.getName());
     replaceOperation(*NewOp, VPI);
     return NewOp;
   }
@@ -327,9 +326,9 @@ Value *CachingVPExpander::expandPredicationToFPCall(
   case Intrinsic::fabs:
   case Intrinsic::sqrt: {
     Value *Op0 = VPI.getOperand(0);
-    Function *Fn = Intrinsic::getOrInsertDeclaration(
-        VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
-    Value *NewOp = Builder.CreateCall(Fn, {Op0}, VPI.getName());
+    Value *NewOp =
+        Builder.CreateIntrinsic(UnpredicatedIntrinsicID, {VPI.getType()}, {Op0},
+                                /*FMFSource=*/nullptr, VPI.getName());
     replaceOperation(*NewOp, VPI);
     return NewOp;
   }
@@ -337,9 +336,9 @@ Value *CachingVPExpander::expandPredicationToFPCall(
   case Intrinsic::minnum: {
     Value *Op0 = VPI.getOperand(0);
    Value *Op1 = VPI.getOperand(1);
-    Function *Fn = Intrinsic::getOrInsertDeclaration(
-        VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
-    Value *NewOp = Builder.CreateCall(Fn, {Op0, Op1}, VPI.getName());
+    Value *NewOp = Builder.CreateIntrinsic(
+        UnpredicatedIntrinsicID, {VPI.getType()}, {Op0, Op1},
+        /*FMFSource=*/nullptr, VPI.getName());
     replaceOperation(*NewOp, VPI);
     return NewOp;
   }
@@ -592,12 +591,10 @@ bool CachingVPExpander::discardEVLParameter(VPIntrinsic &VPI) {
   Type *Int32Ty = Type::getInt32Ty(VPI.getContext());
   if (StaticElemCount.isScalable()) {
     // TODO add caching
-    auto *M = VPI.getModule();
-    Function *VScaleFunc =
-        Intrinsic::getOrInsertDeclaration(M, Intrinsic::vscale, Int32Ty);
     IRBuilder<> Builder(VPI.getParent(), VPI.getIterator());
     Value *FactorConst = Builder.getInt32(StaticElemCount.getKnownMinValue());
-    Value *VScale = Builder.CreateCall(VScaleFunc, {}, "vscale");
+    Value *VScale = Builder.CreateIntrinsic(Intrinsic::vscale, Int32Ty, {},
+                                            /*FMFSource=*/nullptr, "vscale");
     MaxEVL = Builder.CreateMul(VScale, FactorConst, "scalable_size",
                                /*NUW*/ true, /*NSW*/ false);
   } else {

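A note on the /*FMFSource=*/nullptr arguments above: the CreateIntrinsic
overload used throughout this patch takes the result name after an optional
FMFSource instruction (roughly CreateIntrinsic(ID, Types, Args, FMFSource,
Name); see llvm/include/llvm/IR/IRBuilder.h for the authoritative
declaration), so naming the result forces the call site to spell out the
FMFSource slot. A minimal sketch with a hypothetical helper:

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Intrinsics.h"
  using namespace llvm;

  static Value *emitVScale(IRBuilder<> &B) {
    // Passing a result name means also passing FMFSource explicitly.
    return B.CreateIntrinsic(Intrinsic::vscale, {B.getInt32Ty()}, {},
                             /*FMFSource=*/nullptr, "vscale");
  }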

@@ -512,8 +512,7 @@ Value* HardwareLoop::InsertIterationSetup(Value *LoopCountInit) {
                       : Intrinsic::test_set_loop_iterations)
             : (UsePhi ? Intrinsic::start_loop_iterations
                       : Intrinsic::set_loop_iterations);
-  Function *LoopIter = Intrinsic::getOrInsertDeclaration(M, ID, Ty);
-  Value *LoopSetup = Builder.CreateCall(LoopIter, LoopCountInit);
+  Value *LoopSetup = Builder.CreateIntrinsic(ID, Ty, LoopCountInit);
   // Use the return value of the intrinsic to control the entry of the loop.
   if (UseLoopGuard) {
@@ -541,10 +540,9 @@ void HardwareLoop::InsertLoopDec() {
                                Attribute::StrictFP))
     CondBuilder.setIsFPConstrained(true);
-  Function *DecFunc = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::loop_decrement, LoopDecrement->getType());
   Value *Ops[] = { LoopDecrement };
-  Value *NewCond = CondBuilder.CreateCall(DecFunc, Ops);
+  Value *NewCond = CondBuilder.CreateIntrinsic(Intrinsic::loop_decrement,
+                                               LoopDecrement->getType(), Ops);
   Value *OldCond = ExitBranch->getCondition();
   ExitBranch->setCondition(NewCond);
@@ -565,10 +563,9 @@ Instruction* HardwareLoop::InsertLoopRegDec(Value *EltsRem) {
                                Attribute::StrictFP))
     CondBuilder.setIsFPConstrained(true);
-  Function *DecFunc = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::loop_decrement_reg, {EltsRem->getType()});
   Value *Ops[] = { EltsRem, LoopDecrement };
-  Value *Call = CondBuilder.CreateCall(DecFunc, Ops);
+  Value *Call = CondBuilder.CreateIntrinsic(Intrinsic::loop_decrement_reg,
+                                            {EltsRem->getType()}, Ops);
   LLVM_DEBUG(dbgs() << "HWLoops: Inserted loop dec: " << *Call << "\n");
   return cast<Instruction>(Call);

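Note that both argument positions accept a single element directly: Ty and
LoopCountInit above are passed bare, relying on ArrayRef's implicit
conversion from one element. A minimal sketch with a hypothetical helper:

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Intrinsics.h"
  using namespace llvm;

  static Value *emitUMax(IRBuilderBase &B, Value *X, Value *Y) {
    // A lone Type* converts implicitly to a one-element ArrayRef<Type *>.
    return B.CreateIntrinsic(Intrinsic::umax, X->getType(), {X, Y});
  }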

@@ -1757,8 +1757,7 @@ static Value *upgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
   Type *Ty = CI.getType();
   Value *Op0 = CI.getOperand(0);
   Value *Op1 = CI.getOperand(1);
-  Function *Intrin = Intrinsic::getOrInsertDeclaration(CI.getModule(), IID, Ty);
-  Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});
+  Value *Res = Builder.CreateIntrinsic(IID, Ty, {Op0, Op1});
   if (CI.arg_size() == 4) { // For masked intrinsics.
     Value *VecSrc = CI.getOperand(2);
@@ -1784,8 +1783,7 @@ static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
   }
   Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
-  Function *Intrin = Intrinsic::getOrInsertDeclaration(CI.getModule(), IID, Ty);
-  Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});
+  Value *Res = Builder.CreateIntrinsic(IID, Ty, {Src, Src, Amt});
   if (CI.arg_size() == 4) { // For masked intrinsics.
     Value *VecSrc = CI.getOperand(2);
@@ -1854,8 +1852,7 @@ static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
   }
   Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
-  Function *Intrin = Intrinsic::getOrInsertDeclaration(CI.getModule(), IID, Ty);
-  Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});
+  Value *Res = Builder.CreateIntrinsic(IID, Ty, {Op0, Op1, Amt});
   unsigned NumArgs = CI.arg_size();
   if (NumArgs >= 4) { // For masked intrinsics.
@@ -1915,9 +1912,8 @@ static Value *upgradeMaskedLoad(IRBuilder<> &Builder, Value *Ptr,
 static Value *upgradeAbs(IRBuilder<> &Builder, CallBase &CI) {
   Type *Ty = CI.getType();
   Value *Op0 = CI.getArgOperand(0);
-  Function *F =
-      Intrinsic::getOrInsertDeclaration(CI.getModule(), Intrinsic::abs, Ty);
-  Value *Res = Builder.CreateCall(F, {Op0, Builder.getInt1(false)});
+  Value *Res = Builder.CreateIntrinsic(Intrinsic::abs, Ty,
+                                       {Op0, Builder.getInt1(false)});
   if (CI.arg_size() == 3)
     Res = emitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));
   return Res;
@@ -2009,9 +2005,8 @@ static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI,
 // Replace a masked intrinsic with an older unmasked intrinsic.
 static Value *upgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI,
                                     Intrinsic::ID IID) {
-  Function *Intrin = Intrinsic::getOrInsertDeclaration(CI.getModule(), IID);
-  Value *Rep = Builder.CreateCall(Intrin,
-                                  { CI.getArgOperand(0), CI.getArgOperand(1) });
+  Value *Rep = Builder.CreateIntrinsic(
+      IID, {}, {CI.getArgOperand(0), CI.getArgOperand(1)});
   return emitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
 }
@@ -2480,9 +2475,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
   } else if (Name == "sse.sqrt.ss" || Name == "sse2.sqrt.sd") {
     Value *Vec = CI->getArgOperand(0);
     Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
-    Function *Intr = Intrinsic::getOrInsertDeclaration(
-        F->getParent(), Intrinsic::sqrt, Elt0->getType());
-    Elt0 = Builder.CreateCall(Intr, Elt0);
+    Elt0 = Builder.CreateIntrinsic(Intrinsic::sqrt, Elt0->getType(), Elt0);
     Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
   } else if (Name.starts_with("avx.sqrt.p") ||
              Name.starts_with("sse2.sqrt.p") ||
@@ -2770,9 +2763,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
         cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
       Intrinsic::ID IID = IsUnsigned ? Intrinsic::x86_avx512_uitofp_round
                                      : Intrinsic::x86_avx512_sitofp_round;
-      Function *F = Intrinsic::getOrInsertDeclaration(CI->getModule(), IID,
-                                                      {DstTy, SrcTy});
-      Rep = Builder.CreateCall(F, {Rep, CI->getArgOperand(3)});
+      Rep = Builder.CreateIntrinsic(IID, {DstTy, SrcTy},
+                                    {Rep, CI->getArgOperand(3)});
     } else {
       Rep = IsUnsigned ? Builder.CreateUIToFP(Rep, DstTy, "cvt")
                        : Builder.CreateSIToFP(Rep, DstTy, "cvt");
@@ -2813,9 +2805,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
     Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
                                    ResultTy->getNumElements());
-    Function *ELd = Intrinsic::getOrInsertDeclaration(
-        F->getParent(), Intrinsic::masked_expandload, ResultTy);
-    Rep = Builder.CreateCall(ELd, {Ptr, MaskVec, CI->getOperand(1)});
+    Rep = Builder.CreateIntrinsic(Intrinsic::masked_expandload, ResultTy,
+                                  {Ptr, MaskVec, CI->getOperand(1)});
   } else if (Name.starts_with("avx512.mask.compress.store.")) {
     auto *ResultTy = cast<VectorType>(CI->getArgOperand(1)->getType());
     Type *PtrTy = ResultTy->getElementType();
@@ -2828,9 +2819,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
         getX86MaskVec(Builder, CI->getArgOperand(2),
                       cast<FixedVectorType>(ResultTy)->getNumElements());
-    Function *CSt = Intrinsic::getOrInsertDeclaration(
-        F->getParent(), Intrinsic::masked_compressstore, ResultTy);
-    Rep = Builder.CreateCall(CSt, {CI->getArgOperand(1), Ptr, MaskVec});
+    Rep = Builder.CreateIntrinsic(Intrinsic::masked_compressstore, ResultTy,
+                                  {CI->getArgOperand(1), Ptr, MaskVec});
   } else if (Name.starts_with("avx512.mask.compress.") ||
             Name.starts_with("avx512.mask.expand.")) {
     auto *ResultTy = cast<FixedVectorType>(CI->getType());
@@ -2841,10 +2831,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
     bool IsCompress = Name[12] == 'c';
     Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
                                    : Intrinsic::x86_avx512_mask_expand;
-    Function *Intr =
-        Intrinsic::getOrInsertDeclaration(F->getParent(), IID, ResultTy);
-    Rep = Builder.CreateCall(Intr,
-                             {CI->getOperand(0), CI->getOperand(1), MaskVec});
+    Rep = Builder.CreateIntrinsic(
+        IID, ResultTy, {CI->getOperand(0), CI->getOperand(1), MaskVec});
   } else if (Name.starts_with("xop.vpcom")) {
     bool IsSigned;
     if (Name.ends_with("ub") || Name.ends_with("uw") || Name.ends_with("ud") ||
@@ -2905,11 +2893,10 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
     bool ZeroMask = Name[11] == 'z';
     Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask);
   } else if (Name == "sse42.crc32.64.8") {
-    Function *CRC32 = Intrinsic::getOrInsertDeclaration(
-        F->getParent(), Intrinsic::x86_sse42_crc32_32_8);
     Value *Trunc0 =
         Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
-    Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
+    Rep = Builder.CreateIntrinsic(Intrinsic::x86_sse42_crc32_32_8, {},
+                                  {Trunc0, CI->getArgOperand(1)});
    Rep = Builder.CreateZExt(Rep, CI->getType(), "");
   } else if (Name.starts_with("avx.vbroadcast.s") ||
              Name.starts_with("avx512.vbroadcast.s")) {
@@ -3769,12 +3756,9 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
         IID = Intrinsic::x86_avx512_vfmadd_f64;
       else
         IID = Intrinsic::x86_avx512_vfmadd_f32;
-      Function *FMA = Intrinsic::getOrInsertDeclaration(CI->getModule(), IID);
-      Rep = Builder.CreateCall(FMA, Ops);
+      Rep = Builder.CreateIntrinsic(IID, {}, Ops);
     } else {
-      Function *FMA = Intrinsic::getOrInsertDeclaration(
-          CI->getModule(), Intrinsic::fma, A->getType());
-      Rep = Builder.CreateCall(FMA, {A, B, C});
+      Rep = Builder.CreateIntrinsic(Intrinsic::fma, A->getType(), {A, B, C});
     }
     Value *PassThru = IsMaskZ ? Constant::getNullValue(Rep->getType())
@@ -3827,9 +3811,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
       Rep = Builder.CreateIntrinsic(IID, {}, {A, B, C, CI->getArgOperand(4)});
     } else {
-      Function *FMA = Intrinsic::getOrInsertDeclaration(
-          CI->getModule(), Intrinsic::fma, A->getType());
-      Rep = Builder.CreateCall(FMA, {A, B, C});
+      Rep = Builder.CreateIntrinsic(Intrinsic::fma, A->getType(), {A, B, C});
     }
     Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType())
@@ -4088,8 +4070,8 @@ static Value *upgradeAArch64IntrinsicCall(StringRef Name, CallBase *CI,
   Args[1] = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool,
                                     GoodPredTy, Args[1]);
-  Function *NewF = Intrinsic::getOrInsertDeclaration(CI->getModule(), NewID);
-  return Builder.CreateCall(NewF, Args, CI->getName());
+  return Builder.CreateIntrinsic(NewID, {}, Args, /*FMFSource=*/nullptr,
+                                 CI->getName());
 }
 static Value *upgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
@@ -4171,8 +4153,8 @@ static Value *upgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
       Ops.push_back(Op);
     }
-    Function *Fn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID, Tys);
-    return Builder.CreateCall(Fn, Ops, CI->getName());
+    return Builder.CreateIntrinsic(ID, Tys, Ops, /*FMFSource=*/nullptr,
+                                   CI->getName());
   }
   llvm_unreachable("Unknown function for ARM CallBase upgrade.");
 }

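Most of the fixed-signature x86 intrinsics above pass an empty overload-type
list: the ID alone identifies the declaration. A minimal sketch with a
hypothetical helper:

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/IntrinsicsX86.h"
  using namespace llvm;

  // llvm.x86.sse42.crc32.32.8 is not overloaded, so no types are supplied.
  static Value *emitCrc32(IRBuilder<> &B, Value *Crc, Value *Data) {
    return B.CreateIntrinsic(Intrinsic::x86_sse42_crc32_32_8, {},
                             {Crc, Data});
  }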

@@ -90,10 +90,8 @@ Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
   assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
   if (cast<ConstantInt>(Scaling)->isZero())
     return Scaling;
-  Module *M = GetInsertBlock()->getParent()->getParent();
-  Function *TheFn = Intrinsic::getOrInsertDeclaration(M, Intrinsic::vscale,
-                                                      {Scaling->getType()});
-  CallInst *CI = CreateCall(TheFn, {}, {}, Name);
+  CallInst *CI =
+      CreateIntrinsic(Intrinsic::vscale, {Scaling->getType()}, {}, {}, Name);
   return cast<ConstantInt>(Scaling)->isOne() ? CI : CreateMul(CI, Scaling);
 }
@@ -141,11 +139,8 @@ CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
                                       MDNode *NoAliasTag) {
   Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
   Type *Tys[] = {Ptr->getType(), Size->getType()};
-  Module *M = BB->getParent()->getParent();
-  Function *TheFn =
-      Intrinsic::getOrInsertDeclaration(M, Intrinsic::memset, Tys);
-  CallInst *CI = CreateCall(TheFn, Ops);
+  CallInst *CI = CreateIntrinsic(Intrinsic::memset, Tys, Ops);
   if (Align)
     cast<MemSetInst>(CI)->setDestAlignment(*Align);
@@ -170,11 +165,8 @@ CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign,
                                             MDNode *NoAliasTag) {
   Value *Ops[] = {Dst, Val, Size, getInt1(IsVolatile)};
   Type *Tys[] = {Dst->getType(), Size->getType()};
-  Module *M = BB->getParent()->getParent();
-  Function *TheFn =
-      Intrinsic::getOrInsertDeclaration(M, Intrinsic::memset_inline, Tys);
-  CallInst *CI = CreateCall(TheFn, Ops);
+  CallInst *CI = CreateIntrinsic(Intrinsic::memset_inline, Tys, Ops);
   if (DstAlign)
     cast<MemSetInlineInst>(CI)->setDestAlignment(*DstAlign);
@@ -198,11 +190,9 @@ CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
   Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
   Type *Tys[] = {Ptr->getType(), Size->getType()};
-  Module *M = BB->getParent()->getParent();
-  Function *TheFn = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::memset_element_unordered_atomic, Tys);
-  CallInst *CI = CreateCall(TheFn, Ops);
+  CallInst *CI =
+      CreateIntrinsic(Intrinsic::memset_element_unordered_atomic, Tys, Ops);
   cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);
@@ -228,10 +218,8 @@ CallInst *IRBuilderBase::CreateMemTransferInst(
          "Unexpected intrinsic ID");
   Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
   Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
-  Module *M = BB->getParent()->getParent();
-  Function *TheFn = Intrinsic::getOrInsertDeclaration(M, IntrID, Tys);
-  CallInst *CI = CreateCall(TheFn, Ops);
+  CallInst *CI = CreateIntrinsic(IntrID, Tys, Ops);
   auto* MCI = cast<MemTransferInst>(CI);
   if (DstAlign)
@@ -266,11 +254,9 @@ CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
          "Pointer alignment must be at least element size");
   Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
   Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
-  Module *M = BB->getParent()->getParent();
-  Function *TheFn = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::memcpy_element_unordered_atomic, Tys);
-  CallInst *CI = CreateCall(TheFn, Ops);
+  CallInst *CI =
+      CreateIntrinsic(Intrinsic::memcpy_element_unordered_atomic, Tys, Ops);
   // Set the alignment of the pointer args.
   auto *AMCI = cast<AtomicMemCpyInst>(CI);
@@ -382,11 +368,9 @@ CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
          "Pointer alignment must be at least element size");
   Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
   Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
-  Module *M = BB->getParent()->getParent();
-  Function *TheFn = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::memmove_element_unordered_atomic, Tys);
-  CallInst *CI = CreateCall(TheFn, Ops);
+  CallInst *CI =
+      CreateIntrinsic(Intrinsic::memmove_element_unordered_atomic, Tys, Ops);
   // Set the alignment of the pointer args.
   CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
@@ -410,27 +394,19 @@ CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
 }
 CallInst *IRBuilderBase::getReductionIntrinsic(Intrinsic::ID ID, Value *Src) {
-  Module *M = GetInsertBlock()->getParent()->getParent();
   Value *Ops[] = {Src};
   Type *Tys[] = { Src->getType() };
-  auto Decl = Intrinsic::getOrInsertDeclaration(M, ID, Tys);
-  return CreateCall(Decl, Ops);
+  return CreateIntrinsic(ID, Tys, Ops);
 }
 CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
-  Module *M = GetInsertBlock()->getParent()->getParent();
   Value *Ops[] = {Acc, Src};
-  auto Decl = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::vector_reduce_fadd, {Src->getType()});
-  return CreateCall(Decl, Ops);
+  return CreateIntrinsic(Intrinsic::vector_reduce_fadd, {Src->getType()}, Ops);
 }
 CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
-  Module *M = GetInsertBlock()->getParent()->getParent();
   Value *Ops[] = {Acc, Src};
-  auto Decl = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::vector_reduce_fmul, {Src->getType()});
-  return CreateCall(Decl, Ops);
+  return CreateIntrinsic(Intrinsic::vector_reduce_fmul, {Src->getType()}, Ops);
 }
 CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
@@ -490,10 +466,7 @@ CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
   assert(Size->getType() == getInt64Ty() &&
          "lifetime.start requires the size to be an i64");
   Value *Ops[] = { Size, Ptr };
-  Module *M = BB->getParent()->getParent();
-  Function *TheFn = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::lifetime_start, {Ptr->getType()});
-  return CreateCall(TheFn, Ops);
+  return CreateIntrinsic(Intrinsic::lifetime_start, {Ptr->getType()}, Ops);
 }
 CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
@@ -505,10 +478,7 @@ CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
   assert(Size->getType() == getInt64Ty() &&
          "lifetime.end requires the size to be an i64");
   Value *Ops[] = { Size, Ptr };
-  Module *M = BB->getParent()->getParent();
-  Function *TheFn = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::lifetime_end, {Ptr->getType()});
-  return CreateCall(TheFn, Ops);
+  return CreateIntrinsic(Intrinsic::lifetime_end, {Ptr->getType()}, Ops);
 }
 CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {
@@ -524,10 +494,7 @@ CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {
   Value *Ops[] = {Size, Ptr};
   // Fill in the single overloaded type: memory object type.
   Type *ObjectPtr[1] = {Ptr->getType()};
-  Module *M = BB->getParent()->getParent();
-  Function *TheFn = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::invariant_start, ObjectPtr);
-  return CreateCall(TheFn, Ops);
+  return CreateIntrinsic(Intrinsic::invariant_start, ObjectPtr, Ops);
 }
 static MaybeAlign getAlign(Value *Ptr) {
@@ -563,10 +530,8 @@ IRBuilderBase::CreateAssumption(Value *Cond,
 }
 Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
-  Module *M = BB->getModule();
-  auto *FnIntrinsic = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::experimental_noalias_scope_decl, {});
-  return CreateCall(FnIntrinsic, {Scope});
+  return CreateIntrinsic(Intrinsic::experimental_noalias_scope_decl, {},
+                         {Scope});
 }
 /// Create a call to a Masked Load intrinsic.
@@ -616,9 +581,7 @@ CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
                                                ArrayRef<Value *> Ops,
                                                ArrayRef<Type *> OverloadedTypes,
                                                const Twine &Name) {
-  Module *M = BB->getParent()->getParent();
-  Function *TheFn = Intrinsic::getOrInsertDeclaration(M, Id, OverloadedTypes);
-  return CreateCall(TheFn, Ops, {}, Name);
+  return CreateIntrinsic(Id, OverloadedTypes, Ops, {}, Name);
 }
 /// Create a call to a Masked Gather intrinsic.
@@ -875,42 +838,34 @@ InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
 CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
                                         Type *ResultType, const Twine &Name) {
   Intrinsic::ID ID = Intrinsic::experimental_gc_result;
-  Module *M = BB->getParent()->getParent();
   Type *Types[] = {ResultType};
-  Function *FnGCResult = Intrinsic::getOrInsertDeclaration(M, ID, Types);
   Value *Args[] = {Statepoint};
-  return CreateCall(FnGCResult, Args, {}, Name);
+  return CreateIntrinsic(ID, Types, Args, {}, Name);
 }
 CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
                                           int BaseOffset, int DerivedOffset,
                                           Type *ResultType, const Twine &Name) {
-  Module *M = BB->getParent()->getParent();
   Type *Types[] = {ResultType};
-  Function *FnGCRelocate = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::experimental_gc_relocate, Types);
   Value *Args[] = {Statepoint, getInt32(BaseOffset), getInt32(DerivedOffset)};
-  return CreateCall(FnGCRelocate, Args, {}, Name);
+  return CreateIntrinsic(Intrinsic::experimental_gc_relocate, Types, Args, {},
+                         Name);
 }
 CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
                                                 const Twine &Name) {
-  Module *M = BB->getParent()->getParent();
   Type *PtrTy = DerivedPtr->getType();
-  Function *FnGCFindBase = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::experimental_gc_get_pointer_base, {PtrTy, PtrTy});
-  return CreateCall(FnGCFindBase, {DerivedPtr}, {}, Name);
+  return CreateIntrinsic(Intrinsic::experimental_gc_get_pointer_base,
+                         {PtrTy, PtrTy}, {DerivedPtr}, {}, Name);
 }
 CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
                                                   const Twine &Name) {
-  Module *M = BB->getParent()->getParent();
   Type *PtrTy = DerivedPtr->getType();
-  Function *FnGCGetOffset = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::experimental_gc_get_pointer_offset, {PtrTy});
-  return CreateCall(FnGCGetOffset, {DerivedPtr}, {}, Name);
+  return CreateIntrinsic(Intrinsic::experimental_gc_get_pointer_offset, {PtrTy},
+                         {DerivedPtr}, {}, Name);
 }
 CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
@@ -1228,13 +1183,10 @@ Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
   Type *ResultType = GetElementPtrInst::getGEPReturnType(Base, IdxList);
-  Module *M = BB->getParent()->getParent();
-  Function *FnPreserveArrayAccessIndex = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});
   Value *DimV = getInt32(Dimension);
   CallInst *Fn =
-      CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
+      CreateIntrinsic(Intrinsic::preserve_array_access_index,
+                      {ResultType, BaseType}, {Base, DimV, LastIndexV});
   Fn->addParamAttr(
       0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
   if (DbgInfo)
@@ -1249,13 +1201,9 @@ Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
          "Invalid Base ptr type for preserve.union.access.index.");
   auto *BaseType = Base->getType();
-  Module *M = BB->getParent()->getParent();
-  Function *FnPreserveUnionAccessIndex = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});
   Value *DIIndex = getInt32(FieldIndex);
-  CallInst *Fn =
-      CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
+  CallInst *Fn = CreateIntrinsic(Intrinsic::preserve_union_access_index,
+                                 {BaseType, BaseType}, {Base, DIIndex});
   if (DbgInfo)
     Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
@@ -1274,13 +1222,10 @@ Value *IRBuilderBase::CreatePreserveStructAccessIndex(
   Type *ResultType =
       GetElementPtrInst::getGEPReturnType(Base, {Zero, GEPIndex});
-  Module *M = BB->getParent()->getParent();
-  Function *FnPreserveStructAccessIndex = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});
   Value *DIIndex = getInt32(FieldIndex);
-  CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
-                            {Base, GEPIndex, DIIndex});
+  CallInst *Fn =
+      CreateIntrinsic(Intrinsic::preserve_struct_access_index,
+                      {ResultType, BaseType}, {Base, GEPIndex, DIIndex});
   Fn->addParamAttr(
       0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
   if (DbgInfo)
@@ -1291,10 +1236,8 @@ Value *IRBuilderBase::CreatePreserveStructAccessIndex(
 Value *IRBuilderBase::createIsFPClass(Value *FPNum, unsigned Test) {
   ConstantInt *TestV = getInt32(Test);
-  Module *M = BB->getParent()->getParent();
-  Function *FnIsFPClass = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::is_fpclass, {FPNum->getType()});
-  return CreateCall(FnIsFPClass, {FPNum, TestV});
+  return CreateIntrinsic(Intrinsic::is_fpclass, {FPNum->getType()},
+                         {FPNum, TestV});
 }
 CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,

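CreateIntrinsic returns the CallInst itself, so the post-call fixups in this
file (alignment, parameter attributes, metadata) are untouched by the
conversion. A minimal sketch modeled on CreateMemSet above (hypothetical
helper):

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/IntrinsicInst.h"
  using namespace llvm;

  static CallInst *emitMemSet(IRBuilderBase &B, Value *Ptr, Value *Val,
                              Value *Size, MaybeAlign A) {
    CallInst *CI = B.CreateIntrinsic(Intrinsic::memset,
                                     {Ptr->getType(), Size->getType()},
                                     {Ptr, Val, Size, B.getFalse()});
    // The returned call can still be adjusted after the fact.
    if (A)
      cast<MemSetInst>(CI)->setDestAlignment(*A);
    return CI;
  }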

@@ -16457,10 +16457,9 @@ static void createTblForTrunc(TruncInst *TI, bool IsLittleEndian) {
           Builder.CreateShuffleVector(TI->getOperand(0), ShuffleLanes), VecTy));
       if (Parts.size() == 4) {
-        auto *F = Intrinsic::getOrInsertDeclaration(
-            TI->getModule(), Intrinsic::aarch64_neon_tbl4, VecTy);
         Parts.push_back(ConstantVector::get(MaskConst));
-        Results.push_back(Builder.CreateCall(F, Parts));
+        Results.push_back(
+            Builder.CreateIntrinsic(Intrinsic::aarch64_neon_tbl4, VecTy, Parts));
         Parts.clear();
       }
@@ -16487,9 +16486,8 @@ static void createTblForTrunc(TruncInst *TI, bool IsLittleEndian) {
       break;
     }
-    auto *F = Intrinsic::getOrInsertDeclaration(TI->getModule(), TblID, VecTy);
    Parts.push_back(ConstantVector::get(MaskConst));
-    Results.push_back(Builder.CreateCall(F, Parts));
+    Results.push_back(Builder.CreateIntrinsic(TblID, VecTy, Parts));
   }
   // Extract the destination vector from TBL result(s) after combining them
@@ -27252,9 +27250,9 @@ Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder,
   if (ValueTy->getPrimitiveSizeInBits() == 128) {
     Intrinsic::ID Int =
         IsAcquire ? Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp;
-    Function *Ldxr = Intrinsic::getOrInsertDeclaration(M, Int);
-    Value *LoHi = Builder.CreateCall(Ldxr, Addr, "lohi");
+    Value *LoHi =
+        Builder.CreateIntrinsic(Int, {}, Addr, /*FMFSource=*/nullptr, "lohi");
     Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
     Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
@@ -27271,11 +27269,10 @@ Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder,
   Type *Tys[] = { Addr->getType() };
   Intrinsic::ID Int =
       IsAcquire ? Intrinsic::aarch64_ldaxr : Intrinsic::aarch64_ldxr;
-  Function *Ldxr = Intrinsic::getOrInsertDeclaration(M, Int, Tys);
   const DataLayout &DL = M->getDataLayout();
   IntegerType *IntEltTy = Builder.getIntNTy(DL.getTypeSizeInBits(ValueTy));
-  CallInst *CI = Builder.CreateCall(Ldxr, Addr);
+  CallInst *CI = Builder.CreateIntrinsic(Int, Tys, Addr);
   CI->addParamAttr(0, Attribute::get(Builder.getContext(),
                                      Attribute::ElementType, IntEltTy));
   Value *Trunc = Builder.CreateTrunc(CI, IntEltTy);


@@ -481,10 +481,9 @@ Instruction *AArch64StackTagging::insertBaseTaggedPointer(
   assert(PrologueBB);
   IRBuilder<> IRB(&PrologueBB->front());
-  Function *IRG_SP = Intrinsic::getOrInsertDeclaration(
-      F->getParent(), Intrinsic::aarch64_irg_sp);
   Instruction *Base =
-      IRB.CreateCall(IRG_SP, {Constant::getNullValue(IRB.getInt64Ty())});
+      IRB.CreateIntrinsic(Intrinsic::aarch64_irg_sp, {},
+                          {Constant::getNullValue(IRB.getInt64Ty())});
   Base->setName("basetag");
   auto TargetTriple = Triple(M.getTargetTriple());
   // This ABI will make it into Android API level 35.
@@ -580,10 +579,9 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
     NextTag = (NextTag + 1) % 16;
     // Replace alloca with tagp(alloca).
     IRBuilder<> IRB(Info.AI->getNextNode());
-    Function *TagP = Intrinsic::getOrInsertDeclaration(
-        F->getParent(), Intrinsic::aarch64_tagp, {Info.AI->getType()});
     Instruction *TagPCall =
-        IRB.CreateCall(TagP, {Constant::getNullValue(Info.AI->getType()), Base,
+        IRB.CreateIntrinsic(Intrinsic::aarch64_tagp, {Info.AI->getType()},
+                            {Constant::getNullValue(Info.AI->getType()), Base,
                              ConstantInt::get(IRB.getInt64Ty(), Tag)});
     if (Info.AI->hasName())
       TagPCall->setName(Info.AI->getName() + ".tag");


@@ -407,14 +407,12 @@ Value *AMDGPUAtomicOptimizerImpl::buildReduction(IRBuilder<> &B,
                                                  Value *const Identity) const {
   Type *AtomicTy = V->getType();
   Module *M = B.GetInsertBlock()->getModule();
-  Function *UpdateDPP = Intrinsic::getOrInsertDeclaration(
-      M, Intrinsic::amdgcn_update_dpp, AtomicTy);
   // Reduce within each row of 16 lanes.
   for (unsigned Idx = 0; Idx < 4; Idx++) {
     V = buildNonAtomicBinOp(
         B, Op, V,
-        B.CreateCall(UpdateDPP,
+        B.CreateIntrinsic(Intrinsic::amdgcn_update_dpp, AtomicTy,
                      {Identity, V, B.getInt32(DPP::ROW_XMASK0 | 1 << Idx),
                       B.getInt32(0xf), B.getInt32(0xf), B.getFalse()}));
   }


@@ -576,10 +576,9 @@ bool AMDGPUCodeGenPrepareImpl::promoteUniformBitreverseToI32(
   Builder.SetCurrentDebugLocation(I.getDebugLoc());
   Type *I32Ty = getI32Ty(Builder, I.getType());
-  Function *I32 =
-      Intrinsic::getOrInsertDeclaration(Mod, Intrinsic::bitreverse, {I32Ty});
   Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
-  Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
+  Value *ExtRes =
+      Builder.CreateIntrinsic(Intrinsic::bitreverse, {I32Ty}, {ExtOp});
   Value *LShrOp =
       Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
   Value *TruncRes =
@@ -1260,9 +1259,8 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem24Impl(
   Value *FB = IsSigned ? Builder.CreateSIToFP(IB,F32Ty)
                        : Builder.CreateUIToFP(IB,F32Ty);
-  Function *RcpDecl = Intrinsic::getOrInsertDeclaration(
-      Mod, Intrinsic::amdgcn_rcp, Builder.getFloatTy());
-  Value *RCP = Builder.CreateCall(RcpDecl, { FB });
+  Value *RCP = Builder.CreateIntrinsic(Intrinsic::amdgcn_rcp,
+                                       Builder.getFloatTy(), {FB});
   Value *FQM = Builder.CreateFMul(FA, RCP);
   // fq = trunc(fqm);
@@ -1455,9 +1453,7 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem32(IRBuilder<> &Builder,
   // Initial estimate of inv(y).
   Value *FloatY = Builder.CreateUIToFP(Y, F32Ty);
-  Function *Rcp =
-      Intrinsic::getOrInsertDeclaration(Mod, Intrinsic::amdgcn_rcp, F32Ty);
-  Value *RcpY = Builder.CreateCall(Rcp, {FloatY});
+  Value *RcpY = Builder.CreateIntrinsic(Intrinsic::amdgcn_rcp, F32Ty, {FloatY});
   Constant *Scale = ConstantFP::get(F32Ty, llvm::bit_cast<float>(0x4F7FFFFE));
   Value *ScaledY = Builder.CreateFMul(RcpY, Scale);
   Value *Z = Builder.CreateFPToUI(ScaledY, I32Ty);


@@ -237,12 +237,10 @@ bool optimizeSection(ArrayRef<SmallVector<IntrinsicInst *, 4>> MergeableInsts) {
     else
       NewIntrinID = Intrinsic::amdgcn_image_msaa_load_2darraymsaa;
-    Function *NewIntrin = Intrinsic::getOrInsertDeclaration(
-        IIList.front()->getModule(), NewIntrinID, OverloadTys);
     Args[ImageDimIntr->DMaskIndex] =
         ConstantInt::get(DMask->getType(), NewMaskVal);
     Args[FragIdIndex] = ConstantInt::get(FragId->getType(), NewFragIdVal);
-    CallInst *NewCall = B.CreateCall(NewIntrin, Args);
+    CallInst *NewCall = B.CreateIntrinsic(NewIntrinID, OverloadTys, Args);
     LLVM_DEBUG(dbgs() << "Optimize: " << *NewCall << "\n");
     NewCalls.push_back(NewCall);


@@ -130,10 +130,7 @@ static std::optional<Instruction *> modifyIntrinsicCall(
   // Modify arguments and types
   Func(Args, ArgTys);
-  Function *I =
-      Intrinsic::getOrInsertDeclaration(OldIntr.getModule(), NewIntr, ArgTys);
-  CallInst *NewCall = IC.Builder.CreateCall(I, Args);
+  CallInst *NewCall = IC.Builder.CreateIntrinsic(NewIntr, ArgTys, Args);
   NewCall->takeName(&OldIntr);
   NewCall->copyMetadata(OldIntr);
   if (isa<FPMathOperator>(NewCall))
@@ -891,12 +888,11 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
       // register (which contains the bitmask of live threads). So a
       // comparison that always returns true is the same as a read of the
      // EXEC register.
-      Function *NewF = Intrinsic::getOrInsertDeclaration(
-          II.getModule(), Intrinsic::read_register, II.getType());
       Metadata *MDArgs[] = {MDString::get(II.getContext(), "exec")};
       MDNode *MD = MDNode::get(II.getContext(), MDArgs);
       Value *Args[] = {MetadataAsValue::get(II.getContext(), MD)};
-      CallInst *NewCall = IC.Builder.CreateCall(NewF, Args);
+      CallInst *NewCall = IC.Builder.CreateIntrinsic(Intrinsic::read_register,
+                                                     II.getType(), Args);
       NewCall->addFnAttr(Attribute::Convergent);
       NewCall->takeName(&II);
       return IC.replaceInstUsesWith(II, NewCall);
@@ -990,11 +986,10 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
     } else if (!Ty->isFloatTy() && !Ty->isDoubleTy() && !Ty->isHalfTy())
       break;
-    Function *NewF = Intrinsic::getOrInsertDeclaration(
-        II.getModule(), NewIID, {II.getType(), SrcLHS->getType()});
     Value *Args[] = {SrcLHS, SrcRHS,
                      ConstantInt::get(CC->getType(), SrcPred)};
-    CallInst *NewCall = IC.Builder.CreateCall(NewF, Args);
+    CallInst *NewCall = IC.Builder.CreateIntrinsic(
+        NewIID, {II.getType(), SrcLHS->getType()}, Args);
     NewCall->takeName(&II);
     return IC.replaceInstUsesWith(II, NewCall);
   }
@@ -1402,9 +1397,8 @@ static Value *simplifyAMDGCNMemoryIntrinsicDemanded(InstCombiner &IC,
     Args[0] = IC.Builder.CreateShuffleVector(II.getOperand(0), EltMask);
   }
-  Function *NewIntrin = Intrinsic::getOrInsertDeclaration(
-      II.getModule(), II.getIntrinsicID(), OverloadTys);
-  CallInst *NewCall = IC.Builder.CreateCall(NewIntrin, Args);
+  CallInst *NewCall =
+      IC.Builder.CreateIntrinsic(II.getIntrinsicID(), OverloadTys, Args);
   NewCall->takeName(&II);
   NewCall->copyMetadata(II);


@@ -529,13 +529,11 @@ public:
     // block to spare deduplicating it later.
     auto [It, Inserted] = tableKernelIndexCache.try_emplace(F);
     if (Inserted) {
-      Function *Decl = Intrinsic::getOrInsertDeclaration(
-          &M, Intrinsic::amdgcn_lds_kernel_id, {});
       auto InsertAt = F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
       IRBuilder<> Builder(&*InsertAt);
-      It->second = Builder.CreateCall(Decl, {});
+      It->second =
+          Builder.CreateIntrinsic(Intrinsic::amdgcn_lds_kernel_id, {}, {});
     }
     return It->second;

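Intrinsics with neither overloads nor operands, like amdgcn_lds_kernel_id
above, simply take two empty lists. A minimal sketch with a hypothetical
helper:

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Intrinsics.h"
  using namespace llvm;

  static CallInst *emitDoNothing(IRBuilderBase &B) {
    // No overload types and no arguments: both lists are empty.
    return B.CreateIntrinsic(Intrinsic::donothing, {}, {});
  }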

@@ -973,13 +973,10 @@ AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
   if (!IsAMDHSA) {
-    Function *LocalSizeYFn = Intrinsic::getOrInsertDeclaration(
-        Mod, Intrinsic::r600_read_local_size_y);
-    Function *LocalSizeZFn = Intrinsic::getOrInsertDeclaration(
-        Mod, Intrinsic::r600_read_local_size_z);
-    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
-    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});
+    CallInst *LocalSizeY =
+        Builder.CreateIntrinsic(Intrinsic::r600_read_local_size_y, {}, {});
+    CallInst *LocalSizeZ =
+        Builder.CreateIntrinsic(Intrinsic::r600_read_local_size_z, {}, {});
     ST.makeLIDRangeMetadata(LocalSizeY);
     ST.makeLIDRangeMetadata(LocalSizeZ);
@@ -1021,10 +1018,8 @@ AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
   //     hsa_signal_t completion_signal; // uint64_t wrapper
   //   } hsa_kernel_dispatch_packet_t
   //
-  Function *DispatchPtrFn =
-      Intrinsic::getOrInsertDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
-  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
+  CallInst *DispatchPtr =
+      Builder.CreateIntrinsic(Intrinsic::amdgcn_dispatch_ptr, {}, {});
   DispatchPtr->addRetAttr(Attribute::NoAlias);
   DispatchPtr->addRetAttr(Attribute::NonNull);
   F.removeFnAttr("amdgpu-no-dispatch-ptr");
@@ -1564,13 +1559,10 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToLDS(AllocaInst &I,
       continue;
     case Intrinsic::objectsize: {
       Value *Src = Intr->getOperand(0);
-      Function *ObjectSize = Intrinsic::getOrInsertDeclaration(
-          Mod, Intrinsic::objectsize,
-          {Intr->getType(),
-           PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS)});
-      CallInst *NewCall = Builder.CreateCall(
-          ObjectSize,
+      CallInst *NewCall = Builder.CreateIntrinsic(
+          Intrinsic::objectsize,
+          {Intr->getType(), PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS)},
          {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
       Intr->replaceAllUsesWith(NewCall);
       Intr->eraseFromParent();


@@ -1055,9 +1055,7 @@ void AMDGPUSwLowerLDS::lowerNonKernelLDSAccesses(
   SetVector<Instruction *> LDSInstructions;
   getLDSMemoryInstructions(Func, LDSInstructions);
-  Function *Decl = Intrinsic::getOrInsertDeclaration(
-      &M, Intrinsic::amdgcn_lds_kernel_id, {});
-  auto *KernelId = IRB.CreateCall(Decl, {});
+  auto *KernelId = IRB.CreateIntrinsic(Intrinsic::amdgcn_lds_kernel_id, {}, {});
   GlobalVariable *LDSBaseTable = NKLDSParams.LDSBaseTable;
   GlobalVariable *LDSOffsetTable = NKLDSParams.LDSOffsetTable;
   auto &OrdereLDSGlobals = NKLDSParams.OrdereLDSGlobals;


@@ -21141,30 +21141,26 @@ bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
 Instruction *ARMTargetLowering::makeDMB(IRBuilderBase &Builder,
                                         ARM_MB::MemBOpt Domain) const {
-  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
   // First, if the target has no DMB, see what fallback we can use.
   if (!Subtarget->hasDataBarrier()) {
     // Some ARMv6 cpus can support data barriers with an mcr instruction.
     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
     // here.
     if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) {
-      Function *MCR = Intrinsic::getOrInsertDeclaration(M, Intrinsic::arm_mcr);
      Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0),
                        Builder.getInt32(0), Builder.getInt32(7),
                        Builder.getInt32(10), Builder.getInt32(5)};
-      return Builder.CreateCall(MCR, args);
+      return Builder.CreateIntrinsic(Intrinsic::arm_mcr, {}, args);
    } else {
      // Instead of using barriers, atomic accesses on these subtargets use
      // libcalls.
      llvm_unreachable("makeDMB on a target so old that it has no barriers");
    }
  } else {
-    Function *DMB = Intrinsic::getOrInsertDeclaration(M, Intrinsic::arm_dmb);
    // Only a full system barrier exists in the M-class architectures.
    Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain;
    Constant *CDomain = Builder.getInt32(Domain);
-    return Builder.CreateCall(DMB, CDomain);
+    return Builder.CreateIntrinsic(Intrinsic::arm_dmb, {}, CDomain);
  }
 }
@@ -21417,9 +21413,9 @@ Value *ARMTargetLowering::emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
   if (ValueTy->getPrimitiveSizeInBits() == 64) {
     Intrinsic::ID Int =
         IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
-    Function *Ldrex = Intrinsic::getOrInsertDeclaration(M, Int);
-    Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");
+    Value *LoHi =
+        Builder.CreateIntrinsic(Int, {}, Addr, /*FMFSource=*/nullptr, "lohi");
     Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
     Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
@@ -21433,8 +21429,7 @@ Value *ARMTargetLowering::emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
   Type *Tys[] = { Addr->getType() };
   Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
-  Function *Ldrex = Intrinsic::getOrInsertDeclaration(M, Int, Tys);
-  CallInst *CI = Builder.CreateCall(Ldrex, Addr);
+  CallInst *CI = Builder.CreateIntrinsic(Int, Tys, Addr);
   CI->addParamAttr(
       0, Attribute::get(M->getContext(), Attribute::ElementType, ValueTy));
@@ -21460,14 +21455,13 @@ Value *ARMTargetLowering::emitStoreConditional(IRBuilderBase &Builder,
   if (Val->getType()->getPrimitiveSizeInBits() == 64) {
     Intrinsic::ID Int =
         IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
-    Function *Strex = Intrinsic::getOrInsertDeclaration(M, Int);
     Type *Int32Ty = Type::getInt32Ty(M->getContext());
     Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
     Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
     if (!Subtarget->isLittle())
       std::swap(Lo, Hi);
-    return Builder.CreateCall(Strex, {Lo, Hi, Addr});
+    return Builder.CreateIntrinsic(Int, {}, {Lo, Hi, Addr});
   }
   Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
@@ -21600,14 +21594,13 @@ bool ARMTargetLowering::lowerInterleavedLoad(
     static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
                                               Intrinsic::arm_neon_vld3,
                                               Intrinsic::arm_neon_vld4};
-    Function *VldnFunc = Intrinsic::getOrInsertDeclaration(
-        LI->getModule(), LoadInts[Factor - 2], Tys);
     SmallVector<Value *, 2> Ops;
     Ops.push_back(BaseAddr);
     Ops.push_back(Builder.getInt32(LI->getAlign().value()));
-    return Builder.CreateCall(VldnFunc, Ops, "vldN");
+    return Builder.CreateIntrinsic(LoadInts[Factor - 2], Tys, Ops,
+                                   /*FMFSource=*/nullptr, "vldN");
   } else {
     assert((Factor == 2 || Factor == 4) &&
            "expected interleave factor of 2 or 4 for MVE");
@@ -21615,12 +21608,11 @@ bool ARMTargetLowering::lowerInterleavedLoad(
         Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q;
     Type *PtrTy = Builder.getPtrTy(LI->getPointerAddressSpace());
     Type *Tys[] = {VecTy, PtrTy};
-    Function *VldnFunc =
-        Intrinsic::getOrInsertDeclaration(LI->getModule(), LoadInts, Tys);
     SmallVector<Value *, 2> Ops;
     Ops.push_back(BaseAddr);
-    return Builder.CreateCall(VldnFunc, Ops, "vldN");
+    return Builder.CreateIntrinsic(LoadInts, Tys, Ops, /*FMFSource=*/nullptr,
+                                   "vldN");
   }
 };
@@ -21761,14 +21753,11 @@ bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
     Type *PtrTy = Builder.getPtrTy(SI->getPointerAddressSpace());
     Type *Tys[] = {PtrTy, SubVecTy};
-    Function *VstNFunc = Intrinsic::getOrInsertDeclaration(
-        SI->getModule(), StoreInts[Factor - 2], Tys);
     SmallVector<Value *, 6> Ops;
     Ops.push_back(BaseAddr);
     append_range(Ops, Shuffles);
     Ops.push_back(Builder.getInt32(SI->getAlign().value()));
-    Builder.CreateCall(VstNFunc, Ops);
+    Builder.CreateIntrinsic(StoreInts[Factor - 2], Tys, Ops);
   } else {
     assert((Factor == 2 || Factor == 4) &&
            "expected interleave factor of 2 or 4 for MVE");
@@ -21776,15 +21765,13 @@ bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
        Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q;
    Type *PtrTy = Builder.getPtrTy(SI->getPointerAddressSpace());
    Type *Tys[] = {PtrTy, SubVecTy};
-    Function *VstNFunc =
-        Intrinsic::getOrInsertDeclaration(SI->getModule(), StoreInts, Tys);
    SmallVector<Value *, 6> Ops;
    Ops.push_back(BaseAddr);
    append_range(Ops, Shuffles);
    for (unsigned F = 0; F < Factor; F++) {
      Ops.push_back(Builder.getInt32(F));
-      Builder.CreateCall(VstNFunc, Ops);
+      Builder.CreateIntrinsic(StoreInts, Tys, Ops);
      Ops.pop_back();
    }
  }

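The intrinsic ID need not be a compile-time constant; the ldrex/strex paths
above select it at runtime, exactly as the old code selected a Function*. A
minimal sketch with a hypothetical helper:

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/IntrinsicsARM.h"
  using namespace llvm;

  static Value *emitLoadExclusive(IRBuilderBase &B, Value *Addr,
                                  bool IsAcquire) {
    // Pick the ID dynamically, then let the builder insert the declaration.
    Intrinsic::ID Int =
        IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
    return B.CreateIntrinsic(Int, {Addr->getType()}, Addr);
  }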

@@ -401,8 +401,7 @@ void MVETailPredication::InsertVCTPIntrinsic(IntrinsicInst *ActiveLaneMask,
   case 8:  VCTPID = Intrinsic::arm_mve_vctp16; break;
   case 16: VCTPID = Intrinsic::arm_mve_vctp8; break;
   }
-  Function *VCTP = Intrinsic::getOrInsertDeclaration(M, VCTPID);
-  Value *VCTPCall = Builder.CreateCall(VCTP, Processed);
+  Value *VCTPCall = Builder.CreateIntrinsic(VCTPID, {}, Processed);
   ActiveLaneMask->replaceAllUsesWith(VCTPCall);
   // Add the incoming value to the new phi.


@@ -134,9 +134,8 @@ public:
   /// piecemeal way - we can add the casts in to avoid updating all of the uses
   /// or defs, and by the end all of the casts will be redundant.
   Value *createTmpHandleCast(Value *V, Type *Ty) {
-    Function *CastFn = Intrinsic::getOrInsertDeclaration(
-        &M, Intrinsic::dx_cast_handle, {Ty, V->getType()});
-    CallInst *Cast = OpBuilder.getIRB().CreateCall(CastFn, {V});
+    CallInst *Cast = OpBuilder.getIRB().CreateIntrinsic(
+        Intrinsic::dx_cast_handle, {Ty, V->getType()}, {V});
     CleanupCasts.push_back(Cast);
     return Cast;
   }


@@ -211,9 +211,8 @@ bool HexagonGenExtract::convert(Instruction *In) {
   IRBuilder<> IRB(In);
   Intrinsic::ID IntId = (BW == 32) ? Intrinsic::hexagon_S2_extractu
                                    : Intrinsic::hexagon_S2_extractup;
-  Module *Mod = BB->getParent()->getParent();
-  Function *ExtF = Intrinsic::getOrInsertDeclaration(Mod, IntId);
-  Value *NewIn = IRB.CreateCall(ExtF, {BF, IRB.getInt32(W), IRB.getInt32(SR)});
+  Value *NewIn =
+      IRB.CreateIntrinsic(IntId, {}, {BF, IRB.getInt32(W), IRB.getInt32(SR)});
   if (SL != 0)
     NewIn = IRB.CreateShl(NewIn, SL, CSL->getName());
   In->replaceAllUsesWith(NewIn);


@@ -3859,15 +3859,13 @@ void HexagonTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
 Value *HexagonTargetLowering::emitLoadLinked(IRBuilderBase &Builder,
                                              Type *ValueTy, Value *Addr,
                                              AtomicOrdering Ord) const {
-  BasicBlock *BB = Builder.GetInsertBlock();
-  Module *M = BB->getParent()->getParent();
   unsigned SZ = ValueTy->getPrimitiveSizeInBits();
   assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
   Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
                                    : Intrinsic::hexagon_L4_loadd_locked;
-  Function *Fn = Intrinsic::getOrInsertDeclaration(M, IntID);
-  Value *Call = Builder.CreateCall(Fn, Addr, "larx");
+  Value *Call =
+      Builder.CreateIntrinsic(IntID, {}, Addr, /*FMFSource=*/nullptr, "larx");
   return Builder.CreateBitCast(Call, ValueTy);
 }
@@ -3886,11 +3884,11 @@ Value *HexagonTargetLowering::emitStoreConditional(IRBuilderBase &Builder,
   assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
   Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
                                    : Intrinsic::hexagon_S4_stored_locked;
-  Function *Fn = Intrinsic::getOrInsertDeclaration(M, IntID);
   Val = Builder.CreateBitCast(Val, CastTy);
-  Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx");
+  Value *Call = Builder.CreateIntrinsic(IntID, {}, {Addr, Val},
+                                        /*FMFSource=*/nullptr, "stcx");
   Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
   Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));
   return Ext;


@@ -2390,9 +2390,9 @@ auto HexagonVectorCombine::vralignb(IRBuilderBase &Builder, Value *Lo,
     Type *Int64Ty = Type::getInt64Ty(F.getContext());
     Value *Lo64 = Builder.CreateBitCast(Lo, Int64Ty, "cst");
     Value *Hi64 = Builder.CreateBitCast(Hi, Int64Ty, "cst");
-    Function *FI = Intrinsic::getOrInsertDeclaration(
-        F.getParent(), Intrinsic::hexagon_S2_valignrb);
-    Value *Call = Builder.CreateCall(FI, {Hi64, Lo64, Amt}, "cup");
+    Value *Call = Builder.CreateIntrinsic(Intrinsic::hexagon_S2_valignrb, {},
+                                          {Hi64, Lo64, Amt},
+                                          /*FMFSource=*/nullptr, "cup");
     return Builder.CreateBitCast(Call, Lo->getType(), "cst");
   }
   llvm_unreachable("Unexpected vector length");
@@ -2587,9 +2587,8 @@ auto HexagonVectorCombine::createHvxIntrinsic(IRBuilderBase &Builder,
     unsigned HwLen = HST.getVectorLength();
     Intrinsic::ID TC = HwLen == 64 ? Intrinsic::hexagon_V6_pred_typecast
                                    : Intrinsic::hexagon_V6_pred_typecast_128B;
-    Function *FI = Intrinsic::getOrInsertDeclaration(F.getParent(), TC,
-                                                     {DestTy, Val->getType()});
-    return Builder.CreateCall(FI, {Val}, "cup");
+    return Builder.CreateIntrinsic(TC, {DestTy, Val->getType()}, {Val},
+                                   /*FMFSource=*/nullptr, "cup");
   };
   Function *IntrFn =


@@ -5807,10 +5807,8 @@ Value *LoongArchTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
   NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
   Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
   Type *Tys[] = {AlignedAddr->getType()};
-  Function *MaskedCmpXchg =
-      Intrinsic::getOrInsertDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
-  Value *Result = Builder.CreateCall(
-      MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, FailureOrdering});
+  Value *Result = Builder.CreateIntrinsic(
+      CmpXchgIntrID, Tys, {AlignedAddr, CmpVal, NewVal, Mask, FailureOrdering});
   Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
   return Result;
 }


@@ -12180,9 +12180,7 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
 //===----------------------------------------------------------------------===//
 static Instruction *callIntrinsic(IRBuilderBase &Builder, Intrinsic::ID Id) {
-  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
-  Function *Func = Intrinsic::getOrInsertDeclaration(M, Id);
-  return Builder.CreateCall(Func, {});
+  return Builder.CreateIntrinsic(Id, {}, {});
 }
 // The mappings for emitLeading/TrailingFence is taken from
@@ -19002,13 +19000,13 @@ Value *PPCTargetLowering::emitMaskedAtomicRMWIntrinsic(
   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
   Type *ValTy = Incr->getType();
   assert(ValTy->getPrimitiveSizeInBits() == 128);
-  Function *RMW = Intrinsic::getOrInsertDeclaration(
-      M, getIntrinsicForAtomicRMWBinOp128(AI->getOperation()));
   Type *Int64Ty = Type::getInt64Ty(M->getContext());
   Value *IncrLo = Builder.CreateTrunc(Incr, Int64Ty, "incr_lo");
   Value *IncrHi =
       Builder.CreateTrunc(Builder.CreateLShr(Incr, 64), Int64Ty, "incr_hi");
-  Value *LoHi = Builder.CreateCall(RMW, {AlignedAddr, IncrLo, IncrHi});
+  Value *LoHi = Builder.CreateIntrinsic(
+      getIntrinsicForAtomicRMWBinOp128(AI->getOperation()), {},
+      {AlignedAddr, IncrLo, IncrHi});
   Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
   Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
   Lo = Builder.CreateZExt(Lo, ValTy, "lo64");


@@ -20719,10 +20719,8 @@ Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
   }
   Type *Tys[] = {AlignedAddr->getType()};
-  Function *MaskedCmpXchg =
-      Intrinsic::getOrInsertDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
-  Value *Result = Builder.CreateCall(
-      MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
+  Value *Result = Builder.CreateIntrinsic(
+      CmpXchgIntrID, Tys, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
   if (XLen == 64)
     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
   return Result;
@@ -21335,14 +21333,11 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
   auto *XLenTy = Type::getIntNTy(LI->getContext(), Subtarget.getXLen());
-  Function *VlsegNFunc = Intrinsic::getOrInsertDeclaration(
-      LI->getModule(), FixedVlsegIntrIds[Factor - 2],
-      {VTy, LI->getPointerOperandType(), XLenTy});
   Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
-  CallInst *VlsegN =
-      Builder.CreateCall(VlsegNFunc, {LI->getPointerOperand(), VL});
+  CallInst *VlsegN = Builder.CreateIntrinsic(
+      FixedVlsegIntrIds[Factor - 2], {VTy, LI->getPointerOperandType(), XLenTy},
+      {LI->getPointerOperand(), VL});
   for (unsigned i = 0; i < Shuffles.size(); i++) {
     Value *SubVec = Builder.CreateExtractValue(VlsegN, Indices[i]);
@@ -21436,11 +21431,11 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
   Type *XLenTy = Type::getIntNTy(LI->getContext(), Subtarget.getXLen());
   if (auto *FVTy = dyn_cast<FixedVectorType>(ResVTy)) {
-    Function *VlsegNFunc = Intrinsic::getOrInsertDeclaration(
-        LI->getModule(), FixedVlsegIntrIds[Factor - 2],
-        {ResVTy, LI->getPointerOperandType(), XLenTy});
     Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
-    Return = Builder.CreateCall(VlsegNFunc, {LI->getPointerOperand(), VL});
+    Return =
+        Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2],
+                                {ResVTy, LI->getPointerOperandType(), XLenTy},
+                                {LI->getPointerOperand(), VL});
   } else {
     static const Intrinsic::ID IntrIds[] = {
         Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
@@ -21456,21 +21451,19 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
                                           NumElts * SEW / 8),
         Factor);
-    Function *VlsegNFunc = Intrinsic::getOrInsertDeclaration(
-        LI->getModule(), IntrIds[Factor - 2], {VecTupTy, XLenTy});
     Value *VL = Constant::getAllOnesValue(XLenTy);
-    Value *Vlseg = Builder.CreateCall(
-        VlsegNFunc, {PoisonValue::get(VecTupTy), LI->getPointerOperand(), VL,
+    Value *Vlseg = Builder.CreateIntrinsic(
+        IntrIds[Factor - 2], {VecTupTy, XLenTy},
+        {PoisonValue::get(VecTupTy), LI->getPointerOperand(), VL,
         ConstantInt::get(XLenTy, Log2_64(SEW))});
     SmallVector<Type *, 2> AggrTypes{Factor, ResVTy};
     Return = PoisonValue::get(StructType::get(LI->getContext(), AggrTypes));
-    Function *VecExtractFunc = Intrinsic::getOrInsertDeclaration(
-        LI->getModule(), Intrinsic::riscv_tuple_extract, {ResVTy, VecTupTy});
     for (unsigned i = 0; i < Factor; ++i) {
-      Value *VecExtract =
-          Builder.CreateCall(VecExtractFunc, {Vlseg, Builder.getInt32(i)});
+      Value *VecExtract = Builder.CreateIntrinsic(
+          Intrinsic::riscv_tuple_extract, {ResVTy, VecTupTy},
+          {Vlseg, Builder.getInt32(i)});
       Return = Builder.CreateInsertValue(Return, VecExtract, i);
     }
   }
@@ -21502,11 +21495,10 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
   Type *XLenTy = Type::getIntNTy(SI->getContext(), Subtarget.getXLen());
   if (auto *FVTy = dyn_cast<FixedVectorType>(InVTy)) {
-    Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
-        SI->getModule(), FixedVssegIntrIds[Factor - 2],
-        {InVTy, SI->getPointerOperandType(), XLenTy});
     Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
-    Builder.CreateCall(VssegNFunc, {II->getArgOperand(0), II->getArgOperand(1),
+    Builder.CreateIntrinsic(FixedVssegIntrIds[Factor - 2],
+                            {InVTy, SI->getPointerOperandType(), XLenTy},
+                            {II->getArgOperand(0), II->getArgOperand(1),
                             SI->getPointerOperand(), VL});
   } else {
     static const Intrinsic::ID IntrIds[] = {
@@ -21528,13 +21520,11 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
     Value *VL = Constant::getAllOnesValue(XLenTy);
-    Function *VecInsertFunc = Intrinsic::getOrInsertDeclaration(
-        SI->getModule(), Intrinsic::riscv_tuple_insert, {VecTupTy, InVTy});
     Value *StoredVal = PoisonValue::get(VecTupTy);
     for (unsigned i = 0; i < Factor; ++i)
-      StoredVal =
-          Builder.CreateCall(VecInsertFunc, {StoredVal, II->getArgOperand(i),
-                                             Builder.getInt32(i)});
+      StoredVal = Builder.CreateIntrinsic(
+          Intrinsic::riscv_tuple_insert, {VecTupTy, InVTy},
+          {StoredVal, II->getArgOperand(i), Builder.getInt32(i)});
     Builder.CreateCall(VssegNFunc, {StoredVal, SI->getPointerOperand(), VL,
                                     ConstantInt::get(XLenTy, Log2_64(SEW))});


@@ -366,11 +366,10 @@ bool SystemZTDCPass::runOnFunction(Function &F) {
     if (!Worthy)
       continue;
     // Call the intrinsic, compare result with 0.
-    Function *TDCFunc = Intrinsic::getOrInsertDeclaration(
-        &M, Intrinsic::s390_tdc, V->getType());
     IRBuilder<> IRB(I);
     Value *MaskVal = ConstantInt::get(Type::getInt64Ty(Ctx), Mask);
-    Instruction *TDC = IRB.CreateCall(TDCFunc, {V, MaskVal});
+    Instruction *TDC =
+        IRB.CreateIntrinsic(Intrinsic::s390_tdc, V->getType(), {V, MaskVal});
     Value *ICmp = IRB.CreateICmp(CmpInst::ICMP_NE, TDC, Zero32);
     I->replaceAllUsesWith(ICmp);
   }


@@ -31190,7 +31190,6 @@ void X86TargetLowering::emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
   LLVMContext &Ctx = AI->getContext();
   Value *Addr = Builder.CreatePointerCast(AI->getPointerOperand(),
                                           PointerType::getUnqual(Ctx));
-  Function *BitTest = nullptr;
   Value *Result = nullptr;
   auto BitTested = FindSingleBitChange(AI->getValOperand());
   assert(BitTested.first != nullptr);
@@ -31198,15 +31197,10 @@ void X86TargetLowering::emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
   if (BitTested.second == ConstantBit || BitTested.second == NotConstantBit) {
     auto *C = cast<ConstantInt>(I->getOperand(I->getOperand(0) == AI ? 1 : 0));
-    BitTest = Intrinsic::getOrInsertDeclaration(AI->getModule(), IID_C,
-                                                AI->getType());
     unsigned Imm = llvm::countr_zero(C->getZExtValue());
-    Result = Builder.CreateCall(BitTest, {Addr, Builder.getInt8(Imm)});
+    Result = Builder.CreateIntrinsic(IID_C, AI->getType(),
+                                     {Addr, Builder.getInt8(Imm)});
   } else {
-    BitTest = Intrinsic::getOrInsertDeclaration(AI->getModule(), IID_I,
-                                                AI->getType());
     assert(BitTested.second == ShiftBit || BitTested.second == NotShiftBit);
     Value *SI = BitTested.first;
@@ -31223,7 +31217,7 @@ void X86TargetLowering::emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
     // << (X % sizeof_bits(X)) we can drop the shift mask and AGEN in
     // favor of just a raw BT{S|R|C}.
-    Result = Builder.CreateCall(BitTest, {Addr, BitPos});
+    Result = Builder.CreateIntrinsic(IID_I, AI->getType(), {Addr, BitPos});
     Result = Builder.CreateZExtOrTrunc(Result, AI->getType());
     // If the result is only used for zero/non-zero status then we don't need to
@@ -31364,12 +31358,11 @@ void X86TargetLowering::emitCmpArithAtomicRMWIntrinsic(
     IID = Intrinsic::x86_atomic_xor_cc;
     break;
   }
-  Function *CmpArith =
-      Intrinsic::getOrInsertDeclaration(AI->getModule(), IID, AI->getType());
   Value *Addr = Builder.CreatePointerCast(AI->getPointerOperand(),
                                           PointerType::getUnqual(Ctx));
-  Value *Call = Builder.CreateCall(
-      CmpArith, {Addr, AI->getValOperand(), Builder.getInt32((unsigned)CC)});
+  Value *Call = Builder.CreateIntrinsic(
+      IID, AI->getType(),
+      {Addr, AI->getValOperand(), Builder.getInt32((unsigned)CC)});
   Value *Result = Builder.CreateTrunc(Call, Type::getInt1Ty(Ctx));
   ICI->replaceAllUsesWith(Result);
   ICI->eraseFromParent();

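With CreateIntrinsic there is no Function* to hoist above the branches, which
is why the shared BitTest variable disappears above: each arm now passes its
own ID directly. A minimal sketch of that shape (hypothetical helper and
condition, using the generic ctlz/cttz intrinsics):

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Intrinsics.h"
  using namespace llvm;

  static Value *emitCountZeros(IRBuilderBase &B, bool Leading, Value *X) {
    // Each branch supplies its ID; no shared declaration is needed.
    Intrinsic::ID IID = Leading ? Intrinsic::ctlz : Intrinsic::cttz;
    return B.CreateIntrinsic(IID, {X->getType()}, {X, B.getFalse()});
  }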

@ -1875,10 +1875,7 @@ static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0,
// If we were an EXTRQ call, we'll save registers if we convert to EXTRQI.
if (II.getIntrinsicID() == Intrinsic::x86_sse4a_extrq) {
Value *Args[] = {Op0, CILength, CIIndex};
Module *M = II.getModule();
Function *F =
Intrinsic::getOrInsertDeclaration(M, Intrinsic::x86_sse4a_extrqi);
return Builder.CreateCall(F, Args);
return Builder.CreateIntrinsic(Intrinsic::x86_sse4a_extrqi, {}, Args);
}
}
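
The register saving comes from the immediate encoding: EXTRQI takes the length and index as i8 immediates instead of packed into a second vector register. A sketch of the rewritten call, with Len and Idx standing for the constants computed above:

//   %r = call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %x, i8 Len, i8 Idx)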
@ -1975,10 +1972,7 @@ static Value *simplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1,
Constant *CIIndex = ConstantInt::get(IntTy8, Index, false);
Value *Args[] = {Op0, Op1, CILength, CIIndex};
Module *M = II.getModule();
Function *F =
Intrinsic::getOrInsertDeclaration(M, Intrinsic::x86_sse4a_insertqi);
return Builder.CreateCall(F, Args);
return Builder.CreateIntrinsic(Intrinsic::x86_sse4a_insertqi, {}, Args);
}
return nullptr;

@ -157,9 +157,7 @@ bool XCoreLowerThreadLocal::lowerGlobal(GlobalVariable *GV) {
for (User *U : Users) {
Instruction *Inst = cast<Instruction>(U);
IRBuilder<> Builder(Inst);
Function *GetID = Intrinsic::getOrInsertDeclaration(GV->getParent(),
Intrinsic::xcore_getid);
Value *ThreadID = Builder.CreateCall(GetID, {});
Value *ThreadID = Builder.CreateIntrinsic(Intrinsic::xcore_getid, {}, {});
Value *Addr = Builder.CreateInBoundsGEP(NewGV->getValueType(), NewGV,
{Builder.getInt64(0), ThreadID});
U->replaceUsesOfWith(GV, Addr);
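
For context, the pass turns the thread-local global into a small per-thread array and rewrites each access to index it by hardware thread ID. A sketch of a lowered access, assuming the pass's default maximum of 8 threads and writing T for the original value type:

//   %tid  = call i32 @llvm.xcore.getid()
//   %addr = getelementptr inbounds [8 x T], ptr @g, i64 0, i32 %tid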

@ -172,9 +172,8 @@ static bool foldGuardedFunnelShift(Instruction &I, const DominatorTree &DT) {
// %cond = phi i32 [ %fsh, %FunnelBB ], [ %ShVal0, %GuardBB ]
// -->
// llvm.fshl.i32(i32 %ShVal0, i32 %ShVal1, i32 %ShAmt)
Function *F =
Intrinsic::getOrInsertDeclaration(Phi.getModule(), IID, Phi.getType());
Phi.replaceAllUsesWith(Builder.CreateCall(F, {ShVal0, ShVal1, ShAmt}));
Phi.replaceAllUsesWith(
Builder.CreateIntrinsic(IID, Phi.getType(), {ShVal0, ShVal1, ShAmt}));
return true;
}
@ -332,9 +331,8 @@ static bool tryToRecognizePopCount(Instruction &I) {
m_SpecificInt(Mask55)))) {
LLVM_DEBUG(dbgs() << "Recognized popcount intrinsic\n");
IRBuilder<> Builder(&I);
Function *Func = Intrinsic::getOrInsertDeclaration(
I.getModule(), Intrinsic::ctpop, I.getType());
I.replaceAllUsesWith(Builder.CreateCall(Func, {Root}));
I.replaceAllUsesWith(
Builder.CreateIntrinsic(Intrinsic::ctpop, I.getType(), {Root}));
++NumPopCountRecognized;
return true;
}
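
The idiom being replaced by a single llvm.ctpop is the classic SWAR popcount; an illustrative C++ rendering of the dataflow the matcher walks (my sketch, not code from the pass; the comments name the constants the matcher checks):

#include <cstdint>

uint32_t popcount32(uint32_t V) {
  V = V - ((V >> 1) & 0x55555555);                // Mask55
  V = (V & 0x33333333) + ((V >> 2) & 0x33333333); // Mask33
  V = (V + (V >> 4)) & 0x0F0F0F0F;                // Mask0F
  return (V * 0x01010101) >> 24;                  // Mask01 multiply, high byte
}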
@ -399,9 +397,8 @@ static bool tryToFPToSat(Instruction &I, TargetTransformInfo &TTI) {
return false;
IRBuilder<> Builder(&I);
Function *Fn = Intrinsic::getOrInsertDeclaration(
I.getModule(), Intrinsic::fptosi_sat, {SatTy, FpTy});
Value *Sat = Builder.CreateCall(Fn, In);
Value *Sat =
Builder.CreateIntrinsic(Intrinsic::fptosi_sat, {SatTy, FpTy}, In);
I.replaceAllUsesWith(Builder.CreateSExt(Sat, IntTy));
return true;
}
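
The two overload types are the saturating result type and the floating-point source type, and the trailing sext restores the original integer width. Sketched for a float source saturated through i16 back to i32:

//   %sat = call i16 @llvm.fptosi.sat.i16.f32(float %in)
//   %res = sext i16 %sat to i32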
@ -412,9 +409,6 @@ static bool tryToFPToSat(Instruction &I, TargetTransformInfo &TTI) {
static bool foldSqrt(CallInst *Call, LibFunc Func, TargetTransformInfo &TTI,
TargetLibraryInfo &TLI, AssumptionCache &AC,
DominatorTree &DT) {
Module *M = Call->getModule();
// If (1) this is a sqrt libcall, (2) we can assume that NAN is not created
// (because NNAN or the operand arg must not be less than -0.0) and (3) we
// would not end up lowering to a libcall anyway (which could change the value
@ -432,8 +426,8 @@ static bool foldSqrt(CallInst *Call, LibFunc Func, TargetTransformInfo &TTI,
IRBuilderBase::FastMathFlagGuard Guard(Builder);
Builder.setFastMathFlags(Call->getFastMathFlags());
Function *Sqrt = Intrinsic::getOrInsertDeclaration(M, Intrinsic::sqrt, Ty);
Value *NewSqrt = Builder.CreateCall(Sqrt, Arg, "sqrt");
Value *NewSqrt = Builder.CreateIntrinsic(Intrinsic::sqrt, Ty, Arg,
/*FMFSource=*/nullptr, "sqrt");
Call->replaceAllUsesWith(NewSqrt);
// Explicitly erase the old call because a call with side effects is not

@ -125,12 +125,11 @@ void CrossDSOCFI::buildCFICheck(Module &M) {
ConstantInt *CaseTypeId = ConstantInt::get(Type::getInt64Ty(Ctx), TypeId);
BasicBlock *TestBB = BasicBlock::Create(Ctx, "test", F);
IRBuilder<> IRBTest(TestBB);
Function *BitsetTestFn =
Intrinsic::getOrInsertDeclaration(&M, Intrinsic::type_test);
Value *Test = IRBTest.CreateCall(
BitsetTestFn, {&Addr, MetadataAsValue::get(
Ctx, ConstantAsMetadata::get(CaseTypeId))});
Value *Test = IRBTest.CreateIntrinsic(
Intrinsic::type_test, {},
{&Addr,
MetadataAsValue::get(Ctx, ConstantAsMetadata::get(CaseTypeId))});
BranchInst *BI = IRBTest.CreateCondBr(Test, ExitBB, TrapBB);
BI->setMetadata(LLVMContext::MD_prof, VeryLikelyWeights);
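
llvm.type.test takes the candidate pointer plus the type identifier wrapped as metadata and yields the i1 that feeds the conditional branch. Sketched, with TypeId standing for the i64 case constant:

//   %ok = call i1 @llvm.type.test(ptr %addr, metadata i64 TypeId)
//   br i1 %ok, label %exit, label %trap   ; with the "very likely" !prof weights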

@ -3105,8 +3105,7 @@ static Instruction *matchOrConcat(Instruction &Or,
Value *NewUpper = Builder.CreateZExt(Hi, Ty);
NewUpper = Builder.CreateShl(NewUpper, HalfWidth);
Value *BinOp = Builder.CreateOr(NewLower, NewUpper);
Function *F = Intrinsic::getOrInsertDeclaration(Or.getModule(), id, Ty);
return Builder.CreateCall(F, BinOp);
return Builder.CreateIntrinsic(id, Ty, BinOp);
};
// BSWAP: Push the concat down, swapping the lower/upper sources.
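
The swap is justified by a byte-level identity, sketched here for the bswap case with an i64 built from two i32 halves (bitreverse follows the same shape bit-wise):

// If X == (zext(Hi) << 32) | zext(Lo), then
//   bswap(X) == (zext(bswap(Lo)) << 32) | zext(bswap(Hi))
// so the concat is pushed down with the lower/upper sources exchanged.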

@ -647,9 +647,8 @@ static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
// ctpop(x | -x) -> bitwidth - cttz(x, false)
if (Op0->hasOneUse() &&
match(Op0, m_c_Or(m_Value(X), m_Neg(m_Deferred(X))))) {
Function *F =
Intrinsic::getOrInsertDeclaration(II.getModule(), Intrinsic::cttz, Ty);
auto *Cttz = IC.Builder.CreateCall(F, {X, IC.Builder.getFalse()});
auto *Cttz = IC.Builder.CreateIntrinsic(Intrinsic::cttz, Ty,
{X, IC.Builder.getFalse()});
auto *Bw = ConstantInt::get(Ty, APInt(BitWidth, BitWidth));
return IC.replaceInstUsesWith(II, IC.Builder.CreateSub(Bw, Cttz));
}
@ -1182,11 +1181,9 @@ Instruction *InstCombinerImpl::matchSAddSubSat(IntrinsicInst &MinMax1) {
return nullptr;
// Finally create and return the sat intrinsic, truncated to the new type
Function *F = Intrinsic::getOrInsertDeclaration(MinMax1.getModule(),
IntrinsicID, NewTy);
Value *AT = Builder.CreateTrunc(AddSub->getOperand(0), NewTy);
Value *BT = Builder.CreateTrunc(AddSub->getOperand(1), NewTy);
Value *Sat = Builder.CreateCall(F, {AT, BT});
Value *Sat = Builder.CreateIntrinsic(IntrinsicID, NewTy, {AT, BT});
return CastInst::Create(Instruction::SExt, Sat, Ty);
}

@ -4790,12 +4790,10 @@ Value *InstCombinerImpl::foldMultiplicationOverflowCheck(ICmpInst &I) {
if (MulHadOtherUses)
Builder.SetInsertPoint(Mul);
Function *F = Intrinsic::getOrInsertDeclaration(
I.getModule(),
CallInst *Call = Builder.CreateIntrinsic(
Div->getOpcode() == Instruction::UDiv ? Intrinsic::umul_with_overflow
: Intrinsic::smul_with_overflow,
X->getType());
CallInst *Call = Builder.CreateCall(F, {X, Y}, "mul");
X->getType(), {X, Y}, /*FMFSource=*/nullptr, "mul");
// If the multiplication was used elsewhere, to ensure that we don't leave
// "duplicate" instructions, replace uses of that original multiplication
@ -6334,9 +6332,9 @@ static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
MulA = Builder.CreateZExt(A, MulType);
if (WidthB < MulWidth)
MulB = Builder.CreateZExt(B, MulType);
Function *F = Intrinsic::getOrInsertDeclaration(
I.getModule(), Intrinsic::umul_with_overflow, MulType);
CallInst *Call = Builder.CreateCall(F, {MulA, MulB}, "umul");
CallInst *Call =
Builder.CreateIntrinsic(Intrinsic::umul_with_overflow, MulType,
{MulA, MulB}, /*FMFSource=*/nullptr, "umul");
IC.addToWorklist(MulInstr);
// If there are uses of mul result other than the comparison, we know that

@ -1109,11 +1109,8 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
// alloca. We have a special @llvm.get.dynamic.area.offset intrinsic for
// this purpose.
if (!isa<ReturnInst>(InstBefore)) {
Function *DynamicAreaOffsetFunc = Intrinsic::getOrInsertDeclaration(
InstBefore->getModule(), Intrinsic::get_dynamic_area_offset,
{IntptrTy});
Value *DynamicAreaOffset = IRB.CreateCall(DynamicAreaOffsetFunc, {});
Value *DynamicAreaOffset = IRB.CreateIntrinsic(
Intrinsic::get_dynamic_area_offset, {IntptrTy}, {});
DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
DynamicAreaOffset);
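
llvm.get.dynamic.area.offset is overloaded on the pointer-width integer type and takes no operands, hence the empty argument list. Sketched for a 64-bit target:

//   %off = call i64 @llvm.get.dynamic.area.offset.i64()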

@ -194,14 +194,13 @@ static bool addBoundsChecking(Function &F, TargetLibraryInfo &TLI,
IRB.SetInsertPoint(TrapBB);
Intrinsic::ID IntrID = DebugTrapBB ? Intrinsic::ubsantrap : Intrinsic::trap;
auto *F = Intrinsic::getOrInsertDeclaration(Fn->getParent(), IntrID);
CallInst *TrapCall;
if (DebugTrapBB) {
TrapCall =
IRB.CreateCall(F, ConstantInt::get(IRB.getInt8Ty(), Fn->size()));
TrapCall = IRB.CreateIntrinsic(
IntrID, {}, ConstantInt::get(IRB.getInt8Ty(), Fn->size()));
} else {
TrapCall = IRB.CreateCall(F, {});
TrapCall = IRB.CreateIntrinsic(IntrID, {}, {});
}
TrapCall->setDoesNotReturn();
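
The branch exists because the two intrinsics differ in arity: llvm.ubsantrap carries an i8 immediate (used here to encode Fn->size()), while llvm.trap takes no operands. Sketched:

//   call void @llvm.ubsantrap(i8 N)   ; N = Fn->size() in this pass
//   call void @llvm.trap()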

@ -2853,9 +2853,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *S2Conv =
IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), S2->getType());
Value *V2 = I.getOperand(2);
Function *Intrin = Intrinsic::getOrInsertDeclaration(
I.getModule(), I.getIntrinsicID(), S2Conv->getType());
Value *Shift = IRB.CreateCall(Intrin, {S0, S1, V2});
Value *Shift = IRB.CreateIntrinsic(I.getIntrinsicID(), S2Conv->getType(),
{S0, S1, V2});
setShadow(&I, IRB.CreateOr(Shift, S2Conv));
setOriginForNaryOp(I);
}
@ -3057,9 +3056,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRBuilder<> IRB(&I);
Value *Op = I.getArgOperand(0);
Type *OpType = Op->getType();
Function *BswapFunc = Intrinsic::getOrInsertDeclaration(
F.getParent(), Intrinsic::bswap, ArrayRef(&OpType, 1));
setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
setShadow(&I, IRB.CreateIntrinsic(Intrinsic::bswap, ArrayRef(&OpType, 1),
getShadow(Op)));
setOrigin(&I, getOrigin(Op));
}
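
Since bswap is a pure permutation of bytes, the shadow propagates by applying the same bswap to the operand's shadow, which is what the rewritten call does. Sketched for an i64 operand:

//   %shadow.out = call i64 @llvm.bswap.i64(i64 %shadow.in)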
@ -3287,11 +3285,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
S2_ext = IRB.CreateBitCast(S2_ext, getMMXVectorTy(64));
}
Function *ShadowFn = Intrinsic::getOrInsertDeclaration(
F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
Value *S =
IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
Value *S = IRB.CreateIntrinsic(getSignedPackIntrinsic(I.getIntrinsicID()),
{}, {S1_ext, S2_ext}, /*FMFSource=*/nullptr,
"_msprop_vector_pack");
if (MMXEltSizeInBits)
S = IRB.CreateBitCast(S, getShadowTy(&I));
setShadow(&I, S);

@ -1056,11 +1056,10 @@ void ModuleSanitizerCoverage::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
if (Options.StackDepth && IsEntryBB && !IsLeafFunc) {
// Check stack depth. If it's the deepest so far, record it.
Module *M = F.getParent();
Function *GetFrameAddr = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::frameaddress,
IRB.getPtrTy(M->getDataLayout().getAllocaAddrSpace()));
auto FrameAddrPtr =
IRB.CreateCall(GetFrameAddr, {Constant::getNullValue(Int32Ty)});
auto FrameAddrPtr = IRB.CreateIntrinsic(
Intrinsic::frameaddress,
IRB.getPtrTy(M->getDataLayout().getAllocaAddrSpace()),
{Constant::getNullValue(Int32Ty)});
auto FrameAddrInt = IRB.CreatePtrToInt(FrameAddrPtr, IntptrTy);
auto LowestStack = IRB.CreateLoad(IntptrTy, SanCovLowestStack);
auto IsStackLower = IRB.CreateICmpULT(FrameAddrInt, LowestStack);

@ -403,15 +403,11 @@ bool LoopDataPrefetch::runOnLoop(Loop *L) {
Value *PrefPtrValue = SCEVE.expandCodeFor(NextLSCEV, I8Ptr, P.InsertPt);
IRBuilder<> Builder(P.InsertPt);
Module *M = BB->getParent()->getParent();
Type *I32 = Type::getInt32Ty(BB->getContext());
Function *PrefetchFunc = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::prefetch, PrefPtrValue->getType());
Builder.CreateCall(
PrefetchFunc,
{PrefPtrValue,
ConstantInt::get(I32, P.Writes),
ConstantInt::get(I32, 3), ConstantInt::get(I32, 1)});
Builder.CreateIntrinsic(Intrinsic::prefetch, PrefPtrValue->getType(),
{PrefPtrValue, ConstantInt::get(I32, P.Writes),
ConstantInt::get(I32, 3),
ConstantInt::get(I32, 1)});
++NumPrefetches;
LLVM_DEBUG(dbgs() << " Access: "
<< *P.MemI->getOperand(isa<LoadInst>(P.MemI) ? 0 : 1)
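
For reference, the four llvm.prefetch operands in the order passed above, per the LangRef (the .p0 suffix reflects the common default-address-space pointer case):

//   call void @llvm.prefetch.p0(ptr %addr,
//                               i32 W,  ; W = P.Writes: 0 = read, 1 = write
//                               i32 3,  ; temporal locality 0-3, 3 = highest
//                               i32 1)  ; 1 = data cache, 0 = instruction cache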

@ -978,10 +978,10 @@ static bool FlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
assert(match(Br->getCondition(), m_Zero()) &&
"Expected branch condition to be false");
IRBuilder<> Builder(Br);
Function *F = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::umul_with_overflow, FI.OuterTripCount->getType());
Value *Call = Builder.CreateCall(F, {FI.OuterTripCount, FI.InnerTripCount},
"flatten.mul");
Value *Call = Builder.CreateIntrinsic(
Intrinsic::umul_with_overflow, FI.OuterTripCount->getType(),
{FI.OuterTripCount, FI.InnerTripCount},
/*FMFSource=*/nullptr, "flatten.mul");
FI.NewTripCount = Builder.CreateExtractValue(Call, 0, "flatten.tripcount");
Value *Overflow = Builder.CreateExtractValue(Call, 1, "flatten.overflow");
Br->setCondition(Overflow);
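
umul.with.overflow returns an aggregate pairing the product with an overflow flag, which is why two CreateExtractValue calls follow. Sketched for i32 trip counts:

//   %flatten.mul = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %otc, i32 %itc)
//   %flatten.tripcount = extractvalue { i32, i1 } %flatten.mul, 0
//   %flatten.overflow  = extractvalue { i32, i1 } %flatten.mul, 1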

@ -2121,9 +2121,7 @@ static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
Value *Ops[] = {Val};
Type *Tys[] = {Val->getType()};
Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
Function *Func = Intrinsic::getOrInsertDeclaration(M, Intrinsic::ctpop, Tys);
CallInst *CI = IRBuilder.CreateCall(Func, Ops);
CallInst *CI = IRBuilder.CreateIntrinsic(Intrinsic::ctpop, Tys, Ops);
CI->setDebugLoc(DL);
return CI;
@ -2135,9 +2133,7 @@ static CallInst *createFFSIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
Value *Ops[] = {Val, IRBuilder.getInt1(ZeroCheck)};
Type *Tys[] = {Val->getType()};
Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
Function *Func = Intrinsic::getOrInsertDeclaration(M, IID, Tys);
CallInst *CI = IRBuilder.CreateCall(Func, Ops);
CallInst *CI = IRBuilder.CreateIntrinsic(IID, Tys, Ops);
CI->setDebugLoc(DL);
return CI;

@ -1290,9 +1290,8 @@ public:
if (AllowContraction) {
// Use fmuladd for floating point operations and let the backend decide
// if that's profitable.
Function *FMulAdd = Intrinsic::getOrInsertDeclaration(
Func.getParent(), Intrinsic::fmuladd, A->getType());
return Builder.CreateCall(FMulAdd, {A, B, Sum});
return Builder.CreateIntrinsic(Intrinsic::fmuladd, A->getType(),
{A, B, Sum});
}
NumComputeOps += getNumOps(A->getType());
Value *Mul = Builder.CreateFMul(A, B);
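
llvm.fmuladd is the contract-if-profitable form of a*b+c: the backend may emit a fused multiply-add or separate fmul/fadd. Sketched for scalar float operands (the vector element types used by the matrix lowering go through the same overload):

//   %r = call float @llvm.fmuladd.f32(float %a, float %b, float %sum)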

@ -2057,7 +2057,6 @@ void llvm::updateProfileCallee(
static void
inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind,
const SmallVectorImpl<ReturnInst *> &Returns) {
Module *Mod = CB.getModule();
assert(objcarc::isRetainOrClaimRV(RVCallKind) && "unexpected ARC function");
bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
IsUnsafeClaimRV = !IsRetainRV;
@ -2089,9 +2088,7 @@ inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind,
// call.
if (IsUnsafeClaimRV) {
Builder.SetInsertPoint(II);
Function *IFn =
Intrinsic::getOrInsertDeclaration(Mod, Intrinsic::objc_release);
Builder.CreateCall(IFn, RetOpnd, "");
Builder.CreateIntrinsic(Intrinsic::objc_release, {}, RetOpnd);
}
II->eraseFromParent();
InsertRetainCall = false;
@ -2125,9 +2122,7 @@ inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind,
// matching autoreleaseRV or an annotated call in the callee. Emit a call
// to objc_retain.
Builder.SetInsertPoint(RI);
Function *IFn =
Intrinsic::getOrInsertDeclaration(Mod, Intrinsic::objc_retain);
Builder.CreateCall(IFn, RetOpnd, "");
Builder.CreateIntrinsic(Intrinsic::objc_retain, {}, RetOpnd);
}
}
}

@ -268,12 +268,11 @@ bool isLifetimeIntrinsic(Value *V) {
Value *readRegister(IRBuilder<> &IRB, StringRef Name) {
Module *M = IRB.GetInsertBlock()->getParent()->getParent();
Function *ReadRegister = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::read_register, IRB.getIntPtrTy(M->getDataLayout()));
MDNode *MD =
MDNode::get(M->getContext(), {MDString::get(M->getContext(), Name)});
Value *Args[] = {MetadataAsValue::get(M->getContext(), MD)};
return IRB.CreateCall(ReadRegister, Args);
return IRB.CreateIntrinsic(Intrinsic::read_register,
IRB.getIntPtrTy(M->getDataLayout()), Args);
}
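
llvm.read.register is overloaded on the result width and receives the register name as metadata, which is what the MDString plumbing above builds. Sketched with a hypothetical register name; the real one arrives via Name:

//   %v = call i64 @llvm.read.register.i64(metadata !0)
//   !0 = !{!"sp"}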
Value *getPC(const Triple &TargetTriple, IRBuilder<> &IRB) {
@ -287,11 +286,9 @@ Value *getPC(const Triple &TargetTriple, IRBuilder<> &IRB) {
Value *getFP(IRBuilder<> &IRB) {
Function *F = IRB.GetInsertBlock()->getParent();
Module *M = F->getParent();
auto *GetStackPointerFn = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::frameaddress,
IRB.getPtrTy(M->getDataLayout().getAllocaAddrSpace()));
return IRB.CreatePtrToInt(
IRB.CreateCall(GetStackPointerFn,
IRB.CreateIntrinsic(Intrinsic::frameaddress,
IRB.getPtrTy(M->getDataLayout().getAllocaAddrSpace()),
{Constant::getNullValue(IRB.getInt32Ty())}),
IRB.getIntPtrTy(M->getDataLayout()));
}
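
Like the SanitizerCoverage change above, this uses llvm.frameaddress with an argument of 0, meaning the current function's frame. Sketched, assuming a 64-bit pointer-width integer for the final ptrtoint:

//   %fp    = call ptr @llvm.frameaddress.p0(i32 0)
//   %fpint = ptrtoint ptr %fp to i64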

@ -2139,10 +2139,9 @@ Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
MulV = TruncTripCount;
OfMul = ConstantInt::getFalse(MulV->getContext());
} else {
auto *MulF = Intrinsic::getOrInsertDeclaration(
Loc->getModule(), Intrinsic::umul_with_overflow, Ty);
CallInst *Mul =
Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
CallInst *Mul = Builder.CreateIntrinsic(Intrinsic::umul_with_overflow, Ty,
{AbsStep, TruncTripCount},
/*FMFSource=*/nullptr, "mul");
MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
}

@ -1958,10 +1958,9 @@ static Value *optimizeDoubleFP(CallInst *CI, IRBuilderBase &B,
// g((double) float) -> (double) gf(float)
Value *R;
if (IsIntrinsic) {
Module *M = CI->getModule();
Intrinsic::ID IID = CalleeFn->getIntrinsicID();
Function *Fn = Intrinsic::getOrInsertDeclaration(M, IID, B.getFloatTy());
R = isBinary ? B.CreateCall(Fn, V) : B.CreateCall(Fn, V[0]);
R = isBinary ? B.CreateIntrinsic(IID, B.getFloatTy(), V)
: B.CreateIntrinsic(IID, B.getFloatTy(), V[0]);
} else {
AttributeList CalleeAttrs = CalleeFn->getAttributes();
R = isBinary ? emitBinaryFloatFnCall(V[0], V[1], TLI, CalleeName, B,