llvm-project/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
Eli Friedman 7ac1c7bead Recommit [ScalarEvolution] Make getMinusSCEV() fail for unrelated pointers.
As part of making ScalarEvolution's handling of pointers consistent, we
want to forbid multiplying a pointer by -1 (or any other value). This
means we can't blindly subtract pointers.

There are a few ways we could deal with this:
1. We could completely forbid subtracting pointers in getMinusSCEV()
2. We could forbid subtracting pointers with different pointer bases
(this patch).
3. We could try to ptrtoint pointer operands.

The option in this patch is more friendly to non-integral pointers: code
that works with normal pointers will also work with non-integral
pointers. And it seems like there are very few places that actually
benefit from the third option.

As a minimal patch, the ScalarEvolution implementation of getMinusSCEV
still ends up subtracting pointers if they have the same base.  This
should eliminate the shared pointer base, but eventually we'll need to
rewrite it to avoid negating the pointer base. I plan to do this as a
separate step to allow measuring the compile-time impact.
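
For illustration: with this change, any caller that can see pointers with
different bases has to be prepared for a SCEVCouldNotCompute result,
roughly like this (a sketch; LHS and RHS are placeholder Value pointers):

  const SCEV *Diff = SE.getMinusSCEV(SE.getSCEV(LHS), SE.getSCEV(RHS));
  if (isa<SCEVCouldNotCompute>(Diff))
    return false; // Unrelated pointer bases; give up.

This is the same guard getNewAlignment() uses in the file below.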

This doesn't cause obvious functional changes in most cases; the one
case that is significantly affected is ICmpZero handling in LSR (which
is the source of almost all the test changes).  The resulting changes
seem okay to me, but suggestions welcome.  As an alternative, I tried
explicitly ptrtoint'ing the operands, but the result doesn't seem
obviously better.

I deleted the test lsr-undef-in-binop.ll because I couldn't figure out
how to repair it to test what it was actually trying to test.

Recommitting with fix to MemoryDepChecker::isDependent.

Differential Revision: https://reviews.llvm.org/D104806
2021-07-06 12:16:05 -07:00


//===----------------------- AlignmentFromAssumptions.cpp -----------------===//
// Set Load/Store Alignments From Assumptions
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a ScalarEvolution-based transformation to set
// the alignments of loads, stores, and memory intrinsics based on the truth
// expressions of assume intrinsics. The primary motivation is to handle
// complex alignment assumptions that apply to vector loads and stores that
// appear after vectorization and unrolling.
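//
// For example, a minimal sketch of the kind of input handled here (the
// bundle may also carry a third, offset operand):
//
//   call void @llvm.assume(i1 true) ["align"(i8* %ptr, i64 32)]
//
// Loads and stores through %ptr that are dominated by the assume can then
// have their alignment raised to 32.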
//
//===----------------------------------------------------------------------===//
#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#include "llvm/Transforms/Scalar/AlignmentFromAssumptions.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#define AA_NAME "alignment-from-assumptions"
#define DEBUG_TYPE AA_NAME
using namespace llvm;
STATISTIC(NumLoadAlignChanged,
          "Number of loads changed by alignment assumptions");
STATISTIC(NumStoreAlignChanged,
          "Number of stores changed by alignment assumptions");
STATISTIC(NumMemIntAlignChanged,
          "Number of memory intrinsics changed by alignment assumptions");
namespace {
struct AlignmentFromAssumptions : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid
  AlignmentFromAssumptions() : FunctionPass(ID) {
    initializeAlignmentFromAssumptionsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();

    AU.setPreservesCFG();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
  }

  AlignmentFromAssumptionsPass Impl;
};
} // end anonymous namespace
char AlignmentFromAssumptions::ID = 0;
static const char aip_name[] = "Alignment from assumptions";
INITIALIZE_PASS_BEGIN(AlignmentFromAssumptions, AA_NAME,
                      aip_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(AlignmentFromAssumptions, AA_NAME,
                    aip_name, false, false)

FunctionPass *llvm::createAlignmentFromAssumptionsPass() {
  return new AlignmentFromAssumptions();
}
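
// Format an alignment for the LLVM_DEBUG output in this file. A minimal
// sketch: DebugStr is referenced by the debug statements below, and these
// overloads match those uses.
static std::string DebugStr(const Align &A) {
  return "alignment " + std::to_string(A.value());
}

static std::string DebugStr(const MaybeAlign &MA) {
  if (MA)
    return DebugStr(*MA);
  return "none";
}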
// Given an expression for the (constant) alignment, AlignSCEV, and an
// expression for the displacement between a pointer and the aligned address,
// DiffSCEV, compute the alignment of the displaced pointer if it can be reduced
// to a constant. Using SCEV to compute alignment handles the case where
// DiffSCEV is a recurrence with constant start such that the aligned offset
// is constant. e.g. {16,+,32} % 32 -> 16.
static MaybeAlign getNewAlignmentDiff(const SCEV *DiffSCEV,
                                      const SCEV *AlignSCEV,
                                      ScalarEvolution *SE) {
  // DiffUnits = Diff % int64_t(Alignment)
  const SCEV *DiffUnitsSCEV = SE->getURemExpr(DiffSCEV, AlignSCEV);

  LLVM_DEBUG(dbgs() << "\talignment relative to " << *AlignSCEV << " is "
                    << *DiffUnitsSCEV << " (diff: " << *DiffSCEV << ")\n");

  if (const SCEVConstant *ConstDUSCEV =
          dyn_cast<SCEVConstant>(DiffUnitsSCEV)) {
    int64_t DiffUnits = ConstDUSCEV->getValue()->getSExtValue();

    // If the displacement is an exact multiple of the alignment, then the
    // displaced pointer has the same alignment as the aligned pointer, so
    // return the alignment value.
    if (!DiffUnits)
      return cast<SCEVConstant>(AlignSCEV)->getValue()->getAlignValue();

    // If the displacement is not an exact multiple, but the remainder is a
    // constant, then return this remainder (but only if it is a power of 2).
    uint64_t DiffUnitsAbs = std::abs(DiffUnits);
    if (isPowerOf2_64(DiffUnitsAbs))
      return Align(DiffUnitsAbs);
  }

  return None;
}
// There is an address given by an offset OffSCEV from AASCEV which has an
// alignment AlignSCEV. Use that information, if possible, to compute a new
// alignment for Ptr.
static Align getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
                             const SCEV *OffSCEV, Value *Ptr,
                             ScalarEvolution *SE) {
  const SCEV *PtrSCEV = SE->getSCEV(Ptr);
  // On a platform with 32-bit allocas, but 64-bit flat/global pointer sizes
  // (*cough* AMDGPU), the effective SCEV type of AASCEV and PtrSCEV
  // may disagree. Trunc/extend so they agree.
  PtrSCEV = SE->getTruncateOrZeroExtend(
      PtrSCEV, SE->getEffectiveSCEVType(AASCEV->getType()));
  const SCEV *DiffSCEV = SE->getMinusSCEV(PtrSCEV, AASCEV);
  if (isa<SCEVCouldNotCompute>(DiffSCEV))
    return Align(1);

  // On 32-bit platforms, DiffSCEV might now have type i32 -- we've always
  // sign-extended OffSCEV to i64, so make sure they agree again.
  DiffSCEV = SE->getNoopOrSignExtend(DiffSCEV, OffSCEV->getType());

  // What we really want to know is the overall offset to the aligned
  // address. This address is displaced by the provided offset.
  DiffSCEV = SE->getAddExpr(DiffSCEV, OffSCEV);

  LLVM_DEBUG(dbgs() << "AFI: alignment of " << *Ptr << " relative to "
                    << *AlignSCEV << " and offset " << *OffSCEV
                    << " using diff " << *DiffSCEV << "\n");

  if (MaybeAlign NewAlignment = getNewAlignmentDiff(DiffSCEV, AlignSCEV, SE)) {
    LLVM_DEBUG(dbgs() << "\tnew alignment: " << DebugStr(NewAlignment) << "\n");
    return *NewAlignment;
  }

  if (const SCEVAddRecExpr *DiffARSCEV = dyn_cast<SCEVAddRecExpr>(DiffSCEV)) {
    // The relative offset to the alignment assumption did not yield a constant,
    // but we should try harder: if we assume that a is 32-byte aligned, then in
    // for (i = 0; i < 1024; i += 4) r += a[i]; not all of the loads from a are
    // 32-byte aligned, but instead alternate between 32 and 16-byte alignment.
    // As a result, the new alignment will not be a constant, but can still
    // be improved over the default (of 4) to 16.

    const SCEV *DiffStartSCEV = DiffARSCEV->getStart();
    const SCEV *DiffIncSCEV = DiffARSCEV->getStepRecurrence(*SE);

    LLVM_DEBUG(dbgs() << "\ttrying start/inc alignment using start "
                      << *DiffStartSCEV << " and inc " << *DiffIncSCEV << "\n");

    // Now compute the new alignment using the displacement to the value in the
    // first iteration, and also the alignment using the per-iteration delta.
    // If these are the same, then use that answer. Otherwise, use the smaller
    // one, but only if it divides the larger one.
    MaybeAlign NewAlignment = getNewAlignmentDiff(DiffStartSCEV, AlignSCEV, SE);
    MaybeAlign NewIncAlignment =
        getNewAlignmentDiff(DiffIncSCEV, AlignSCEV, SE);

    LLVM_DEBUG(dbgs() << "\tnew start alignment: " << DebugStr(NewAlignment)
                      << "\n");
    LLVM_DEBUG(dbgs() << "\tnew inc alignment: " << DebugStr(NewIncAlignment)
                      << "\n");

    if (!NewAlignment || !NewIncAlignment)
      return Align(1);

    const Align NewAlign = *NewAlignment;
    const Align NewIncAlign = *NewIncAlignment;
    if (NewAlign > NewIncAlign) {
      LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: "
                        << DebugStr(NewIncAlign) << "\n");
      return NewIncAlign;
    }
    if (NewIncAlign > NewAlign) {
      LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << DebugStr(NewAlign)
                        << "\n");
      return NewAlign;
    }
    assert(NewIncAlign == NewAlign);
    LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << DebugStr(NewAlign)
                      << "\n");
    return NewAlign;
  }

  return Align(1);
}
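
// Extract the pointer, alignment, and optional offset from the "align"
// operand bundle at index Idx of the llvm.assume call I. Returns false if
// the bundle at Idx is not an "align" bundle.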
bool AlignmentFromAssumptionsPass::extractAlignmentInfo(CallInst *I,
                                                        unsigned Idx,
                                                        Value *&AAPtr,
                                                        const SCEV *&AlignSCEV,
                                                        const SCEV *&OffSCEV) {
  Type *Int64Ty = Type::getInt64Ty(I->getContext());
  OperandBundleUse AlignOB = I->getOperandBundleAt(Idx);
  if (AlignOB.getTagName() != "align")
    return false;
  assert(AlignOB.Inputs.size() >= 2);
  AAPtr = AlignOB.Inputs[0].get();
  // TODO: Consider accumulating the offset to the base.
  AAPtr = AAPtr->stripPointerCastsSameRepresentation();
  AlignSCEV = SE->getSCEV(AlignOB.Inputs[1].get());
  AlignSCEV = SE->getTruncateOrZeroExtend(AlignSCEV, Int64Ty);
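  // The optional third bundle input is a byte offset from the pointer to the
  // aligned address; when it is absent, the pointer itself is assumed aligned.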
  if (AlignOB.Inputs.size() == 3)
    OffSCEV = SE->getSCEV(AlignOB.Inputs[2].get());
  else
    OffSCEV = SE->getZero(Int64Ty);
  OffSCEV = SE->getTruncateOrZeroExtend(OffSCEV, Int64Ty);
  return true;
}
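
// Apply the alignment assumption carried by operand bundle Idx of ACall to
// every load, store, and memory intrinsic that (transitively) uses the
// assumed-aligned pointer and for which the assumption provably holds.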
bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall,
                                                     unsigned Idx) {
  Value *AAPtr;
  const SCEV *AlignSCEV, *OffSCEV;
  if (!extractAlignmentInfo(ACall, Idx, AAPtr, AlignSCEV, OffSCEV))
    return false;

  // Skip ConstantPointerNull and UndefValue. Assumptions on these shouldn't
  // affect other users.
  if (isa<ConstantData>(AAPtr))
    return false;

  const SCEV *AASCEV = SE->getSCEV(AAPtr);

  // Apply the assumption to all other users of the specified pointer.
  SmallPtrSet<Instruction *, 32> Visited;
  SmallVector<Instruction *, 16> WorkList;
  for (User *J : AAPtr->users()) {
    if (J == ACall)
      continue;

    if (Instruction *K = dyn_cast<Instruction>(J))
      WorkList.push_back(K);
  }

  while (!WorkList.empty()) {
    Instruction *J = WorkList.pop_back_val();
    if (LoadInst *LI = dyn_cast<LoadInst>(J)) {
      if (!isValidAssumeForContext(ACall, J, DT))
        continue;
      Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
                                           LI->getPointerOperand(), SE);
      if (NewAlignment > LI->getAlign()) {
        LI->setAlignment(NewAlignment);
        ++NumLoadAlignChanged;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(J)) {
      if (!isValidAssumeForContext(ACall, J, DT))
        continue;
      Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
                                           SI->getPointerOperand(), SE);
      if (NewAlignment > SI->getAlign()) {
        SI->setAlignment(NewAlignment);
        ++NumStoreAlignChanged;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) {
      if (!isValidAssumeForContext(ACall, J, DT))
        continue;
      Align NewDestAlignment =
          getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MI->getDest(), SE);

      LLVM_DEBUG(dbgs() << "\tmem inst: " << DebugStr(NewDestAlignment)
                        << "\n";);
      if (NewDestAlignment > *MI->getDestAlign()) {
        MI->setDestAlignment(NewDestAlignment);
        ++NumMemIntAlignChanged;
      }

      // For memory transfers, there is also a source alignment that
      // can be set.
      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
        Align NewSrcAlignment =
            getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MTI->getSource(), SE);

        LLVM_DEBUG(dbgs() << "\tmem trans: " << DebugStr(NewSrcAlignment)
                          << "\n";);

        if (NewSrcAlignment > *MTI->getSourceAlign()) {
          MTI->setSourceAlignment(NewSrcAlignment);
          ++NumMemIntAlignChanged;
        }
      }
    }

    // Now that we've updated that use of the pointer, look for other uses of
    // the pointer to update.
    Visited.insert(J);
    for (User *UJ : J->users()) {
      Instruction *K = cast<Instruction>(UJ);
      if (!Visited.count(K))
        WorkList.push_back(K);
    }
  }

  return true;
}
bool AlignmentFromAssumptions::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

  return Impl.runImpl(F, AC, SE, DT);
}
bool AlignmentFromAssumptionsPass::runImpl(Function &F, AssumptionCache &AC,
                                           ScalarEvolution *SE_,
                                           DominatorTree *DT_) {
  SE = SE_;
  DT = DT_;

  bool Changed = false;
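  // The assumption cache hands out weak handles; an entry is null if its
  // llvm.assume call has since been deleted, so skip those.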
  for (auto &AssumeVH : AC.assumptions())
    if (AssumeVH) {
      CallInst *Call = cast<CallInst>(AssumeVH);
      for (unsigned Idx = 0; Idx < Call->getNumOperandBundles(); Idx++)
        Changed |= processAssumption(Call, Idx);
    }

  return Changed;
}
PreservedAnalyses
AlignmentFromAssumptionsPass::run(Function &F, FunctionAnalysisManager &AM) {
  AssumptionCache &AC = AM.getResult<AssumptionAnalysis>(F);
  ScalarEvolution &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  if (!runImpl(F, AC, &SE, &DT))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
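  // Only load/store/intrinsic alignments were changed; the CFG and the
  // ScalarEvolution results are unaffected.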
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<ScalarEvolutionAnalysis>();
  return PA;
}