llvm-project/flang/lib/Optimizer/Transforms/MemRefDataFlowOpt.cpp
Andrzej Warzynski 092601d4ba [flang] Remove `using namespace mlir;` from header files
Currently, CGOps.h and FIROps.h contain `using namespace mlir;`. Every
file that includes one of these headers (directly or transitively)
therefore has the entire MLIR namespace pulled into scope. Given the
name clashes between sub-projects (LLVM and MLIR, MLIR and Flang), this
is undesirable. Also, it is not possible to "un-use" a namespace once it
has been "used". Instead, `using namespace` should be limited to
implementation files (i.e., *.cpp).

This patch removes `using namespace mlir;` from the header files and
adjusts other files accordingly. In header and TableGen files, an
explicit namespace qualifier is added when referring to symbols defined
in MLIR. A similar approach is adopted in source files that did not
require many changes. In files where that would require a lot of
changes, `using namespace mlir;` is added instead.

Differential Revision: https://reviews.llvm.org/D120897
2022-03-09 10:19:51 +00:00
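
As a minimal sketch of the pattern the patch describes (FIROpsExample.h/.cpp
and isBlockArgument are hypothetical names, not files or symbols touched by
this change):

// FIROpsExample.h - a header refers to MLIR symbols with explicit qualifiers
// rather than relying on a `using namespace mlir;` directive.
#include "mlir/IR/Value.h"

namespace fir {
// Hypothetical helper; note the explicit mlir:: qualifier in the declaration.
bool isBlockArgument(mlir::Value v);
} // namespace fir

// FIROpsExample.cpp - the directive stays confined to the implementation
// file, so it cannot leak into other translation units.
#include "FIROpsExample.h"
using namespace mlir;

bool fir::isBlockArgument(Value v) { return v.isa<BlockArgument>(); }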


//===- MemRefDataFlowOpt.cpp - Memory DataFlow Optimization pass ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "flang/Optimizer/Dialect/FIRDialect.h"
#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/Dialect/FIRType.h"
#include "flang/Optimizer/Transforms/Passes.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Dominance.h"
#include "mlir/IR/Operation.h"
#include "mlir/Transforms/Passes.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#define DEBUG_TYPE "fir-memref-dataflow-opt"
using namespace mlir;
namespace {
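/// Collect all users of \p v that are operations of type \p OpT.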
template <typename OpT>
static std::vector<OpT> getSpecificUsers(mlir::Value v) {
std::vector<OpT> ops;
for (mlir::Operation *user : v.getUsers())
if (auto op = dyn_cast<OpT>(user))
ops.push_back(op);
return ops;
}

/// This pass is based on MLIR's MemRefDataFlowOpt, which is specialized on the
/// AffineRead and AffineWrite interfaces.
template <typename ReadOp, typename WriteOp>
class LoadStoreForwarding {
public:
LoadStoreForwarding(mlir::DominanceInfo *di) : domInfo(di) {}
// FIXME: This algorithm has a bug. It ignores escaping references between a
// store and a load.
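// For example (sketch only, FIR types omitted), no forwarding should happen
// in a sequence such as:
//   fir.store %c42 to %a
//   fir.call @may_write(%a)   // %a escapes; the callee may clobber it
//   %v = fir.load %a          // forwarding %c42 here would be wrong
//
// Find the store that dominates \p loadOp and is nearest to it, i.e. a
// dominating store that does not properly dominate any other dominating
// candidate. Returns an empty Optional when there is no dominating store or
// no unique nearest store can be determined.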
llvm::Optional<WriteOp> findStoreToForward(ReadOp loadOp,
std::vector<WriteOp> &&storeOps) {
llvm::SmallVector<WriteOp> candidateSet;
for (auto storeOp : storeOps)
if (domInfo->dominates(storeOp, loadOp))
candidateSet.push_back(storeOp);
if (candidateSet.empty())
return {};
llvm::Optional<WriteOp> nearestStore;
for (auto candidate : candidateSet) {
auto nearerThan = [&](WriteOp otherStore) {
if (candidate == otherStore)
return false;
bool rv = domInfo->properlyDominates(candidate, otherStore);
if (rv) {
LLVM_DEBUG(llvm::dbgs()
<< "candidate " << candidate << " is not the nearest to "
<< loadOp << " because " << otherStore << " is closer\n");
}
return rv;
};
if (!llvm::any_of(candidateSet, nearerThan)) {
nearestStore = mlir::cast<WriteOp>(candidate);
break;
}
}
if (!nearestStore) {
LLVM_DEBUG(
llvm::dbgs()
<< "load " << loadOp << " has " << candidateSet.size()
<< " store candidates, but this algorithm can't find a best.\n");
}
return nearestStore;
}
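
/// From \p loadOps, return the first load that is dominated by \p storeOp,
/// if any.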
llvm::Optional<ReadOp> findReadForWrite(WriteOp storeOp,
std::vector<ReadOp> &&loadOps) {
for (auto &loadOp : loadOps) {
if (domInfo->dominates(storeOp, loadOp))
return loadOp;
}
return {};
}
private:
mlir::DominanceInfo *domInfo;
};
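
/// Perform store-to-load forwarding on FIR memory operations and erase stores
/// to local allocations that dominate no remaining load.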
class MemDataFlowOpt : public fir::MemRefDataFlowOptBase<MemDataFlowOpt> {
public:
void runOnOperation() override {
mlir::FuncOp f = getOperation();
auto *domInfo = &getAnalysis<mlir::DominanceInfo>();
LoadStoreForwarding<fir::LoadOp, fir::StoreOp> lsf(domInfo);
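// Forward the value of the nearest dominating store to each load, then
// erase the load.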
f.walk([&](fir::LoadOp loadOp) {
auto maybeStore = lsf.findStoreToForward(
loadOp, getSpecificUsers<fir::StoreOp>(loadOp.getMemref()));
if (maybeStore) {
auto storeOp = maybeStore.getValue();
LLVM_DEBUG(llvm::dbgs() << "FlangMemDataFlowOpt: In " << f.getName()
<< " erasing load " << loadOp
<< " with value from " << storeOp << '\n');
loadOp.getResult().replaceAllUsesWith(storeOp.getValue());
loadOp.erase();
}
});
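
// Erase stores to local allocations that dominate no load of the same
// memory reference, i.e. whose stored value is never read.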
f.walk([&](fir::AllocaOp alloca) {
for (auto &storeOp : getSpecificUsers<fir::StoreOp>(alloca.getResult())) {
if (!lsf.findReadForWrite(
storeOp, getSpecificUsers<fir::LoadOp>(storeOp.getMemref()))) {
LLVM_DEBUG(llvm::dbgs() << "FlangMemDataFlowOpt: In " << f.getName()
<< " erasing store " << storeOp << '\n');
storeOp.erase();
}
}
});
}
};
} // namespace

std::unique_ptr<mlir::Pass> fir::createMemDataFlowOptPass() {
return std::make_unique<MemDataFlowOpt>();
}