
The MLIR classes Type/Attribute/Operation/Op/Value support
cast/dyn_cast/isa/dyn_cast_or_null functionality through llvm's doCast
functionality, in addition to defining methods with the same names. This
change begins the migration from the methods to the corresponding free
function calls, which were decided to be the more consistent style. Note that
some classes, such as AffineExpr, still only define the methods directly;
adding support for the functional cast/isa forms to them is not part of this
change.

Context:
- https://mlir.llvm.org/deprecation/ at "Use the free function variants for
  dyn_cast/cast/isa/…"
- Original discussion at
  https://discourse.llvm.org/t/preferred-casting-style-going-forward/68443

Implementation: This patch updates all remaining uses of the deprecated
functionality in mlir/. This was done with clang-tidy as described below,
plus further manual modifications to GPUBase.td and OpenMPOpsInterfaces.td.

Steps (described per line, as comments are removed by git):
0. Retrieve the change from the following branch to build clang-tidy with an
   additional check: main...tpopp:llvm-project:tidy-cast-check
1. Build clang-tidy.
2. Run clang-tidy over the entire codebase, disabling all checks and enabling
   only the relevant one. Run on all header files as well.
3. Delete the .inc files that were also modified, so the next build
   regenerates them in a pure state.

```
ninja -C $BUILD_DIR clang-tidy

run-clang-tidy -clang-tidy-binary=$BUILD_DIR/bin/clang-tidy \
  -checks='-*,misc-cast-functions' \
  -header-filter=mlir/ mlir/* -fix

rm -rf $BUILD_DIR/tools/mlir/**/*.inc
```

Differential Revision: https://reviews.llvm.org/D151542
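As a minimal sketch of what the check rewrites (`isF32Memref` is a
hypothetical helper written for illustration, not code from the patch), a call
site in the deprecated member-function style and its free-function replacement
look like this:

```
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Value.h"
#include "llvm/Support/Casting.h"

// Hypothetical helper showing the rewrite applied to a single call site.
bool isF32Memref(mlir::Value value) {
  // Deprecated member-function style, flagged by the check:
  //   auto memrefType = value.getType().dyn_cast<mlir::MemRefType>();
  // Free-function style that the check rewrites it to:
  auto memrefType = llvm::dyn_cast<mlir::MemRefType>(value.getType());
  return memrefType && memrefType.getElementType().isF32();
}
```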
//===- MemRefMemorySlot.cpp - Memory Slot Interfaces ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements Mem2Reg-related interfaces for MemRef dialect
// operations.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/MemRef/IR/MemRefMemorySlot.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/BuiltinDialect.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/InferTypeOpInterface.h"
#include "mlir/Interfaces/MemorySlotInterfaces.h"
#include "mlir/Support/LogicalResult.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/TypeSwitch.h"

using namespace mlir;

//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//

/// Walks over the indices of the elements of a tensor of a given `shape` by
/// updating `index` in place to the next index. This returns failure if the
/// provided index was the last index.
static LogicalResult nextIndex(ArrayRef<int64_t> shape,
                               MutableArrayRef<int64_t> index) {
  for (size_t i = 0; i < shape.size(); ++i) {
    index[i]++;
    if (index[i] < shape[i])
      return success();
    index[i] = 0;
  }
  return failure();
}

/// Calls `walker` for each index within a tensor of a given `shape`, providing
/// the index as an array attribute of the coordinates.
template <typename CallableT>
static void walkIndicesAsAttr(MLIRContext *ctx, ArrayRef<int64_t> shape,
                              CallableT &&walker) {
  Type indexType = IndexType::get(ctx);
  SmallVector<int64_t> shapeIter(shape.size(), 0);
  do {
    SmallVector<Attribute> indexAsAttr;
    for (int64_t dim : shapeIter)
      indexAsAttr.push_back(IntegerAttr::get(indexType, dim));
    walker(ArrayAttr::get(ctx, indexAsAttr));
  } while (succeeded(nextIndex(shape, shapeIter)));
}

//===----------------------------------------------------------------------===//
// Interfaces for AllocaOp
//===----------------------------------------------------------------------===//

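/// Returns whether an element type is supported for promotion: either another
/// memref, or any type for which a zero attribute can be built.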
static bool isSupportedElementType(Type type) {
  return llvm::isa<MemRefType>(type) ||
         OpBuilder(type.getContext()).getZeroAttr(type);
}

SmallVector<MemorySlot> memref::AllocaOp::getPromotableSlots() {
  MemRefType type = getType();
  if (!isSupportedElementType(type.getElementType()))
    return {};
  if (!type.hasStaticShape())
    return {};
  // Make sure the memref contains only a single element.
  if (type.getNumElements() != 1)
    return {};

  return {MemorySlot{getResult(), type.getElementType()}};
}

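/// Provides the default (initial) value of the promoted slot: a fresh alloca
/// for memref-typed elements, a zero constant otherwise.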
Value memref::AllocaOp::getDefaultValue(const MemorySlot &slot,
                                        RewriterBase &rewriter) {
  assert(isSupportedElementType(slot.elemType));
  // TODO: support more types.
  return TypeSwitch<Type, Value>(slot.elemType)
      .Case([&](MemRefType t) {
        return rewriter.create<memref::AllocaOp>(getLoc(), t);
      })
      .Default([&](Type t) {
        return rewriter.create<arith::ConstantOp>(getLoc(), t,
                                                  rewriter.getZeroAttr(t));
      });
}

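/// Once promotion completes, the alloca is erased, along with the materialized
/// default value if it ended up unused.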
void memref::AllocaOp::handlePromotionComplete(const MemorySlot &slot,
                                               Value defaultValue,
                                               RewriterBase &rewriter) {
  if (defaultValue.use_empty())
    rewriter.eraseOp(defaultValue.getDefiningOp());
  rewriter.eraseOp(*this);
}

void memref::AllocaOp::handleBlockArgument(const MemorySlot &slot,
                                           BlockArgument argument,
                                           RewriterBase &rewriter) {}

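/// The memref is destructurable if its type implements
/// DestructurableTypeInterface (attached to MemRefType by the external model
/// below, which also bounds the number of elements) and a subelement index map
/// can be computed for it.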
SmallVector<DestructurableMemorySlot>
memref::AllocaOp::getDestructurableSlots() {
  MemRefType memrefType = getType();
  auto destructurable = llvm::dyn_cast<DestructurableTypeInterface>(memrefType);
  if (!destructurable)
    return {};

  std::optional<DenseMap<Attribute, Type>> destructuredType =
      destructurable.getSubelementIndexMap();
  if (!destructuredType)
    return {};

  DenseMap<Attribute, Type> indexMap;
  for (auto const &[index, type] : *destructuredType)
    indexMap.insert({index, MemRefType::get({}, type)});

  return {DestructurableMemorySlot{{getMemref(), memrefType}, indexMap}};
}

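/// Destructuring creates one scalar (rank-0) alloca per index that is actually
/// used, inserted right after the original alloca.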
DenseMap<Attribute, MemorySlot>
memref::AllocaOp::destructure(const DestructurableMemorySlot &slot,
                              const SmallPtrSetImpl<Attribute> &usedIndices,
                              RewriterBase &rewriter) {
  rewriter.setInsertionPointAfter(*this);

  DenseMap<Attribute, MemorySlot> slotMap;

  auto memrefType = llvm::cast<DestructurableTypeInterface>(getType());
  for (Attribute usedIndex : usedIndices) {
    Type elemType = memrefType.getTypeAtIndex(usedIndex);
    MemRefType elemPtr = MemRefType::get({}, elemType);
    auto subAlloca = rewriter.create<memref::AllocaOp>(getLoc(), elemPtr);
    slotMap.try_emplace<MemorySlot>(usedIndex,
                                    {subAlloca.getResult(), elemType});
  }

  return slotMap;
}

void memref::AllocaOp::handleDestructuringComplete(
    const DestructurableMemorySlot &slot, RewriterBase &rewriter) {
  assert(slot.ptr == getResult());
  rewriter.eraseOp(*this);
}

//===----------------------------------------------------------------------===//
// Interfaces for LoadOp/StoreOp
//===----------------------------------------------------------------------===//

bool memref::LoadOp::loadsFrom(const MemorySlot &slot) {
  return getMemRef() == slot.ptr;
}

Value memref::LoadOp::getStored(const MemorySlot &slot) { return {}; }

bool memref::LoadOp::canUsesBeRemoved(
    const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
    SmallVectorImpl<OpOperand *> &newBlockingUses) {
  if (blockingUses.size() != 1)
    return false;
  Value blockingUse = (*blockingUses.begin())->get();
  return blockingUse == slot.ptr && getMemRef() == slot.ptr &&
         getResult().getType() == slot.elemType;
}

DeletionKind memref::LoadOp::removeBlockingUses(
    const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
    RewriterBase &rewriter, Value reachingDefinition) {
  // `canUsesBeRemoved` checked this blocking use must be the loaded slot
  // pointer.
  rewriter.replaceAllUsesWith(getResult(), reachingDefinition);
  return DeletionKind::Delete;
}

/// Returns the index of a memref in attribute form, given its indices. Returns
/// a null attribute if any of the indices is not a compile-time constant.
static Attribute getAttributeIndexFromIndexOperands(MLIRContext *ctx,
                                                    ValueRange indices) {
  SmallVector<Attribute> index;
  for (Value coord : indices) {
    IntegerAttr coordAttr;
    if (!matchPattern(coord, m_Constant<IntegerAttr>(&coordAttr)))
      return {};
    index.push_back(coordAttr);
  }
  return ArrayAttr::get(ctx, index);
}

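/// A load can only be rewired to a subslot if it targets the destructured slot
/// through compile-time constant indices identifying that subslot.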
bool memref::LoadOp::canRewire(const DestructurableMemorySlot &slot,
                               SmallPtrSetImpl<Attribute> &usedIndices,
                               SmallVectorImpl<MemorySlot> &mustBeSafelyUsed) {
  if (slot.ptr != getMemRef())
    return false;
  Attribute index =
      getAttributeIndexFromIndexOperands(getContext(), getIndices());
  if (!index)
    return false;
  usedIndices.insert(index);
  return true;
}

DeletionKind memref::LoadOp::rewire(const DestructurableMemorySlot &slot,
                                    DenseMap<Attribute, MemorySlot> &subslots,
                                    RewriterBase &rewriter) {
  Attribute index =
      getAttributeIndexFromIndexOperands(getContext(), getIndices());
  const MemorySlot &memorySlot = subslots.at(index);
  rewriter.updateRootInPlace(*this, [&]() {
    setMemRef(memorySlot.ptr);
    getIndicesMutable().clear();
  });
  return DeletionKind::Keep;
}

bool memref::StoreOp::loadsFrom(const MemorySlot &slot) { return false; }

Value memref::StoreOp::getStored(const MemorySlot &slot) {
  if (getMemRef() != slot.ptr)
    return {};
  return getValue();
}

bool memref::StoreOp::canUsesBeRemoved(
    const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
    SmallVectorImpl<OpOperand *> &newBlockingUses) {
  if (blockingUses.size() != 1)
    return false;
  Value blockingUse = (*blockingUses.begin())->get();
  return blockingUse == slot.ptr && getMemRef() == slot.ptr &&
         getValue() != slot.ptr && getValue().getType() == slot.elemType;
}

DeletionKind memref::StoreOp::removeBlockingUses(
    const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
    RewriterBase &rewriter, Value reachingDefinition) {
  return DeletionKind::Delete;
}

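/// In addition to the requirements on loads, a store is only rewireable if the
/// stored value is not the slot pointer itself and the indexed subslot exists.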
bool memref::StoreOp::canRewire(const DestructurableMemorySlot &slot,
                                SmallPtrSetImpl<Attribute> &usedIndices,
                                SmallVectorImpl<MemorySlot> &mustBeSafelyUsed) {
  if (slot.ptr != getMemRef() || getValue() == slot.ptr)
    return false;
  Attribute index =
      getAttributeIndexFromIndexOperands(getContext(), getIndices());
  if (!index || !slot.elementPtrs.contains(index))
    return false;
  usedIndices.insert(index);
  return true;
}

DeletionKind memref::StoreOp::rewire(const DestructurableMemorySlot &slot,
                                     DenseMap<Attribute, MemorySlot> &subslots,
                                     RewriterBase &rewriter) {
  Attribute index =
      getAttributeIndexFromIndexOperands(getContext(), getIndices());
  const MemorySlot &memorySlot = subslots.at(index);
  rewriter.updateRootInPlace(*this, [&]() {
    setMemRef(memorySlot.ptr);
    getIndicesMutable().clear();
  });
  return DeletionKind::Keep;
}

//===----------------------------------------------------------------------===//
// Interfaces for destructurable types
//===----------------------------------------------------------------------===//

namespace {

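/// External model attaching DestructurableTypeInterface to the builtin
/// MemRefType from outside the builtin dialect. Only statically shaped,
/// multi-element memrefs with at most 16 elements are destructurable.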
struct MemRefDestructurableTypeExternalModel
    : public DestructurableTypeInterface::ExternalModel<
          MemRefDestructurableTypeExternalModel, MemRefType> {
  std::optional<DenseMap<Attribute, Type>>
  getSubelementIndexMap(Type type) const {
    auto memrefType = llvm::cast<MemRefType>(type);
    constexpr int64_t maxMemrefSizeForDestructuring = 16;
    if (!memrefType.hasStaticShape() ||
        memrefType.getNumElements() > maxMemrefSizeForDestructuring ||
        memrefType.getNumElements() == 1)
      return {};

    DenseMap<Attribute, Type> destructured;
    walkIndicesAsAttr(
        memrefType.getContext(), memrefType.getShape(), [&](Attribute index) {
          destructured.insert({index, memrefType.getElementType()});
        });

    return destructured;
  }

  Type getTypeAtIndex(Type type, Attribute index) const {
    auto memrefType = llvm::cast<MemRefType>(type);
    auto coordArrAttr = llvm::dyn_cast<ArrayAttr>(index);
    if (!coordArrAttr || coordArrAttr.size() != memrefType.getShape().size())
      return {};

    Type indexType = IndexType::get(memrefType.getContext());
    for (const auto &[coordAttr, dimSize] :
         llvm::zip(coordArrAttr, memrefType.getShape())) {
      auto coord = llvm::dyn_cast<IntegerAttr>(coordAttr);
      if (!coord || coord.getType() != indexType || coord.getInt() < 0 ||
          coord.getInt() >= dimSize)
        return {};
    }

    return memrefType.getElementType();
  }
};

} // namespace

//===----------------------------------------------------------------------===//
// Register external models
//===----------------------------------------------------------------------===//

void mlir::memref::registerMemorySlotExternalModels(DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx, BuiltinDialect *dialect) {
    MemRefType::attachInterface<MemRefDestructurableTypeExternalModel>(*ctx);
  });
}