[mlir] Use has_value instead of hasValue (NFC)

parent a5ee62a141
commit 491d27013d
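The change is a mechanical rename: calls to `llvm::Optional<T>::hasValue()` are replaced with the equivalent `has_value()`, matching the `std::optional` spelling; behavior is unchanged (NFC). A minimal sketch of the before/after usage follows — the function and variable names are hypothetical and not taken from the files touched by this commit:

    #include "llvm/ADT/Optional.h"

    // Hypothetical example illustrating the rename applied throughout the
    // diff below; llvm::Optional provides has_value() as the std::optional-
    // style alias for hasValue().
    static int valueOrZero(llvm::Optional<int> maybe) {
      // Old spelling (being phased out):    maybe.hasValue()
      // New spelling (std::optional-style): maybe.has_value()
      if (maybe.has_value())
        return *maybe; // getValue()/operator* are untouched by this commit
      return 0;
    }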
@@ -96,7 +96,7 @@ public:
 }

 /// Returns true if the value of this lattice hasn't yet been initialized.
-bool isUninitialized() const override { return !optimisticValue.hasValue(); }
+bool isUninitialized() const override { return !optimisticValue.has_value(); }
 /// Force the initialization of the element by setting it to its pessimistic
 /// fixpoint.
 ChangeResult defaultInitialize() override {
@@ -406,7 +406,7 @@ public:
 /// Returns true if the pos^th variable has an associated Value.
 inline bool hasValue(unsigned pos) const {
 assert(pos < getNumDimAndSymbolVars() && "Invalid position");
-return values[pos].hasValue();
+return values[pos].has_value();
 }

 /// Returns true if at least one variable has an associated Value.
@@ -568,8 +568,8 @@ class MMA_SYNC_INTR {
 # op[1].ptx_elt_type # "\" == eltypeB && "
 # " \"" # op[2].ptx_elt_type # "\" == eltypeC && "
 # " \"" # op[3].ptx_elt_type # "\" == eltypeD "
-# " && (sat.hasValue() ? " # sat # " == static_cast<int>(*sat) : true)"
-# !if(!ne(b1op, ""), " && (b1Op.hasValue() ? MMAB1Op::" # b1op # " == b1Op.getValue() : true)", "") # ")\n"
+# " && (sat.has_value() ? " # sat # " == static_cast<int>(*sat) : true)"
+# !if(!ne(b1op, ""), " && (b1Op.has_value() ? MMAB1Op::" # b1op # " == b1Op.getValue() : true)", "") # ")\n"
 # " return " #
 MMA_SYNC_NAME<layoutA, layoutB, b1op, sat, op[0], op[1], op[2], op[3]>.id # ";",
 "") // if supported
@@ -44,7 +44,7 @@ public:
 OptionalParseResult(llvm::NoneType) : impl(llvm::None) {}

 /// Returns true if we contain a valid ParseResult value.
-bool hasValue() const { return impl.hasValue(); }
+bool hasValue() const { return impl.has_value(); }

 /// Access the internal ParseResult value.
 ParseResult getValue() const { return impl.getValue(); }
@@ -90,12 +90,12 @@ public:
 FailureOr(const FailureOr<U> &other)
 : Optional<T>(failed(other) ? Optional<T>() : Optional<T>(*other)) {}

-operator LogicalResult() const { return success(this->hasValue()); }
+operator LogicalResult() const { return success(this->has_value()); }

 private:
 /// Hide the bool conversion as it easily creates confusion.
 using Optional<T>::operator bool;
-using Optional<T>::hasValue;
+using Optional<T>::has_value;
 };

 /// This class represents success/failure for parsing-like operations that find
@@ -263,12 +263,12 @@ public:
 explicit ArgOrType(TypeConstraint constraint)
 : index(None), constraint(constraint) {}
 bool isArg() const {
-assert(constraint.hasValue() ^ index.hasValue());
-return index.hasValue();
+assert(constraint.has_value() ^ index.has_value());
+return index.has_value();
 }
 bool isType() const {
-assert(constraint.hasValue() ^ index.hasValue());
-return constraint.hasValue();
+assert(constraint.has_value() ^ index.has_value());
+return constraint.has_value();
 }

 int getArg() const { return *index; }
@@ -101,7 +101,7 @@ void IntegerRangeAnalysis::visitOperation(
 bool isYieldedResult = llvm::any_of(v.getUsers(), [](Operation *op) {
 return op->hasTrait<OpTrait::IsTerminator>();
 });
-if (isYieldedResult && oldRange.hasValue() &&
+if (isYieldedResult && oldRange.has_value() &&
 !(lattice->getValue() == *oldRange)) {
 LLVM_DEBUG(llvm::dbgs() << "Loop variant loop result detected\n");
 changed |= lattice->markPessimisticFixpoint();
@@ -162,7 +162,7 @@ void IntegerRangeAnalysis::visitNonControlFlowArguments(
 auto getLoopBoundFromFold = [&](Optional<OpFoldResult> loopBound,
 Type boundType, bool getUpper) {
 unsigned int width = ConstantIntRanges::getStorageBitwidth(boundType);
-if (loopBound.hasValue()) {
+if (loopBound.has_value()) {
 if (loopBound->is<Attribute>()) {
 if (auto bound =
 loopBound->get<Attribute>().dyn_cast_or_null<IntegerAttr>())
@ -1998,14 +1998,14 @@ IntegerRelation::unionBoundingBox(const IntegerRelation &otherCst) {
|
||||
int64_t lbFloorDivisor, otherLbFloorDivisor;
|
||||
for (unsigned d = 0, e = getNumDimVars(); d < e; ++d) {
|
||||
auto extent = getConstantBoundOnDimSize(d, &lb, &lbFloorDivisor, &ub);
|
||||
if (!extent.hasValue())
|
||||
if (!extent.has_value())
|
||||
// TODO: symbolic extents when necessary.
|
||||
// TODO: handle union if a dimension is unbounded.
|
||||
return failure();
|
||||
|
||||
auto otherExtent = otherCst.getConstantBoundOnDimSize(
|
||||
d, &otherLb, &otherLbFloorDivisor, &otherUb);
|
||||
if (!otherExtent.hasValue() || lbFloorDivisor != otherLbFloorDivisor)
|
||||
if (!otherExtent.has_value() || lbFloorDivisor != otherLbFloorDivisor)
|
||||
// TODO: symbolic extents when necessary.
|
||||
return failure();
|
||||
|
||||
@ -2026,7 +2026,7 @@ IntegerRelation::unionBoundingBox(const IntegerRelation &otherCst) {
|
||||
// Uncomparable - check for constant lower/upper bounds.
|
||||
auto constLb = getConstantBound(BoundType::LB, d);
|
||||
auto constOtherLb = otherCst.getConstantBound(BoundType::LB, d);
|
||||
if (!constLb.hasValue() || !constOtherLb.hasValue())
|
||||
if (!constLb.has_value() || !constOtherLb.has_value())
|
||||
return failure();
|
||||
std::fill(minLb.begin(), minLb.end(), 0);
|
||||
minLb.back() = std::min(constLb.getValue(), constOtherLb.getValue());
|
||||
@ -2042,7 +2042,7 @@ IntegerRelation::unionBoundingBox(const IntegerRelation &otherCst) {
|
||||
// Uncomparable - check for constant lower/upper bounds.
|
||||
auto constUb = getConstantBound(BoundType::UB, d);
|
||||
auto constOtherUb = otherCst.getConstantBound(BoundType::UB, d);
|
||||
if (!constUb.hasValue() || !constOtherUb.hasValue())
|
||||
if (!constUb.has_value() || !constOtherUb.has_value())
|
||||
return failure();
|
||||
std::fill(maxUb.begin(), maxUb.end(), 0);
|
||||
maxUb.back() = std::max(constUb.getValue(), constOtherUb.getValue());
|
||||
|
@ -68,7 +68,7 @@ LogicalResult ScalarOpToLibmCall<Op, TypeResolver>::matchAndRewrite(
|
||||
Op op, PatternRewriter &rewriter) const {
|
||||
auto module = SymbolTable::getNearestSymbolTable(op);
|
||||
auto isDouble = TypeResolver()(op.getType());
|
||||
if (!isDouble.hasValue())
|
||||
if (!isDouble.has_value())
|
||||
return failure();
|
||||
|
||||
auto name = isDouble.value() ? doubleFunc : floatFunc;
|
||||
|
@ -645,7 +645,7 @@ public:
|
||||
ModuleOp module = op->getParentOfType<ModuleOp>();
|
||||
IntegerAttr executionModeAttr = op.execution_modeAttr();
|
||||
std::string moduleName;
|
||||
if (module.getName().hasValue())
|
||||
if (module.getName().has_value())
|
||||
moduleName = "_" + module.getName().getValue().str();
|
||||
else
|
||||
moduleName = "";
|
||||
@ -1585,7 +1585,7 @@ void mlir::encodeBindAttribute(ModuleOp module) {
|
||||
if (descriptorSet && binding) {
|
||||
// Encode these numbers into the variable's symbolic name. If the
|
||||
// SPIR-V module has a name, add it at the beginning.
|
||||
auto moduleAndName = spvModule.getName().hasValue()
|
||||
auto moduleAndName = spvModule.getName().has_value()
|
||||
? spvModule.getName().getValue().str() + "_" +
|
||||
op.sym_name().str()
|
||||
: op.sym_name().str();
|
||||
|
@ -1164,7 +1164,7 @@ public:
|
||||
|
||||
auto dynamicDimsOr =
|
||||
checkHasDynamicBatchDims(rewriter, op, {input, op.output()});
|
||||
if (!dynamicDimsOr.hasValue())
|
||||
if (!dynamicDimsOr.has_value())
|
||||
return failure();
|
||||
SmallVector<Value> dynamicDims = dynamicDimsOr.getValue();
|
||||
|
||||
@ -1356,7 +1356,7 @@ public:
|
||||
|
||||
auto dynamicDimsOr =
|
||||
checkHasDynamicBatchDims(rewriter, op, {input, op.output()});
|
||||
if (!dynamicDimsOr.hasValue())
|
||||
if (!dynamicDimsOr.has_value())
|
||||
return failure();
|
||||
SmallVector<Value> dynamicDims = dynamicDimsOr.getValue();
|
||||
|
||||
@ -2051,7 +2051,7 @@ public:
|
||||
|
||||
auto dynamicDimsOr =
|
||||
checkHasDynamicBatchDims(rewriter, op, {input, indices, op.output()});
|
||||
if (!dynamicDimsOr.hasValue())
|
||||
if (!dynamicDimsOr.has_value())
|
||||
return failure();
|
||||
SmallVector<Value> dynamicDims = dynamicDimsOr.getValue();
|
||||
|
||||
|
@ -694,7 +694,7 @@ public:
|
||||
|
||||
auto dynamicDimsOr =
|
||||
checkHasDynamicBatchDims(rewriter, op, {input, op.output()});
|
||||
if (!dynamicDimsOr.hasValue())
|
||||
if (!dynamicDimsOr.has_value())
|
||||
return failure();
|
||||
SmallVector<Value> dynamicDims = dynamicDimsOr.getValue();
|
||||
|
||||
@ -771,7 +771,7 @@ public:
|
||||
|
||||
auto dynamicDimsOr =
|
||||
checkHasDynamicBatchDims(rewriter, op, {input, op.output()});
|
||||
if (!dynamicDimsOr.hasValue())
|
||||
if (!dynamicDimsOr.has_value())
|
||||
return failure();
|
||||
SmallVector<Value> dynamicDims = dynamicDimsOr.getValue();
|
||||
|
||||
|
@ -90,7 +90,7 @@ static void getXferIndices(OpBuilder &b, OpTy xferOp, Value iv,
|
||||
indices.append(prevIndices.begin(), prevIndices.end());
|
||||
|
||||
Location loc = xferOp.getLoc();
|
||||
bool isBroadcast = !dim.hasValue();
|
||||
bool isBroadcast = !dim.has_value();
|
||||
if (!isBroadcast) {
|
||||
AffineExpr d0, d1;
|
||||
bindDims(xferOp.getContext(), d0, d1);
|
||||
|
@ -322,7 +322,7 @@ unsigned FlatAffineValueConstraints::insertVar(VarKind kind, unsigned pos,
|
||||
|
||||
bool FlatAffineValueConstraints::hasValues() const {
|
||||
return llvm::find_if(values, [](Optional<Value> var) {
|
||||
return var.hasValue();
|
||||
return var.has_value();
|
||||
}) != values.end();
|
||||
}
|
||||
|
||||
@ -402,11 +402,11 @@ static void mergeAndAlignVars(unsigned offset, FlatAffineValueConstraints *a,
|
||||
|
||||
assert(std::all_of(a->getMaybeValues().begin() + offset,
|
||||
a->getMaybeValues().end(),
|
||||
[](Optional<Value> var) { return var.hasValue(); }));
|
||||
[](Optional<Value> var) { return var.has_value(); }));
|
||||
|
||||
assert(std::all_of(b->getMaybeValues().begin() + offset,
|
||||
b->getMaybeValues().end(),
|
||||
[](Optional<Value> var) { return var.hasValue(); }));
|
||||
[](Optional<Value> var) { return var.has_value(); }));
|
||||
|
||||
SmallVector<Value, 4> aDimValues;
|
||||
a->getValues(offset, a->getNumDimVars(), &aDimValues);
|
||||
@ -1009,7 +1009,7 @@ void FlatAffineValueConstraints::getSliceBounds(
|
||||
|
||||
auto lbConst = getConstantBound(BoundType::LB, pos);
|
||||
auto ubConst = getConstantBound(BoundType::UB, pos);
|
||||
if (lbConst.hasValue() && ubConst.hasValue()) {
|
||||
if (lbConst.has_value() && ubConst.has_value()) {
|
||||
// Detect equality to a constant.
|
||||
if (lbConst.getValue() == ubConst.getValue()) {
|
||||
memo[pos] = getAffineConstantExpr(lbConst.getValue(), context);
|
||||
@ -1120,7 +1120,7 @@ void FlatAffineValueConstraints::getSliceBounds(
|
||||
LLVM_DEBUG(llvm::dbgs()
|
||||
<< "WARNING: Potentially over-approximating slice lb\n");
|
||||
auto lbConst = getConstantBound(BoundType::LB, pos + offset);
|
||||
if (lbConst.hasValue()) {
|
||||
if (lbConst.has_value()) {
|
||||
lbMap = AffineMap::get(
|
||||
numMapDims, numMapSymbols,
|
||||
getAffineConstantExpr(lbConst.getValue(), context));
|
||||
@ -1130,7 +1130,7 @@ void FlatAffineValueConstraints::getSliceBounds(
|
||||
LLVM_DEBUG(llvm::dbgs()
|
||||
<< "WARNING: Potentially over-approximating slice ub\n");
|
||||
auto ubConst = getConstantBound(BoundType::UB, pos + offset);
|
||||
if (ubConst.hasValue()) {
|
||||
if (ubConst.has_value()) {
|
||||
ubMap =
|
||||
AffineMap::get(numMapDims, numMapSymbols,
|
||||
getAffineConstantExpr(
|
||||
@ -1673,12 +1673,12 @@ void FlatAffineRelation::compose(const FlatAffineRelation &other) {
|
||||
|
||||
// Add and match domain of `rel` to domain of `this`.
|
||||
for (unsigned i = 0, e = rel.getNumDomainDims(); i < e; ++i)
|
||||
if (relMaybeValues[i].hasValue())
|
||||
if (relMaybeValues[i].has_value())
|
||||
setValue(i, relMaybeValues[i].getValue());
|
||||
// Add and match range of `this` to range of `rel`.
|
||||
for (unsigned i = 0, e = getNumRangeDims(); i < e; ++i) {
|
||||
unsigned rangeIdx = rel.getNumDomainDims() + i;
|
||||
if (thisMaybeValues[rangeIdx].hasValue())
|
||||
if (thisMaybeValues[rangeIdx].has_value())
|
||||
rel.setValue(rangeIdx, thisMaybeValues[rangeIdx].getValue());
|
||||
}
|
||||
|
||||
|
@ -92,7 +92,7 @@ Optional<uint64_t> mlir::getConstantTripCount(AffineForOp forOp) {
|
||||
Optional<uint64_t> tripCount;
|
||||
for (auto resultExpr : map.getResults()) {
|
||||
if (auto constExpr = resultExpr.dyn_cast<AffineConstantExpr>()) {
|
||||
if (tripCount.hasValue())
|
||||
if (tripCount.has_value())
|
||||
tripCount = std::min(tripCount.getValue(),
|
||||
static_cast<uint64_t>(constExpr.getValue()));
|
||||
else
|
||||
@ -132,12 +132,12 @@ uint64_t mlir::getLargestDivisorOfTripCount(AffineForOp forOp) {
|
||||
// Trip count is not a known constant; return its largest known divisor.
|
||||
thisGcd = resultExpr.getLargestKnownDivisor();
|
||||
}
|
||||
if (gcd.hasValue())
|
||||
if (gcd.has_value())
|
||||
gcd = llvm::GreatestCommonDivisor64(gcd.getValue(), thisGcd);
|
||||
else
|
||||
gcd = thisGcd;
|
||||
}
|
||||
assert(gcd.hasValue() && "value expected per above logic");
|
||||
assert(gcd.has_value() && "value expected per above logic");
|
||||
return gcd.getValue();
|
||||
}
|
||||
|
||||
|
@ -375,7 +375,7 @@ Optional<int64_t> MemRefRegion::getConstantBoundingSizeAndShape(
|
||||
SmallVector<int64_t, 4> lb;
|
||||
Optional<int64_t> diff =
|
||||
cstWithShapeBounds.getConstantBoundOnDimSize(d, &lb, &lbDivisor);
|
||||
if (diff.hasValue()) {
|
||||
if (diff.has_value()) {
|
||||
diffConstant = diff.getValue();
|
||||
assert(diffConstant >= 0 && "Dim size bound can't be negative");
|
||||
assert(lbDivisor > 0);
|
||||
@ -1012,7 +1012,7 @@ bool mlir::buildSliceTripCountMap(
|
||||
continue;
|
||||
}
|
||||
Optional<uint64_t> maybeConstTripCount = getConstantTripCount(forOp);
|
||||
if (maybeConstTripCount.hasValue()) {
|
||||
if (maybeConstTripCount.has_value()) {
|
||||
(*tripCountMap)[op] = maybeConstTripCount.getValue();
|
||||
continue;
|
||||
}
|
||||
@ -1020,7 +1020,7 @@ bool mlir::buildSliceTripCountMap(
|
||||
}
|
||||
Optional<uint64_t> tripCount = getConstDifference(lbMap, ubMap);
|
||||
// Slice bounds are created with a constant ub - lb difference.
|
||||
if (!tripCount.hasValue())
|
||||
if (!tripCount.has_value())
|
||||
return false;
|
||||
(*tripCountMap)[op] = tripCount.getValue();
|
||||
}
|
||||
@ -1320,7 +1320,7 @@ static Optional<int64_t> getMemoryFootprintBytes(Block &block,
|
||||
int64_t totalSizeInBytes = 0;
|
||||
for (const auto ®ion : regions) {
|
||||
Optional<int64_t> size = region.second->getRegionSize();
|
||||
if (!size.hasValue())
|
||||
if (!size.has_value())
|
||||
return None;
|
||||
totalSizeInBytes += size.getValue();
|
||||
}
|
||||
|
@ -337,7 +337,7 @@ static bool isDimOpValidSymbol(OpTy dimOp, Region *region) {
|
||||
// The dim op is also okay if its operand memref is a view/subview whose
|
||||
// corresponding size is a valid symbol.
|
||||
Optional<int64_t> index = dimOp.getConstantIndex();
|
||||
assert(index.hasValue() &&
|
||||
assert(index.has_value() &&
|
||||
"expect only `dim` operations with a constant index");
|
||||
int64_t i = index.getValue();
|
||||
return TypeSwitch<Operation *, bool>(dimOp.getSource().getDefiningOp())
|
||||
@ -1892,12 +1892,13 @@ struct AffineForEmptyLoopFolder : public OpRewritePattern<AffineForOp> {
|
||||
}
|
||||
// Bail out when the trip count is unknown and the loop returns any value
|
||||
// defined outside of the loop or any iterArg out of order.
|
||||
if (!tripCount.hasValue() &&
|
||||
if (!tripCount.has_value() &&
|
||||
(hasValDefinedOutsideLoop || iterArgsNotInOrder))
|
||||
return failure();
|
||||
// Bail out when the loop iterates more than once and it returns any iterArg
|
||||
// out of order.
|
||||
if (tripCount.hasValue() && tripCount.getValue() >= 2 && iterArgsNotInOrder)
|
||||
if (tripCount.has_value() && tripCount.getValue() >= 2 &&
|
||||
iterArgsNotInOrder)
|
||||
return failure();
|
||||
rewriter.replaceOp(forOp, replacements);
|
||||
return success();
|
||||
@ -1930,14 +1931,14 @@ OperandRange AffineForOp::getSuccessorEntryOperands(Optional<unsigned> index) {
|
||||
void AffineForOp::getSuccessorRegions(
|
||||
Optional<unsigned> index, ArrayRef<Attribute> operands,
|
||||
SmallVectorImpl<RegionSuccessor> ®ions) {
|
||||
assert((!index.hasValue() || index.getValue() == 0) &&
|
||||
assert((!index.has_value() || index.getValue() == 0) &&
|
||||
"expected loop region");
|
||||
// The loop may typically branch back to its body or to the parent operation.
|
||||
// If the predecessor is the parent op and the trip count is known to be at
|
||||
// least one, branch into the body using the iterator arguments. And in cases
|
||||
// we know the trip count is zero, it can only branch back to its parent.
|
||||
Optional<uint64_t> tripCount = getTrivialConstantTripCount(*this);
|
||||
if (!index.hasValue() && tripCount.hasValue()) {
|
||||
if (!index.has_value() && tripCount.has_value()) {
|
||||
if (tripCount.getValue() > 0) {
|
||||
regions.push_back(RegionSuccessor(&getLoopBody(), getRegionIterArgs()));
|
||||
return;
|
||||
|
@ -142,7 +142,7 @@ void AffineDataCopyGeneration::runOnBlock(Block *block,
|
||||
Optional<int64_t> footprint =
|
||||
getMemoryFootprintBytes(forOp,
|
||||
/*memorySpace=*/0);
|
||||
return (footprint.hasValue() &&
|
||||
return (footprint.has_value() &&
|
||||
static_cast<uint64_t>(footprint.getValue()) >
|
||||
fastMemCapacityBytes);
|
||||
};
|
||||
|
@ -441,8 +441,8 @@ public:
|
||||
++pos;
|
||||
}
|
||||
|
||||
if (firstSrcDepPos.hasValue()) {
|
||||
if (lastDstDepPos.hasValue()) {
|
||||
if (firstSrcDepPos.has_value()) {
|
||||
if (lastDstDepPos.has_value()) {
|
||||
if (firstSrcDepPos.getValue() <= lastDstDepPos.getValue()) {
|
||||
// No valid insertion point exists which preserves dependences.
|
||||
return nullptr;
|
||||
@ -944,7 +944,7 @@ static Value createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst,
|
||||
uint64_t bufSize =
|
||||
getMemRefEltSizeInBytes(oldMemRefType) * numElements.getValue();
|
||||
unsigned newMemSpace;
|
||||
if (bufSize <= localBufSizeThreshold && fastMemorySpace.hasValue()) {
|
||||
if (bufSize <= localBufSizeThreshold && fastMemorySpace.has_value()) {
|
||||
newMemSpace = fastMemorySpace.getValue();
|
||||
} else {
|
||||
newMemSpace = oldMemRefType.getMemorySpaceAsInt();
|
||||
@ -1141,7 +1141,7 @@ static bool isFusionProfitable(Operation *srcOpInst, Operation *srcStoreOpInst,
|
||||
|
||||
Optional<int64_t> maybeSrcWriteRegionSizeBytes =
|
||||
srcWriteRegion.getRegionSize();
|
||||
if (!maybeSrcWriteRegionSizeBytes.hasValue())
|
||||
if (!maybeSrcWriteRegionSizeBytes.has_value())
|
||||
return false;
|
||||
int64_t srcWriteRegionSizeBytes = maybeSrcWriteRegionSizeBytes.getValue();
|
||||
|
||||
@ -1183,7 +1183,7 @@ static bool isFusionProfitable(Operation *srcOpInst, Operation *srcStoreOpInst,
|
||||
|
||||
Optional<int64_t> maybeSliceWriteRegionSizeBytes =
|
||||
sliceWriteRegion.getRegionSize();
|
||||
if (!maybeSliceWriteRegionSizeBytes.hasValue() ||
|
||||
if (!maybeSliceWriteRegionSizeBytes.has_value() ||
|
||||
maybeSliceWriteRegionSizeBytes.getValue() == 0) {
|
||||
LLVM_DEBUG(llvm::dbgs()
|
||||
<< "Failed to get slice write region size at loopDepth: " << i
|
||||
|
@ -405,7 +405,7 @@ checkTilingLegalityImpl(MutableArrayRef<mlir::AffineForOp> origLoops) {
|
||||
LLVM_DEBUG(dstAccess.opInst->dump(););
|
||||
for (unsigned k = 0, e = depComps.size(); k < e; k++) {
|
||||
DependenceComponent depComp = depComps[k];
|
||||
if (depComp.lb.hasValue() && depComp.ub.hasValue() &&
|
||||
if (depComp.lb.has_value() && depComp.ub.has_value() &&
|
||||
depComp.lb.getValue() < depComp.ub.getValue() &&
|
||||
depComp.ub.getValue() < 0) {
|
||||
LLVM_DEBUG(llvm::dbgs()
|
||||
@ -974,7 +974,7 @@ void mlir::getTileableBands(func::FuncOp f,
|
||||
/// Unrolls this loop completely.
|
||||
LogicalResult mlir::loopUnrollFull(AffineForOp forOp) {
|
||||
Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
|
||||
if (mayBeConstantTripCount.hasValue()) {
|
||||
if (mayBeConstantTripCount.has_value()) {
|
||||
uint64_t tripCount = mayBeConstantTripCount.getValue();
|
||||
if (tripCount == 0)
|
||||
return success();
|
||||
@ -990,7 +990,7 @@ LogicalResult mlir::loopUnrollFull(AffineForOp forOp) {
|
||||
LogicalResult mlir::loopUnrollUpToFactor(AffineForOp forOp,
|
||||
uint64_t unrollFactor) {
|
||||
Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
|
||||
if (mayBeConstantTripCount.hasValue() &&
|
||||
if (mayBeConstantTripCount.has_value() &&
|
||||
mayBeConstantTripCount.getValue() < unrollFactor)
|
||||
return loopUnrollByFactor(forOp, mayBeConstantTripCount.getValue());
|
||||
return loopUnrollByFactor(forOp, unrollFactor);
|
||||
@ -1150,7 +1150,7 @@ LogicalResult mlir::loopUnrollByFactor(
|
||||
LogicalResult mlir::loopUnrollJamUpToFactor(AffineForOp forOp,
|
||||
uint64_t unrollJamFactor) {
|
||||
Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
|
||||
if (mayBeConstantTripCount.hasValue() &&
|
||||
if (mayBeConstantTripCount.has_value() &&
|
||||
mayBeConstantTripCount.getValue() < unrollJamFactor)
|
||||
return loopUnrollJamByFactor(forOp, mayBeConstantTripCount.getValue());
|
||||
return loopUnrollJamByFactor(forOp, unrollJamFactor);
|
||||
@ -1573,7 +1573,7 @@ AffineForOp mlir::sinkSequentialLoops(AffineForOp forOp) {
|
||||
assert(depComps.size() >= maxLoopDepth);
|
||||
for (unsigned j = 0; j < maxLoopDepth; ++j) {
|
||||
DependenceComponent &depComp = depComps[j];
|
||||
assert(depComp.lb.hasValue() && depComp.ub.hasValue());
|
||||
assert(depComp.lb.has_value() && depComp.ub.has_value());
|
||||
if (depComp.lb.getValue() != 0 || depComp.ub.getValue() != 0)
|
||||
isParallelLoop[j] = false;
|
||||
}
|
||||
@ -2406,12 +2406,12 @@ LogicalResult mlir::affineDataCopyGenerate(Block::iterator begin,
|
||||
block->walk(begin, end, [&](Operation *opInst) {
|
||||
// Gather regions to allocate to buffers in faster memory space.
|
||||
if (auto loadOp = dyn_cast<AffineLoadOp>(opInst)) {
|
||||
if ((filterMemRef.hasValue() && filterMemRef != loadOp.getMemRef()) ||
|
||||
if ((filterMemRef.has_value() && filterMemRef != loadOp.getMemRef()) ||
|
||||
(loadOp.getMemRefType().getMemorySpaceAsInt() !=
|
||||
copyOptions.slowMemorySpace))
|
||||
return;
|
||||
} else if (auto storeOp = dyn_cast<AffineStoreOp>(opInst)) {
|
||||
if ((filterMemRef.hasValue() && filterMemRef != storeOp.getMemRef()) ||
|
||||
if ((filterMemRef.has_value() && filterMemRef != storeOp.getMemRef()) ||
|
||||
storeOp.getMemRefType().getMemorySpaceAsInt() !=
|
||||
copyOptions.slowMemorySpace)
|
||||
return;
|
||||
|
@ -567,11 +567,11 @@ bufferization::getBufferType(Value value, const BufferizationOptions &options) {
|
||||
|
||||
// If we still do not know the memory space, use the default memory space (if
|
||||
// any).
|
||||
if (!memorySpace.hasValue())
|
||||
if (!memorySpace.has_value())
|
||||
memorySpace = options.defaultMemorySpace;
|
||||
|
||||
// If we still do not know the memory space, report a failure.
|
||||
if (!memorySpace.hasValue())
|
||||
if (!memorySpace.has_value())
|
||||
return op->emitError("could not infer memory space");
|
||||
|
||||
return getMemRefType(value, options, /*layout=*/{}, *memorySpace);
|
||||
|
@ -172,12 +172,12 @@ LogicalResult AllocTensorOp::bufferize(RewriterBase &rewriter,
|
||||
|
||||
// Compute memory space of this allocation.
|
||||
unsigned memorySpace;
|
||||
if (getMemorySpace().hasValue()) {
|
||||
if (getMemorySpace().has_value()) {
|
||||
memorySpace = *getMemorySpace();
|
||||
} else if (getCopy()) {
|
||||
memorySpace =
|
||||
copyBuffer.getType().cast<BaseMemRefType>().getMemorySpaceAsInt();
|
||||
} else if (options.defaultMemorySpace.hasValue()) {
|
||||
} else if (options.defaultMemorySpace.has_value()) {
|
||||
memorySpace = *options.defaultMemorySpace;
|
||||
} else {
|
||||
return op->emitError("could not infer memory space");
|
||||
@ -470,11 +470,11 @@ struct SimplifyClones : public OpRewritePattern<CloneOp> {
|
||||
llvm::Optional<Operation *> maybeCloneDeallocOp =
|
||||
memref::findDealloc(cloneOp.getOutput());
|
||||
// Skip if either of them has > 1 deallocate operations.
|
||||
if (!maybeCloneDeallocOp.hasValue())
|
||||
if (!maybeCloneDeallocOp.has_value())
|
||||
return failure();
|
||||
llvm::Optional<Operation *> maybeSourceDeallocOp =
|
||||
memref::findDealloc(source);
|
||||
if (!maybeSourceDeallocOp.hasValue())
|
||||
if (!maybeSourceDeallocOp.has_value())
|
||||
return failure();
|
||||
Operation *cloneDeallocOp = *maybeCloneDeallocOp;
|
||||
Operation *sourceDeallocOp = *maybeSourceDeallocOp;
|
||||
|
@ -158,9 +158,9 @@ bufferization::getGlobalFor(arith::ConstantOp constantOp, uint64_t alignment) {
|
||||
auto globalOp = dyn_cast<memref::GlobalOp>(&op);
|
||||
if (!globalOp)
|
||||
continue;
|
||||
if (!globalOp.getInitialValue().hasValue())
|
||||
if (!globalOp.getInitialValue().has_value())
|
||||
continue;
|
||||
uint64_t opAlignment = globalOp.getAlignment().hasValue()
|
||||
uint64_t opAlignment = globalOp.getAlignment().has_value()
|
||||
? globalOp.getAlignment().getValue()
|
||||
: 0;
|
||||
Attribute initialValue = globalOp.getInitialValue().getValue();
|
||||
|
@ -114,7 +114,7 @@ static FuncOpAnalysisState getFuncOpAnalysisState(const AnalysisState &state,
|
||||
Optional<const FuncAnalysisState *> maybeState =
|
||||
state.getDialectState<FuncAnalysisState>(
|
||||
func::FuncDialect::getDialectNamespace());
|
||||
if (!maybeState.hasValue())
|
||||
if (!maybeState.has_value())
|
||||
return FuncOpAnalysisState::NotAnalyzed;
|
||||
const auto &analyzedFuncOps = maybeState.getValue()->analyzedFuncOps;
|
||||
auto it = analyzedFuncOps.find(funcOp);
|
||||
|
@ -75,7 +75,7 @@ void gpu::SerializeToBlobPass::runOnOperation() {
|
||||
Optional<std::string> maybeTargetISA =
|
||||
translateToISA(*llvmModule, *targetMachine);
|
||||
|
||||
if (!maybeTargetISA.hasValue())
|
||||
if (!maybeTargetISA.has_value())
|
||||
return signalPassFailure();
|
||||
|
||||
std::string targetISA = std::move(maybeTargetISA.getValue());
|
||||
|
@ -237,7 +237,7 @@ ParseResult AllocaOp::parse(OpAsmParser &parser, OperationState &result) {
|
||||
|
||||
Optional<NamedAttribute> alignmentAttr =
|
||||
result.attributes.getNamed("alignment");
|
||||
if (alignmentAttr.hasValue()) {
|
||||
if (alignmentAttr.has_value()) {
|
||||
auto alignmentInt =
|
||||
alignmentAttr.getValue().getValue().dyn_cast<IntegerAttr>();
|
||||
if (!alignmentInt)
|
||||
@ -272,11 +272,11 @@ ParseResult AllocaOp::parse(OpAsmParser &parser, OperationState &result) {
|
||||
/// the attribute, but not both.
|
||||
static LogicalResult verifyOpaquePtr(Operation *op, LLVMPointerType ptrType,
|
||||
Optional<Type> ptrElementType) {
|
||||
if (ptrType.isOpaque() && !ptrElementType.hasValue()) {
|
||||
if (ptrType.isOpaque() && !ptrElementType.has_value()) {
|
||||
return op->emitOpError() << "expected '" << kElemTypeAttrName
|
||||
<< "' attribute if opaque pointer type is used";
|
||||
}
|
||||
if (!ptrType.isOpaque() && ptrElementType.hasValue()) {
|
||||
if (!ptrType.isOpaque() && ptrElementType.has_value()) {
|
||||
return op->emitOpError()
|
||||
<< "unexpected '" << kElemTypeAttrName
|
||||
<< "' attribute when non-opaque pointer type is used";
|
||||
@ -907,7 +907,7 @@ CallInterfaceCallable InvokeOp::getCallableForCallee() {
|
||||
}
|
||||
|
||||
Operation::operand_range InvokeOp::getArgOperands() {
|
||||
return getOperands().drop_front(getCallee().hasValue() ? 0 : 1);
|
||||
return getOperands().drop_front(getCallee().has_value() ? 0 : 1);
|
||||
}
|
||||
|
||||
LogicalResult InvokeOp::verify() {
|
||||
@ -928,7 +928,7 @@ LogicalResult InvokeOp::verify() {
|
||||
|
||||
void InvokeOp::print(OpAsmPrinter &p) {
|
||||
auto callee = getCallee();
|
||||
bool isDirect = callee.hasValue();
|
||||
bool isDirect = callee.has_value();
|
||||
|
||||
p << ' ';
|
||||
|
||||
@ -1146,7 +1146,7 @@ CallInterfaceCallable CallOp::getCallableForCallee() {
|
||||
}
|
||||
|
||||
Operation::operand_range CallOp::getArgOperands() {
|
||||
return getOperands().drop_front(getCallee().hasValue() ? 0 : 1);
|
||||
return getOperands().drop_front(getCallee().has_value() ? 0 : 1);
|
||||
}
|
||||
|
||||
LogicalResult CallOp::verify() {
|
||||
@ -1233,7 +1233,7 @@ LogicalResult CallOp::verify() {
|
||||
|
||||
void CallOp::print(OpAsmPrinter &p) {
|
||||
auto callee = getCallee();
|
||||
bool isDirect = callee.hasValue();
|
||||
bool isDirect = callee.has_value();
|
||||
|
||||
// Print the direct callee if present as a function attribute, or an indirect
|
||||
// callee (first operand) otherwise.
|
||||
@ -2011,7 +2011,7 @@ LogicalResult GlobalOp::verify() {
|
||||
}
|
||||
|
||||
Optional<uint64_t> alignAttr = getAlignment();
|
||||
if (alignAttr.hasValue()) {
|
||||
if (alignAttr.has_value()) {
|
||||
uint64_t value = alignAttr.getValue();
|
||||
if (!llvm::isPowerOf2_64(value))
|
||||
return emitError() << "alignment attribute is not a power of 2";
|
||||
|
@ -117,14 +117,14 @@ static bool isIntegerPtxType(MMATypes type) {
|
||||
MMATypes MmaOp::accumPtxType() {
|
||||
Optional<mlir::NVVM::MMATypes> val = inferOperandMMAType(
|
||||
getODSOperands(2).getTypes().front(), /*isAccum=*/true);
|
||||
assert(val.hasValue() && "accumulator PTX type should always be inferrable");
|
||||
assert(val.has_value() && "accumulator PTX type should always be inferrable");
|
||||
return val.getValue();
|
||||
}
|
||||
|
||||
MMATypes MmaOp::resultPtxType() {
|
||||
Optional<mlir::NVVM::MMATypes> val =
|
||||
inferOperandMMAType(getResult().getType(), /*isAccum=*/true);
|
||||
assert(val.hasValue() && "result PTX type should always be inferrable");
|
||||
assert(val.has_value() && "result PTX type should always be inferrable");
|
||||
return val.getValue();
|
||||
}
|
||||
|
||||
@ -224,10 +224,10 @@ void MmaOp::build(OpBuilder &builder, OperationState &result, Type resultType,
|
||||
result.addAttribute("layoutB", MMALayoutAttr::get(ctx, MMALayout::col));
|
||||
}
|
||||
|
||||
if (intOverflow.hasValue())
|
||||
if (intOverflow.has_value())
|
||||
result.addAttribute("intOverflowBehavior",
|
||||
MMAIntOverflowAttr::get(ctx, *intOverflow));
|
||||
if (b1Op.hasValue())
|
||||
if (b1Op.has_value())
|
||||
result.addAttribute("b1Op", MMAB1OpAttr::get(ctx, *b1Op));
|
||||
|
||||
result.addTypes(resultType);
|
||||
@ -311,13 +311,13 @@ ParseResult MmaOp::parse(OpAsmParser &parser, OperationState &result) {
|
||||
for (unsigned idx = 0; idx < names.size(); idx++) {
|
||||
const auto &frag = frags[idx];
|
||||
Optional<NamedAttribute> attr = namedAttributes.getNamed(names[idx]);
|
||||
if (!frag.elemtype.hasValue() && !attr.hasValue()) {
|
||||
if (!frag.elemtype.has_value() && !attr.has_value()) {
|
||||
return parser.emitError(
|
||||
parser.getNameLoc(),
|
||||
"attribute " + names[idx] +
|
||||
" is not provided explicitly and cannot be inferred");
|
||||
}
|
||||
if (!attr.hasValue())
|
||||
if (!attr.has_value())
|
||||
result.addAttribute(
|
||||
names[idx], MMATypesAttr::get(parser.getContext(), *frag.elemtype));
|
||||
}
|
||||
|
@ -49,7 +49,7 @@ static Value allocBuffer(ImplicitLocOpBuilder &b,
|
||||
auto width = layout.getTypeSize(elementType);
|
||||
|
||||
IntegerAttr alignmentAttr;
|
||||
if (alignment.hasValue())
|
||||
if (alignment.has_value())
|
||||
alignmentAttr = b.getI64IntegerAttr(alignment.getValue());
|
||||
|
||||
// Static buffer.
|
||||
|
@ -102,7 +102,7 @@ LogicalResult mlir::linalg::LinalgTransformationFilter::checkAndNotify(
|
||||
void mlir::linalg::LinalgTransformationFilter::
|
||||
replaceLinalgTransformationFilter(PatternRewriter &rewriter,
|
||||
Operation *op) const {
|
||||
if (replacement.hasValue())
|
||||
if (replacement.has_value())
|
||||
op->setAttr(LinalgTransforms::kLinalgTransformMarker,
|
||||
replacement.getValue());
|
||||
else
|
||||
|
@ -1300,7 +1300,7 @@ LogicalResult GlobalOp::verify() {
|
||||
|
||||
// Verify that the initial value, if present, is either a unit attribute or
|
||||
// an elements attribute.
|
||||
if (getInitialValue().hasValue()) {
|
||||
if (getInitialValue().has_value()) {
|
||||
Attribute initValue = getInitialValue().getValue();
|
||||
if (!initValue.isa<UnitAttr>() && !initValue.isa<ElementsAttr>())
|
||||
return emitOpError("initial value should be a unit or elements "
|
||||
@ -1332,7 +1332,7 @@ LogicalResult GlobalOp::verify() {
|
||||
|
||||
ElementsAttr GlobalOp::getConstantInitValue() {
|
||||
auto initVal = getInitialValue();
|
||||
if (getConstant() && initVal.hasValue())
|
||||
if (getConstant() && initVal.has_value())
|
||||
return initVal.getValue().cast<ElementsAttr>();
|
||||
return {};
|
||||
}
|
||||
@ -2164,7 +2164,7 @@ Type SubViewOp::inferRankReducedResultType(ArrayRef<int64_t> resultShape,
|
||||
// Compute which dimensions are dropped.
|
||||
Optional<llvm::SmallDenseSet<unsigned>> dimsToProject =
|
||||
computeRankReductionMask(inferredType.getShape(), resultShape);
|
||||
assert(dimsToProject.hasValue() && "invalid rank reduction");
|
||||
assert(dimsToProject.has_value() && "invalid rank reduction");
|
||||
llvm::SmallBitVector dimsToProjectVector(inferredType.getRank());
|
||||
for (unsigned dim : *dimsToProject)
|
||||
dimsToProjectVector.set(dim);
|
||||
|
@ -28,7 +28,7 @@ using namespace presburger;
|
||||
static void unpackOptionalValues(ArrayRef<Optional<Value>> source,
|
||||
SmallVector<Value> &target) {
|
||||
target = llvm::to_vector<4>(llvm::map_range(source, [](Optional<Value> val) {
|
||||
return val.hasValue() ? *val : Value();
|
||||
return val.has_value() ? *val : Value();
|
||||
}));
|
||||
}
|
||||
|
||||
|
@ -1247,7 +1247,7 @@ OpFoldResult GetExtentOp::fold(ArrayRef<Attribute> operands) {
|
||||
if (!elements)
|
||||
return nullptr;
|
||||
Optional<int64_t> dim = getConstantDim();
|
||||
if (!dim.hasValue())
|
||||
if (!dim.has_value())
|
||||
return nullptr;
|
||||
if (dim.getValue() >= elements.getNumElements())
|
||||
return nullptr;
|
||||
|
@ -1768,7 +1768,7 @@ public:
|
||||
|
||||
// Builds the tensor expression for the Linalg operation in SSA form.
|
||||
Optional<unsigned> optExp = merger.buildTensorExpFromLinalg(op);
|
||||
if (!optExp.hasValue())
|
||||
if (!optExp.has_value())
|
||||
return failure();
|
||||
unsigned exp = optExp.getValue();
|
||||
|
||||
|
@ -893,7 +893,7 @@ Optional<unsigned> Merger::buildTensorExp(linalg::GenericOp op, Value v) {
|
||||
// Construct unary operations if subexpression can be built.
|
||||
if (def->getNumOperands() == 1) {
|
||||
auto x = buildTensorExp(op, def->getOperand(0));
|
||||
if (x.hasValue()) {
|
||||
if (x.has_value()) {
|
||||
unsigned e = x.getValue();
|
||||
if (isa<math::AbsOp>(def))
|
||||
return addExp(kAbsF, e);
|
||||
@ -966,7 +966,7 @@ Optional<unsigned> Merger::buildTensorExp(linalg::GenericOp op, Value v) {
|
||||
if (def->getNumOperands() == 2) {
|
||||
auto x = buildTensorExp(op, def->getOperand(0));
|
||||
auto y = buildTensorExp(op, def->getOperand(1));
|
||||
if (x.hasValue() && y.hasValue()) {
|
||||
if (x.has_value() && y.has_value()) {
|
||||
unsigned e0 = x.getValue();
|
||||
unsigned e1 = y.getValue();
|
||||
if (isa<arith::MulFOp>(def))
|
||||
|
@ -200,7 +200,7 @@ public:
|
||||
Value weightPaddingVal = createOpAndInfer<tosa::ConstOp>(
|
||||
rewriter, loc, weightPaddingAttr.getType(), weightPaddingAttr);
|
||||
|
||||
if (op.quantization_info().hasValue()) {
|
||||
if (op.quantization_info().has_value()) {
|
||||
auto quantInfo = op.quantization_info().getValue();
|
||||
weight = createOpAndInfer<tosa::PadOp>(
|
||||
rewriter, loc, UnrankedTensorType::get(weightETy), weight,
|
||||
@ -264,7 +264,7 @@ public:
|
||||
Value inputPaddingVal = createOpAndInfer<tosa::ConstOp>(
|
||||
rewriter, loc, inputPaddingAttr.getType(), inputPaddingAttr);
|
||||
|
||||
if (op.quantization_info().hasValue()) {
|
||||
if (op.quantization_info().has_value()) {
|
||||
auto quantInfo = op.quantization_info().getValue();
|
||||
input = createOpAndInfer<tosa::PadOp>(
|
||||
rewriter, loc, UnrankedTensorType::get(inputETy), input,
|
||||
|
@ -143,13 +143,13 @@ transform::AlternativesOp::getSuccessorEntryOperands(Optional<unsigned> index) {
|
||||
void transform::AlternativesOp::getSuccessorRegions(
|
||||
Optional<unsigned> index, ArrayRef<Attribute> operands,
|
||||
SmallVectorImpl<RegionSuccessor> ®ions) {
|
||||
for (Region &alternative :
|
||||
llvm::drop_begin(getAlternatives(), index.hasValue() ? *index + 1 : 0)) {
|
||||
for (Region &alternative : llvm::drop_begin(
|
||||
getAlternatives(), index.has_value() ? *index + 1 : 0)) {
|
||||
regions.emplace_back(&alternative, !getOperands().empty()
|
||||
? alternative.getArguments()
|
||||
: Block::BlockArgListType());
|
||||
}
|
||||
if (index.hasValue())
|
||||
if (index.has_value())
|
||||
regions.emplace_back(getOperation()->getResults());
|
||||
}
|
||||
|
||||
|
@ -553,7 +553,7 @@ public:
|
||||
Value b = rewriter.create<vector::BroadcastOp>(loc, lhsType, op.getRhs());
|
||||
Optional<Value> mult = createContractArithOp(loc, op.getLhs(), b, acc,
|
||||
kind, rewriter, isInt);
|
||||
if (!mult.hasValue())
|
||||
if (!mult.has_value())
|
||||
return failure();
|
||||
rewriter.replaceOp(op, mult.getValue());
|
||||
return success();
|
||||
@ -571,7 +571,7 @@ public:
|
||||
r = rewriter.create<vector::ExtractOp>(loc, rhsType, acc, pos);
|
||||
Optional<Value> m =
|
||||
createContractArithOp(loc, a, op.getRhs(), r, kind, rewriter, isInt);
|
||||
if (!m.hasValue())
|
||||
if (!m.has_value())
|
||||
return failure();
|
||||
result = rewriter.create<vector::InsertOp>(loc, resType, m.getValue(),
|
||||
result, pos);
|
||||
@ -1861,7 +1861,7 @@ Value ContractionOpLowering::lowerParallel(vector::ContractionOp op,
|
||||
}
|
||||
assert(iterIndex >= 0 && "parallel index not listed in operand mapping");
|
||||
Optional<int64_t> lookup = getResultIndex(iMap[2], iterIndex);
|
||||
assert(lookup.hasValue() && "parallel index not listed in reduction");
|
||||
assert(lookup.has_value() && "parallel index not listed in reduction");
|
||||
int64_t resIndex = lookup.getValue();
|
||||
// Construct new iterator types and affine map array attribute.
|
||||
std::array<AffineMap, 3> lowIndexingMaps = {
|
||||
@ -1901,8 +1901,8 @@ Value ContractionOpLowering::lowerReduction(vector::ContractionOp op,
|
||||
SmallVector<AffineMap, 4> iMap = op.getIndexingMaps();
|
||||
Optional<int64_t> lookupLhs = getResultIndex(iMap[0], iterIndex);
|
||||
Optional<int64_t> lookupRhs = getResultIndex(iMap[1], iterIndex);
|
||||
assert(lookupLhs.hasValue() && "missing LHS parallel index");
|
||||
assert(lookupRhs.hasValue() && "missing RHS parallel index");
|
||||
assert(lookupLhs.has_value() && "missing LHS parallel index");
|
||||
assert(lookupRhs.has_value() && "missing RHS parallel index");
|
||||
int64_t lhsIndex = lookupLhs.getValue();
|
||||
int64_t rhsIndex = lookupRhs.getValue();
|
||||
int64_t dimSize = lhsType.getDimSize(lhsIndex);
|
||||
|
@ -120,7 +120,7 @@ verifyTypesAlongAllEdges(Operation *op, Optional<unsigned> sourceNo,
|
||||
};
|
||||
|
||||
Optional<TypeRange> sourceTypes = getInputsTypesForRegion(succRegionNo);
|
||||
if (!sourceTypes.hasValue())
|
||||
if (!sourceTypes.has_value())
|
||||
continue;
|
||||
|
||||
TypeRange succInputsTypes = succ.getSuccessorInputs().getTypes();
|
||||
|
@ -332,7 +332,7 @@ AffineExpr AffineParser::parseSymbolSSAIdExpr() {
|
||||
/// affine-expr ::= integer-literal
|
||||
AffineExpr AffineParser::parseIntegerExpr() {
|
||||
auto val = getToken().getUInt64IntegerValue();
|
||||
if (!val.hasValue() || (int64_t)val.getValue() < 0)
|
||||
if (!val.has_value() || (int64_t)val.getValue() < 0)
|
||||
return emitError("constant too large for index"), nullptr;
|
||||
|
||||
consumeToken(Token::integer);
|
||||
|
@ -679,7 +679,7 @@ TensorLiteralParser::getFloatAttrElements(SMLoc loc, FloatType eltTy,
|
||||
DenseElementsAttr TensorLiteralParser::getStringAttr(SMLoc loc,
|
||||
ShapedType type,
|
||||
Type eltTy) {
|
||||
if (hexStorage.hasValue()) {
|
||||
if (hexStorage.has_value()) {
|
||||
auto stringValue = hexStorage.getValue().getStringValue();
|
||||
return DenseStringElementsAttr::get(type, {stringValue});
|
||||
}
|
||||
|
@ -118,7 +118,7 @@ ParseResult Parser::parseNameOrFileLineColLocation(LocationAttr &loc) {
|
||||
return emitWrongTokenError(
|
||||
"expected integer column number in FileLineColLoc");
|
||||
auto column = getToken().getUnsignedIntegerValue();
|
||||
if (!column.hasValue())
|
||||
if (!column.has_value())
|
||||
return emitError("expected integer column number in FileLineColLoc");
|
||||
consumeToken(Token::integer);
|
||||
|
||||
|
@ -2020,7 +2020,7 @@ ParseResult OperationParser::parseRegionBody(Region ®ion, SMLoc startLoc,
|
||||
.attachNote(getEncodedSourceLocation(*defLoc))
|
||||
<< "previously referenced here";
|
||||
}
|
||||
Location loc = entryArg.sourceLoc.hasValue()
|
||||
Location loc = entryArg.sourceLoc.has_value()
|
||||
? entryArg.sourceLoc.getValue()
|
||||
: getEncodedSourceLocation(argInfo.location);
|
||||
BlockArgument arg = block->addArgument(entryArg.type, loc);
|
||||
|
@ -309,7 +309,7 @@ Type Parser::parseNonFunctionType() {
|
||||
// integer-type
|
||||
case Token::inttype: {
|
||||
auto width = getToken().getIntTypeBitwidth();
|
||||
if (!width.hasValue())
|
||||
if (!width.has_value())
|
||||
return (emitError("invalid integer width"), nullptr);
|
||||
if (width.getValue() > IntegerType::kMaxWidth) {
|
||||
emitError(getToken().getLoc(), "integer bitwidth is limited to ")
|
||||
|
@ -432,7 +432,7 @@ GlobalOp Importer::processGlobal(llvm::GlobalVariable *gv) {
|
||||
|
||||
uint64_t alignment = 0;
|
||||
llvm::MaybeAlign maybeAlign = gv->getAlign();
|
||||
if (maybeAlign.hasValue()) {
|
||||
if (maybeAlign.has_value()) {
|
||||
llvm::Align align = maybeAlign.getValue();
|
||||
alignment = align.value();
|
||||
}
|
||||
|
@ -28,7 +28,7 @@ using namespace mlir;
|
||||
namespace {
|
||||
static llvm::omp::ScheduleKind
|
||||
convertToScheduleKind(Optional<omp::ClauseScheduleKind> schedKind) {
|
||||
if (!schedKind.hasValue())
|
||||
if (!schedKind.has_value())
|
||||
return llvm::omp::OMP_SCHEDULE_Default;
|
||||
switch (schedKind.getValue()) {
|
||||
case omp::ClauseScheduleKind::Static:
|
||||
|
@ -666,16 +666,16 @@ LogicalResult ModuleTranslation::convertGlobals() {
|
||||
: llvm::GlobalValue::NotThreadLocal,
|
||||
addrSpace);
|
||||
|
||||
if (op.getUnnamedAddr().hasValue())
|
||||
if (op.getUnnamedAddr().has_value())
|
||||
var->setUnnamedAddr(convertUnnamedAddrToLLVM(*op.getUnnamedAddr()));
|
||||
|
||||
if (op.getSection().hasValue())
|
||||
if (op.getSection().has_value())
|
||||
var->setSection(*op.getSection());
|
||||
|
||||
addRuntimePreemptionSpecifier(op.getDsoLocal(), var);
|
||||
|
||||
Optional<uint64_t> alignment = op.getAlignment();
|
||||
if (alignment.hasValue())
|
||||
if (alignment.has_value())
|
||||
var->setAlignment(llvm::MaybeAlign(alignment.getValue()));
|
||||
|
||||
globalsMapping.try_emplace(op, var);
|
||||
|
@ -52,13 +52,13 @@ getDirectionVectorStr(bool ret, unsigned numCommonLoops, unsigned loopNestDepth,
|
||||
std::string result;
|
||||
for (const auto &dependenceComponent : dependenceComponents) {
|
||||
std::string lbStr = "-inf";
|
||||
if (dependenceComponent.lb.hasValue() &&
|
||||
if (dependenceComponent.lb.has_value() &&
|
||||
dependenceComponent.lb.getValue() !=
|
||||
std::numeric_limits<int64_t>::min())
|
||||
lbStr = std::to_string(dependenceComponent.lb.getValue());
|
||||
|
||||
std::string ubStr = "+inf";
|
||||
if (dependenceComponent.ub.hasValue() &&
|
||||
if (dependenceComponent.ub.has_value() &&
|
||||
dependenceComponent.ub.getValue() !=
|
||||
std::numeric_limits<int64_t>::max())
|
||||
ubStr = std::to_string(dependenceComponent.ub.getValue());
|
||||
|
@ -1417,7 +1417,7 @@ void RegionIfOp::getSuccessorRegions(
|
||||
Optional<unsigned> index, ArrayRef<Attribute> operands,
|
||||
SmallVectorImpl<RegionSuccessor> ®ions) {
|
||||
// We always branch to the join region.
|
||||
if (index.hasValue()) {
|
||||
if (index.has_value()) {
|
||||
if (index.getValue() < 2)
|
||||
regions.push_back(RegionSuccessor(&getJoinRegion(), getJoinArgs()));
|
||||
else
|
||||
|
@ -30,7 +30,7 @@ static LogicalResult replaceWithConstant(DataFlowSolver &solver, OpBuilder &b,
|
||||
const ConstantIntRanges &inferredRange =
|
||||
maybeInferredRange->getValue().getValue();
|
||||
Optional<APInt> maybeConstValue = inferredRange.getConstantValue();
|
||||
if (!maybeConstValue.hasValue())
|
||||
if (!maybeConstValue.has_value())
|
||||
return failure();
|
||||
|
||||
Operation *maybeDefiningOp = value.getDefiningOp();
|
||||
|
@ -730,8 +730,8 @@ static LogicalResult generateNamedGenericOpOds(LinalgOpConfig &opConfig,
|
||||
}
|
||||
// Add the index attributes to the op definition and builders.
|
||||
if (arg.kind == LinalgOperandDefKind::IndexAttr) {
|
||||
assert(arg.indexAttrMap.hasValue());
|
||||
assert(arg.defaultIndices.hasValue());
|
||||
assert(arg.indexAttrMap.has_value());
|
||||
assert(arg.defaultIndices.has_value());
|
||||
size_t size = arg.indexAttrMap->affineMap().getNumResults();
|
||||
assert(arg.defaultIndices.getValue().size() == size);
|
||||
static const char typeFmt[] = "RankedI64ElementsAttr<[{0}]>";
|
||||
@ -1101,7 +1101,7 @@ if ({1}Iter != attrs.end()) {{
|
||||
// Add the optional type parameter to the operands.
|
||||
SmallVector<std::string> operandCppValues;
|
||||
if (expression.scalarFn->kind == ScalarFnKind::Type) {
|
||||
assert(expression.scalarFn->typeVar.hasValue());
|
||||
assert(expression.scalarFn->typeVar.has_value());
|
||||
Optional<std::string> typeCppValue =
|
||||
findTypeValue(expression.scalarFn->typeVar.getValue(), args);
|
||||
if (!typeCppValue) {
|
||||
|
@ -66,8 +66,8 @@ static void checkSample(bool hasSample, const IntegerPolyhedron &poly,
|
||||
maybeLexMin = poly.findIntegerLexMin();
|
||||
|
||||
if (!hasSample) {
|
||||
EXPECT_FALSE(maybeSample.hasValue());
|
||||
if (maybeSample.hasValue()) {
|
||||
EXPECT_FALSE(maybeSample.has_value());
|
||||
if (maybeSample.has_value()) {
|
||||
llvm::errs() << "findIntegerSample gave sample: ";
|
||||
dump(*maybeSample);
|
||||
}
|
||||
@ -78,7 +78,7 @@ static void checkSample(bool hasSample, const IntegerPolyhedron &poly,
|
||||
dump(*maybeLexMin);
|
||||
}
|
||||
} else {
|
||||
ASSERT_TRUE(maybeSample.hasValue());
|
||||
ASSERT_TRUE(maybeSample.has_value());
|
||||
EXPECT_TRUE(poly.containsPoint(*maybeSample));
|
||||
|
||||
ASSERT_FALSE(maybeLexMin.isEmpty());
|
||||
|
@ -138,7 +138,7 @@ TEST(PWMAFunction, valueAt) {
|
||||
EXPECT_THAT(*nonNegPWMAF.valueAt({2, 3}), ElementsAre(11, 23));
|
||||
EXPECT_THAT(*nonNegPWMAF.valueAt({-2, 3}), ElementsAre(11, 23));
|
||||
EXPECT_THAT(*nonNegPWMAF.valueAt({2, -3}), ElementsAre(-1, -1));
|
||||
EXPECT_FALSE(nonNegPWMAF.valueAt({-2, -3}).hasValue());
|
||||
EXPECT_FALSE(nonNegPWMAF.valueAt({-2, -3}).has_value());
|
||||
|
||||
PWMAFunction divPWMAF = parsePWMAF(
|
||||
/*numInputs=*/2, /*numOutputs=*/2,
|
||||
@ -149,11 +149,11 @@ TEST(PWMAFunction, valueAt) {
|
||||
});
|
||||
EXPECT_THAT(*divPWMAF.valueAt({4, 3}), ElementsAre(11, 23));
|
||||
EXPECT_THAT(*divPWMAF.valueAt({4, -3}), ElementsAre(-1, -1));
|
||||
EXPECT_FALSE(divPWMAF.valueAt({3, 3}).hasValue());
|
||||
EXPECT_FALSE(divPWMAF.valueAt({3, -3}).hasValue());
|
||||
EXPECT_FALSE(divPWMAF.valueAt({3, 3}).has_value());
|
||||
EXPECT_FALSE(divPWMAF.valueAt({3, -3}).has_value());
|
||||
|
||||
EXPECT_THAT(*divPWMAF.valueAt({-2, 3}), ElementsAre(11, 23));
|
||||
EXPECT_FALSE(divPWMAF.valueAt({-2, -3}).hasValue());
|
||||
EXPECT_FALSE(divPWMAF.valueAt({-2, -3}).has_value());
|
||||
}
|
||||
|
||||
TEST(PWMAFunction, removeIdRangeRegressionTest) {
|
||||
|
@ -844,7 +844,7 @@ void testComputeReprAtPoints(IntegerPolyhedron poly,
|
||||
EXPECT_TRUE(repr.hasOnlyDivLocals());
|
||||
EXPECT_TRUE(repr.getSpace().isCompatible(poly.getSpace()));
|
||||
for (const SmallVector<int64_t, 4> &point : points) {
|
||||
EXPECT_EQ(poly.containsPointNoLocal(point).hasValue(),
|
||||
EXPECT_EQ(poly.containsPointNoLocal(point).has_value(),
|
||||
repr.containsPoint(point));
|
||||
}
|
||||
}
|
||||
|
@ -196,7 +196,7 @@ TEST(SimplexTest, getSamplePointIfIntegral) {
|
||||
},
|
||||
{})
|
||||
.getSamplePointIfIntegral()
|
||||
.hasValue());
|
||||
.has_value());
|
||||
|
||||
auto maybeSample = simplexFromConstraints(2,
|
||||
{// x = y - 2.
|
||||
@ -208,7 +208,7 @@ TEST(SimplexTest, getSamplePointIfIntegral) {
|
||||
{})
|
||||
.getSamplePointIfIntegral();
|
||||
|
||||
EXPECT_TRUE(maybeSample.hasValue());
|
||||
EXPECT_TRUE(maybeSample.has_value());
|
||||
EXPECT_THAT(*maybeSample, testing::ElementsAre(0, 2));
|
||||
|
||||
auto maybeSample2 = simplexFromConstraints(2,
|
||||
@ -220,7 +220,7 @@ TEST(SimplexTest, getSamplePointIfIntegral) {
|
||||
{0, 1, -2} // y = 2.
|
||||
})
|
||||
.getSamplePointIfIntegral();
|
||||
EXPECT_TRUE(maybeSample2.hasValue());
|
||||
EXPECT_TRUE(maybeSample2.has_value());
|
||||
EXPECT_THAT(*maybeSample2, testing::ElementsAre(0, 2));
|
||||
|
||||
EXPECT_FALSE(simplexFromConstraints(1,
|
||||
@ -229,7 +229,7 @@ TEST(SimplexTest, getSamplePointIfIntegral) {
|
||||
{-2, +1}},
|
||||
{})
|
||||
.getSamplePointIfIntegral()
|
||||
.hasValue());
|
||||
.has_value());
|
||||
}
|
||||
|
||||
/// Some basic sanity checks involving zero or one variables.
|
||||
@ -545,7 +545,7 @@ TEST(SimplexTest, addDivisionVariable) {
|
||||
simplex.addInequality({1, 0, -3}); // x >= 3.
|
||||
simplex.addInequality({-1, 0, 9}); // x <= 9.
|
||||
Optional<SmallVector<int64_t, 8>> sample = simplex.findIntegerSample();
|
||||
ASSERT_TRUE(sample.hasValue());
|
||||
ASSERT_TRUE(sample.has_value());
|
||||
EXPECT_EQ((*sample)[0] / 2, (*sample)[1]);
|
||||
}
|
||||
|
||||
|
@ -248,7 +248,7 @@ TEST(NamedAttrListTest, TestAppendAssign) {
|
||||
attrs.append("foo", b.getStringAttr("zoo"));
|
||||
{
|
||||
auto dup = attrs.findDuplicate();
|
||||
ASSERT_TRUE(dup.hasValue());
|
||||
ASSERT_TRUE(dup.has_value());
|
||||
}
|
||||
|
||||
SmallVector<NamedAttribute> newAttrs = {
|
||||
@ -258,7 +258,7 @@ TEST(NamedAttrListTest, TestAppendAssign) {
|
||||
attrs.assign(newAttrs);
|
||||
|
||||
auto dup = attrs.findDuplicate();
|
||||
ASSERT_FALSE(dup.hasValue());
|
||||
ASSERT_FALSE(dup.has_value());
|
||||
|
||||
{
|
||||
auto *it = attrs.begin();
|
||||
|
@ -79,7 +79,7 @@ struct DoubleLoopRegionsOp
|
||||
void getSuccessorRegions(Optional<unsigned> index,
|
||||
ArrayRef<Attribute> operands,
|
||||
SmallVectorImpl<RegionSuccessor> ®ions) {
|
||||
if (index.hasValue()) {
|
||||
if (index.has_value()) {
|
||||
regions.push_back(RegionSuccessor());
|
||||
regions.push_back(RegionSuccessor(&getOperation()->getRegion(*index)));
|
||||
}
|
||||
|
@ -52,8 +52,8 @@ TEST(AnalysisManagerTest, FineGrainModuleAnalysisPreservation) {
|
||||
am.invalidate(pa);
|
||||
|
||||
// Check that only MyAnalysis is preserved.
|
||||
EXPECT_TRUE(am.getCachedAnalysis<MyAnalysis>().hasValue());
|
||||
EXPECT_FALSE(am.getCachedAnalysis<OtherAnalysis>().hasValue());
|
||||
EXPECT_TRUE(am.getCachedAnalysis<MyAnalysis>().has_value());
|
||||
EXPECT_FALSE(am.getCachedAnalysis<OtherAnalysis>().has_value());
|
||||
}
|
||||
|
||||
TEST(AnalysisManagerTest, FineGrainFunctionAnalysisPreservation) {
|
||||
@ -83,8 +83,8 @@ TEST(AnalysisManagerTest, FineGrainFunctionAnalysisPreservation) {
|
||||
fam.invalidate(pa);
|
||||
|
||||
// Check that only MyAnalysis is preserved.
|
||||
EXPECT_TRUE(fam.getCachedAnalysis<MyAnalysis>().hasValue());
|
||||
EXPECT_FALSE(fam.getCachedAnalysis<OtherAnalysis>().hasValue());
|
||||
EXPECT_TRUE(fam.getCachedAnalysis<MyAnalysis>().has_value());
|
||||
EXPECT_FALSE(fam.getCachedAnalysis<OtherAnalysis>().has_value());
|
||||
}
|
||||
|
||||
TEST(AnalysisManagerTest, FineGrainChildFunctionAnalysisPreservation) {
|
||||
@ -106,7 +106,7 @@ TEST(AnalysisManagerTest, FineGrainChildFunctionAnalysisPreservation) {
|
||||
AnalysisManager am = mam;
|
||||
|
||||
// Check that the analysis cache is initially empty.
|
||||
EXPECT_FALSE(am.getCachedChildAnalysis<MyAnalysis>(func1).hasValue());
|
||||
EXPECT_FALSE(am.getCachedChildAnalysis<MyAnalysis>(func1).has_value());
|
||||
|
||||
// Query two different analyses, but only preserve one before invalidating.
|
||||
am.getChildAnalysis<MyAnalysis>(func1);
|
||||
@ -117,8 +117,8 @@ TEST(AnalysisManagerTest, FineGrainChildFunctionAnalysisPreservation) {
|
||||
am.invalidate(pa);
|
||||
|
||||
// Check that only MyAnalysis is preserved.
|
||||
EXPECT_TRUE(am.getCachedChildAnalysis<MyAnalysis>(func1).hasValue());
|
||||
EXPECT_FALSE(am.getCachedChildAnalysis<OtherAnalysis>(func1).hasValue());
|
||||
EXPECT_TRUE(am.getCachedChildAnalysis<MyAnalysis>(func1).has_value());
|
||||
EXPECT_FALSE(am.getCachedChildAnalysis<OtherAnalysis>(func1).has_value());
|
||||
}
|
||||
|
||||
/// Test analyses with custom invalidation logic.
|
||||
@ -150,13 +150,13 @@ TEST(AnalysisManagerTest, CustomInvalidation) {
|
||||
// Check that the analysis is invalidated properly.
|
||||
am.getAnalysis<CustomInvalidatingAnalysis>();
|
||||
am.invalidate(pa);
|
||||
EXPECT_FALSE(am.getCachedAnalysis<CustomInvalidatingAnalysis>().hasValue());
|
||||
EXPECT_FALSE(am.getCachedAnalysis<CustomInvalidatingAnalysis>().has_value());
|
||||
|
||||
// Check that the analysis is preserved properly.
|
||||
am.getAnalysis<CustomInvalidatingAnalysis>();
|
||||
pa.preserve<TestAnalysisSet>();
|
||||
am.invalidate(pa);
|
||||
EXPECT_TRUE(am.getCachedAnalysis<CustomInvalidatingAnalysis>().hasValue());
|
||||
EXPECT_TRUE(am.getCachedAnalysis<CustomInvalidatingAnalysis>().has_value());
|
||||
}
|
||||
|
||||
TEST(AnalysisManagerTest, OpSpecificAnalysis) {
|
||||
@ -169,7 +169,7 @@ TEST(AnalysisManagerTest, OpSpecificAnalysis) {
|
||||
|
||||
// Query the op specific analysis for the module and verify that its cached.
|
||||
am.getAnalysis<OpSpecificAnalysis, ModuleOp>();
|
||||
EXPECT_TRUE(am.getCachedAnalysis<OpSpecificAnalysis>().hasValue());
|
||||
EXPECT_TRUE(am.getCachedAnalysis<OpSpecificAnalysis>().has_value());
|
||||
}
|
||||
|
||||
struct AnalysisWithDependency {
|
||||
@ -194,15 +194,15 @@ TEST(AnalysisManagerTest, DependentAnalysis) {
|
||||
AnalysisManager am = mam;
|
||||
|
||||
am.getAnalysis<AnalysisWithDependency>();
|
||||
EXPECT_TRUE(am.getCachedAnalysis<AnalysisWithDependency>().hasValue());
|
||||
EXPECT_TRUE(am.getCachedAnalysis<MyAnalysis>().hasValue());
|
||||
EXPECT_TRUE(am.getCachedAnalysis<AnalysisWithDependency>().has_value());
|
||||
EXPECT_TRUE(am.getCachedAnalysis<MyAnalysis>().has_value());
|
||||
|
||||
detail::PreservedAnalyses pa;
|
||||
pa.preserve<AnalysisWithDependency>();
|
||||
am.invalidate(pa);
|
||||
|
||||
EXPECT_FALSE(am.getCachedAnalysis<AnalysisWithDependency>().hasValue());
|
||||
EXPECT_FALSE(am.getCachedAnalysis<MyAnalysis>().hasValue());
|
||||
EXPECT_FALSE(am.getCachedAnalysis<AnalysisWithDependency>().has_value());
|
||||
EXPECT_FALSE(am.getCachedAnalysis<MyAnalysis>().has_value());
|
||||
}
|
||||
|
||||
struct AnalysisWithNestedDependency {
|
||||
@ -227,18 +227,19 @@ TEST(AnalysisManagerTest, NestedDependentAnalysis) {
|
||||
AnalysisManager am = mam;
|
||||
|
||||
am.getAnalysis<AnalysisWithNestedDependency>();
|
||||
EXPECT_TRUE(am.getCachedAnalysis<AnalysisWithNestedDependency>().hasValue());
|
||||
EXPECT_TRUE(am.getCachedAnalysis<AnalysisWithDependency>().hasValue());
|
||||
EXPECT_TRUE(am.getCachedAnalysis<MyAnalysis>().hasValue());
|
||||
EXPECT_TRUE(am.getCachedAnalysis<AnalysisWithNestedDependency>().has_value());
|
||||
EXPECT_TRUE(am.getCachedAnalysis<AnalysisWithDependency>().has_value());
|
||||
EXPECT_TRUE(am.getCachedAnalysis<MyAnalysis>().has_value());
|
||||
|
||||
detail::PreservedAnalyses pa;
|
||||
pa.preserve<AnalysisWithDependency>();
|
||||
pa.preserve<AnalysisWithNestedDependency>();
|
||||
am.invalidate(pa);
|
||||
|
||||
EXPECT_FALSE(am.getCachedAnalysis<AnalysisWithNestedDependency>().hasValue());
|
||||
EXPECT_FALSE(am.getCachedAnalysis<AnalysisWithDependency>().hasValue());
|
||||
EXPECT_FALSE(am.getCachedAnalysis<MyAnalysis>().hasValue());
|
||||
EXPECT_FALSE(
|
||||
am.getCachedAnalysis<AnalysisWithNestedDependency>().has_value());
|
||||
EXPECT_FALSE(am.getCachedAnalysis<AnalysisWithDependency>().has_value());
|
||||
EXPECT_FALSE(am.getCachedAnalysis<MyAnalysis>().has_value());
|
||||
}
|
||||
|
||||
struct AnalysisWith2Ctors {