[MLIR] Fix assert expressions (#112474)
I noticed that several assertions in the MLIR codebase have issues with operator precedence. The problem arises from the way logical operators are evaluated: the `&&` operator has higher precedence than the `||` operator, which means these assertions currently evaluate incorrectly, like this: ``` assert((resType.getNumDynamicDims() == dynOutDims.size()) || (dynOutDims.empty() && "Either none or all output dynamic dims must be specified!")); ``` We should add parentheses around the entire expression involving `dynOutDims.empty()` to ensure that the logical conditions are grouped correctly. Here’s the corrected version: ``` assert(((resType.getNumDynamicDims() == dynOutDims.size()) || dynOutDims.empty()) && "Either none or all output dynamic dims must be specified!"); ```
This commit is contained in:
parent
0a53f43c0c
commit
a24c468782
@ -892,8 +892,8 @@ FlatLinearValueConstraints::FlatLinearValueConstraints(IntegerSet set,
|
||||
set.getNumDims() + set.getNumSymbols() + 1,
|
||||
set.getNumDims(), set.getNumSymbols(),
|
||||
/*numLocals=*/0) {
|
||||
assert(operands.empty() ||
|
||||
set.getNumInputs() == operands.size() && "operand count mismatch");
|
||||
assert((operands.empty() || set.getNumInputs() == operands.size()) &&
|
||||
"operand count mismatch");
|
||||
// Set the values for the non-local variables.
|
||||
for (unsigned i = 0, e = operands.size(); i < e; ++i)
|
||||
setValue(i, operands[i]);
|
||||
|
@ -840,11 +840,11 @@ enum VectorMemoryAccessKind { ScalarBroadcast, Contiguous, Gather };
|
||||
/// TODO: Statically shaped loops + vector masking
|
||||
static uint64_t getTrailingNonUnitLoopDimIdx(LinalgOp linalgOp) {
|
||||
SmallVector<int64_t> loopRanges = linalgOp.getStaticLoopRanges();
|
||||
assert(linalgOp.hasDynamicShape() ||
|
||||
llvm::count_if(loopRanges, [](int64_t dim) { return dim != 1; }) ==
|
||||
1 &&
|
||||
"For statically shaped Linalg Ops, only one "
|
||||
"non-unit loop dim is expected");
|
||||
assert(
|
||||
(linalgOp.hasDynamicShape() ||
|
||||
llvm::count_if(loopRanges, [](int64_t dim) { return dim != 1; }) == 1) &&
|
||||
"For statically shaped Linalg Ops, only one "
|
||||
"non-unit loop dim is expected");
|
||||
|
||||
size_t idx = loopRanges.size() - 1;
|
||||
for (; idx >= 0; idx--)
|
||||
|
@ -27,9 +27,9 @@ PadOp mlir::tensor::createPadHighOp(RankedTensorType resType, Value source,
|
||||
OpBuilder &b,
|
||||
SmallVector<Value> dynOutDims) {
|
||||
|
||||
assert((resType.getNumDynamicDims() == dynOutDims.size()) ||
|
||||
dynOutDims.empty() &&
|
||||
"Either none or all output dynamic dims must be specified!");
|
||||
assert(((resType.getNumDynamicDims() == dynOutDims.size()) ||
|
||||
dynOutDims.empty()) &&
|
||||
"Either none or all output dynamic dims must be specified!");
|
||||
|
||||
// Init "low" and "high" padding values ("low" is kept as is, "high" is
|
||||
// computed below).
|
||||
|
Loading…
x
Reference in New Issue
Block a user