[mlir][NFC] update mlir create APIs (34/n) (#150660)

See https://github.com/llvm/llvm-project/pull/147168 for more info.
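In short, the series mechanically replaces the builder's member-template `create<Op>(...)` with each op's static `Op::create(builder, ...)`. A minimal before/after sketch, taken from the `emitc::CastOp` rewrite in the first file below:

  // Before: `create` is a member template on the rewriter, so dependent
  // (templated) contexts need the `template` disambiguator.
  auto cast = rewriter.template create<emitc::CastOp>(op.getLoc(),
                                                      castDestType, actualOp);

  // After: the op's static `create` takes the rewriter as its first
  // argument; no `template` keyword is needed.
  auto cast =
      emitc::CastOp::create(rewriter, op.getLoc(), castDestType, actualOp);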
commit 258daf5395 (parent b46527645d)
Author: Maksim Levental, committed by GitHub
Date:   2025-07-25 12:36:54 -05:00
16 changed files with 214 additions and 254 deletions


@@ -402,8 +402,8 @@ public:
     Value actualOp = adaptValueType(adaptor.getIn(), rewriter, castSrcType);
     // Actual cast (may change bitwidth)
-    auto cast = rewriter.template create<emitc::CastOp>(op.getLoc(),
-                                                        castDestType, actualOp);
+    auto cast =
+        emitc::CastOp::create(rewriter, op.getLoc(), castDestType, actualOp);
     // Cast to the expected output type
     auto result = adaptValueType(cast, rewriter, opReturnType);
@@ -507,8 +507,8 @@ public:
     Value lhs = adaptValueType(adaptor.getLhs(), rewriter, arithmeticType);
     Value rhs = adaptValueType(adaptor.getRhs(), rewriter, arithmeticType);
-    Value arithmeticResult = rewriter.template create<EmitCOp>(
-        op.getLoc(), arithmeticType, lhs, rhs);
+    Value arithmeticResult =
+        EmitCOp::create(rewriter, op.getLoc(), arithmeticType, lhs, rhs);
     Value result = adaptValueType(arithmeticResult, rewriter, type);
@@ -547,8 +547,8 @@ public:
     Value lhs = adaptValueType(adaptor.getLhs(), rewriter, arithmeticType);
     Value rhs = adaptValueType(adaptor.getRhs(), rewriter, arithmeticType);
-    Value arithmeticResult = rewriter.template create<EmitCOp>(
-        op.getLoc(), arithmeticType, lhs, rhs);
+    Value arithmeticResult =
+        EmitCOp::create(rewriter, op.getLoc(), arithmeticType, lhs, rhs);
     Value result = adaptValueType(arithmeticResult, rewriter, type);
@@ -748,8 +748,8 @@ public:
     }
     Value fpCastOperand = adaptor.getIn();
     if (actualOperandType != operandType) {
-      fpCastOperand = rewriter.template create<emitc::CastOp>(
-          castOp.getLoc(), actualOperandType, fpCastOperand);
+      fpCastOperand = emitc::CastOp::create(rewriter, castOp.getLoc(),
+                                            actualOperandType, fpCastOperand);
     }
     rewriter.replaceOpWithNewOp<emitc::CastOp>(castOp, dstType, fpCastOperand);


@@ -68,9 +68,8 @@ struct CloneOpConversion : public OpConversionPattern<bufferization::CloneOp> {
       scf::YieldOp::create(rewriter, loc, acc);
     };
-    auto size = rewriter
-                    .create<scf::ForOp>(loc, zero, rank, one, ValueRange(one),
-                                        loopBody)
+    auto size = scf::ForOp::create(rewriter, loc, zero, rank, one,
+                                   ValueRange(one), loopBody)
                     .getResult(0);
     MemRefType memrefType = MemRefType::get({ShapedType::kDynamic},


@@ -144,12 +144,11 @@ ControlFlowToSCFTransformation::createUnreachableTerminator(Location loc,
     return emitError(loc, "Cannot create unreachable terminator for '")
            << parentOp->getName() << "'";
-  return builder
-      .create<func::ReturnOp>(
-          loc, llvm::map_to_vector(funcOp.getResultTypes(),
-                                   [&](Type type) {
-                                     return getUndefValue(loc, builder, type);
-                                   }))
+  return func::ReturnOp::create(
+             builder, loc,
+             llvm::map_to_vector(
+                 funcOp.getResultTypes(),
+                 [&](Type type) { return getUndefValue(loc, builder, type); }))
       .getOperation();
 }


@@ -559,8 +559,8 @@ static Value createGroupReduceOpImpl(OpBuilder &builder, Location loc,
         builder, loc, builder.getI32Type(),
         builder.getIntegerAttr(builder.getI32Type(), *clusterSize));
-    return builder
-        .create<NonUniformOp>(loc, type, scope, groupOp, arg, clusterSizeValue)
+    return NonUniformOp::create(builder, loc, type, scope, groupOp, arg,
+                                clusterSizeValue)
         .getResult();
   }


@@ -272,9 +272,8 @@ LogicalResult ConvertToLLVMPattern::copyUnrankedDescriptors(
     // Allocate memory, copy, and free the source if necessary.
     Value memory =
-        toDynamic
-            ? builder
-                  .create<LLVM::CallOp>(loc, mallocFunc.value(), allocationSize)
+        toDynamic ? LLVM::CallOp::create(builder, loc, mallocFunc.value(),
+                                         allocationSize)
                         .getResult()
                   : LLVM::AllocaOp::create(builder, loc, getPtrType(),
                                            IntegerType::get(getContext(), 8),


@@ -35,7 +35,7 @@ static Op getOrDefineGlobal(ModuleOp &moduleOp, const Location loc,
   if (!(ret = moduleOp.lookupSymbol<Op>(name))) {
     ConversionPatternRewriter::InsertionGuard guard(rewriter);
     rewriter.setInsertionPointToStart(moduleOp.getBody());
-    ret = rewriter.template create<Op>(loc, std::forward<Args>(args)...);
+    ret = Op::create(rewriter, loc, std::forward<Args>(args)...);
   }
   return ret;
 }


@@ -575,8 +575,8 @@ private:
     Value sizePtr = LLVM::GEPOp::create(rewriter, loc, indexPtrTy,
                                         getTypeConverter()->getIndexType(),
                                         offsetPtr, idxPlusOne);
-    return rewriter
-        .create<LLVM::LoadOp>(loc, getTypeConverter()->getIndexType(), sizePtr)
+    return LLVM::LoadOp::create(rewriter, loc,
+                                getTypeConverter()->getIndexType(), sizePtr)
         .getResult();
   }


@@ -1493,11 +1493,11 @@ public:
     Value extended;
     if (op2TypeWidth < dstTypeWidth) {
       if (isUnsignedIntegerOrVector(op2Type)) {
-        extended = rewriter.template create<LLVM::ZExtOp>(
-            loc, dstType, adaptor.getOperand2());
+        extended =
+            LLVM::ZExtOp::create(rewriter, loc, dstType, adaptor.getOperand2());
       } else {
-        extended = rewriter.template create<LLVM::SExtOp>(
-            loc, dstType, adaptor.getOperand2());
+        extended =
+            LLVM::SExtOp::create(rewriter, loc, dstType, adaptor.getOperand2());
       }
     } else if (op2TypeWidth == dstTypeWidth) {
       extended = adaptor.getOperand2();
@@ -1505,8 +1505,8 @@ public:
       return failure();
     }
-    Value result = rewriter.template create<LLVMOp>(
-        loc, dstType, adaptor.getOperand1(), extended);
+    Value result =
+        LLVMOp::create(rewriter, loc, dstType, adaptor.getOperand1(), extended);
     rewriter.replaceOp(op, result);
     return success();
   }


@@ -177,9 +177,8 @@ struct ConvertShardingOp : public OpConversionPattern<ShardingOp> {
     auto type = RankedTensorType::get({nSplits, 2}, i64);
     Value resHaloSizes =
         haloSizes.empty()
-            ? rewriter
-                  .create<tensor::EmptyOp>(loc, std::array<int64_t, 2>{0, 0},
-                                           i64)
+            ? tensor::EmptyOp::create(rewriter, loc,
+                                      std::array<int64_t, 2>{0, 0}, i64)
                   .getResult()
             : tensor::FromElementsOp::create(rewriter, loc, type, haloSizes)
                   .getResult();
@@ -306,10 +305,8 @@ public:
     auto ctx = op.getContext();
     Value commWorld =
         mpi::CommWorldOp::create(rewriter, loc, mpi::CommType::get(ctx));
-    auto rank =
-        rewriter
-            .create<mpi::CommRankOp>(
-                loc,
+    auto rank = mpi::CommRankOp::create(
+                    rewriter, loc,
                     TypeRange{mpi::RetvalType::get(ctx), rewriter.getI32Type()},
                     commWorld)
                     .getRank();
@@ -703,9 +700,8 @@ struct ConvertUpdateHaloOp : public OpConversionPattern<UpdateHaloOp> {
     // subviews need Index values
     for (auto &sz : haloSizes) {
       if (auto value = dyn_cast<Value>(sz))
-        sz =
-            rewriter
-                .create<arith::IndexCastOp>(loc, rewriter.getIndexType(), value)
+        sz = arith::IndexCastOp::create(rewriter, loc, rewriter.getIndexType(),
+                                        value)
                  .getResult();
     }
@@ -758,9 +754,8 @@ struct ConvertUpdateHaloOp : public OpConversionPattern<UpdateHaloOp> {
       assert(currHaloDim >= 0 && (size_t)currHaloDim < haloSizes.size() / 2);
       // Get the linearized ids of the neighbors (down and up) for the
       // given split
-      auto tmp = rewriter
-                     .create<NeighborsLinearIndicesOp>(loc, grid, myMultiIndex,
-                                                       splitAxes)
+      auto tmp = NeighborsLinearIndicesOp::create(rewriter, loc, grid,
+                                                  myMultiIndex, splitAxes)
                      .getResults();
       // MPI operates on i32...
       Value neighbourIDs[2] = {


@@ -569,10 +569,9 @@ static Value createLinalgBodyCalculationForElementwiseOp(
     // to UIToFP.
     if (srcTy.isUnsignedInteger() && isa<FloatType>(dstTy)) {
       auto unrealizedCast =
-          rewriter
-              .create<UnrealizedConversionCastOp>(
-                  loc, rewriter.getIntegerType(srcTy.getIntOrFloatBitWidth()),
-                  args[0])
+          UnrealizedConversionCastOp::create(
+              rewriter, loc,
+              rewriter.getIntegerType(srcTy.getIntOrFloatBitWidth()), args[0])
               .getResult(0);
       return arith::UIToFPOp::create(rewriter, loc, resultTypes[0],
                                      unrealizedCast);
@@ -868,10 +867,9 @@ static Value broadcastDynamicDimension(PatternRewriter &rewriter, Location loc,
   // Emit 'linalg.generic' op
   auto resultTensor =
-      opBuilder
-          .create<linalg::GenericOp>(
-              loc, outputTensor.getType(), operand, outputTensor, affineMaps,
-              getNParallelLoopsAttrs(rank),
+      linalg::GenericOp::create(
+          opBuilder, loc, outputTensor.getType(), operand, outputTensor,
+          affineMaps, getNParallelLoopsAttrs(rank),
           [&](OpBuilder &opBuilder, Location loc, ValueRange blockArgs) {
             // Emit 'linalg.yield' op
             linalg::YieldOp::create(opBuilder, loc, blockArgs.front());
@@ -1155,10 +1153,8 @@ static LogicalResult reduceMatchAndRewriteHelper(OpTy op, uint64_t axis,
     inputs.push_back(input);
   // First fill the output buffer with the init value.
-  auto emptyTensor =
-      rewriter
-          .create<tensor::EmptyOp>(loc, reduceShape, resultTy.getElementType(),
-                                   dynDims)
+  auto emptyTensor = tensor::EmptyOp::create(rewriter, loc, reduceShape,
+                                             resultTy.getElementType(), dynDims)
                          .getResult();
   auto fillValueAttr = createInitialValueForReduceOp(op, elementTy, rewriter);
@@ -1167,8 +1163,8 @@ static LogicalResult reduceMatchAndRewriteHelper(OpTy op, uint64_t axis,
         op, "No initial value found for reduction operation");
   auto fillValue = arith::ConstantOp::create(rewriter, loc, fillValueAttr);
-  auto filledTensor = rewriter
-                          .create<linalg::FillOp>(loc, ValueRange{fillValue},
+  auto filledTensor =
+      linalg::FillOp::create(rewriter, loc, ValueRange{fillValue},
                              ValueRange{emptyTensor})
          .result();
   outputs.push_back(filledTensor);
@@ -1186,13 +1182,11 @@ static LogicalResult reduceMatchAndRewriteHelper(OpTy op, uint64_t axis,
     auto trueAttr = rewriter.getBoolAttr(true);
     auto trueValue = arith::ConstantOp::create(rewriter, loc, trueAttr);
     auto emptyBoolTensor =
-        rewriter
-            .create<tensor::EmptyOp>(loc, reduceShape, trueValue.getType(),
-                                     dynDims)
+        tensor::EmptyOp::create(rewriter, loc, reduceShape,
+                                trueValue.getType(), dynDims)
             .getResult();
     auto allResultsNaNTensor =
-        rewriter
-            .create<linalg::FillOp>(loc, ValueRange{trueValue},
+        linalg::FillOp::create(rewriter, loc, ValueRange{trueValue},
                                 ValueRange{emptyBoolTensor})
             .result();
     // Note that because the linalg::ReduceOp has two variadic arguments
@@ -1261,21 +1255,18 @@ static LogicalResult reduceMatchAndRewriteHelper(OpTy op, uint64_t axis,
         APFloat::getNaN(cast<FloatType>(elementTy).getFloatSemantics(), false));
     auto nanValue = arith::ConstantOp::create(rewriter, loc, nanValueAttr);
     auto emptyNanTensor =
-        rewriter
-            .create<tensor::EmptyOp>(loc, reduceShape,
+        tensor::EmptyOp::create(rewriter, loc, reduceShape,
                                 resultTy.getElementType(), dynDims)
             .getResult();
     auto nanFilledTensor =
-        rewriter
-            .create<linalg::FillOp>(loc, ValueRange{nanValue},
+        linalg::FillOp::create(rewriter, loc, ValueRange{nanValue},
                                 ValueRange{emptyNanTensor})
             .result();
     // Create an empty tensor, non need to fill this since it will be
     // overwritten by the select.
     auto finalEmptyTensor =
-        rewriter
-            .create<tensor::EmptyOp>(loc, reduceShape,
+        tensor::EmptyOp::create(rewriter, loc, reduceShape,
                                 resultTy.getElementType(), dynDims)
             .getResult();
@@ -1503,9 +1494,8 @@ public:
           Value shift = shiftConstant ? shiftConstant : blockArgs[shiftArg];
           if (valueTy.isUnsignedInteger()) {
-            value = nestedBuilder
-                        .create<UnrealizedConversionCastOp>(
-                            nestedLoc,
+            value = UnrealizedConversionCastOp::create(
+                        nestedBuilder, nestedLoc,
                         nestedBuilder.getIntegerType(
                             valueTy.getIntOrFloatBitWidth()),
                         value)
@@ -1557,8 +1547,7 @@ public:
           }
           if (outIntType.isUnsignedInteger()) {
-            value = nestedBuilder
-                        .create<UnrealizedConversionCastOp>(nestedLoc,
+            value = UnrealizedConversionCastOp::create(nestedBuilder, nestedLoc,
                                                          outIntType, value)
                         .getResult(0);
           }
@@ -2095,10 +2084,9 @@ public:
     Value axisDimSize = tensor::DimOp::create(rewriter, loc, input, axis);
     // First fill the output buffer with the init value.
-    auto emptyTensor = rewriter
-                           .create<tensor::EmptyOp>(loc, inputTy.getShape(),
-                                                    inputTy.getElementType(),
-                                                    ArrayRef<Value>({dynDims}))
+    auto emptyTensor = tensor::EmptyOp::create(
+                           rewriter, loc, inputTy.getShape(),
+                           inputTy.getElementType(), ArrayRef<Value>({dynDims}))
                            .getResult();
     SmallVector<AffineMap, 2> affineMaps = {
         rewriter.getMultiDimIdentityMap(resultTy.getRank())};
@@ -2241,22 +2229,21 @@ public:
     }
     // First fill the output buffer for the index.
-    auto emptyTensorIdx = rewriter
-                              .create<tensor::EmptyOp>(loc, resultTy.getShape(),
+    auto emptyTensorIdx =
+        tensor::EmptyOp::create(rewriter, loc, resultTy.getShape(),
                                 outElementTy, dynDims)
             .getResult();
     auto fillValueIdx = arith::ConstantOp::create(
         rewriter, loc, rewriter.getIntegerAttr(outElementTy, 0));
     auto filledTensorIdx =
-        rewriter
-            .create<linalg::FillOp>(loc, ValueRange{fillValueIdx},
+        linalg::FillOp::create(rewriter, loc, ValueRange{fillValueIdx},
                                 ValueRange{emptyTensorIdx})
             .result();
     // Second fill the output buffer for the running max.
-    auto emptyTensorMax = rewriter
-                              .create<tensor::EmptyOp>(loc, resultTy.getShape(),
-                                                       inElementTy, dynDims)
+    auto emptyTensorMax =
+        tensor::EmptyOp::create(rewriter, loc, resultTy.getShape(), inElementTy,
+                                dynDims)
            .getResult();
     auto fillValueMaxAttr =
         createInitialValueForReduceOp(argmaxOp, inElementTy, rewriter);
@@ -2268,8 +2255,7 @@ public:
     auto fillValueMax =
         arith::ConstantOp::create(rewriter, loc, fillValueMaxAttr);
     auto filledTensorMax =
-        rewriter
-            .create<linalg::FillOp>(loc, ValueRange{fillValueMax},
+        linalg::FillOp::create(rewriter, loc, ValueRange{fillValueMax},
                                 ValueRange{emptyTensorMax})
             .result();
@@ -2371,9 +2357,8 @@ public:
     auto loc = op.getLoc();
     auto emptyTensor =
-        rewriter
-            .create<tensor::EmptyOp>(loc, resultTy.getShape(), resultElementTy,
-                                     dynamicDims)
+        tensor::EmptyOp::create(rewriter, loc, resultTy.getShape(),
+                                resultElementTy, dynamicDims)
             .getResult();
     SmallVector<AffineMap, 2> affineMaps = {
@@ -2448,8 +2433,8 @@ public:
       }
     }
-    auto emptyTensor = rewriter
-                           .create<tensor::EmptyOp>(loc, resultTy.getShape(),
+    auto emptyTensor =
+        tensor::EmptyOp::create(rewriter, loc, resultTy.getShape(),
                                 resultElementTy, dynDims)
            .getResult();
@@ -2585,8 +2570,8 @@ struct RFFT2dConverter final : public OpRewritePattern<RFFT2dOp> {
         tensor::EmptyOp::create(rewriter, loc, type, dynamicSizes);
     auto fillValueAttr = rewriter.getZeroAttr(type.getElementType());
     auto fillValue = arith::ConstantOp::create(rewriter, loc, fillValueAttr);
-    auto filledTensor = rewriter
-                            .create<linalg::FillOp>(loc, ValueRange{fillValue},
+    auto filledTensor =
+        linalg::FillOp::create(rewriter, loc, ValueRange{fillValue},
                                 ValueRange{emptyTensor})
            .result();
     return filledTensor;


@@ -64,17 +64,18 @@ linalgIntBroadcastExtSIAdd(PatternRewriter &rewriter, Location loc, Value bias,
                            Value conv, Value result,
                            ArrayRef<AffineMap> indexingMaps) {
   ShapedType resultTy = cast<ShapedType>(conv.getType());
-  return rewriter
-      .create<linalg::GenericOp>(
-          loc, resultTy, ValueRange({bias, conv}), result, indexingMaps,
-          getNParallelLoopsAttrs(resultTy.getRank()),
+  return linalg::GenericOp::create(
+             rewriter, loc, resultTy, ValueRange({bias, conv}), result,
+             indexingMaps, getNParallelLoopsAttrs(resultTy.getRank()),
           [](OpBuilder &builder, Location loc, ValueRange args) {
             Value biasVal = args[0];
             Type resType = args[1].getType();
             if (resType != biasVal.getType()) {
-              biasVal = arith::ExtSIOp::create(builder, loc, resType, biasVal);
+              biasVal =
+                  arith::ExtSIOp::create(builder, loc, resType, biasVal);
             }
-            Value added = arith::AddIOp::create(builder, loc, biasVal, args[1]);
+            Value added =
+                arith::AddIOp::create(builder, loc, biasVal, args[1]);
             linalg::YieldOp::create(builder, loc, added);
           })
       .getResult(0);
@@ -124,10 +125,9 @@ static mlir::Value linalgBroadcastAndMaybeExt(PatternRewriter &rewriter,
   indexingMaps.push_back(rewriter.getMultiDimIdentityMap(resultRank));
   // Build the broadcast-like operation as a linalg.generic.
-  return rewriter
-      .create<linalg::GenericOp>(
-          loc, resultTy, ValueRange({source}), result, indexingMaps,
-          getNParallelLoopsAttrs(resultTy.getRank()),
+  return linalg::GenericOp::create(
+             rewriter, loc, resultTy, ValueRange({source}), result,
+             indexingMaps, getNParallelLoopsAttrs(resultTy.getRank()),
          [&resultTy](OpBuilder &builder, Location loc, ValueRange args) {
            Value biasVal = args[0];
            Type resType = args[1].getType();
@@ -136,7 +136,8 @@ static mlir::Value linalgBroadcastAndMaybeExt(PatternRewriter &rewriter,
                resultTy.getElementType().isFloat()
                    ? arith::ExtFOp::create(builder, loc, resType, biasVal)
                          .getResult()
-                   : arith::ExtSIOp::create(builder, loc, resType, biasVal)
+                   : arith::ExtSIOp::create(builder, loc, resType,
+                                            biasVal)
                          .getResult();
            }
            linalg::YieldOp::create(builder, loc, biasVal);
@@ -397,10 +398,9 @@ public:
       auto iZpVal = arith::ConstantOp::create(rewriter, loc, iZp);
       auto kZpVal = arith::ConstantOp::create(rewriter, loc, kZp);
-      Value conv =
-          rewriter
-              .create<LinalgConvQOp>(
-                  loc, resultTy, ValueRange{input, weight, iZpVal, kZpVal},
+      Value conv = LinalgConvQOp::create(
+                       rewriter, loc, resultTy,
+                       ValueRange{input, weight, iZpVal, kZpVal},
                        ValueRange{broadcastBias}, strideAttr, dilationAttr)
                        ->getResult(0);
@@ -408,9 +408,8 @@ public:
       return success();
     }
-    Value conv = rewriter
-                     .create<LinalgConvOp>(
-                         loc, accTy, ValueRange{input, weight},
+    Value conv = LinalgConvOp::create(
+                     rewriter, loc, accTy, ValueRange{input, weight},
                      ValueRange{broadcastBias}, strideAttr, dilationAttr)
                      ->getResult(0);
@@ -529,8 +528,7 @@ public:
     Value emptyTensor = tensor::EmptyOp::create(
         rewriter, loc, linalgConvTy.getShape(), accETy, filteredDims);
     Value zero = arith::ConstantOp::create(rewriter, loc, resultZeroAttr);
-    Value zeroTensor = rewriter
-                           .create<linalg::FillOp>(loc, ValueRange{zero},
+    Value zeroTensor = linalg::FillOp::create(rewriter, loc, ValueRange{zero},
                                               ValueRange{emptyTensor})
                            .result();
@@ -544,9 +542,8 @@ public:
     indexingMaps.push_back(rewriter.getMultiDimIdentityMap(resultRank));
     if (hasNullZps) {
-      Value conv = rewriter
-                       .create<linalg::DepthwiseConv2DNhwcHwcmOp>(
-                           loc, linalgConvTy, ValueRange{input, weight},
+      Value conv = linalg::DepthwiseConv2DNhwcHwcmOp::create(
+                       rewriter, loc, linalgConvTy, ValueRange{input, weight},
                        ValueRange{zeroTensor}, strideAttr, dilationAttr)
                        .getResult(0);
@@ -565,11 +562,9 @@ public:
           rewriter, loc, resultTy, conv, reassociationMap);
       Value result =
-          rewriter
-              .create<linalg::GenericOp>(
-                  loc, resultTy, ValueRange({bias, convReshape}),
-                  biasEmptyTensor, indexingMaps,
-                  getNParallelLoopsAttrs(resultRank),
+          linalg::GenericOp::create(
+              rewriter, loc, resultTy, ValueRange({bias, convReshape}),
+              biasEmptyTensor, indexingMaps, getNParallelLoopsAttrs(resultRank),
              [&](OpBuilder &nestedBuilder, Location nestedLoc,
                  ValueRange args) {
                Value added;
@@ -588,10 +583,9 @@ public:
       IntegerAttr wZp = rewriter.getI32IntegerAttr(weightZpVal);
       auto iZpVal = arith::ConstantOp::create(rewriter, loc, iZp);
       auto kZpVal = arith::ConstantOp::create(rewriter, loc, wZp);
-      Value conv =
-          rewriter
-              .create<linalg::DepthwiseConv2DNhwcHwcmQOp>(
-                  loc, linalgConvTy, ValueRange{input, weight, iZpVal, kZpVal},
+      Value conv = linalg::DepthwiseConv2DNhwcHwcmQOp::create(
+                       rewriter, loc, linalgConvTy,
+                       ValueRange{input, weight, iZpVal, kZpVal},
                        ValueRange{zeroTensor}, strideAttr, dilationAttr)
                        .getResult(0);
       SmallVector<ReassociationExprs, 4> reassociationMap;
@@ -639,8 +633,7 @@ public:
     auto emptyTensor =
         tensor::EmptyOp::create(rewriter, loc, outputTy.getShape(),
                                 outputTy.getElementType(), filteredDims);
-    Value zeroTensor = rewriter
-                           .create<linalg::FillOp>(loc, ValueRange{zero},
+    Value zeroTensor = linalg::FillOp::create(rewriter, loc, ValueRange{zero},
                                               ValueRange{emptyTensor})
                            .result();
@@ -910,8 +903,7 @@ public:
         rewriter, loc, accTy.getShape(), accETy, dynamicDims);
     Value filledEmptyTensor =
-        rewriter
-            .create<linalg::FillOp>(loc, ValueRange{initialValue},
+        linalg::FillOp::create(rewriter, loc, ValueRange{initialValue},
                                 ValueRange{poolEmptyTensor})
             .result();
@@ -919,9 +911,8 @@ public:
         tensor::EmptyOp::create(rewriter, loc, kernel, accETy);
     // Sum across the pooled region.
-    Value poolingOp = rewriter
-                          .create<linalg::PoolingNhwcSumOp>(
-                              loc, ArrayRef<Type>{accTy},
+    Value poolingOp = linalg::PoolingNhwcSumOp::create(
+                          rewriter, loc, ArrayRef<Type>{accTy},
                           ValueRange{paddedInput, fakeWindowDims},
                           filledEmptyTensor, strideAttr, dilationAttr)
                          .getResult(0);
@@ -1050,10 +1041,9 @@ public:
       Value shift = arith::AddIOp::create(rewriter, loc, k8, thirty8);
       auto scaled =
-          rewriter
-              .create<tosa::ApplyScaleOp>(
-                  loc, rewriter.getI32Type(), poolVal, multiplier, shift,
-                  rewriter.getStringAttr("SINGLE_ROUND"))
+          tosa::ApplyScaleOp::create(
+              rewriter, loc, rewriter.getI32Type(), poolVal, multiplier,
+              shift, rewriter.getStringAttr("SINGLE_ROUND"))
              .getResult();
      // If we have quantization information we need to apply output


@@ -482,10 +482,8 @@ struct CombineTransferReadOpTranspose final
         permutationMap.compose(transferReadOp.getPermutationMap());
     auto loc = op.getLoc();
-    Value result =
-        rewriter
-            .create<vector::TransferReadOp>(
-                loc, resultType, transferReadOp.getBase(),
+    Value result = vector::TransferReadOp::create(
+                       rewriter, loc, resultType, transferReadOp.getBase(),
                        transferReadOp.getIndices(), AffineMapAttr::get(newMap),
                        transferReadOp.getPadding(), transferReadOp.getMask(),
                        transferReadOp.getInBoundsAttr())


@@ -142,6 +142,7 @@ static LogicalResult convertInstructionImpl(OpBuilder &odsBuilder,
   // TODO: Implement the `convertInstruction` hooks in the
   // `LLVMDialectLLVMIRImportInterface` and move the following include there.
 #include "mlir/Dialect/LLVMIR/LLVMOpFromLLVMIRConversions.inc"
+
   return failure();
 }
@@ -1626,9 +1627,8 @@ FailureOr<Value> ModuleImport::convertConstant(llvm::Constant *constant) {
   // Convert dso_local_equivalent.
   if (auto *dsoLocalEquivalent = dyn_cast<llvm::DSOLocalEquivalent>(constant)) {
     Type type = convertType(dsoLocalEquivalent->getType());
-    return builder
-        .create<DSOLocalEquivalentOp>(
-            loc, type,
+    return DSOLocalEquivalentOp::create(
+               builder, loc, type,
                FlatSymbolRefAttr::get(
                    builder.getContext(),
                    dsoLocalEquivalent->getGlobalValue()->getName()))
@@ -1736,8 +1736,8 @@ FailureOr<Value> ModuleImport::convertConstant(llvm::Constant *constant) {
         FlatSymbolRefAttr::get(context, blockAddr->getFunction()->getName());
     auto blockTag =
         BlockTagAttr::get(context, blockAddr->getBasicBlock()->getNumber());
-    return builder
-        .create<BlockAddressOp>(loc, convertType(blockAddr->getType()),
+    return BlockAddressOp::create(
+               builder, loc, convertType(blockAddr->getType()),
                BlockAddressAttr::get(context, fnSym, blockTag))
         .getRes();
   }
@@ -2228,9 +2228,8 @@ LogicalResult ModuleImport::convertInstruction(llvm::Instruction *inst) {
     if (!resultTy)
       return failure();
     ArrayAttr operandAttrs = convertAsmInlineOperandAttrs(*callInst);
-    return builder
-        .create<InlineAsmOp>(
-            loc, resultTy, *operands,
+    return InlineAsmOp::create(
+               builder, loc, resultTy, *operands,
                builder.getStringAttr(asmI->getAsmString()),
                builder.getStringAttr(asmI->getConstraintString()),
                asmI->hasSideEffects(), asmI->isAlignStack(),


@@ -72,15 +72,14 @@ struct TestReshardingRewritePattern : OpRewritePattern<ShardOp> {
       ShapedType sourceShardShape =
           shardShapedType(op.getResult().getType(), grid, op.getSharding());
       TypedValue<ShapedType> sourceShard = cast<TypedValue<ShapedType>>(
-          builder
-              .create<UnrealizedConversionCastOp>(sourceShardShape, op.getSrc())
+          UnrealizedConversionCastOp::create(builder, sourceShardShape,
+                                             op.getSrc())
              ->getResult(0));
      TypedValue<ShapedType> targetShard =
          reshard(builder, grid, op, targetShardOp, sourceShard);
      Value newTargetUnsharded =
-          builder
-              .create<UnrealizedConversionCastOp>(
-                  targetShardOp.getResult().getType(), targetShard)
+          UnrealizedConversionCastOp::create(
+              builder, targetShardOp.getResult().getType(), targetShard)
              ->getResult(0);
      rewriter.replaceAllUsesWith(targetShardOp.getResult(),
                                  newTargetUnsharded);


@@ -1007,8 +1007,7 @@ struct TestPassthroughInvalidOp : public ConversionPattern {
       // This is a 1:N replacement. Insert a test.cast op. (That's what the
       // argument materialization used to do.)
       flattened.push_back(
-          rewriter
-              .create<TestCastOp>(op->getLoc(),
+          TestCastOp::create(rewriter, op->getLoc(),
                                  op->getOperand(it.index()).getType(), range)
              .getResult());
    }


@@ -569,8 +569,7 @@ static Value warpReduction(Location loc, OpBuilder &builder, Value input,
   Value laneVal = vector::ReductionOp::create(builder, loc, kind, input);
   // Parallel reduction using butterfly shuffles.
   for (uint64_t i = 1; i < size; i <<= 1) {
-    Value shuffled = builder
-                         .create<gpu::ShuffleOp>(loc, laneVal, i,
+    Value shuffled = gpu::ShuffleOp::create(builder, loc, laneVal, i,
                                             /*width=*/size,
                                             /*mode=*/gpu::ShuffleMode::XOR)
                          .getShuffleResult();
@@ -650,9 +649,8 @@ struct TestVectorDistribution
           arith::IndexCastOp::create(builder, loc, i32Type, srcIdx);
       Value warpSzI32 = arith::ConstantOp::create(
           builder, loc, builder.getIntegerAttr(i32Type, warpSz));
-      Value result = builder
-                         .create<gpu::ShuffleOp>(loc, val, srcIdxI32, warpSzI32,
-                                                 gpu::ShuffleMode::IDX)
+      Value result = gpu::ShuffleOp::create(builder, loc, val, srcIdxI32,
+                                            warpSzI32, gpu::ShuffleMode::IDX)
                          .getResult(0);
       return result;
     };