llvm-project/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
River Riddle ace01605e0 [mlir] Split out a new ControlFlow dialect from Standard
This dialect is intended to model lower-level, branch-based control-flow constructs. The initial
set of operations is AssertOp, BranchOp, CondBranchOp, and SwitchOp, all split out from the
current Standard dialect.

See https://discourse.llvm.org/t/standard-dialect-the-final-chapter/6061

Differential Revision: https://reviews.llvm.org/D118966
2022-02-06 14:51:16 -08:00

//===- SparseTensorPipelines.cpp - Pipelines for sparse tensor code -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/SparseTensor/Pipelines/Passes.h"
#include "mlir/Conversion/Passes.h"
#include "mlir/Dialect/Arithmetic/Transforms/Passes.h"
#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Dialect/StandardOps/Transforms/Passes.h"
#include "mlir/Dialect/Tensor/Transforms/Passes.h"
#include "mlir/Pass/PassManager.h"
using namespace mlir;
using namespace mlir::sparse_tensor;
//===----------------------------------------------------------------------===//
// Pipeline implementation.
//===----------------------------------------------------------------------===//
void mlir::sparse_tensor::buildSparseCompiler(
    OpPassManager &pm, const SparseCompilerOptions &options) {
  // TODO(wrengr): ensure the original `pm` is for ModuleOp
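  // Sparsification: rewrite linalg ops on sparsity-annotated tensors into
  // loops over their sparse storage, then convert the sparse tensor types
  // and primitives into calls to the runtime support library.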
  pm.addPass(createSparsificationPass(options.sparsificationOptions()));
  pm.addPass(createSparseTensorConversionPass());
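  // Bufferization: rewrite the remaining tensor IR on memrefs, lowering
  // structured control flow to branch-based control flow along the way.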
  pm.addNestedPass<FuncOp>(createLinalgBufferizePass());
  pm.addNestedPass<FuncOp>(createConvertLinalgToLoopsPass());
  pm.addNestedPass<FuncOp>(createConvertVectorToSCFPass());
  pm.addNestedPass<FuncOp>(createConvertSCFToCFPass());
  pm.addPass(createFuncBufferizePass());
  pm.addPass(arith::createConstantBufferizePass());
  pm.addNestedPass<FuncOp>(createTensorBufferizePass());
  pm.addNestedPass<FuncOp>(
      mlir::bufferization::createFinalizingBufferizePass());
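  // Lowering: progressively convert the remaining dialects (affine, vector,
  // memref, math, std) to the LLVM dialect and reconcile leftover casts.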
  pm.addPass(createLowerAffinePass());
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createMemRefToLLVMPass());
  pm.addNestedPass<FuncOp>(createConvertMathToLLVMPass());
  pm.addPass(createLowerToLLVMPass()); // --convert-std-to-llvm
  pm.addPass(createReconcileUnrealizedCastsPass());
}

//===----------------------------------------------------------------------===//
// Pipeline registration.
//===----------------------------------------------------------------------===//

void mlir::sparse_tensor::registerSparseTensorPipelines() {
  PassPipelineRegistration<SparseCompilerOptions>(
      "sparse-compiler",
      "The standard pipeline for taking sparsity-agnostic IR using the"
      " sparse-tensor type, and lowering it to LLVM IR with concrete"
      " representations and algorithms for sparse tensors.",
      buildSparseCompiler);
}
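
For reference: once registerSparseTensorPipelines() has run (tools typically pull it in via
registerAllPasses()), the pipeline is available on the command line under its registered
"sparse-compiler" name. The pipeline can also be assembled programmatically; below is a minimal
sketch of that route, assuming an already-parsed ModuleOp. The helper name runSparseCompiler is
hypothetical and not part of the file above.

// Illustrative sketch (not part of SparseTensorPipelines.cpp): build the
// sparse compiler pipeline on a fresh PassManager and run it on a module.
#include "mlir/Dialect/SparseTensor/Pipelines/Passes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"

static mlir::LogicalResult runSparseCompiler(mlir::ModuleOp module) {
  mlir::PassManager pm(module.getContext());
  // Use the default pipeline options; callers could tune vectorization,
  // parallelization, etc. through this struct instead.
  mlir::sparse_tensor::SparseCompilerOptions options;
  mlir::sparse_tensor::buildSparseCompiler(pm, options);
  return pm.run(module);
}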