
This relands a previously reverted patch. The revert was triggered by a build-bot failure that threw `CUDA_ERROR_UNSUPPORTED_PTX_VERSION`; the root cause was a pass compiling with `+ptx76` while the bot was running an old CUDA driver that did not support that PTX version. This commit relands the patch with `+ptx60`.

Original GitHub PR: #65768

Original commit message:

Migrate tests referencing `gpu-to-cubin` to the new compilation workflow using `TargetAttrs`. The `test-lower-to-nvvm` pass pipeline was modified to use the new compilation workflow, to simplify the introduction of future tests. The `createLowerGpuOpsToNVVMOpsPass` function was removed, as it did not allow passing all of the options available in the `ConvertGpuOpsToNVVMOps` pass.
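For reference, a minimal sketch of the replacement pattern, assuming the TableGen-generated `ConvertGpuOpsToNVVMOpsOptions` struct and its `createConvertGpuOpsToNVVMOps(options)` overload; the specific option fields set below are illustrative assumptions, not part of this patch:

#include "mlir/Conversion/Passes.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Pass/PassManager.h"

// Illustrative only: the removed createLowerGpuOpsToNVVMOpsPass could not
// forward all pass options; with the generated factory, the full options
// struct is passed directly. Field names are assumptions based on the
// pass's TableGen definition.
static void addGpuToNVVM(mlir::OpPassManager &pm) {
  mlir::ConvertGpuOpsToNVVMOpsOptions options;
  options.indexBitwidth = 32;        // assumed option: index-bitwidth
  options.useBarePtrCallConv = true; // assumed option: use-bare-ptr-memref-call-conv
  pm.addNestedPass<mlir::gpu::GPUModuleOp>(
      mlir::createConvertGpuOpsToNVVMOps(options));
}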
//===- SparseTensorPipelines.cpp - Pipelines for sparse tensor code -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/SparseTensor/Pipelines/Passes.h"

#include "mlir/Conversion/GPUToNVVM/GPUToNVVMPass.h"
#include "mlir/Conversion/Passes.h"
#include "mlir/Dialect/Arith/Transforms/Passes.h"
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/MemRef/Transforms/Passes.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"

//===----------------------------------------------------------------------===//
// Pipeline implementation.
//===----------------------------------------------------------------------===//

void mlir::sparse_tensor::buildSparseCompiler(
    OpPassManager &pm, const SparseCompilerOptions &options) {
  pm.addNestedPass<func::FuncOp>(createLinalgGeneralizationPass());
  pm.addPass(createSparsificationAndBufferizationPass(
      getBufferizationOptionsForSparsification(
          options.testBufferizationAnalysisOnly),
      options.sparsificationOptions(), options.sparseTensorConversionOptions(),
      options.createSparseDeallocs, options.enableRuntimeLibrary,
      options.enableBufferInitialization, options.vectorLength,
      /*enableVLAVectorization=*/options.armSVE,
      /*enableSIMDIndex32=*/options.force32BitVectorIndices));
  if (options.testBufferizationAnalysisOnly)
    return;
  pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
  pm.addNestedPass<func::FuncOp>(
      mlir::bufferization::createFinalizingBufferizePass());

  // GPU code generation.
  const bool gpuCodegen = options.gpuTriple.hasValue();
  if (gpuCodegen) {
    pm.addPass(createSparseGPUCodegenPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createStripDebugInfoPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createConvertSCFToCFPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createConvertGpuOpsToNVVMOps());
  }

  // TODO(springerm): Add sparse support to the BufferDeallocation pass and add
  // it to this pipeline.
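  // Lower the remaining dense code: linalg to loops, then vector, memref,
  // affine, math, and complex ops, all the way down to the LLVM dialect.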
  pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
  pm.addNestedPass<func::FuncOp>(createConvertVectorToSCFPass());
  pm.addNestedPass<func::FuncOp>(memref::createExpandReallocPass());
  pm.addNestedPass<func::FuncOp>(createConvertSCFToCFPass());
  pm.addPass(memref::createExpandStridedMetadataPass());
  pm.addPass(createLowerAffinePass());
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createFinalizeMemRefToLLVMConversionPass());
  pm.addNestedPass<func::FuncOp>(createConvertComplexToStandardPass());
  pm.addNestedPass<func::FuncOp>(arith::createArithExpandOpsPass());
  pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
  pm.addPass(createConvertMathToLibmPass());
  pm.addPass(createConvertComplexToLibmPass());
  // Repeat convert-vector-to-llvm.
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createConvertComplexToLLVMPass());
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createConvertFuncToLLVMPass());

  // Finalize GPU code generation.
  if (gpuCodegen) {
#if MLIR_GPU_TO_CUBIN_PASS_ENABLE
    pm.addNestedPass<gpu::GPUModuleOp>(createGpuSerializeToCubinPass(
        options.gpuTriple, options.gpuChip, options.gpuFeatures));
#endif
    pm.addPass(createGpuToLLVMConversionPass());
  }

  pm.addPass(createReconcileUnrealizedCastsPass());
}

//===----------------------------------------------------------------------===//
// Pipeline registration.
//===----------------------------------------------------------------------===//

void mlir::sparse_tensor::registerSparseTensorPipelines() {
  PassPipelineRegistration<SparseCompilerOptions>(
      "sparse-compiler",
      "The standard pipeline for taking sparsity-agnostic IR using the"
      " sparse-tensor type, and lowering it to LLVM IR with concrete"
      " representations and algorithms for sparse tensors.",
      buildSparseCompiler);
}
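For illustration, a minimal sketch of driving this pipeline programmatically; `runSparseCompiler` is a hypothetical helper, not part of this patch, and default `SparseCompilerOptions` are assumed:

#include "mlir/Dialect/SparseTensor/Pipelines/Passes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"

// Hypothetical helper for illustration only: builds the sparse-compiler
// pipeline with default options and runs it over the given module.
static mlir::LogicalResult runSparseCompiler(mlir::ModuleOp module) {
  mlir::PassManager pm(module.getContext());
  mlir::sparse_tensor::SparseCompilerOptions options;
  // Setting a GPU triple would enable the GPU code generation path above,
  // e.g. options.gpuTriple = "nvptx64-nvidia-cuda"; (assumed spelling).
  mlir::sparse_tensor::buildSparseCompiler(pm, options);
  return pm.run(module);
}

Once `registerSparseTensorPipelines()` has been called at tool startup, the same pipeline is also reachable textually under its registered name, `sparse-compiler`.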