llvm-project/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
Aart Bik 1b434652c5 [mlir][sparse] add more dimension level types and properties
We recently removed the singleton dimension level type (see the revision
https://reviews.llvm.org/D131002) since it was unimplemented but also
incomplete (properties were missing). This revision adds singleton back as
extra dimension level type, together with properties ordered/not-ordered
and unique/not-unique. Even though still not lowered to actual code, this
provides a complete way of defining many more sparse storage schemes (in
the long run, we want to support even more dimension level types and properties
using the additional extensions proposed in [Chou]).

Note that the current solution of using suffixes for the properties is not
ideal, but keeps the extension relatively simple with respect to parsing and
printing. Furthermore, it is rather consistent with the TACO implementation
which uses things like Compressed-Unique as well. Nevertheless, we probably
want to separate dimension level types from properties when we add more types
and properties.

Reviewed By: Peiming

Differential Revision: https://reviews.llvm.org/D132897
2022-08-30 10:37:49 -07:00

81 lines
3.4 KiB
C++

//===- DialectSparseTensor.cpp - 'sparse_tensor' dialect submodule --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "mlir-c/Dialect/SparseTensor.h"
#include "mlir-c/IR.h"
#include "mlir/Bindings/Python/PybindAdaptors.h"

#include <cstdint>
#include <vector>
namespace py = pybind11;
using namespace llvm;
using namespace mlir;
using namespace mlir::python::adaptors;
/// Populates the `m` submodule with the sparse_tensor dialect bindings:
/// the DimLevelType enum and the EncodingAttr attribute subclass.
static void populateDialectSparseTensorSubmodule(const py::module &m) {
  // Expose the dimension-level-type enum. The value names mirror the
  // dialect's textual format, where the -nu / -no suffixes denote the
  // not-unique / not-ordered properties. NOTE(review): the hyphenated
  // names are not valid Python identifiers, so from Python they must be
  // accessed via getattr(DimLevelType, "compressed-nu") — confirm this is
  // the intended spelling before renaming.
  py::enum_<MlirSparseTensorDimLevelType>(m, "DimLevelType", py::module_local())
      .value("dense", MLIR_SPARSE_TENSOR_DIM_LEVEL_DENSE)
      .value("compressed", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED)
      .value("compressed-nu", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU)
      .value("compressed-no", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NO)
      .value("compressed-nu-no", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU_NO)
      .value("singleton", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON)
      .value("singleton-nu", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU)
      .value("singleton-no", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NO)
      .value("singleton-nu-no", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU_NO);

  mlir_attribute_subclass(m, "EncodingAttr",
                          mlirAttributeIsASparseTensorEncodingAttr)
      .def_classmethod(
          "get",
          // Builds a sparse_tensor.encoding attribute from its components.
          // A None `dim_ordering` maps to a null affine map (identity
          // ordering on the C API side).
          [](py::object cls,
             std::vector<MlirSparseTensorDimLevelType> dimLevelTypes,
             llvm::Optional<MlirAffineMap> dimOrdering, int pointerBitWidth,
             int indexBitWidth, MlirContext context) {
            return cls(mlirSparseTensorEncodingAttrGet(
                context, dimLevelTypes.size(), dimLevelTypes.data(),
                dimOrdering ? *dimOrdering : MlirAffineMap{nullptr},
                pointerBitWidth, indexBitWidth));
          },
          py::arg("cls"), py::arg("dim_level_types"), py::arg("dim_ordering"),
          py::arg("pointer_bit_width"), py::arg("index_bit_width"),
          py::arg("context") = py::none(),
          "Gets a sparse_tensor.encoding from parameters.")
      .def_property_readonly(
          "dim_level_types",
          [](MlirAttribute self) {
            // Use intptr_t (the MLIR C API index/count type) for the loop
            // bound to avoid narrowing the level count into an int.
            const intptr_t numLevels =
                mlirSparseTensorEncodingGetNumDimLevelTypes(self);
            std::vector<MlirSparseTensorDimLevelType> ret;
            ret.reserve(numLevels);
            for (intptr_t i = 0; i < numLevels; ++i)
              ret.push_back(
                  mlirSparseTensorEncodingAttrGetDimLevelType(self, i));
            return ret;
          })
      .def_property_readonly(
          "dim_ordering",
          // Returns None (empty Optional) when the encoding carries no
          // explicit dimension ordering (null affine map).
          [](MlirAttribute self) -> llvm::Optional<MlirAffineMap> {
            MlirAffineMap ret =
                mlirSparseTensorEncodingAttrGetDimOrdering(self);
            if (mlirAffineMapIsNull(ret))
              return {};
            return ret;
          })
      .def_property_readonly(
          "pointer_bit_width",
          [](MlirAttribute self) {
            return mlirSparseTensorEncodingAttrGetPointerBitWidth(self);
          })
      .def_property_readonly("index_bit_width", [](MlirAttribute self) {
        return mlirSparseTensorEncodingAttrGetIndexBitWidth(self);
      });
}
// Entry point for the `_mlirDialectsSparseTensor` Python extension module.
PYBIND11_MODULE(_mlirDialectsSparseTensor, mod) {
  // Register the dialect bindings, then set the module docstring; the two
  // statements are independent.
  populateDialectSparseTensorSubmodule(mod);
  mod.doc() = "MLIR SparseTensor dialect.";
}