[mlir][sparse] using non-static field to avoid data races. (#81165)
parent 17f0680f69
commit 35fae044c5
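The change replaces the static `SparseIterator::emitStrategy` field (and its static setter) with a per-instance field that the iterator factory helpers receive as an explicit `SparseEmitStrategy` parameter. Below is a minimal, self-contained sketch, not the MLIR code itself, of why a class-wide static setting races when two compilations configure it concurrently while a per-instance field does not; the names `Strategy`, `StaticIterator`, and `InstanceIterator` are illustrative.

#include <cassert>
#include <thread>

enum class Strategy { kFunctional, kDebugInterface };

// Racy shape (what the diff moves away from): one setting shared by every
// iterator in the process. Two threads calling setStrategy with different
// values perform an unsynchronized write/write race, and one emitter may
// silently observe the strategy chosen by the other.
struct StaticIterator {
  static Strategy emitStrategy;
  static void setStrategy(Strategy s) { emitStrategy = s; }
};
Strategy StaticIterator::emitStrategy = Strategy::kFunctional;

// Race-free shape (what the diff moves to): each iterator carries its own
// copy of the setting, handed in by whoever created it.
struct InstanceIterator {
  Strategy emitStrategy = Strategy::kFunctional;
  void setStrategy(Strategy s) { emitStrategy = s; }
};

int main() {
  // Two emitters configured differently, as two threads might be when a
  // multi-threaded pass pipeline lowers two functions at once.
  auto run = [](Strategy s) {
    InstanceIterator it;
    it.setStrategy(s);
    assert(it.emitStrategy == s); // always the strategy this emitter asked for
  };
  std::thread a(run, Strategy::kFunctional);
  std::thread b(run, Strategy::kDebugInterface);
  a.join();
  b.join();
  return 0;
}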
@@ -94,7 +94,7 @@ void LoopEmitter::initialize(ValueRange ts, StringAttr loopTag, bool hasOutput,
   this->loopTag = loopTag;
   this->hasOutput = hasOutput;
   this->isSparseOut = isSparseOut;
-  SparseIterator::setSparseEmitStrategy(emitStrategy);
+  this->emitStrategy = emitStrategy;
 
   const unsigned numManifestTensors = ts.size();
   const unsigned synTensorId = numManifestTensors;
@@ -166,13 +166,13 @@ void LoopEmitter::initialize(ValueRange ts, StringAttr loopTag, bool hasOutput,
 std::unique_ptr<SparseIterator>
 LoopEmitter::makeLevelIterator(OpBuilder &builder, Location loc, TensorId t,
                                Level l) {
-  auto it = makeSimpleIterator(*lvls[t][l]);
+  auto it = makeSimpleIterator(*lvls[t][l], emitStrategy);
   auto stt = getSparseTensorType(tensors[t]);
   if (stt.hasEncoding() && stt.getEncoding().isSlice()) {
     Value offset = genSliceOffset(builder, loc, tensors[t], l);
     Value stride = genSliceStride(builder, loc, tensors[t], l);
-    auto slicedIt = makeSlicedLevelIterator(std::move(it), offset, stride,
-                                            lvls[t][l]->getSize());
+    auto slicedIt = makeSlicedLevelIterator(
+        std::move(it), offset, stride, lvls[t][l]->getSize(), emitStrategy);
     return slicedIt;
   }
   return it;
@@ -186,7 +186,7 @@ void LoopEmitter::initializeLoopEmit(
   TensorId synId = getSynTensorId();
   for (unsigned i = 0, e = loopHighs.size(); i < e; i++) {
     Value sz = loopHighs[i] = synSetter(builder, loc, i);
-    auto [stl, it] = makeSynLevelAndIterator(sz, synId, i);
+    auto [stl, it] = makeSynLevelAndIterator(sz, synId, i, emitStrategy);
     lvls[synId][i] = std::move(stl);
     iters[synId][i].emplace_back(std::move(it));
   }
@@ -317,12 +317,13 @@ void LoopEmitter::initSubSectIterator(OpBuilder &builder, Location loc) {
         size = ADDI(size, ADDI(MULI(idxMax, C_IDX(stride)), C_IDX(1)));
       }
       it = makeNonEmptySubSectIterator(builder, loc, parent, loopHighs[loop],
-                                       std::move(lvlIt), size, curDep.second);
+                                       std::move(lvlIt), size, curDep.second,
+                                       emitStrategy);
     } else {
       const SparseIterator &subSectIter = *iters[t][lvl].back();
       it = makeTraverseSubSectIterator(builder, loc, subSectIter, *parent,
                                        std::move(lvlIt), loopHighs[loop],
-                                       curDep.second);
+                                       curDep.second, emitStrategy);
     }
     lastIter[t] = it.get();
     iters[t][lvl].emplace_back(std::move(it));
@@ -380,6 +380,7 @@ private:
   /// tensor.
   bool hasOutput;
   bool isSparseOut;
+  SparseEmitStrategy emitStrategy;
 
   //
   // Fields which have `numTensor` many entries.
@@ -773,9 +773,6 @@ public:
 // SparseIterator derived classes implementation.
 //===----------------------------------------------------------------------===//
 
-SparseEmitStrategy SparseIterator::emitStrategy =
-    SparseEmitStrategy::kFunctional;
-
 void SparseIterator::genInit(OpBuilder &b, Location l,
                              const SparseIterator *p) {
   if (emitStrategy == SparseEmitStrategy::kDebugInterface) {
@@ -1303,27 +1300,38 @@ sparse_tensor::makeSparseTensorLevel(OpBuilder &b, Location l, Value t,
 }
 
 std::pair<std::unique_ptr<SparseTensorLevel>, std::unique_ptr<SparseIterator>>
-sparse_tensor::makeSynLevelAndIterator(Value sz, unsigned tid, unsigned lvl) {
+sparse_tensor::makeSynLevelAndIterator(Value sz, unsigned tid, unsigned lvl,
+                                       SparseEmitStrategy strategy) {
   auto stl = std::make_unique<DenseLevel>(tid, lvl, sz, /*encoded=*/false);
   auto it = std::make_unique<TrivialIterator>(*stl);
+  it->setSparseEmitStrategy(strategy);
   return std::make_pair(std::move(stl), std::move(it));
 }
 
 std::unique_ptr<SparseIterator>
-sparse_tensor::makeSimpleIterator(const SparseTensorLevel &stl) {
+sparse_tensor::makeSimpleIterator(const SparseTensorLevel &stl,
+                                  SparseEmitStrategy strategy) {
+  std::unique_ptr<SparseIterator> ret;
   if (!isUniqueLT(stl.getLT())) {
     // We always dedupliate the non-unique level, but we should optimize it away
     // if possible.
-    return std::make_unique<DedupIterator>(stl);
+    ret = std::make_unique<DedupIterator>(stl);
+  } else {
+    ret = std::make_unique<TrivialIterator>(stl);
   }
-  return std::make_unique<TrivialIterator>(stl);
+  ret->setSparseEmitStrategy(strategy);
+  return ret;
 }
 
 std::unique_ptr<SparseIterator>
 sparse_tensor::makeSlicedLevelIterator(std::unique_ptr<SparseIterator> &&sit,
-                                       Value offset, Value stride, Value size) {
+                                       Value offset, Value stride, Value size,
+                                       SparseEmitStrategy strategy) {
 
-  return std::make_unique<FilterIterator>(std::move(sit), offset, stride, size);
+  auto ret =
+      std::make_unique<FilterIterator>(std::move(sit), offset, stride, size);
+  ret->setSparseEmitStrategy(strategy);
+  return ret;
 }
 
 static const SparseIterator *tryUnwrapFilter(const SparseIterator *it) {
@@ -1335,38 +1343,42 @@ static const SparseIterator *tryUnwrapFilter(const SparseIterator *it) {
 
 std::unique_ptr<SparseIterator> sparse_tensor::makeNonEmptySubSectIterator(
     OpBuilder &b, Location l, const SparseIterator *parent, Value loopBound,
-    std::unique_ptr<SparseIterator> &&delegate, Value size, unsigned stride) {
+    std::unique_ptr<SparseIterator> &&delegate, Value size, unsigned stride,
+    SparseEmitStrategy strategy) {
 
   // Try unwrap the NonEmptySubSectIterator from a filter parent.
   parent = tryUnwrapFilter(parent);
-  auto it = std::make_unique<NonEmptySubSectIterator>(
-      b, l, parent, std::move(delegate), size);
+  std::unique_ptr<SparseIterator> it =
+      std::make_unique<NonEmptySubSectIterator>(b, l, parent,
+                                                std::move(delegate), size);
 
   if (stride != 1) {
     // TODO: We can safely skip bound checking on sparse levels, but for dense
     // iteration space, we need the bound to infer the dense loop range.
-    return std::make_unique<FilterIterator>(std::move(it), /*offset=*/C_IDX(0),
+    it = std::make_unique<FilterIterator>(std::move(it), /*offset=*/C_IDX(0),
                                           C_IDX(stride), /*size=*/loopBound);
   }
+  it->setSparseEmitStrategy(strategy);
   return it;
 }
 
 std::unique_ptr<SparseIterator> sparse_tensor::makeTraverseSubSectIterator(
     OpBuilder &b, Location l, const SparseIterator &subSectIter,
     const SparseIterator &parent, std::unique_ptr<SparseIterator> &&wrap,
-    Value loopBound, unsigned stride) {
+    Value loopBound, unsigned stride, SparseEmitStrategy strategy) {
 
   // This must be a subsection iterator or a filtered subsection iterator.
   auto &subSect =
       llvm::cast<NonEmptySubSectIterator>(*tryUnwrapFilter(&subSectIter));
 
-  auto it = std::make_unique<SubSectIterator>(
+  std::unique_ptr<SparseIterator> it = std::make_unique<SubSectIterator>(
       subSect, *tryUnwrapFilter(&parent), std::move(wrap));
 
   if (stride != 1) {
-    return std::make_unique<FilterIterator>(std::move(it), /*offset=*/C_IDX(0),
+    it = std::make_unique<FilterIterator>(std::move(it), /*offset=*/C_IDX(0),
                                           C_IDX(stride), /*size=*/loopBound);
   }
+  it->setSparseEmitStrategy(strategy);
   return it;
 }
 
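The two factories above share the pattern the rest of the diff repeats: build the iterator, optionally wrap it in a FilterIterator, then stamp the caller-supplied strategy onto whatever object is returned, instead of having the object read a class-wide static. A self-contained sketch of that shape, assuming illustrative stand-in types (`Strategy`, `Iterator`, `Filter`, `makeIterator`) rather than the MLIR classes:

#include <memory>
#include <utility>

enum class Strategy { kFunctional, kDebugInterface };

struct Iterator {
  virtual ~Iterator() = default;
  Strategy emitStrategy = Strategy::kFunctional;
  void setEmitStrategy(Strategy s) { emitStrategy = s; }
};

// Stand-in for FilterIterator: decorates another iterator.
struct Filter : Iterator {
  explicit Filter(std::unique_ptr<Iterator> inner) : inner(std::move(inner)) {}
  std::unique_ptr<Iterator> inner;
};

// Analogue of the updated factories: build, maybe wrap, then configure the
// result with the strategy that was passed in explicitly.
std::unique_ptr<Iterator> makeIterator(unsigned stride, Strategy strategy) {
  std::unique_ptr<Iterator> it = std::make_unique<Iterator>();
  if (stride != 1)
    it = std::make_unique<Filter>(std::move(it));
  it->setEmitStrategy(strategy); // set once, on the outermost object
  return it;
}

int main() {
  auto plain = makeIterator(/*stride=*/1, Strategy::kFunctional);
  auto strided = makeIterator(/*stride=*/2, Strategy::kDebugInterface);
  // Iterators created with different strategies coexist without touching any
  // shared mutable state.
  return (plain->emitStrategy == Strategy::kFunctional &&
          strided->emitStrategy == Strategy::kDebugInterface)
             ? 0
             : 1;
}

Declaring `it` as a `std::unique_ptr<Iterator>` rather than `auto` mirrors the diff's switch to `std::unique_ptr<SparseIterator>`, which lets the same variable be reseated to the filter wrapper before the strategy is applied.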
@@ -111,8 +111,8 @@ protected:
 public:
   virtual ~SparseIterator() = default;
 
-  static void setSparseEmitStrategy(SparseEmitStrategy strategy) {
-    SparseIterator::emitStrategy = strategy;
+  void setSparseEmitStrategy(SparseEmitStrategy strategy) {
+    emitStrategy = strategy;
   }
 
   virtual std::string getDebugInterfacePrefix() const = 0;
@@ -248,7 +248,7 @@ protected:
     return ref.take_front(cursorValsCnt);
   }
 
-  static SparseEmitStrategy emitStrategy;
+  SparseEmitStrategy emitStrategy;
 
 public:
   const IterKind kind; // For LLVM-style RTTI.
@@ -277,32 +277,34 @@ std::unique_ptr<SparseTensorLevel> makeSparseTensorLevel(OpBuilder &builder,
 
 /// Helper function to create a simple SparseIterator object that iterate over
 /// the SparseTensorLevel.
-std::unique_ptr<SparseIterator>
-makeSimpleIterator(const SparseTensorLevel &stl);
+std::unique_ptr<SparseIterator> makeSimpleIterator(const SparseTensorLevel &stl,
+                                                   SparseEmitStrategy strategy);
 
 /// Helper function to create a synthetic SparseIterator object that iterate
 /// over a dense space specified by [0,`sz`).
 std::pair<std::unique_ptr<SparseTensorLevel>, std::unique_ptr<SparseIterator>>
-makeSynLevelAndIterator(Value sz, unsigned tid, unsigned lvl);
+makeSynLevelAndIterator(Value sz, unsigned tid, unsigned lvl,
+                        SparseEmitStrategy strategy);
 
 /// Helper function to create a SparseIterator object that iterate over a
 /// sliced space, the orignal space (before slicing) is traversed by `sit`.
 std::unique_ptr<SparseIterator>
 makeSlicedLevelIterator(std::unique_ptr<SparseIterator> &&sit, Value offset,
-                        Value stride, Value size);
+                        Value stride, Value size, SparseEmitStrategy strategy);
 
 /// Helper function to create a SparseIterator object that iterate over the
 /// non-empty subsections set.
 std::unique_ptr<SparseIterator> makeNonEmptySubSectIterator(
     OpBuilder &b, Location l, const SparseIterator *parent, Value loopBound,
-    std::unique_ptr<SparseIterator> &&delegate, Value size, unsigned stride);
+    std::unique_ptr<SparseIterator> &&delegate, Value size, unsigned stride,
+    SparseEmitStrategy strategy);
 
 /// Helper function to create a SparseIterator object that iterate over a
 /// non-empty subsection created by NonEmptySubSectIterator.
 std::unique_ptr<SparseIterator> makeTraverseSubSectIterator(
     OpBuilder &b, Location l, const SparseIterator &subsectIter,
     const SparseIterator &parent, std::unique_ptr<SparseIterator> &&wrap,
-    Value loopBound, unsigned stride);
+    Value loopBound, unsigned stride, SparseEmitStrategy strategy);
 
 } // namespace sparse_tensor
 } // namespace mlir