Remove MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX.

Bartosz Taudul 2019-07-29 21:56:53 +02:00
parent b496f1ff90
commit e9a0145cd5


@@ -2423,9 +2423,6 @@ private:
 if (!details::circular_less_than<index_t>(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max<size_t>::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head))) {
 return false;
 }
-#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX
-debug::DebugLock lock(mutex);
-#endif
 // Find out where we'll be inserting this block in the block index
 BlockIndexEntry* idxEntry;
 if (!insert_block_index_entry<allocMode>(idxEntry, currentTailIndex)) {
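For context, the scaffolding being removed is the queue's optional debug locking: with the macro defined, each block-index operation was serialized through a mutex so lock-free races could be ruled out while debugging. A minimal sketch of that RAII shape, assuming a plain std::mutex stands in for the library's actual debug::DebugMutex (which lives in moodycamel's internal debug header):

    #include <mutex>

    namespace debug {
        // Stand-in for moodycamel's debug mutex; it simply serializes access.
        struct DebugMutex { std::mutex m; };

        // RAII guard: locks on construction, unlocks on destruction, so the
        // macro-guarded declaration above protected the rest of its scope.
        struct DebugLock {
            explicit DebugLock(DebugMutex& mtx) : mtx_(mtx) { mtx_.m.lock(); }
            ~DebugLock() { mtx_.m.unlock(); }
        private:
            DebugMutex& mtx_;
        };
    }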
@@ -2495,11 +2492,6 @@ private:
 auto& el = *((*block)[index]);
 if (!MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, element = std::move(el))) {
-#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX
-// Note: Acquiring the mutex with every dequeue instead of only when a block
-// is released is very sub-optimal, but it is, after all, purely debug code.
-debug::DebugLock lock(producer->mutex);
-#endif
 struct Guard {
 Block* block;
 index_t index;
@@ -2524,9 +2516,6 @@ private:
 if (block->ConcurrentQueue::Block::template set_empty<implicit_context>(index)) {
 {
-#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX
-debug::DebugLock lock(mutex);
-#endif
 // Add the block back into the global free pool (and remove from block index)
 entry->value.store(nullptr, std::memory_order_relaxed);
 }
@@ -2565,9 +2554,6 @@ private:
 size_t blockBaseDiff = ((startTailIndex + count - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1)) - ((startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1));
 index_t currentTailIndex = (startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1);
 if (blockBaseDiff > 0) {
-#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX
-debug::DebugLock lock(mutex);
-#endif
 do {
 blockBaseDiff -= static_cast<index_t>(BLOCK_SIZE);
 currentTailIndex += static_cast<index_t>(BLOCK_SIZE);
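The masking in that hunk relies on BLOCK_SIZE being a power of two: x & ~(BLOCK_SIZE - 1) rounds x down to a block boundary, so blockBaseDiff measures how far the range [startTailIndex, startTailIndex + count) spills into new blocks. A small standalone check with illustrative numbers (not queue code):

    #include <cstdint>
    #include <cstdio>

    int main() {
        using index_t = std::uint32_t;
        const index_t BLOCK_SIZE = 32;  // power of two, as the queue requires
        index_t startTailIndex = 70, count = 100;

        // Round down to block boundaries at both ends of the range.
        index_t firstBase = (startTailIndex - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1);         // 64
        index_t lastBase  = (startTailIndex + count - 1) & ~static_cast<index_t>(BLOCK_SIZE - 1); // 160
        index_t blockBaseDiff = lastBase - firstBase;  // 96, i.e. three new 32-slot blocks

        std::printf("%u %u %u\n", firstBase, lastBase, blockBaseDiff);  // prints: 64 160 96
        return 0;
    }

The do-loop then advances currentTailIndex by one BLOCK_SIZE step per iteration, inserting one block-index entry per newly touched block.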
@@ -2754,9 +2740,6 @@ private:
 }
 if (block->ConcurrentQueue::Block::template set_many_empty<implicit_context>(blockStartIndex, static_cast<size_t>(endIndex - blockStartIndex))) {
-#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX
-debug::DebugLock lock(mutex);
-#endif
 entry->value.store(nullptr, std::memory_order_relaxed);
 this->parent->add_block_to_free_list(block);
 }
@@ -2772,9 +2755,6 @@ private:
 }
 if (block->ConcurrentQueue::Block::template set_many_empty<implicit_context>(blockStartIndex, static_cast<size_t>(endIndex - blockStartIndex))) {
 {
-#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX
-debug::DebugLock lock(mutex);
-#endif
 // Note that the set_many_empty above did a release, meaning that anybody who acquires the block
 // we're about to free can use it safely since our writes (and reads!) will have happened-before then.
 entry->value.store(nullptr, std::memory_order_relaxed);
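The happens-before argument in that comment is the standard release/acquire pairing from the C++11 memory model; a minimal illustration with hypothetical names (not queue code):

    #include <atomic>
    #include <cassert>
    #include <thread>

    std::atomic<bool> released{false};
    int payload = 0;

    int main() {
        std::thread producer([] {
            payload = 42;                                     // plain write
            released.store(true, std::memory_order_release);  // publish it
        });
        std::thread consumer([] {
            while (!released.load(std::memory_order_acquire)) { }  // sync with the release
            assert(payload == 42);  // guaranteed: the write happened-before this read
        });
        producer.join();
        consumer.join();
        return 0;
    }

set_many_empty plays the producer's role here: its release store publishes all prior reads and writes to the block before the block is returned to the free pool.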
@@ -2858,9 +2838,6 @@ private:
 inline size_t get_block_index_index_for_index(index_t index, BlockIndexHeader*& localBlockIndex) const
 {
-#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX
-debug::DebugLock lock(mutex);
-#endif
 index &= ~static_cast<index_t>(BLOCK_SIZE - 1);
 localBlockIndex = blockIndex.load(std::memory_order_acquire);
 auto tail = localBlockIndex->tail.load(std::memory_order_acquire);
@@ -2927,10 +2904,6 @@ private:
 details::ThreadExitListener threadExitListener;
 private:
 #endif
-#if MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX
-mutable debug::DebugMutex mutex;
-#endif
 };