Merge pull request #109 from JustSid/master

Fixed a race condition with incremental defragmentation.
Adam Sawicki, 2020-03-31 17:01:25 +02:00 (committed by GitHub)
commit a39951c716


@@ -6523,6 +6523,7 @@ public:
         VkCommandBuffer commandBuffer);
     void DefragmentationEnd(
         class VmaBlockVectorDefragmentationContext* pCtx,
+        uint32_t flags,
         VmaDefragmentationStats* pStats);
     uint32_t ProcessDefragmentations(
@@ -13180,22 +13181,36 @@ void VmaBlockVector::Defragment(
 void VmaBlockVector::DefragmentationEnd(
     class VmaBlockVectorDefragmentationContext* pCtx,
+    uint32_t flags,
     VmaDefragmentationStats* pStats)
 {
-    // Destroy buffers.
-    for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
+    if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
     {
-        VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
-        if(blockCtx.hBuffer)
-        {
-            (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
-                m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
-        }
+        VMA_ASSERT(pCtx->mutexLocked == false);
+
+        // Incremental defragmentation doesn't hold the lock, so when we enter here we don't actually have any
+        // lock protecting us. Since we mutate state here, we have to take the lock out now
+        m_Mutex.LockWrite();
+        pCtx->mutexLocked = true;
     }
 
-    if(pCtx->res >= VK_SUCCESS)
+    // If the mutex isn't locked we didn't do any work and there is nothing to delete.
+    if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
     {
-        FreeEmptyBlocks(pStats);
+        // Destroy buffers.
+        for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
+        {
+            VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
+            if(blockCtx.hBuffer)
+            {
+                (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
+            }
+        }
+
+        if(pCtx->res >= VK_SUCCESS)
+        {
+            FreeEmptyBlocks(pStats);
+        }
     }
 
     if(pCtx->mutexLocked)
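
Taken in isolation, the idiom this hunk introduces is: when the incremental flag is set, the context arrives without the write lock held, so the lock is taken before any shared state is mutated, and ownership is recorded in the context so the shared cleanup and unlock paths below work for both modes. Here is a minimal, compilable sketch of that idiom; the names (DefragContext, BlockVector, DEFRAG_FLAG_INCREMENTAL) and std::shared_mutex are stand-ins invented for illustration, not VMA's internals (which use VMA_RW_MUTEX and VmaBlockVectorDefragmentationContext).

// Minimal sketch of the lock-on-demand idiom above. All names here are
// invented for illustration; they are not VMA's real types.
#include <cassert>
#include <cstdint>
#include <shared_mutex>

constexpr uint32_t DEFRAG_FLAG_INCREMENTAL = 0x1;

struct DefragContext
{
    bool mutexLocked = false; // true while this context owns the write lock
};

class BlockVector
{
public:
    explicit BlockVector(bool useMutex) : m_UseMutex(useMutex) {}

    void DefragmentationEnd(DefragContext& ctx, uint32_t flags)
    {
        if((flags & DEFRAG_FLAG_INCREMENTAL) && m_UseMutex)
        {
            // Incremental passes release the lock between calls, so we get
            // here unprotected; take the write lock before mutating state.
            assert(!ctx.mutexLocked);
            m_Mutex.lock();
            ctx.mutexLocked = true;
        }
        // The non-incremental path locked earlier (in VMA, inside
        // Defragment()) and arrives with ctx.mutexLocked already true.

        // Clean up only if we hold the lock or locking is disabled; if the
        // mutex was never taken, no pass ran and there is nothing to delete.
        if(ctx.mutexLocked || !m_UseMutex)
        {
            // ... destroy staging buffers, free empty blocks ...
        }

        if(ctx.mutexLocked)
        {
            m_Mutex.unlock();
            ctx.mutexLocked = false;
        }
    }

private:
    bool m_UseMutex;
    std::shared_mutex m_Mutex; // stands in for VMA's read-write mutex
};

int main()
{
    BlockVector blockVector(/*useMutex=*/true);
    DefragContext ctx;
    // Incremental path: the write lock is taken inside DefragmentationEnd.
    blockVector.DefragmentationEnd(ctx, DEFRAG_FLAG_INCREMENTAL);
    return 0;
}

The key design point is that mutexLocked lives in the per-defragmentation context rather than in the block vector itself, so the single unlock path at the end serves both the incremental and non-incremental modes.
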
@@ -14117,7 +14132,7 @@ VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
     for(size_t i = m_CustomPoolContexts.size(); i--; )
     {
         VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
-        pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
+        pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
         vma_delete(m_hAllocator, pBlockVectorCtx);
     }
     for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
@@ -14125,7 +14140,7 @@ VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
         VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
         if(pBlockVectorCtx)
         {
-            pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
+            pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
             vma_delete(m_hAllocator, pBlockVectorCtx);
         }
     }
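
For context, the path that reaches VmaBlockVector::DefragmentationEnd starts at the public API. What follows is a hedged sketch of the caller-side loop this fix protects, using entry points that exist in this version of the library (vmaDefragmentationBegin/vmaDefragmentationEnd, vmaBeginDefragmentationPass/vmaEndDefragmentationPass, VMA_DEFRAGMENTATION_FLAG_INCREMENTAL); the sizing of the pMoves buffer and the VK_NOT_READY loop convention are assumptions to verify against the VMA documentation.

#include "vk_mem_alloc.h"
#include <vector>

// Runs incremental defragmentation over the given allocations. Error
// handling and the actual data copies are elided.
void DefragmentIncrementally(VmaAllocator allocator,
                             std::vector<VmaAllocation>& allocations)
{
    VmaDefragmentationInfo2 info = {};
    info.flags = VMA_DEFRAGMENTATION_FLAG_INCREMENTAL;
    info.allocationCount = static_cast<uint32_t>(allocations.size());
    info.pAllocations = allocations.data();

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, nullptr, &ctx);

    // Assumption: VK_NOT_READY signals that more passes are required.
    while(res == VK_NOT_READY)
    {
        // Assumption: the caller provides the pMoves storage and passes its
        // capacity in moveCount; the library fills in this pass's moves.
        std::vector<VmaDefragmentationPassMoveInfo> moves(allocations.size());
        VmaDefragmentationPassInfo pass = {};
        pass.moveCount = static_cast<uint32_t>(moves.size());
        pass.pMoves = moves.data();

        vmaBeginDefragmentationPass(allocator, ctx, &pass);
        // ... copy each moved allocation's contents to its new location ...
        res = vmaEndDefragmentationPass(allocator, ctx);
    }

    // This call lands in VmaBlockVector::DefragmentationEnd. Because the
    // pass loop above runs without the block vector's write lock held, the
    // cleanup must re-acquire it first: the race this commit fixes.
    vmaDefragmentationEnd(allocator, ctx);
}
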