Further refactoring of defragmentation.

This commit is contained in:
Adam Sawicki 2018-10-17 15:20:36 +02:00
parent a114419b23
commit a9f030d7ba

View File

@@ -5587,6 +5587,15 @@ private:
VmaAllocation* pAllocation);
VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
VkResult ApplyDefragmentationMovesCpu(
const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
/*
Used during defragmentation. pDefragmentationStats is optional. It's in/out
- updated with new data.
*/
void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
};
struct VmaPool_T
@@ -11031,96 +11040,9 @@ VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIn
return VK_SUCCESS;
}
#if VMA_STATS_STRING_ENABLED
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
VkResult VmaBlockVector::ApplyDefragmentationMovesCpu(
const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
{
VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
json.BeginObject();
if(m_IsCustomPool)
{
json.WriteString("MemoryTypeIndex");
json.WriteNumber(m_MemoryTypeIndex);
json.WriteString("BlockSize");
json.WriteNumber(m_PreferredBlockSize);
json.WriteString("BlockCount");
json.BeginObject(true);
if(m_MinBlockCount > 0)
{
json.WriteString("Min");
json.WriteNumber((uint64_t)m_MinBlockCount);
}
if(m_MaxBlockCount < SIZE_MAX)
{
json.WriteString("Max");
json.WriteNumber((uint64_t)m_MaxBlockCount);
}
json.WriteString("Cur");
json.WriteNumber((uint64_t)m_Blocks.size());
json.EndObject();
if(m_FrameInUseCount > 0)
{
json.WriteString("FrameInUseCount");
json.WriteNumber(m_FrameInUseCount);
}
if(m_Algorithm != 0)
{
json.WriteString("Algorithm");
json.WriteString(VmaAlgorithmToStr(m_Algorithm));
}
}
else
{
json.WriteString("PreferredBlockSize");
json.WriteNumber(m_PreferredBlockSize);
}
json.WriteString("Blocks");
json.BeginObject();
for(size_t i = 0; i < m_Blocks.size(); ++i)
{
json.BeginString();
json.ContinueString(m_Blocks[i]->GetId());
json.EndString();
m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
}
json.EndObject();
json.EndObject();
}
#endif // #if VMA_STATS_STRING_ENABLED
VkResult VmaBlockVector::Defragment(
VmaDefragmentationStats* pDefragmentationStats,
VkDeviceSize& maxBytesToMove,
uint32_t& maxAllocationsToMove)
{
if(!m_pDefragmentationAlgorithm)
{
return VK_SUCCESS;
}
VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
// Defragment.
VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
VkResult res = m_pDefragmentationAlgorithm->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
if(res < 0)
{
return res;
}
if(res >= VK_SUCCESS)
{
const size_t blockCount = m_Blocks.size();
const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
@@ -11148,6 +11070,8 @@ VkResult VmaBlockVector::Defragment(
blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
}
VkResult res = VK_SUCCESS;
// Go over all blocks. Get mapped pointer or map if necessary.
for(size_t blockIndex = 0; (res >= 0) && (blockIndex < blockCount); ++blockIndex)
{
@@ -11232,24 +11156,12 @@ VkResult VmaBlockVector::Defragment(
pBlock->Unmap(m_hAllocator, 1);
}
}
}
// Accumulate statistics.
if(pDefragmentationStats != VMA_NULL)
{
const VkDeviceSize bytesMoved = m_pDefragmentationAlgorithm->GetBytesMoved();
const uint32_t allocationsMoved = m_pDefragmentationAlgorithm->GetAllocationsMoved();
pDefragmentationStats->bytesMoved += bytesMoved;
pDefragmentationStats->allocationsMoved += allocationsMoved;
VMA_ASSERT(bytesMoved <= maxBytesToMove);
VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
maxBytesToMove -= bytesMoved;
maxAllocationsToMove -= allocationsMoved;
}
return res;
}
// Free empty blocks.
if(res >= 0)
{
void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
{
m_HasEmptyBlock = false;
for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
{
@@ -11274,6 +11186,113 @@ VkResult VmaBlockVector::Defragment(
}
}
}
}
#if VMA_STATS_STRING_ENABLED
// Serializes a human-readable description of this block vector into 'json'.
// Emits one JSON object: pool-level parameters first, then a "Blocks" object
// with one entry per memory block, keyed by block id.
// Takes a read lock on m_Mutex for the whole dump (the lock is a no-op when
// m_hAllocator->m_UseMutex is false).
// NOTE: the order of json.Write* calls below defines the output key order —
// do not reorder.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
json.BeginObject();
if(m_IsCustomPool)
{
// Custom pool: report its explicit configuration.
json.WriteString("MemoryTypeIndex");
json.WriteNumber(m_MemoryTypeIndex);
json.WriteString("BlockSize");
json.WriteNumber(m_PreferredBlockSize);
// "BlockCount" is a nested object: optional Min/Max limits plus the
// current number of blocks.
json.WriteString("BlockCount");
json.BeginObject(true);
if(m_MinBlockCount > 0)
{
json.WriteString("Min");
json.WriteNumber((uint64_t)m_MinBlockCount);
}
// SIZE_MAX means "unlimited" — omit Max in that case.
if(m_MaxBlockCount < SIZE_MAX)
{
json.WriteString("Max");
json.WriteNumber((uint64_t)m_MaxBlockCount);
}
json.WriteString("Cur");
json.WriteNumber((uint64_t)m_Blocks.size());
json.EndObject();
// Optional fields, written only when set to non-default values.
if(m_FrameInUseCount > 0)
{
json.WriteString("FrameInUseCount");
json.WriteNumber(m_FrameInUseCount);
}
if(m_Algorithm != 0)
{
json.WriteString("Algorithm");
json.WriteString(VmaAlgorithmToStr(m_Algorithm));
}
}
else
{
// Default (non-custom) pool: only the preferred block size is relevant.
json.WriteString("PreferredBlockSize");
json.WriteNumber(m_PreferredBlockSize);
}
// Per-block details: one entry per block, keyed by its numeric id; the
// block's metadata object writes its own detailed map.
json.WriteString("Blocks");
json.BeginObject();
for(size_t i = 0; i < m_Blocks.size(); ++i)
{
json.BeginString();
json.ContinueString(m_Blocks[i]->GetId());
json.EndString();
m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
}
json.EndObject();
json.EndObject();
}
#endif // #if VMA_STATS_STRING_ENABLED
VkResult VmaBlockVector::Defragment(
VmaDefragmentationStats* pDefragmentationStats,
VkDeviceSize& maxBytesToMove,
uint32_t& maxAllocationsToMove)
{
if(!m_pDefragmentationAlgorithm)
{
return VK_SUCCESS;
}
VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
// Defragment.
VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
VkResult res = m_pDefragmentationAlgorithm->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
// Accumulate statistics.
if(pDefragmentationStats != VMA_NULL)
{
const VkDeviceSize bytesMoved = m_pDefragmentationAlgorithm->GetBytesMoved();
const uint32_t allocationsMoved = m_pDefragmentationAlgorithm->GetAllocationsMoved();
pDefragmentationStats->bytesMoved += bytesMoved;
pDefragmentationStats->allocationsMoved += allocationsMoved;
VMA_ASSERT(bytesMoved <= maxBytesToMove);
VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
maxBytesToMove -= bytesMoved;
maxAllocationsToMove -= allocationsMoved;
}
if(res >= VK_SUCCESS)
{
res = ApplyDefragmentationMovesCpu(moves);
}
if(res >= VK_SUCCESS)
{
FreeEmptyBlocks(pDefragmentationStats);
}
// Destroy defragmentation algorithm object.