Added support for lost allocations in ring buffer.

Fixed some more bugs.
Adam Sawicki 2018-08-22 16:48:17 +02:00
parent fd11d759dd
commit 8cfe05fad9
2 changed files with 216 additions and 16 deletions
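
For context, the workflow exercised by the new test looks roughly like the sketch below. It assumes a VmaAllocator and a VmaPool using the linear (ring-buffer) algorithm already exist; the helper name CreateLostableBuffer is illustrative and not part of the library.

#include "vk_mem_alloc.h"

// Minimal sketch: create a buffer whose allocation may become lost when the
// ring buffer wraps around. `allocator` and `pool` are assumed to be created
// elsewhere, the pool with the linear algorithm enabled.
static VkResult CreateLostableBuffer(VmaAllocator allocator, VmaPool pool,
    VkDeviceSize size, uint32_t frameIndex,
    VkBuffer* outBuf, VmaAllocation* outAlloc)
{
    // Tell VMA which frame we are in so it can judge which allocations are
    // old enough (more than frameInUseCount frames back) to be made lost.
    vmaSetCurrentFrameIndex(allocator, frameIndex);

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = size;
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = pool;
    // The allocation may be taken away later, and it may push other
    // lost-able allocations out of the ring buffer to make room for itself.
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
        VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;

    return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        outBuf, outAlloc, nullptr);
}

Whether an allocation has been lost can be checked the same way the new test does it: vmaGetAllocationInfo() reports VK_NULL_HANDLE in VmaAllocationInfo::deviceMemory for a lost allocation.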

Tests.cpp

@@ -101,6 +101,8 @@ struct PoolTestResult
static const uint32_t IMAGE_BYTES_PER_PIXEL = 1;
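// Frame index passed to vmaSetCurrentFrameIndex by the tests below; incremented before each allocation.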
static uint32_t g_FrameIndex = 0;
struct BufferInfo
{
VkBuffer Buffer = VK_NULL_HANDLE;
@@ -1701,6 +1703,95 @@ static void TestLinearAllocator()
}
}
// Test ring buffer with lost allocations.
{
// Allocate buffers until the pool is full.
// Notice the CAN_BECOME_LOST flag and the call to vmaSetCurrentFrameIndex.
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT;
res = VK_SUCCESS;
for(size_t i = 0; res == VK_SUCCESS; ++i)
{
vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
BufferInfo newBufInfo;
res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
&newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
if(res == VK_SUCCESS)
bufInfo.push_back(newBufInfo);
}
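// The loop above ends at the first vmaCreateBuffer call that fails, i.e. when the linear pool cannot fit another buffer.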
// Free the first half of the buffers.
{
const size_t buffersToDelete = bufInfo.size() / 2;
for(size_t i = 0; i < buffersToDelete; ++i)
{
vmaDestroyBuffer(g_hAllocator, bufInfo[i].Buffer, bufInfo[i].Allocation);
}
bufInfo.erase(bufInfo.begin(), bufInfo.begin() + buffersToDelete);
}
// Allocate buffers until the pool is full again.
// This way we make sure the ring buffer wraps around.
res = VK_SUCCESS;
for(size_t i = 0; res == VK_SUCCESS; ++i)
{
vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
BufferInfo newBufInfo;
res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
&newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
if(res == VK_SUCCESS)
bufInfo.push_back(newBufInfo);
}
VkDeviceSize firstNewOffset;
{
vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
// Allocate a large buffer with CAN_MAKE_OTHER_LOST.
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
bufCreateInfo.size = bufSizeMax;
BufferInfo newBufInfo;
res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
&newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
assert(res == VK_SUCCESS);
bufInfo.push_back(newBufInfo);
firstNewOffset = allocInfo.offset;
// Make sure at least one buffer from the beginning became lost.
vmaGetAllocationInfo(g_hAllocator, bufInfo[0].Allocation, &allocInfo);
assert(allocInfo.deviceMemory == VK_NULL_HANDLE);
}
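// A lost allocation reports VK_NULL_HANDLE as its device memory in vmaGetAllocationInfo, which is what the assert above checks.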
// Allocate more buffers with CAN_MAKE_OTHER_LOST until we wrap around with them.
size_t newCount = 1;
for(;;)
{
vmaSetCurrentFrameIndex(g_hAllocator, ++g_FrameIndex);
bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
BufferInfo newBufInfo;
res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
&newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
assert(res == VK_SUCCESS);
bufInfo.push_back(newBufInfo);
++newCount;
if(allocInfo.offset < firstNewOffset)
break;
}
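// An offset lower than firstNewOffset means the write position has gone all the way around the ring buffer again.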
// Destroy all the buffers in forward order.
for(size_t i = 0; i < bufInfo.size(); ++i)
vmaDestroyBuffer(g_hAllocator, bufInfo[i].Buffer, bufInfo[i].Allocation);
bufInfo.clear();
}
vmaDestroyPool(g_hAllocator, pool);
}
@@ -3541,10 +3632,12 @@ void Test()
{
wprintf(L"TESTING:\n");
// TEMP tests
// TODO delete
{
TestLinearAllocator();
ManuallyTestLinearAllocator();
return;
}
// # Simple tests
@@ -3561,6 +3654,7 @@ return;
TestMapping();
TestMappingMultithreaded();
TestLinearAllocator();
ManuallyTestLinearAllocator();
TestDefragmentationSimple();
TestDefragmentationFull();

vk_mem_alloc.h

@@ -8102,6 +8102,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest(
VMA_ASSERT(pAllocationRequest != VMA_NULL);
VMA_HEAVY_ASSERT(Validate());
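// The block size is read once here and reused below instead of calling GetSize() repeatedly.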
const VkDeviceSize size = GetSize();
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
@@ -8114,11 +8115,11 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest(
}
// Try to allocate before 2nd.back(), or end of block if 2nd.empty().
if(allocSize > GetSize())
if(allocSize > size)
{
return false;
}
VkDeviceSize resultBaseOffset = GetSize() - allocSize;
VkDeviceSize resultBaseOffset = size - allocSize;
if(!suballocations2nd.empty())
{
const VmaSuballocation& lastSuballoc = suballocations2nd.back();
@@ -8208,11 +8209,12 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest(
return true;
}
}
else
else // !upperAddress
{
if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
{
// Try to allocate at the end of 1st vector.
VkDeviceSize resultBaseOffset = 0;
if(!suballocations1st.empty())
{
@@ -8259,7 +8261,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest(
}
const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
suballocations2nd.back().offset : GetSize();
suballocations2nd.back().offset : size;
// There is enough free space at the end after alignment.
if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
@@ -8300,6 +8302,8 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest(
// beginning of 1st vector as the end of free space.
if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
{
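// In this mode the beginning of the 1st vector marks the end of the free space, so the 1st vector must not be empty here.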
VMA_ASSERT(!suballocations1st.empty());
VkDeviceSize resultBaseOffset = 0;
if(!suballocations2nd.empty())
{
@@ -8345,15 +8349,81 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest(
}
}
pAllocationRequest->itemsToMakeLostCount = 0;
pAllocationRequest->sumItemSize = 0;
size_t index1st = m_1stNullItemsBeginCount;
if(canMakeOtherLost)
{
while(index1st < suballocations1st.size() &&
resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
{
// Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
const VmaSuballocation& suballoc = suballocations1st[index1st];
if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
{
// No problem.
}
else
{
VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
if(suballoc.hAllocation->CanBecomeLost() &&
suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
{
++pAllocationRequest->itemsToMakeLostCount;
pAllocationRequest->sumItemSize += suballoc.size;
}
else
{
return false;
}
}
++index1st;
}
// Check next suballocations for BufferImageGranularity conflicts.
// If conflict exists, we must mark more allocations lost or fail.
if(bufferImageGranularity > 1)
{
while(index1st < suballocations1st.size())
{
const VmaSuballocation& suballoc = suballocations1st[index1st];
if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
{
if(suballoc.hAllocation != VK_NULL_HANDLE)
{
// Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
if(suballoc.hAllocation->CanBecomeLost() &&
suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
{
++pAllocationRequest->itemsToMakeLostCount;
pAllocationRequest->sumItemSize += suballoc.size;
}
else
{
return false;
}
}
}
else
{
// Already on next page.
break;
}
++index1st;
}
}
}
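// After the scans above, index1st points past every item in the 1st vector that the new allocation would collide with; itemsToMakeLostCount and sumItemSize describe the live allocations among them that must be made lost.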
// There is enough free space at the end after alignment.
const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpace2ndTo1stEnd)
if(index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size ||
index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset)
{
// Check next suballocations for BufferImageGranularity conflicts.
// If conflict exists, allocation cannot be made here.
if(bufferImageGranularity > 1)
{
for(size_t nextSuballocIndex = m_1stNullItemsBeginCount;
for(size_t nextSuballocIndex = index1st;
nextSuballocIndex < suballocations1st.size();
nextSuballocIndex++)
{
@@ -8375,10 +8445,11 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest(
// All tests passed: Success.
pAllocationRequest->offset = resultOffset;
pAllocationRequest->sumFreeSize = freeSpace2ndTo1stEnd - resultBaseOffset;
pAllocationRequest->sumItemSize = 0;
pAllocationRequest->sumFreeSize =
(index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
- resultBaseOffset
- pAllocationRequest->sumItemSize;
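// That is: the span from the base offset to the next surviving 1st-vector item (or the end of the block), minus the bytes still occupied by allocations that are about to be made lost.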
// pAllocationRequest->item unused.
pAllocationRequest->itemsToMakeLostCount = 0;
return true;
}
}
@@ -8392,9 +8463,44 @@ bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
uint32_t frameInUseCount,
VmaAllocationRequest* pAllocationRequest)
{
VMA_ASSERT(0 && "TODO");
if(pAllocationRequest->itemsToMakeLostCount == 0)
{
return true;
}
VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
size_t index1st = m_1stNullItemsBeginCount;
size_t madeLostCount = 0;
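// Walk the 1st vector from its first potentially live item and make allocations lost until the count requested by CreateAllocationRequest is reached.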
while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
{
VMA_ASSERT(index1st < suballocations1st.size());
VmaSuballocation& suballoc = suballocations1st[index1st];
if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
{
VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
{
suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
suballoc.hAllocation = VK_NULL_HANDLE;
++m_1stNullItemsMiddleCount;
++madeLostCount;
}
else
{
return false;
}
}
++index1st;
}
CleanupAfterFree();
//VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
return true;
}
uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{