Implemented double stack. Wrote tests for it.

Adam Sawicki 2018-08-22 14:47:32 +02:00
parent 45cee6ee4f
commit 680b2251fa
2 changed files with 445 additions and 139 deletions
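The double stack extends the linear allocation algorithm: a pool created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT can now also serve allocations marked with VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT, which are placed at the end of the block and grow downward, while regular allocations keep growing upward from offset 0. A minimal usage sketch, not taken from this commit: it assumes the tests' g_hAllocator global and a previously chosen memTypeIndex; buffer sizes, usage flags, and the missing error checking are illustrative only.

// Sketch only - not code from this commit. Error checking omitted.
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex; // assumed to be found beforehand, e.g. with vmaFindMemoryTypeIndex
poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
poolCreateInfo.blockSize = 64ull * 1024 * 1024;
poolCreateInfo.maxBlockCount = 1;

VmaPool pool = VK_NULL_HANDLE;
vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool);

VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 1024 * 1024;
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool;

// Bottom of the double stack: grows upward from offset 0 (the 1st suballocation vector).
VkBuffer lowerBuf; VmaAllocation lowerAlloc;
vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &lowerBuf, &lowerAlloc, nullptr);

// Top of the double stack: grows downward from the end of the block (the 2nd suballocation vector).
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
VkBuffer upperBuf; VmaAllocation upperAlloc;
vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &upperBuf, &upperAlloc, nullptr);

// Free in stack order (LIFO on each end), then destroy the pool.
vmaDestroyBuffer(g_hAllocator, upperBuf, upperAlloc);
vmaDestroyBuffer(g_hAllocator, lowerBuf, lowerAlloc);
vmaDestroyPool(g_hAllocator, pool);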


@@ -1570,6 +1570,137 @@ static void TestLinearAllocator()
         }
     }
+    // Test double stack.
+    {
+        // Allocate number of buffers of varying size that surely fit into this block, alternate from bottom/top.
+        VkDeviceSize prevOffsetLower = 0;
+        VkDeviceSize prevOffsetUpper = poolCreateInfo.blockSize;
+        for(size_t i = 0; i < maxBufCount; ++i)
+        {
+            const bool upperAddress = (i % 2) != 0;
+            if(upperAddress)
+                allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
+            else
+                allocCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
+            bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
+            BufferInfo newBufInfo;
+            res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
+                &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
+            assert(res == VK_SUCCESS);
+            if(upperAddress)
+            {
+                assert(allocInfo.offset < prevOffsetUpper);
+                prevOffsetUpper = allocInfo.offset;
+            }
+            else
+            {
+                assert(allocInfo.offset >= prevOffsetLower);
+                prevOffsetLower = allocInfo.offset;
+            }
+            assert(prevOffsetLower < prevOffsetUpper);
+            bufInfo.push_back(newBufInfo);
+        }
+        // Destroy few buffers from top of the stack.
+        for(size_t i = 0; i < maxBufCount / 5; ++i)
+        {
+            const BufferInfo& currBufInfo = bufInfo.back();
+            vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
+            bufInfo.pop_back();
+        }
+        // Create some more
+        for(size_t i = 0; i < maxBufCount / 5; ++i)
+        {
+            const bool upperAddress = (i % 2) != 0;
+            if(upperAddress)
+                allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
+            else
+                allocCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
+            bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
+            BufferInfo newBufInfo;
+            res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
+                &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
+            assert(res == VK_SUCCESS);
+            bufInfo.push_back(newBufInfo);
+        }
+        // Destroy the buffers in reverse order.
+        while(!bufInfo.empty())
+        {
+            const BufferInfo& currBufInfo = bufInfo.back();
+            vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
+            bufInfo.pop_back();
+        }
+        // Create buffers on both sides until we reach out of memory.
+        prevOffsetLower = 0;
+        prevOffsetUpper = poolCreateInfo.blockSize;
+        res = VK_SUCCESS;
+        for(size_t i = 0; res == VK_SUCCESS; ++i)
+        {
+            const bool upperAddress = (i % 2) != 0;
+            if(upperAddress)
+                allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
+            else
+                allocCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
+            bufCreateInfo.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin);
+            BufferInfo newBufInfo;
+            res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
+                &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
+            if(res == VK_SUCCESS)
+            {
+                if(upperAddress)
+                {
+                    assert(allocInfo.offset < prevOffsetUpper);
+                    prevOffsetUpper = allocInfo.offset;
+                }
+                else
+                {
+                    assert(allocInfo.offset >= prevOffsetLower);
+                    prevOffsetLower = allocInfo.offset;
+                }
+                assert(prevOffsetLower < prevOffsetUpper);
+                bufInfo.push_back(newBufInfo);
+            }
+        }
+        // Destroy the buffers in random order.
+        while(!bufInfo.empty())
+        {
+            const size_t indexToDestroy = rand.Generate() % bufInfo.size();
+            const BufferInfo& currBufInfo = bufInfo[indexToDestroy];
+            vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
+            bufInfo.erase(bufInfo.begin() + indexToDestroy);
+        }
+        // Create buffers on upper side only, constant size, until we reach out of memory.
+        prevOffsetUpper = poolCreateInfo.blockSize;
+        res = VK_SUCCESS;
+        allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
+        bufCreateInfo.size = bufSizeMax;
+        for(size_t i = 0; res == VK_SUCCESS; ++i)
+        {
+            BufferInfo newBufInfo;
+            res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo,
+                &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo);
+            if(res == VK_SUCCESS)
+            {
+                assert(allocInfo.offset < prevOffsetUpper);
+                prevOffsetUpper = allocInfo.offset;
+                bufInfo.push_back(newBufInfo);
+            }
+        }
+        // Destroy the buffers in reverse order.
+        while(!bufInfo.empty())
+        {
+            const BufferInfo& currBufInfo = bufInfo.back();
+            vmaDestroyBuffer(g_hAllocator, currBufInfo.Buffer, currBufInfo.Allocation);
+            bufInfo.pop_back();
+        }
+    }
     vmaDestroyPool(g_hAllocator, pool);
 }


@@ -4405,6 +4405,7 @@ public:
         VkDeviceSize bufferImageGranularity,
         VkDeviceSize allocSize,
         VkDeviceSize allocAlignment,
+        bool upperAddress,
         VmaSuballocationType allocType,
         bool canMakeOtherLost,
         VmaAllocationRequest* pAllocationRequest) = 0;
@@ -4423,6 +4424,7 @@ public:
         const VmaAllocationRequest& request,
         VmaSuballocationType type,
         VkDeviceSize allocSize,
+        bool upperAddress,
         VmaAllocation hAllocation) = 0;
     // Frees suballocation assigned to given memory region.
@@ -4475,6 +4477,7 @@ public:
         VkDeviceSize bufferImageGranularity,
         VkDeviceSize allocSize,
         VkDeviceSize allocAlignment,
+        bool upperAddress,
         VmaSuballocationType allocType,
         bool canMakeOtherLost,
         VmaAllocationRequest* pAllocationRequest);
@@ -4492,6 +4495,7 @@ public:
         const VmaAllocationRequest& request,
         VmaSuballocationType type,
         VkDeviceSize allocSize,
+        bool upperAddress,
         VmaAllocation hAllocation);
     virtual void Free(const VmaAllocation allocation);
@@ -4641,6 +4645,7 @@ public:
         VkDeviceSize bufferImageGranularity,
         VkDeviceSize allocSize,
         VkDeviceSize allocAlignment,
+        bool upperAddress,
         VmaSuballocationType allocType,
         bool canMakeOtherLost,
         VmaAllocationRequest* pAllocationRequest);
@@ -4658,6 +4663,7 @@ public:
         const VmaAllocationRequest& request,
         VmaSuballocationType type,
         VkDeviceSize allocSize,
+        bool upperAddress,
         VmaAllocation hAllocation);
     virtual void Free(const VmaAllocation allocation);
@@ -6443,11 +6449,13 @@ bool VmaBlockMetadata_Generic::CreateAllocationRequest(
     VkDeviceSize bufferImageGranularity,
     VkDeviceSize allocSize,
     VkDeviceSize allocAlignment,
+    bool upperAddress,
     VmaSuballocationType allocType,
     bool canMakeOtherLost,
     VmaAllocationRequest* pAllocationRequest)
 {
     VMA_ASSERT(allocSize > 0);
+    VMA_ASSERT(!upperAddress);
     VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
     VMA_ASSERT(pAllocationRequest != VMA_NULL);
     VMA_HEAVY_ASSERT(Validate());
@@ -6644,8 +6652,10 @@ void VmaBlockMetadata_Generic::Alloc(
     const VmaAllocationRequest& request,
     VmaSuballocationType type,
     VkDeviceSize allocSize,
+    bool upperAddress,
     VmaAllocation hAllocation)
 {
+    VMA_ASSERT(!upperAddress);
     VMA_ASSERT(request.item != m_Suballocations.end());
     VmaSuballocation& suballoc = *request.item;
     // Given suballocation is a free block.
@@ -7196,7 +7206,8 @@ bool VmaBlockMetadata_Linear::Validate() const
     {
         return false;
     }
-    if(suballocations1st.empty() && !suballocations2nd.empty())
+    if(suballocations1st.empty() && !suballocations2nd.empty() &&
+        m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
     {
         return false;
     }
@@ -8081,6 +8092,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest(
     VkDeviceSize bufferImageGranularity,
     VkDeviceSize allocSize,
     VkDeviceSize allocAlignment,
+    bool upperAddress,
     VmaSuballocationType allocType,
     bool canMakeOtherLost,
     VmaAllocationRequest* pAllocationRequest)
@@ -8093,7 +8105,112 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest(
     SuballocationVectorType& suballocations1st = AccessSuballocations1st();
     SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-    if(suballocations2nd.empty())
+    if(upperAddress)
+    {
+        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+        {
+            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
+            return false;
+        }
+        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
+        if(allocSize > GetSize())
+        {
+            return false;
+        }
+        VkDeviceSize resultBaseOffset = GetSize() - allocSize;
+        if(!suballocations2nd.empty())
+        {
+            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
+            resultBaseOffset = lastSuballoc.offset - allocSize;
+            if(allocSize > lastSuballoc.offset)
+            {
+                return false;
+            }
+        }
+        // Start from offset equal to end of free space.
+        VkDeviceSize resultOffset = resultBaseOffset;
+        // Apply VMA_DEBUG_MARGIN at the end.
+        if(VMA_DEBUG_MARGIN > 0)
+        {
+            if(resultOffset < VMA_DEBUG_MARGIN)
+            {
+                return false;
+            }
+            resultOffset -= VMA_DEBUG_MARGIN;
+        }
+        // Apply alignment.
+        resultOffset = VmaAlignDown(resultOffset, allocAlignment);
+        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
+        // Make bigger alignment if necessary.
+        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
+        {
+            bool bufferImageGranularityConflict = false;
+            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
+            {
+                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
+                if(VmaBlocksOnSamePage(nextSuballoc.offset, nextSuballoc.size, resultOffset, bufferImageGranularity))
+                {
+                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
+                    {
+                        bufferImageGranularityConflict = true;
+                        break;
+                    }
+                }
+                else
+                    // Already on previous page.
+                    break;
+            }
+            if(bufferImageGranularityConflict)
+            {
+                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
+            }
+        }
+        // There is enough free space.
+        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
+            suballocations1st.back().offset + suballocations1st.back().size :
+            0;
+        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
+        {
+            // Check previous suballocations for BufferImageGranularity conflicts.
+            // If conflict exists, allocation cannot be made here.
+            if(bufferImageGranularity > 1)
+            {
+                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
+                {
+                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
+                    if(VmaBlocksOnSamePage(resultOffset, allocSize, prevSuballoc.offset, bufferImageGranularity))
+                    {
+                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
+                        {
+                            return false;
+                        }
+                    }
+                    else
+                    {
+                        // Already on next page.
+                        break;
+                    }
+                }
+            }
+            // All tests passed: Success.
+            pAllocationRequest->offset = resultOffset;
+            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
+            pAllocationRequest->sumItemSize = 0;
+            // pAllocationRequest->item unused.
+            pAllocationRequest->itemsToMakeLostCount = 0;
+            return true;
+        }
+    }
+    else
+    {
+    if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
     {
         // Try to allocate at the end of 1st vector.
         VkDeviceSize resultBaseOffset = 0;
@@ -8141,15 +8258,37 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest(
             }
         }
+        const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
+            suballocations2nd.back().offset : GetSize();
         // There is enough free space at the end after alignment.
-        if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= GetSize())
+        if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
         {
-            // Check next suballocations for BufferImageGranularity conflicts {when there are some}.
+            // Check next suballocations for BufferImageGranularity conflicts.
             // If conflict exists, allocation cannot be made here.
+            if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+            {
+                for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
+                {
+                    const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
+                    if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+                    {
+                        if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
+                        {
+                            return false;
+                        }
+                    }
+                    else
+                    {
+                        // Already on previous page.
+                        break;
+                    }
+                }
+            }
             // All tests passed: Success.
             pAllocationRequest->offset = resultOffset;
-            pAllocationRequest->sumFreeSize = GetSize() - resultBaseOffset;
+            pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
             pAllocationRequest->sumItemSize = 0;
             // pAllocationRequest->item unused.
             pAllocationRequest->itemsToMakeLostCount = 0;
@@ -8243,6 +8382,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest(
             return true;
         }
     }
+    }
     return false;
 }
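For reference, the upper-address path added above works top-down: the candidate offset starts at the current bottom of the 2nd stack (or at the block end when the 2nd stack is empty), the allocation size and debug margin are subtracted, the result is aligned downward, and the request succeeds only if it still starts at or above the end of the lower (1st) stack. A small standalone model of that arithmetic with hypothetical numbers, not code from the library:

#include <cassert>
#include <cstdint>

// Simplified model of top-down placement: candidate offset =
// (top of free space) - allocSize - margin, aligned down, and it must not
// dip below the end of the lower stack. Granularity checks are omitted.
static bool PlaceFromTop(uint64_t freeSpaceTop, uint64_t endOf1st,
    uint64_t allocSize, uint64_t alignment, uint64_t margin, uint64_t* outOffset)
{
    if(allocSize + margin > freeSpaceTop)
        return false;
    uint64_t offset = freeSpaceTop - allocSize - margin;
    offset = offset / alignment * alignment; // align down
    if(offset < endOf1st + margin)           // would collide with the lower stack
        return false;
    *outOffset = offset;
    return true;
}

int main()
{
    // Block of 1024 bytes, lower stack currently ends at 256, no prior upper allocations.
    uint64_t offset = 0;
    assert(PlaceFromTop(1024, 256, 100, 16, 0, &offset) && offset == 912); // aligned down from 924
    // The next upper allocation starts below the previous one.
    assert(PlaceFromTop(offset, 256, 600, 16, 0, &offset) && offset == 304);
    // A third one no longer fits above the lower stack.
    assert(!PlaceFromTop(offset, 256, 100, 16, 0, &offset));
    return 0;
}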
@@ -8272,9 +8412,21 @@ void VmaBlockMetadata_Linear::Alloc(
     const VmaAllocationRequest& request,
     VmaSuballocationType type,
     VkDeviceSize allocSize,
+    bool upperAddress,
     VmaAllocation hAllocation)
 {
     const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
+    if(upperAddress)
+    {
+        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
+            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
+        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+        suballocations2nd.push_back(newSuballoc);
+        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
+    }
+    else
+    {
     SuballocationVectorType& suballocations1st = AccessSuballocations1st();
     // First allocation.
@@ -8322,6 +8474,7 @@ void VmaBlockMetadata_Linear::Alloc(
         }
     }
+    }
 }
 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
 {
@@ -8332,8 +8485,9 @@ void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
 {
     SuballocationVectorType& suballocations1st = AccessSuballocations1st();
     SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-    VMA_ASSERT(!suballocations1st.empty());
+    if(!suballocations1st.empty())
+    {
     // First allocation: Mark it as next empty at the beginning.
     VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
     if(firstSuballoc.offset == offset)
@@ -8344,9 +8498,11 @@ void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
         CleanupAfterFree();
         return;
     }
+    }
-    // Last allocation in 2-part ring buffer.
+    // Last allocation in 2-part ring buffer or top of 2nd stack (same logic).
-    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
+        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
     {
         VmaSuballocation& lastSuballoc = suballocations2nd.back();
         if(lastSuballoc.offset == offset)
@@ -8383,10 +8539,10 @@ void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
         }
     }
-    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
     {
         // Item from the middle of 2nd vector.
-        // TODO optimize using binary search.
+        // TODO optimize using binary search. Careful when DOUBLE_STACK - suballocations are then sorted in reverse order of offsets.
        for(size_t i = 0; i < suballocations2nd.size() - 1; ++i)
         {
             VmaSuballocation& currSuballoc = suballocations2nd[i];
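Because upper-address allocations are appended with push_back in Alloc() above while their offsets decrease, the 2nd vector in double-stack mode ends up sorted by decreasing offset - the opposite of the 1st vector and of ring-buffer mode, which is what the updated TODO warns about. A toy model of that ordering (offsets only, not the real VmaSuballocation structs):

#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
    // Toy model: only the offsets of suballocations are tracked here.
    std::vector<uint64_t> suballocations1st, suballocations2nd;
    const uint64_t blockSize = 1024;

    suballocations1st.push_back(0);    // bottom allocation of size 128 at offset 0
    suballocations1st.push_back(128);  // next one above it

    suballocations2nd.push_back(blockSize - 256);        // top allocation of size 256 -> offset 768
    suballocations2nd.push_back(blockSize - 256 - 128);  // next one below it -> offset 640

    assert(suballocations1st.front() < suballocations1st.back());  // 1st vector: ascending offsets
    assert(suballocations2nd.front() > suballocations2nd.back());  // 2nd vector (double stack): descending offsets
    return 0;
}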
@@ -8414,17 +8570,19 @@ bool VmaBlockMetadata_Linear::ShouldCompact1st() const
 void VmaBlockMetadata_Linear::CleanupAfterFree()
 {
     SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
     if(IsEmpty())
     {
         suballocations1st.clear();
+        suballocations2nd.clear();
         m_1stNullItemsBeginCount = 0;
         m_1stNullItemsMiddleCount = 0;
+        m_2ndNullItemsCount = 0;
         m_2ndVectorMode = SECOND_VECTOR_EMPTY;
     }
     else
     {
-        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
         const size_t suballoc1stCount = suballocations1st.size();
         const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
         VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
@@ -8486,7 +8644,7 @@ void VmaBlockMetadata_Linear::CleanupAfterFree()
             suballocations1st.clear();
             m_1stNullItemsBeginCount = 0;
-            if(!suballocations2nd.empty())
+            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
             {
                 // Swap 1st with 2nd. Now 2nd is empty.
                 m_2ndVectorMode = SECOND_VECTOR_EMPTY;
@@ -8868,6 +9026,13 @@ VkResult VmaBlockVector::Allocate(
     VmaSuballocationType suballocType,
     VmaAllocation* pAllocation)
 {
+    // Upper address can only be used with linear allocator.
+    if((createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0 &&
+        !m_LinearAlgorithm)
+    {
+        return VK_ERROR_FEATURE_NOT_PRESENT;
+    }
     // Early reject: requested allocation size is larger that maximum block size for this block vector.
     if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
     {
@@ -8876,6 +9041,7 @@ VkResult VmaBlockVector::Allocate(
     const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
     const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
+    const bool upperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
     VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
@@ -8892,6 +9058,7 @@ VkResult VmaBlockVector::Allocate(
             m_BufferImageGranularity,
             size,
             alignment,
+            upperAddress,
             suballocType,
             false, // canMakeOtherLost
             &currRequest))
@@ -8915,7 +9082,7 @@ VkResult VmaBlockVector::Allocate(
             }
             *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
-            pCurrBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
+            pCurrBlock->m_pMetadata->Alloc(currRequest, suballocType, size, upperAddress, *pAllocation);
             (*pAllocation)->InitBlockAllocation(
                 hCurrentPool,
                 pCurrBlock,
@@ -9017,12 +9184,13 @@ VkResult VmaBlockVector::Allocate(
             m_BufferImageGranularity,
             size,
             alignment,
+            upperAddress,
             suballocType,
             false, // canMakeOtherLost
             &allocRequest))
         {
             *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
-            pBlock->m_pMetadata->Alloc(allocRequest, suballocType, size, *pAllocation);
+            pBlock->m_pMetadata->Alloc(allocRequest, suballocType, size, upperAddress, *pAllocation);
             (*pAllocation)->InitBlockAllocation(
                 hCurrentPool,
                 pBlock,
@@ -9079,6 +9247,7 @@ VkResult VmaBlockVector::Allocate(
                     m_BufferImageGranularity,
                     size,
                     alignment,
+                    (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
                     suballocType,
                     canMakeOtherLost,
                     &currRequest))
@@ -9122,7 +9291,7 @@ VkResult VmaBlockVector::Allocate(
             }
             // Allocate from this pBlock.
             *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
-            pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
+            pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, upperAddress, *pAllocation);
             (*pAllocation)->InitBlockAllocation(
                 hCurrentPool,
                 pBestRequestBlock,
@@ -9627,6 +9796,7 @@ VkResult VmaDefragmentator::DefragmentRound(
             m_pBlockVector->GetBufferImageGranularity(),
             size,
             alignment,
+            false, // upperAddress
             suballocType,
             false, // canMakeOtherLost
             &dstAllocRequest) &&
@@ -9668,7 +9838,12 @@ VkResult VmaDefragmentator::DefragmentRound(
                 VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
             }
-            pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
+            pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
+                dstAllocRequest,
+                suballocType,
+                size,
+                false, // upperAddress
+                allocInfo.m_hAllocation);
             pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
             allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);