Add const qualifier to handles where possible

This mirrors the Vulkan specification, where handles are often passed with a const qualifier.
Joe Hermaszewski 2020-04-07 17:39:07 +08:00
parent 08bc59bee5
commit f22cd7b3c6
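
For context, this mirrors how the Vulkan headers declare read-only handle arrays, e.g. vkFreeCommandBuffers() takes `const VkCommandBuffer* pCommandBuffers`. Handles are opaque values, so `const` here promises only that the array of handles is not written through the pointer, not that the objects behind them are immutable. A minimal C sketch of the caller-side effect follows; the Allocation type and function names are hypothetical stand-ins, not VMA code:

#include <stddef.h>

/* Hypothetical opaque handle, standing in for VmaAllocation. */
typedef struct Allocation_T* Allocation;

/* Before: the signature suggests the function may overwrite the handles. */
void free_pages_old(size_t count, Allocation* pAllocations);

/* After: const documents and enforces that the handle array is read-only,
   matching Vulkan signatures such as vkFreeCommandBuffers(). */
void free_pages_new(size_t count, const Allocation* pAllocations)
{
    for(size_t i = 0; i < count; ++i)
    {
        /* pAllocations[i] may be read here, but not reassigned. */
        (void)pAllocations[i];
    }
}

int main(void)
{
    Allocation allocs[4] = {0};
    const Allocation* view = allocs; /* Callers holding a const view... */
    free_pages_new(4, view);         /* ...can now pass it without a cast. */
    return 0;
}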


@@ -3423,7 +3423,7 @@ typedef struct VmaDefragmentationInfo2 {
 It is safe to pass allocations that are in the lost state - they are ignored.
 All allocations not present in this array are considered non-moveable during this defragmentation.
 */
-VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations;
+const VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations;
 /** \brief Optional, output. Pointer to array that will be filled with information whether the allocation at certain index has been changed during defragmentation.
 The array should have `allocationCount` elements.
@@ -3448,7 +3448,7 @@ typedef struct VmaDefragmentationInfo2 {
 Using this array is equivalent to specifying all allocations from the pools in `pAllocations`.
 It might be more efficient.
 */
-VmaPool VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(poolCount) pPools;
+const VmaPool VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(poolCount) pPools;
 /** \brief Maximum total numbers of bytes that can be copied while moving allocations to different places using transfers on CPU side, like `memcpy()`, `memmove()`.
 `VK_WHOLE_SIZE` means no limit.
@@ -3620,7 +3620,7 @@ For more information, see [Defragmentation](@ref defragmentation) chapter.
 */
 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
 VmaAllocator VMA_NOT_NULL allocator,
-VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
+const VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
 size_t allocationCount,
 VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged,
 const VmaDefragmentationInfo* VMA_NULLABLE pDefragmentationInfo,
@@ -7191,10 +7191,10 @@ public:
 VmaDefragmentationStats* pStats);
 ~VmaDefragmentationContext_T();
-void AddPools(uint32_t poolCount, VmaPool* pPools);
+void AddPools(uint32_t poolCount, const VmaPool* pPools);
 void AddAllocations(
 uint32_t allocationCount,
-VmaAllocation* pAllocations,
+const VmaAllocation* pAllocations,
 VkBool32* pAllocationsChanged);
 /*
@@ -14293,7 +14293,7 @@ VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
 }
 }
-void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
+void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools)
 {
 for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
 {
@@ -14330,7 +14330,7 @@ void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
 void VmaDefragmentationContext_T::AddAllocations(
 uint32_t allocationCount,
-VmaAllocation* pAllocations,
+const VmaAllocation* pAllocations,
 VkBool32* pAllocationsChanged)
 {
 // Dispatch pAllocations among defragmentators. Create them when necessary.
@@ -17947,7 +17947,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
 VmaAllocator allocator,
 size_t allocationCount,
-VmaAllocation* pAllocations)
+const VmaAllocation* pAllocations)
 {
 if(allocationCount == 0)
 {
@@ -18166,7 +18166,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, u
 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
 VmaAllocator allocator,
-VmaAllocation* pAllocations,
+const VmaAllocation* pAllocations,
 size_t allocationCount,
 VkBool32* pAllocationsChanged,
 const VmaDefragmentationInfo *pDefragmentationInfo,
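
As a usage sketch against the updated public API (assuming an initialized allocator and an array of live allocations; error handling omitted), callers holding a pointer-to-const handle array can now pass it directly:

/* Assumes an initialized VmaAllocator and live VmaAllocation handles. */
void free_all(VmaAllocator allocator, const VmaAllocation* allocs, size_t count)
{
    /* Valid after this commit; previously required a non-const VmaAllocation*. */
    vmaFreeMemoryPages(allocator, count, allocs);
}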