From 87cea3667086bdd59b07060637b5af27ef320d50 Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Tue, 2 Jul 2019 12:24:48 +0200 Subject: [PATCH] Add "Common mistakes" documentation chapter --- docs/html/index.html | 1 + docs/html/usage_patterns.html | 6 + docs/html/vk__mem__alloc_8h_source.html | 294 ++++++++++++------------ src/vk_mem_alloc.h | 22 ++ 4 files changed, 176 insertions(+), 147 deletions(-) diff --git a/docs/html/index.html b/docs/html/index.html index 4ffc5e4..d560ed5 100644 --- a/docs/html/index.html +++ b/docs/html/index.html @@ -134,6 +134,7 @@ Table of contents
  • Recommended usage patterns diff --git a/docs/html/usage_patterns.html b/docs/html/usage_patterns.html index 948d22d..055e689 100644 --- a/docs/html/usage_patterns.html +++ b/docs/html/usage_patterns.html @@ -70,6 +70,12 @@ $(function() {

    See also slides from talk: Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018

    +

    +Common mistakes

    +

    Use of CPU_TO_GPU instead of CPU_ONLY memory

    +

VMA_MEMORY_USAGE_CPU_TO_GPU is recommended only for resources that will be mapped and written by the CPU, as well as read directly by the GPU, such as buffers or textures updated every frame (dynamic resources). If you create a staging copy of a resource to be written by the CPU and then used as a source of a transfer to another resource placed in GPU memory, that staging resource should be created with VMA_MEMORY_USAGE_CPU_ONLY. Please read the descriptions of these enums carefully for details.
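For example, a staging buffer for an upload could be created like this (a minimal sketch; allocator, myData, and myDataSize are assumed to exist in the surrounding code):

VkBufferCreateInfo stagingBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
stagingBufInfo.size = myDataSize;
stagingBufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo stagingAllocInfo = {};
stagingAllocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // staging copy - not CPU_TO_GPU
stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; // persistently mapped

VkBuffer stagingBuf;
VmaAllocation stagingAlloc;
VmaAllocationInfo stagingAllocDetails;
vmaCreateBuffer(allocator, &stagingBufInfo, &stagingAllocInfo, &stagingBuf, &stagingAlloc, &stagingAllocDetails);
memcpy(stagingAllocDetails.pMappedData, myData, myDataSize);
// ...then record vkCmdCopyBuffer from stagingBuf to the GPU-local resource...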

    +

    Unnecessary use of custom pools

    +

Custom memory pools may be useful for special purposes - when you want to keep certain types of resources separate, e.g. to reserve a minimum amount of memory for them, limit the maximum amount of memory they can occupy, or make some of them push out others through the mechanism of Lost allocations. For most resources this is not needed, so it is not recommended to create VmaPool objects and allocations out of them. Allocating from the default pool is sufficient.
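For instance, a GPU-only image allocated from the default pool needs no VmaPool at all (a sketch; imageInfo and allocator are assumed to be defined elsewhere):

VmaAllocationCreateInfo allocInfo = {};
allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
// allocInfo.pool is intentionally left null - the default pool is used.

VkImage image;
VmaAllocation allocation;
vmaCreateImage(allocator, &imageInfo, &allocInfo, &image, &allocation, nullptr);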

    Simple patterns

    diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index 022f0e2..8a8373c 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -65,212 +65,212 @@ $(function() {
    vk_mem_alloc.h

    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1655 /*
    1656 Define this macro to 0/1 to disable/enable support for recording functionality,
    1657 available through VmaAllocatorCreateInfo::pRecordSettings.
    1658 */
    1659 #ifndef VMA_RECORDING_ENABLED
    1660  #ifdef _WIN32
    1661  #define VMA_RECORDING_ENABLED 1
    1662  #else
    1663  #define VMA_RECORDING_ENABLED 0
    1664  #endif
    1665 #endif
    1666 
    1667 #ifndef NOMINMAX
    1668  #define NOMINMAX // For windows.h
    1669 #endif
    1670 
    1671 #ifndef VULKAN_H_
    1672  #include <vulkan/vulkan.h>
    1673 #endif
    1674 
    1675 #if VMA_RECORDING_ENABLED
    1676  #include <windows.h>
    1677 #endif
    1678 
    1679 #if !defined(VMA_DEDICATED_ALLOCATION)
    1680  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1681  #define VMA_DEDICATED_ALLOCATION 1
    1682  #else
    1683  #define VMA_DEDICATED_ALLOCATION 0
    1684  #endif
    1685 #endif
    1686 
    1696 VK_DEFINE_HANDLE(VmaAllocator)
    1697 
    1698 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1700  VmaAllocator allocator,
    1701  uint32_t memoryType,
    1702  VkDeviceMemory memory,
    1703  VkDeviceSize size);
    1705 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1706  VmaAllocator allocator,
    1707  uint32_t memoryType,
    1708  VkDeviceMemory memory,
    1709  VkDeviceSize size);
    1710 
    1724 
    1754 
    1757 typedef VkFlags VmaAllocatorCreateFlags;
    1758 
    1763 typedef struct VmaVulkanFunctions {
    1764  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1765  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1766  PFN_vkAllocateMemory vkAllocateMemory;
    1767  PFN_vkFreeMemory vkFreeMemory;
    1768  PFN_vkMapMemory vkMapMemory;
    1769  PFN_vkUnmapMemory vkUnmapMemory;
    1770  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1771  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1772  PFN_vkBindBufferMemory vkBindBufferMemory;
    1773  PFN_vkBindImageMemory vkBindImageMemory;
    1774  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1775  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1776  PFN_vkCreateBuffer vkCreateBuffer;
    1777  PFN_vkDestroyBuffer vkDestroyBuffer;
    1778  PFN_vkCreateImage vkCreateImage;
    1779  PFN_vkDestroyImage vkDestroyImage;
    1780  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    1781 #if VMA_DEDICATED_ALLOCATION
    1782  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1783  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1784 #endif
    1786 
    1788 typedef enum VmaRecordFlagBits {
    1795 
    1798 typedef VkFlags VmaRecordFlags;
    1799 
    1801 typedef struct VmaRecordSettings
    1802 {
    1812  const char* pFilePath;
    1814 
    1817 {
    1821 
    1822  VkPhysicalDevice physicalDevice;
    1824 
    1825  VkDevice device;
    1827 
    1830 
    1831  const VkAllocationCallbacks* pAllocationCallbacks;
    1833 
    1873  const VkDeviceSize* pHeapSizeLimit;
    1894 
    1896 VkResult vmaCreateAllocator(
    1897  const VmaAllocatorCreateInfo* pCreateInfo,
    1898  VmaAllocator* pAllocator);
    1899 
    1901 void vmaDestroyAllocator(
    1902  VmaAllocator allocator);
    1903 
    1909  VmaAllocator allocator,
    1910  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1911 
    1917  VmaAllocator allocator,
    1918  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1919 
    1927  VmaAllocator allocator,
    1928  uint32_t memoryTypeIndex,
    1929  VkMemoryPropertyFlags* pFlags);
    1930 
    1940  VmaAllocator allocator,
    1941  uint32_t frameIndex);
    1942 
    1945 typedef struct VmaStatInfo
    1946 {
    1948  uint32_t blockCount;
    1954  VkDeviceSize usedBytes;
    1956  VkDeviceSize unusedBytes;
    1959 } VmaStatInfo;
    1960 
    1962 typedef struct VmaStats
    1963 {
    1964  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1965  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1967 } VmaStats;
    1968 
    1970 void vmaCalculateStats(
    1971  VmaAllocator allocator,
    1972  VmaStats* pStats);
    1973 
    1974 #ifndef VMA_STATS_STRING_ENABLED
    1975 #define VMA_STATS_STRING_ENABLED 1
    1976 #endif
    1977 
    1978 #if VMA_STATS_STRING_ENABLED
    1979 
    1981 
    1983 void vmaBuildStatsString(
    1984  VmaAllocator allocator,
    1985  char** ppStatsString,
    1986  VkBool32 detailedMap);
    1987 
    1988 void vmaFreeStatsString(
    1989  VmaAllocator allocator,
    1990  char* pStatsString);
    1991 
    1992 #endif // #if VMA_STATS_STRING_ENABLED
    1993 
    2002 VK_DEFINE_HANDLE(VmaPool)
    2003 
    2004 typedef enum VmaMemoryUsage
    2005 {
    2054 } VmaMemoryUsage;
    2055 
    2065 
    2126 
    2142 
    2152 
    2159 
    2163 
    2165 {
    2178  VkMemoryPropertyFlags requiredFlags;
    2183  VkMemoryPropertyFlags preferredFlags;
    2191  uint32_t memoryTypeBits;
    2204  void* pUserData;
    2206 
    2223 VkResult vmaFindMemoryTypeIndex(
    2224  VmaAllocator allocator,
    2225  uint32_t memoryTypeBits,
    2226  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2227  uint32_t* pMemoryTypeIndex);
    2228 
    2242  VmaAllocator allocator,
    2243  const VkBufferCreateInfo* pBufferCreateInfo,
    2244  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2245  uint32_t* pMemoryTypeIndex);
    2246 
    2260  VmaAllocator allocator,
    2261  const VkImageCreateInfo* pImageCreateInfo,
    2262  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2263  uint32_t* pMemoryTypeIndex);
    2264 
    2285 
    2302 
    2313 
    2319 
    2322 typedef VkFlags VmaPoolCreateFlags;
    2323 
    2326 typedef struct VmaPoolCreateInfo {
    2341  VkDeviceSize blockSize;
    2370 
    2373 typedef struct VmaPoolStats {
    2376  VkDeviceSize size;
    2379  VkDeviceSize unusedSize;
    2392  VkDeviceSize unusedRangeSizeMax;
    2395  size_t blockCount;
    2396 } VmaPoolStats;
    2397 
    2404 VkResult vmaCreatePool(
    2405  VmaAllocator allocator,
    2406  const VmaPoolCreateInfo* pCreateInfo,
    2407  VmaPool* pPool);
    2408 
    2411 void vmaDestroyPool(
    2412  VmaAllocator allocator,
    2413  VmaPool pool);
    2414 
    2421 void vmaGetPoolStats(
    2422  VmaAllocator allocator,
    2423  VmaPool pool,
    2424  VmaPoolStats* pPoolStats);
    2425 
    2433  VmaAllocator allocator,
    2434  VmaPool pool,
    2435  size_t* pLostAllocationCount);
    2436 
    2451 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2452 
    2477 VK_DEFINE_HANDLE(VmaAllocation)
    2478 
    2479 
    2481 typedef struct VmaAllocationInfo {
    2486  uint32_t memoryType;
    2495  VkDeviceMemory deviceMemory;
    2500  VkDeviceSize offset;
    2505  VkDeviceSize size;
    2519  void* pUserData;
    2521 
    2532 VkResult vmaAllocateMemory(
    2533  VmaAllocator allocator,
    2534  const VkMemoryRequirements* pVkMemoryRequirements,
    2535  const VmaAllocationCreateInfo* pCreateInfo,
    2536  VmaAllocation* pAllocation,
    2537  VmaAllocationInfo* pAllocationInfo);
    2538 
    2558 VkResult vmaAllocateMemoryPages(
    2559  VmaAllocator allocator,
    2560  const VkMemoryRequirements* pVkMemoryRequirements,
    2561  const VmaAllocationCreateInfo* pCreateInfo,
    2562  size_t allocationCount,
    2563  VmaAllocation* pAllocations,
    2564  VmaAllocationInfo* pAllocationInfo);
    2565 
    2573  VmaAllocator allocator,
    2574  VkBuffer buffer,
    2575  const VmaAllocationCreateInfo* pCreateInfo,
    2576  VmaAllocation* pAllocation,
    2577  VmaAllocationInfo* pAllocationInfo);
    2578 
    2580 VkResult vmaAllocateMemoryForImage(
    2581  VmaAllocator allocator,
    2582  VkImage image,
    2583  const VmaAllocationCreateInfo* pCreateInfo,
    2584  VmaAllocation* pAllocation,
    2585  VmaAllocationInfo* pAllocationInfo);
    2586 
    2591 void vmaFreeMemory(
    2592  VmaAllocator allocator,
    2593  VmaAllocation allocation);
    2594 
    2605 void vmaFreeMemoryPages(
    2606  VmaAllocator allocator,
    2607  size_t allocationCount,
    2608  VmaAllocation* pAllocations);
    2609 
    2630 VkResult vmaResizeAllocation(
    2631  VmaAllocator allocator,
    2632  VmaAllocation allocation,
    2633  VkDeviceSize newSize);
    2634 
    2652  VmaAllocator allocator,
    2653  VmaAllocation allocation,
    2654  VmaAllocationInfo* pAllocationInfo);
    2655 
    2670 VkBool32 vmaTouchAllocation(
    2671  VmaAllocator allocator,
    2672  VmaAllocation allocation);
    2673 
    2688  VmaAllocator allocator,
    2689  VmaAllocation allocation,
    2690  void* pUserData);
    2691 
    2703  VmaAllocator allocator,
    2704  VmaAllocation* pAllocation);
    2705 
    2740 VkResult vmaMapMemory(
    2741  VmaAllocator allocator,
    2742  VmaAllocation allocation,
    2743  void** ppData);
    2744 
    2749 void vmaUnmapMemory(
    2750  VmaAllocator allocator,
    2751  VmaAllocation allocation);
    2752 
    2769 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2770 
    2787 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2788 
    2805 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2806 
    2813 VK_DEFINE_HANDLE(VmaDefragmentationContext)
    2814 
    2815 typedef enum VmaDefragmentationFlagBits {
    2819 typedef VkFlags VmaDefragmentationFlags;
    2820 
    2825 typedef struct VmaDefragmentationInfo2 {
    2849  uint32_t poolCount;
    2870  VkDeviceSize maxCpuBytesToMove;
    2880  VkDeviceSize maxGpuBytesToMove;
    2894  VkCommandBuffer commandBuffer;
    2896 
    2901 typedef struct VmaDefragmentationInfo {
    2906  VkDeviceSize maxBytesToMove;
    2913 
    2915 typedef struct VmaDefragmentationStats {
    2917  VkDeviceSize bytesMoved;
    2919  VkDeviceSize bytesFreed;
    2925 
    2955 VkResult vmaDefragmentationBegin(
    2956  VmaAllocator allocator,
    2957  const VmaDefragmentationInfo2* pInfo,
    2958  VmaDefragmentationStats* pStats,
    2959  VmaDefragmentationContext *pContext);
    2960 
    2966 VkResult vmaDefragmentationEnd(
    2967  VmaAllocator allocator,
    2968  VmaDefragmentationContext context);
    2969 
    3010 VkResult vmaDefragment(
    3011  VmaAllocator allocator,
    3012  VmaAllocation* pAllocations,
    3013  size_t allocationCount,
    3014  VkBool32* pAllocationsChanged,
    3015  const VmaDefragmentationInfo *pDefragmentationInfo,
    3016  VmaDefragmentationStats* pDefragmentationStats);
    3017 
    3030 VkResult vmaBindBufferMemory(
    3031  VmaAllocator allocator,
    3032  VmaAllocation allocation,
    3033  VkBuffer buffer);
    3034 
    3047 VkResult vmaBindImageMemory(
    3048  VmaAllocator allocator,
    3049  VmaAllocation allocation,
    3050  VkImage image);
    3051 
    3078 VkResult vmaCreateBuffer(
    3079  VmaAllocator allocator,
    3080  const VkBufferCreateInfo* pBufferCreateInfo,
    3081  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3082  VkBuffer* pBuffer,
    3083  VmaAllocation* pAllocation,
    3084  VmaAllocationInfo* pAllocationInfo);
    3085 
    3097 void vmaDestroyBuffer(
    3098  VmaAllocator allocator,
    3099  VkBuffer buffer,
    3100  VmaAllocation allocation);
    3101 
    3103 VkResult vmaCreateImage(
    3104  VmaAllocator allocator,
    3105  const VkImageCreateInfo* pImageCreateInfo,
    3106  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3107  VkImage* pImage,
    3108  VmaAllocation* pAllocation,
    3109  VmaAllocationInfo* pAllocationInfo);
    3110 
    3122 void vmaDestroyImage(
    3123  VmaAllocator allocator,
    3124  VkImage image,
    3125  VmaAllocation allocation);
    3126 
    3127 #ifdef __cplusplus
    3128 }
    3129 #endif
    3130 
    3131 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    3132 
    3133 // For Visual Studio IntelliSense.
    3134 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    3135 #define VMA_IMPLEMENTATION
    3136 #endif
    3137 
    3138 #ifdef VMA_IMPLEMENTATION
    3139 #undef VMA_IMPLEMENTATION
    3140 
    3141 #include <cstdint>
    3142 #include <cstdlib>
    3143 #include <cstring>
    3144 
    3145 /*******************************************************************************
    3146 CONFIGURATION SECTION
    3147 
    3148 Define some of these macros before each #include of this header or change them
3149 here if you need behavior other than the default, depending on your environment.
    3150 */
    3151 
    3152 /*
    3153 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    3154 internally, like:
    3155 
    3156  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    3157 
3158 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    3159 VmaAllocatorCreateInfo::pVulkanFunctions.
    3160 */
    3161 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    3162 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    3163 #endif
    3164 
    3165 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    3166 //#define VMA_USE_STL_CONTAINERS 1
    3167 
3168 /* Set this macro to 1 to make the library include and use STL containers:
3169 std::pair, std::vector, std::list, std::unordered_map.
3170 
3171 Set it to 0 or leave it undefined to make the library use its own implementation
3172 of the containers.
    3173 */
    3174 #if VMA_USE_STL_CONTAINERS
    3175  #define VMA_USE_STL_VECTOR 1
    3176  #define VMA_USE_STL_UNORDERED_MAP 1
    3177  #define VMA_USE_STL_LIST 1
    3178 #endif
    3179 
    3180 #ifndef VMA_USE_STL_SHARED_MUTEX
    3181  // Compiler conforms to C++17.
    3182  #if __cplusplus >= 201703L
    3183  #define VMA_USE_STL_SHARED_MUTEX 1
3184  // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
3185  // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
    3186  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    3187  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
    3188  #define VMA_USE_STL_SHARED_MUTEX 1
    3189  #else
    3190  #define VMA_USE_STL_SHARED_MUTEX 0
    3191  #endif
    3192 #endif
    3193 
    3194 /*
    3195 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
3196 The library has its own container implementation.
    3197 */
    3198 #if VMA_USE_STL_VECTOR
    3199  #include <vector>
    3200 #endif
    3201 
    3202 #if VMA_USE_STL_UNORDERED_MAP
    3203  #include <unordered_map>
    3204 #endif
    3205 
    3206 #if VMA_USE_STL_LIST
    3207  #include <list>
    3208 #endif
    3209 
    3210 /*
3211 The following headers are used in this CONFIGURATION section only, so feel free to
    3212 remove them if not needed.
    3213 */
    3214 #include <cassert> // for assert
    3215 #include <algorithm> // for min, max
    3216 #include <mutex>
    3217 
    3218 #ifndef VMA_NULL
    3219  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    3220  #define VMA_NULL nullptr
    3221 #endif
    3222 
    3223 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3224 #include <malloc.h> // memalign() is declared here on Android
    3225 void *aligned_alloc(size_t alignment, size_t size)
    3226 {
    3227  // alignment must be >= sizeof(void*)
    3228  if(alignment < sizeof(void*))
    3229  {
    3230  alignment = sizeof(void*);
    3231  }
    3232 
    3233  return memalign(alignment, size);
    3234 }
    3235 #elif defined(__APPLE__) || defined(__ANDROID__)
    3236 #include <cstdlib>
    3237 void *aligned_alloc(size_t alignment, size_t size)
    3238 {
    3239  // alignment must be >= sizeof(void*)
    3240  if(alignment < sizeof(void*))
    3241  {
    3242  alignment = sizeof(void*);
    3243  }
    3244 
    3245  void *pointer;
    3246  if(posix_memalign(&pointer, alignment, size) == 0)
    3247  return pointer;
    3248  return VMA_NULL;
    3249 }
    3250 #endif
    3251 
3252 // If your compiler is not compatible with C++11 and the definition of the
3253 // aligned_alloc() function is missing, uncommenting the following line may help:
    3254 
    3255 //#include <malloc.h>
    3256 
    3257 // Normal assert to check for programmer's errors, especially in Debug configuration.
    3258 #ifndef VMA_ASSERT
    3259  #ifdef _DEBUG
    3260  #define VMA_ASSERT(expr) assert(expr)
    3261  #else
    3262  #define VMA_ASSERT(expr)
    3263  #endif
    3264 #endif
    3265 
    3266 // Assert that will be called very often, like inside data structures e.g. operator[].
3267 // Making it non-empty can make the program slow.
    3268 #ifndef VMA_HEAVY_ASSERT
    3269  #ifdef _DEBUG
    3270  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    3271  #else
    3272  #define VMA_HEAVY_ASSERT(expr)
    3273  #endif
    3274 #endif
    3275 
    3276 #ifndef VMA_ALIGN_OF
    3277  #define VMA_ALIGN_OF(type) (__alignof(type))
    3278 #endif
    3279 
    3280 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    3281  #if defined(_WIN32)
    3282  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    3283  #else
    3284  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    3285  #endif
    3286 #endif
    3287 
    3288 #ifndef VMA_SYSTEM_FREE
    3289  #if defined(_WIN32)
    3290  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    3291  #else
    3292  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    3293  #endif
    3294 #endif
    3295 
    3296 #ifndef VMA_MIN
    3297  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    3298 #endif
    3299 
    3300 #ifndef VMA_MAX
    3301  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    3302 #endif
    3303 
    3304 #ifndef VMA_SWAP
    3305  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    3306 #endif
    3307 
    3308 #ifndef VMA_SORT
    3309  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    3310 #endif
    3311 
    3312 #ifndef VMA_DEBUG_LOG
    3313  #define VMA_DEBUG_LOG(format, ...)
    3314  /*
    3315  #define VMA_DEBUG_LOG(format, ...) do { \
    3316  printf(format, __VA_ARGS__); \
    3317  printf("\n"); \
    3318  } while(false)
    3319  */
    3320 #endif
    3321 
    3322 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    3323 #if VMA_STATS_STRING_ENABLED
    3324  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    3325  {
    3326  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    3327  }
    3328  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    3329  {
    3330  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    3331  }
    3332  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    3333  {
    3334  snprintf(outStr, strLen, "%p", ptr);
    3335  }
    3336 #endif
    3337 
    3338 #ifndef VMA_MUTEX
    3339  class VmaMutex
    3340  {
    3341  public:
    3342  void Lock() { m_Mutex.lock(); }
    3343  void Unlock() { m_Mutex.unlock(); }
    3344  private:
    3345  std::mutex m_Mutex;
    3346  };
    3347  #define VMA_MUTEX VmaMutex
    3348 #endif
    3349 
    3350 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
    3351 #ifndef VMA_RW_MUTEX
    3352  #if VMA_USE_STL_SHARED_MUTEX
    3353  // Use std::shared_mutex from C++17.
    3354  #include <shared_mutex>
    3355  class VmaRWMutex
    3356  {
    3357  public:
    3358  void LockRead() { m_Mutex.lock_shared(); }
    3359  void UnlockRead() { m_Mutex.unlock_shared(); }
    3360  void LockWrite() { m_Mutex.lock(); }
    3361  void UnlockWrite() { m_Mutex.unlock(); }
    3362  private:
    3363  std::shared_mutex m_Mutex;
    3364  };
    3365  #define VMA_RW_MUTEX VmaRWMutex
    3366  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
    3367  // Use SRWLOCK from WinAPI.
    3368  // Minimum supported client = Windows Vista, server = Windows Server 2008.
    3369  class VmaRWMutex
    3370  {
    3371  public:
    3372  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
    3373  void LockRead() { AcquireSRWLockShared(&m_Lock); }
    3374  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
    3375  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
    3376  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
    3377  private:
    3378  SRWLOCK m_Lock;
    3379  };
    3380  #define VMA_RW_MUTEX VmaRWMutex
    3381  #else
    3382  // Less efficient fallback: Use normal mutex.
    3383  class VmaRWMutex
    3384  {
    3385  public:
    3386  void LockRead() { m_Mutex.Lock(); }
    3387  void UnlockRead() { m_Mutex.Unlock(); }
    3388  void LockWrite() { m_Mutex.Lock(); }
    3389  void UnlockWrite() { m_Mutex.Unlock(); }
    3390  private:
    3391  VMA_MUTEX m_Mutex;
    3392  };
    3393  #define VMA_RW_MUTEX VmaRWMutex
    3394  #endif // #if VMA_USE_STL_SHARED_MUTEX
    3395 #endif // #ifndef VMA_RW_MUTEX
    3396 
    3397 /*
    3398 If providing your own implementation, you need to implement a subset of std::atomic:
    3399 
    3400 - Constructor(uint32_t desired)
    3401 - uint32_t load() const
    3402 - void store(uint32_t desired)
    3403 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    3404 */
    3405 #ifndef VMA_ATOMIC_UINT32
    3406  #include <atomic>
    3407  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    3408 #endif
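// A custom replacement providing the subset above could look like this
// (illustration only, not part of the library; MyAtomicUint32 is a hypothetical name):
//
// class MyAtomicUint32
// {
// public:
//     MyAtomicUint32(uint32_t desired) : m_Value(desired) {}
//     uint32_t load() const { return m_Value.load(); }
//     void store(uint32_t desired) { m_Value.store(desired); }
//     bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
//         { return m_Value.compare_exchange_weak(expected, desired); }
// private:
//     std::atomic<uint32_t> m_Value; // could instead wrap a platform intrinsic
// };
// #define VMA_ATOMIC_UINT32 MyAtomicUint32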
    3409 
    3410 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    3411 
    3415  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    3416 #endif
    3417 
    3418 #ifndef VMA_DEBUG_ALIGNMENT
    3419 
    3423  #define VMA_DEBUG_ALIGNMENT (1)
    3424 #endif
    3425 
    3426 #ifndef VMA_DEBUG_MARGIN
    3427 
    3431  #define VMA_DEBUG_MARGIN (0)
    3432 #endif
    3433 
    3434 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    3435 
    3439  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3440 #endif
    3441 
    3442 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3443 
    3448  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3449 #endif
    3450 
    3451 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3452 
    3456  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3457 #endif
    3458 
    3459 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3460 
    3464  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3465 #endif
    3466 
    3467 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3468  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3470 #endif
    3471 
    3472 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3473  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3475 #endif
    3476 
    3477 #ifndef VMA_CLASS_NO_COPY
    3478  #define VMA_CLASS_NO_COPY(className) \
    3479  private: \
    3480  className(const className&) = delete; \
    3481  className& operator=(const className&) = delete;
    3482 #endif
    3483 
    3484 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3485 
    3486 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3487 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3488 
    3489 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3490 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3491 
    3492 /*******************************************************************************
    3493 END OF CONFIGURATION
    3494 */
    3495 
    3496 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
    3497 
    3498 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3499  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3500 
3501 // Returns the number of bits set to 1 in (v).
    3502 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3503 {
    3504  uint32_t c = v - ((v >> 1) & 0x55555555);
    3505  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3506  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3507  c = ((c >> 8) + c) & 0x00FF00FF;
    3508  c = ((c >> 16) + c) & 0x0000FFFF;
    3509  return c;
    3510 }
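// Example: VmaCountBitsSet(0x0000000B) == 3 (binary 1011 has three bits set).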
    3511 
3512 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
    3513 // Use types like uint32_t, uint64_t as T.
    3514 template <typename T>
    3515 static inline T VmaAlignUp(T val, T align)
    3516 {
    3517  return (val + align - 1) / align * align;
    3518 }
3519 // Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
    3520 // Use types like uint32_t, uint64_t as T.
    3521 template <typename T>
    3522 static inline T VmaAlignDown(T val, T align)
    3523 {
    3524  return val / align * align;
    3525 }
    3526 
3527 // Division with mathematical rounding to the nearest integer.
    3528 template <typename T>
    3529 static inline T VmaRoundDiv(T x, T y)
    3530 {
    3531  return (x + (y / (T)2)) / y;
    3532 }
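// Example: VmaRoundDiv(10, 4) == 3, because 10/4 = 2.5 rounds up to 3.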
    3533 
    3534 /*
    3535 Returns true if given number is a power of two.
3536 T must be an unsigned integer, or a signed integer whose value is always nonnegative.
3537 For 0 it returns true.
    3538 */
    3539 template <typename T>
    3540 inline bool VmaIsPow2(T x)
    3541 {
    3542  return (x & (x-1)) == 0;
    3543 }
    3544 
3545 // Returns the smallest power of 2 greater than or equal to v.
    3546 static inline uint32_t VmaNextPow2(uint32_t v)
    3547 {
    3548  v--;
    3549  v |= v >> 1;
    3550  v |= v >> 2;
    3551  v |= v >> 4;
    3552  v |= v >> 8;
    3553  v |= v >> 16;
    3554  v++;
    3555  return v;
    3556 }
    3557 static inline uint64_t VmaNextPow2(uint64_t v)
    3558 {
    3559  v--;
    3560  v |= v >> 1;
    3561  v |= v >> 2;
    3562  v |= v >> 4;
    3563  v |= v >> 8;
    3564  v |= v >> 16;
    3565  v |= v >> 32;
    3566  v++;
    3567  return v;
    3568 }
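// Example: VmaNextPow2(5) == 8, VmaNextPow2(8) == 8.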
    3569 
3570 // Returns the largest power of 2 less than or equal to v.
    3571 static inline uint32_t VmaPrevPow2(uint32_t v)
    3572 {
    3573  v |= v >> 1;
    3574  v |= v >> 2;
    3575  v |= v >> 4;
    3576  v |= v >> 8;
    3577  v |= v >> 16;
    3578  v = v ^ (v >> 1);
    3579  return v;
    3580 }
    3581 static inline uint64_t VmaPrevPow2(uint64_t v)
    3582 {
    3583  v |= v >> 1;
    3584  v |= v >> 2;
    3585  v |= v >> 4;
    3586  v |= v >> 8;
    3587  v |= v >> 16;
    3588  v |= v >> 32;
    3589  v = v ^ (v >> 1);
    3590  return v;
    3591 }
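// Example: VmaPrevPow2(5) == 4, VmaPrevPow2(8) == 8.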
    3592 
    3593 static inline bool VmaStrIsEmpty(const char* pStr)
    3594 {
    3595  return pStr == VMA_NULL || *pStr == '\0';
    3596 }
    3597 
    3598 #if VMA_STATS_STRING_ENABLED
    3599 
    3600 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3601 {
    3602  switch(algorithm)
    3603  {
    3605  return "Linear";
    3607  return "Buddy";
    3608  case 0:
    3609  return "Default";
    3610  default:
    3611  VMA_ASSERT(0);
    3612  return "";
    3613  }
    3614 }
    3615 
    3616 #endif // #if VMA_STATS_STRING_ENABLED
    3617 
    3618 #ifndef VMA_SORT
    3619 
    3620 template<typename Iterator, typename Compare>
    3621 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3622 {
    3623  Iterator centerValue = end; --centerValue;
    3624  Iterator insertIndex = beg;
    3625  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3626  {
    3627  if(cmp(*memTypeIndex, *centerValue))
    3628  {
    3629  if(insertIndex != memTypeIndex)
    3630  {
    3631  VMA_SWAP(*memTypeIndex, *insertIndex);
    3632  }
    3633  ++insertIndex;
    3634  }
    3635  }
    3636  if(insertIndex != centerValue)
    3637  {
    3638  VMA_SWAP(*insertIndex, *centerValue);
    3639  }
    3640  return insertIndex;
    3641 }
    3642 
    3643 template<typename Iterator, typename Compare>
    3644 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3645 {
    3646  if(beg < end)
    3647  {
    3648  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3649  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3650  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3651  }
    3652 }
    3653 
    3654 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3655 
    3656 #endif // #ifndef VMA_SORT
    3657 
    3658 /*
    3659 Returns true if two memory blocks occupy overlapping pages.
3660 ResourceA must be at a lower memory offset than ResourceB.
3661 
3662 The algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3663 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3664 */
    3665 static inline bool VmaBlocksOnSamePage(
    3666  VkDeviceSize resourceAOffset,
    3667  VkDeviceSize resourceASize,
    3668  VkDeviceSize resourceBOffset,
    3669  VkDeviceSize pageSize)
    3670 {
    3671  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3672  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3673  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3674  VkDeviceSize resourceBStart = resourceBOffset;
    3675  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3676  return resourceAEndPage == resourceBStartPage;
    3677 }
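// Example: with pageSize = 4096, a resource at offset 0 with size 4096 ends at
// byte 4095 (page 0), while a resource starting at offset 4096 begins on page 1,
// so the function returns false and there is no granularity conflict.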
    3678 
    3679 enum VmaSuballocationType
    3680 {
    3681  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3682  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3683  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3684  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3685  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3686  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3687  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3688 };
    3689 
    3690 /*
    3691 Returns true if given suballocation types could conflict and must respect
3692 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
3693 or a linear image and the other is an optimal image. If the type is unknown, behave
3694 conservatively.
    3695 */
    3696 static inline bool VmaIsBufferImageGranularityConflict(
    3697  VmaSuballocationType suballocType1,
    3698  VmaSuballocationType suballocType2)
    3699 {
    3700  if(suballocType1 > suballocType2)
    3701  {
    3702  VMA_SWAP(suballocType1, suballocType2);
    3703  }
    3704 
    3705  switch(suballocType1)
    3706  {
    3707  case VMA_SUBALLOCATION_TYPE_FREE:
    3708  return false;
    3709  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3710  return true;
    3711  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3712  return
    3713  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3714  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3715  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3716  return
    3717  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3718  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3719  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3720  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3721  return
    3722  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3723  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3724  return false;
    3725  default:
    3726  VMA_ASSERT(0);
    3727  return true;
    3728  }
    3729 }
    3730 
    3731 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3732 {
    3733 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    3734  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3735  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3736  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3737  {
    3738  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3739  }
    3740 #else
    3741  // no-op
    3742 #endif
    3743 }
    3744 
    3745 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3746 {
    3747 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    3748  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3749  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3750  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3751  {
    3752  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3753  {
    3754  return false;
    3755  }
    3756  }
    3757 #endif
    3758  return true;
    3759 }
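// Example: with VMA_DEBUG_MARGIN = 16 and VMA_DEBUG_DETECT_CORRUPTION = 1, four
// uint32_t magic values are written after each allocation and later validated,
// e.g. when vmaCheckCorruption() is called.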
    3760 
    3761 /*
    3762 Fills structure with parameters of an example buffer to be used for transfers
    3763 during GPU memory defragmentation.
    3764 */
    3765 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
    3766 {
    3767  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    3768  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    3769  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    3770  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
    3771 }
    3772 
    3773 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3774 struct VmaMutexLock
    3775 {
    3776  VMA_CLASS_NO_COPY(VmaMutexLock)
    3777 public:
    3778  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
    3779  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3780  { if(m_pMutex) { m_pMutex->Lock(); } }
    3781  ~VmaMutexLock()
    3782  { if(m_pMutex) { m_pMutex->Unlock(); } }
    3783 private:
    3784  VMA_MUTEX* m_pMutex;
    3785 };
    3786 
    3787 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
    3788 struct VmaMutexLockRead
    3789 {
    3790  VMA_CLASS_NO_COPY(VmaMutexLockRead)
    3791 public:
    3792  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
    3793  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3794  { if(m_pMutex) { m_pMutex->LockRead(); } }
    3795  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
    3796 private:
    3797  VMA_RW_MUTEX* m_pMutex;
    3798 };
    3799 
    3800 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
    3801 struct VmaMutexLockWrite
    3802 {
    3803  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
    3804 public:
    3805  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
    3806  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3807  { if(m_pMutex) { m_pMutex->LockWrite(); } }
    3808  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
    3809 private:
    3810  VMA_RW_MUTEX* m_pMutex;
    3811 };
    3812 
    3813 #if VMA_DEBUG_GLOBAL_MUTEX
    3814  static VMA_MUTEX gDebugGlobalMutex;
    3815  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3816 #else
    3817  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3818 #endif
    3819 
    3820 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3821 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3822 
    3823 /*
3824 Performs binary search and returns an iterator to the first element that is greater
3825 than or equal to (key), according to comparison (cmp).
3826 
3827 Cmp should return true if the first argument is less than the second argument.
3828 
3829 The returned value is the found element, if present in the collection, or the place
3830 where a new element with value (key) should be inserted.
    3831 */
    3832 template <typename CmpLess, typename IterT, typename KeyT>
    3833 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
    3834 {
    3835  size_t down = 0, up = (end - beg);
    3836  while(down < up)
    3837  {
    3838  const size_t mid = (down + up) / 2;
    3839  if(cmp(*(beg+mid), key))
    3840  {
    3841  down = mid + 1;
    3842  }
    3843  else
    3844  {
    3845  up = mid;
    3846  }
    3847  }
    3848  return beg + down;
    3849 }
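// Example: for a sorted array {1, 3, 7} and key 5 (cmp being less-than), the
// function returns an iterator to element 7 - the position where 5 would be
// inserted to keep the array sorted.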
    3850 
    3851 template<typename CmpLess, typename IterT, typename KeyT>
    3852 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
    3853 {
    3854  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3855  beg, end, value, cmp);
    3856  if(it == end ||
    3857  (!cmp(*it, value) && !cmp(value, *it)))
    3858  {
    3859  return it;
    3860  }
    3861  return end;
    3862 }
    3863 
    3864 /*
3865 Returns true if all pointers in the array are non-null and unique.
    3866 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
    3867 T must be pointer type, e.g. VmaAllocation, VmaPool.
    3868 */
    3869 template<typename T>
    3870 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
    3871 {
    3872  for(uint32_t i = 0; i < count; ++i)
    3873  {
    3874  const T iPtr = arr[i];
    3875  if(iPtr == VMA_NULL)
    3876  {
    3877  return false;
    3878  }
    3879  for(uint32_t j = i + 1; j < count; ++j)
    3880  {
    3881  if(iPtr == arr[j])
    3882  {
    3883  return false;
    3884  }
    3885  }
    3886  }
    3887  return true;
    3888 }
    3889 
    3891 // Memory allocation
    3892 
    3893 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3894 {
    3895  if((pAllocationCallbacks != VMA_NULL) &&
    3896  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3897  {
    3898  return (*pAllocationCallbacks->pfnAllocation)(
    3899  pAllocationCallbacks->pUserData,
    3900  size,
    3901  alignment,
    3902  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3903  }
    3904  else
    3905  {
    3906  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3907  }
    3908 }
    3909 
    3910 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3911 {
    3912  if((pAllocationCallbacks != VMA_NULL) &&
    3913  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3914  {
    3915  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3916  }
    3917  else
    3918  {
    3919  VMA_SYSTEM_FREE(ptr);
    3920  }
    3921 }
    3922 
    3923 template<typename T>
    3924 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3925 {
    3926  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3927 }
    3928 
    3929 template<typename T>
    3930 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3931 {
    3932  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3933 }
    3934 
    3935 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3936 
    3937 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3938 
    3939 template<typename T>
    3940 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3941 {
    3942  ptr->~T();
    3943  VmaFree(pAllocationCallbacks, ptr);
    3944 }
    3945 
    3946 template<typename T>
    3947 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3948 {
    3949  if(ptr != VMA_NULL)
    3950  {
    3951  for(size_t i = count; i--; )
    3952  {
    3953  ptr[i].~T();
    3954  }
    3955  VmaFree(pAllocationCallbacks, ptr);
    3956  }
    3957 }
    3958 
    3959 // STL-compatible allocator.
    3960 template<typename T>
    3961 class VmaStlAllocator
    3962 {
    3963 public:
    3964  const VkAllocationCallbacks* const m_pCallbacks;
    3965  typedef T value_type;
    3966 
    3967  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    3968  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    3969 
    3970  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    3971  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    3972 
    3973  template<typename U>
    3974  bool operator==(const VmaStlAllocator<U>& rhs) const
    3975  {
    3976  return m_pCallbacks == rhs.m_pCallbacks;
    3977  }
    3978  template<typename U>
    3979  bool operator!=(const VmaStlAllocator<U>& rhs) const
    3980  {
    3981  return m_pCallbacks != rhs.m_pCallbacks;
    3982  }
    3983 
    3984  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    3985 };
    3986 
    3987 #if VMA_USE_STL_VECTOR
    3988 
    3989 #define VmaVector std::vector
    3990 
    3991 template<typename T, typename allocatorT>
    3992 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3993 {
    3994  vec.insert(vec.begin() + index, item);
    3995 }
    3996 
    3997 template<typename T, typename allocatorT>
    3998 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3999 {
    4000  vec.erase(vec.begin() + index);
    4001 }
    4002 
    4003 #else // #if VMA_USE_STL_VECTOR
    4004 
4005 /* Class with an interface compatible with a subset of std::vector.
    4006 T must be POD because constructors and destructors are not called and memcpy is
    4007 used for these objects. */
    4008 template<typename T, typename AllocatorT>
    4009 class VmaVector
    4010 {
    4011 public:
    4012  typedef T value_type;
    4013 
    4014  VmaVector(const AllocatorT& allocator) :
    4015  m_Allocator(allocator),
    4016  m_pArray(VMA_NULL),
    4017  m_Count(0),
    4018  m_Capacity(0)
    4019  {
    4020  }
    4021 
    4022  VmaVector(size_t count, const AllocatorT& allocator) :
    4023  m_Allocator(allocator),
    4024  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    4025  m_Count(count),
    4026  m_Capacity(count)
    4027  {
    4028  }
    4029 
    4030  VmaVector(const VmaVector<T, AllocatorT>& src) :
    4031  m_Allocator(src.m_Allocator),
    4032  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    4033  m_Count(src.m_Count),
    4034  m_Capacity(src.m_Count)
    4035  {
    4036  if(m_Count != 0)
    4037  {
    4038  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    4039  }
    4040  }
    4041 
    4042  ~VmaVector()
    4043  {
    4044  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4045  }
    4046 
    4047  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    4048  {
    4049  if(&rhs != this)
    4050  {
    4051  resize(rhs.m_Count);
    4052  if(m_Count != 0)
    4053  {
    4054  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    4055  }
    4056  }
    4057  return *this;
    4058  }
    4059 
    4060  bool empty() const { return m_Count == 0; }
    4061  size_t size() const { return m_Count; }
    4062  T* data() { return m_pArray; }
    4063  const T* data() const { return m_pArray; }
    4064 
    4065  T& operator[](size_t index)
    4066  {
    4067  VMA_HEAVY_ASSERT(index < m_Count);
    4068  return m_pArray[index];
    4069  }
    4070  const T& operator[](size_t index) const
    4071  {
    4072  VMA_HEAVY_ASSERT(index < m_Count);
    4073  return m_pArray[index];
    4074  }
    4075 
    4076  T& front()
    4077  {
    4078  VMA_HEAVY_ASSERT(m_Count > 0);
    4079  return m_pArray[0];
    4080  }
    4081  const T& front() const
    4082  {
    4083  VMA_HEAVY_ASSERT(m_Count > 0);
    4084  return m_pArray[0];
    4085  }
    4086  T& back()
    4087  {
    4088  VMA_HEAVY_ASSERT(m_Count > 0);
    4089  return m_pArray[m_Count - 1];
    4090  }
    4091  const T& back() const
    4092  {
    4093  VMA_HEAVY_ASSERT(m_Count > 0);
    4094  return m_pArray[m_Count - 1];
    4095  }
    4096 
    4097  void reserve(size_t newCapacity, bool freeMemory = false)
    4098  {
    4099  newCapacity = VMA_MAX(newCapacity, m_Count);
    4100 
    4101  if((newCapacity < m_Capacity) && !freeMemory)
    4102  {
    4103  newCapacity = m_Capacity;
    4104  }
    4105 
    4106  if(newCapacity != m_Capacity)
    4107  {
    4108  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    4109  if(m_Count != 0)
    4110  {
    4111  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    4112  }
    4113  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4114  m_Capacity = newCapacity;
    4115  m_pArray = newArray;
    4116  }
    4117  }
    4118 
    4119  void resize(size_t newCount, bool freeMemory = false)
    4120  {
    4121  size_t newCapacity = m_Capacity;
    4122  if(newCount > m_Capacity)
    4123  {
    4124  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    4125  }
    4126  else if(freeMemory)
    4127  {
    4128  newCapacity = newCount;
    4129  }
    4130 
    4131  if(newCapacity != m_Capacity)
    4132  {
    4133  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    4134  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    4135  if(elementsToCopy != 0)
    4136  {
    4137  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    4138  }
    4139  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4140  m_Capacity = newCapacity;
    4141  m_pArray = newArray;
    4142  }
    4143 
    4144  m_Count = newCount;
    4145  }
    4146 
    4147  void clear(bool freeMemory = false)
    4148  {
    4149  resize(0, freeMemory);
    4150  }
    4151 
    4152  void insert(size_t index, const T& src)
    4153  {
    4154  VMA_HEAVY_ASSERT(index <= m_Count);
    4155  const size_t oldCount = size();
    4156  resize(oldCount + 1);
    4157  if(index < oldCount)
    4158  {
    4159  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    4160  }
    4161  m_pArray[index] = src;
    4162  }
    4163 
    4164  void remove(size_t index)
    4165  {
    4166  VMA_HEAVY_ASSERT(index < m_Count);
    4167  const size_t oldCount = size();
    4168  if(index < oldCount - 1)
    4169  {
    4170  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    4171  }
    4172  resize(oldCount - 1);
    4173  }
    4174 
    4175  void push_back(const T& src)
    4176  {
    4177  const size_t newIndex = size();
    4178  resize(newIndex + 1);
    4179  m_pArray[newIndex] = src;
    4180  }
    4181 
    4182  void pop_back()
    4183  {
    4184  VMA_HEAVY_ASSERT(m_Count > 0);
    4185  resize(size() - 1);
    4186  }
    4187 
    4188  void push_front(const T& src)
    4189  {
    4190  insert(0, src);
    4191  }
    4192 
    4193  void pop_front()
    4194  {
    4195  VMA_HEAVY_ASSERT(m_Count > 0);
    4196  remove(0);
    4197  }
    4198 
    4199  typedef T* iterator;
    4200 
    4201  iterator begin() { return m_pArray; }
    4202  iterator end() { return m_pArray + m_Count; }
    4203 
    4204 private:
    4205  AllocatorT m_Allocator;
    4206  T* m_pArray;
    4207  size_t m_Count;
    4208  size_t m_Capacity;
    4209 };
    4210 
    4211 template<typename T, typename allocatorT>
    4212 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    4213 {
    4214  vec.insert(index, item);
    4215 }
    4216 
    4217 template<typename T, typename allocatorT>
    4218 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    4219 {
    4220  vec.remove(index);
    4221 }
    4222 
    4223 #endif // #if VMA_USE_STL_VECTOR
    4224 
    4225 template<typename CmpLess, typename VectorT>
    4226 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    4227 {
    4228  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4229  vector.data(),
    4230  vector.data() + vector.size(),
    4231  value,
    4232  CmpLess()) - vector.data();
    4233  VmaVectorInsert(vector, indexToInsert, value);
    4234  return indexToInsert;
    4235 }
    4236 
    4237 template<typename CmpLess, typename VectorT>
    4238 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    4239 {
    4240  CmpLess comparator;
    4241  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    4242  vector.begin(),
    4243  vector.end(),
    4244  value,
    4245  comparator);
    4246  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    4247  {
    4248  size_t indexToRemove = it - vector.begin();
    4249  VmaVectorRemove(vector, indexToRemove);
    4250  return true;
    4251  }
    4252  return false;
    4253 }
    4254 
    4256 // class VmaPoolAllocator
    4257 
    4258 /*
    4259 Allocator for objects of type T using a list of arrays (pools) to speed up
4260 allocation. The number of elements that can be allocated is not bounded, because
4261 the allocator can create multiple blocks.
    4262 */
    4263 template<typename T>
    4264 class VmaPoolAllocator
    4265 {
    4266  VMA_CLASS_NO_COPY(VmaPoolAllocator)
    4267 public:
    4268  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    4269  ~VmaPoolAllocator();
    4270  void Clear();
    4271  T* Alloc();
    4272  void Free(T* ptr);
    4273 
    4274 private:
    4275  union Item
    4276  {
    4277  uint32_t NextFreeIndex;
    4278  T Value;
    4279  };
    4280 
    4281  struct ItemBlock
    4282  {
    4283  Item* pItems;
    4284  uint32_t Capacity;
    4285  uint32_t FirstFreeIndex;
    4286  };
    4287 
    4288  const VkAllocationCallbacks* m_pAllocationCallbacks;
    4289  const uint32_t m_FirstBlockCapacity;
    4290  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    4291 
    4292  ItemBlock& CreateNewBlock();
    4293 };
    4294 
    4295 template<typename T>
    4296 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    4297  m_pAllocationCallbacks(pAllocationCallbacks),
    4298  m_FirstBlockCapacity(firstBlockCapacity),
    4299  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    4300 {
    4301  VMA_ASSERT(m_FirstBlockCapacity > 1);
    4302 }
    4303 
    4304 template<typename T>
    4305 VmaPoolAllocator<T>::~VmaPoolAllocator()
    4306 {
    4307  Clear();
    4308 }
    4309 
    4310 template<typename T>
    4311 void VmaPoolAllocator<T>::Clear()
    4312 {
    4313  for(size_t i = m_ItemBlocks.size(); i--; )
    4314  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    4315  m_ItemBlocks.clear();
    4316 }
    4317 
    4318 template<typename T>
    4319 T* VmaPoolAllocator<T>::Alloc()
    4320 {
    4321  for(size_t i = m_ItemBlocks.size(); i--; )
    4322  {
    4323  ItemBlock& block = m_ItemBlocks[i];
4324 // This block has some free items: Use the first one.
    4325  if(block.FirstFreeIndex != UINT32_MAX)
    4326  {
    4327  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    4328  block.FirstFreeIndex = pItem->NextFreeIndex;
    4329  return &pItem->Value;
    4330  }
    4331  }
    4332 
4333 // No block has a free item: Create a new one and use it.
    4334  ItemBlock& newBlock = CreateNewBlock();
    4335  Item* const pItem = &newBlock.pItems[0];
    4336  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    4337  return &pItem->Value;
    4338 }
    4339 
    4340 template<typename T>
    4341 void VmaPoolAllocator<T>::Free(T* ptr)
    4342 {
    4343  // Search all memory blocks to find ptr.
    4344  for(size_t i = m_ItemBlocks.size(); i--; )
    4345  {
    4346  ItemBlock& block = m_ItemBlocks[i];
    4347 
    4348  // Casting to union.
    4349  Item* pItemPtr;
    4350  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    4351 
    4352  // Check if pItemPtr is in address range of this block.
    4353  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
    4354  {
    4355  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    4356  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    4357  block.FirstFreeIndex = index;
    4358  return;
    4359  }
    4360  }
    4361  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    4362 }
    4363 
    4364 template<typename T>
    4365 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    4366 {
    4367  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
    4368  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
    4369 
    4370  const ItemBlock newBlock = {
    4371  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
    4372  newBlockCapacity,
    4373  0 };
    4374 
    4375  m_ItemBlocks.push_back(newBlock);
    4376 
    4377  // Set up a singly-linked list of all free items in this block.
    4378  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
    4379  newBlock.pItems[i].NextFreeIndex = i + 1;
    4380  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    4381  return m_ItemBlocks.back();
    4382 }
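// Worked example of the growth policy above: with firstBlockCapacity = 32,
// successive blocks are created with capacities 32, 48, 72, 108, ... - each new
// block has 3/2 the capacity of the previous one.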
    4383 
    4385 // class VmaRawList, VmaList
    4386 
    4387 #if VMA_USE_STL_LIST
    4388 
    4389 #define VmaList std::list
    4390 
    4391 #else // #if VMA_USE_STL_LIST
    4392 
    4393 template<typename T>
    4394 struct VmaListItem
    4395 {
    4396  VmaListItem* pPrev;
    4397  VmaListItem* pNext;
    4398  T Value;
    4399 };
    4400 
    4401 // Doubly linked list.
    4402 template<typename T>
    4403 class VmaRawList
    4404 {
    4405  VMA_CLASS_NO_COPY(VmaRawList)
    4406 public:
    4407  typedef VmaListItem<T> ItemType;
    4408 
    4409  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    4410  ~VmaRawList();
    4411  void Clear();
    4412 
    4413  size_t GetCount() const { return m_Count; }
    4414  bool IsEmpty() const { return m_Count == 0; }
    4415 
    4416  ItemType* Front() { return m_pFront; }
    4417  const ItemType* Front() const { return m_pFront; }
    4418  ItemType* Back() { return m_pBack; }
    4419  const ItemType* Back() const { return m_pBack; }
    4420 
    4421  ItemType* PushBack();
    4422  ItemType* PushFront();
    4423  ItemType* PushBack(const T& value);
    4424  ItemType* PushFront(const T& value);
    4425  void PopBack();
    4426  void PopFront();
    4427 
    4428  // Item can be null - it means PushBack.
    4429  ItemType* InsertBefore(ItemType* pItem);
    4430  // Item can be null - it means PushFront.
    4431  ItemType* InsertAfter(ItemType* pItem);
    4432 
    4433  ItemType* InsertBefore(ItemType* pItem, const T& value);
    4434  ItemType* InsertAfter(ItemType* pItem, const T& value);
    4435 
    4436  void Remove(ItemType* pItem);
    4437 
    4438 private:
    4439  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    4440  VmaPoolAllocator<ItemType> m_ItemAllocator;
    4441  ItemType* m_pFront;
    4442  ItemType* m_pBack;
    4443  size_t m_Count;
    4444 };
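// A usage sketch (hypothetical code): VmaRawList hands out raw ItemType nodes,
// and passing a null item to InsertBefore/InsertAfter degenerates to
// PushBack/PushFront, as documented above.
//
//   VmaRawList<int> list(pAllocationCallbacks);
//   VmaListItem<int>* first = list.PushBack(1);  // list: 1
//   list.PushBack(3);                            // list: 1 3
//   list.InsertAfter(first, 2);                  // list: 1 2 3
//   list.InsertBefore(VMA_NULL, 4);              // same as PushBack: 1 2 3 4
//   list.Remove(first);                          // list: 2 3 4
//   list.Clear();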
    4445 
    4446 template<typename T>
    4447 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    4448  m_pAllocationCallbacks(pAllocationCallbacks),
    4449  m_ItemAllocator(pAllocationCallbacks, 128),
    4450  m_pFront(VMA_NULL),
    4451  m_pBack(VMA_NULL),
    4452  m_Count(0)
    4453 {
    4454 }
    4455 
    4456 template<typename T>
    4457 VmaRawList<T>::~VmaRawList()
    4458 {
    4459  // Intentionally not calling Clear, because that would do unnecessary
    4460  // work returning all items to m_ItemAllocator as free.
    4461 }
    4462 
    4463 template<typename T>
    4464 void VmaRawList<T>::Clear()
    4465 {
    4466  if(IsEmpty() == false)
    4467  {
    4468  ItemType* pItem = m_pBack;
    4469  while(pItem != VMA_NULL)
    4470  {
    4471  ItemType* const pPrevItem = pItem->pPrev;
    4472  m_ItemAllocator.Free(pItem);
    4473  pItem = pPrevItem;
    4474  }
    4475  m_pFront = VMA_NULL;
    4476  m_pBack = VMA_NULL;
    4477  m_Count = 0;
    4478  }
    4479 }
    4480 
    4481 template<typename T>
    4482 VmaListItem<T>* VmaRawList<T>::PushBack()
    4483 {
    4484  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4485  pNewItem->pNext = VMA_NULL;
    4486  if(IsEmpty())
    4487  {
    4488  pNewItem->pPrev = VMA_NULL;
    4489  m_pFront = pNewItem;
    4490  m_pBack = pNewItem;
    4491  m_Count = 1;
    4492  }
    4493  else
    4494  {
    4495  pNewItem->pPrev = m_pBack;
    4496  m_pBack->pNext = pNewItem;
    4497  m_pBack = pNewItem;
    4498  ++m_Count;
    4499  }
    4500  return pNewItem;
    4501 }
    4502 
    4503 template<typename T>
    4504 VmaListItem<T>* VmaRawList<T>::PushFront()
    4505 {
    4506  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4507  pNewItem->pPrev = VMA_NULL;
    4508  if(IsEmpty())
    4509  {
    4510  pNewItem->pNext = VMA_NULL;
    4511  m_pFront = pNewItem;
    4512  m_pBack = pNewItem;
    4513  m_Count = 1;
    4514  }
    4515  else
    4516  {
    4517  pNewItem->pNext = m_pFront;
    4518  m_pFront->pPrev = pNewItem;
    4519  m_pFront = pNewItem;
    4520  ++m_Count;
    4521  }
    4522  return pNewItem;
    4523 }
    4524 
    4525 template<typename T>
    4526 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    4527 {
    4528  ItemType* const pNewItem = PushBack();
    4529  pNewItem->Value = value;
    4530  return pNewItem;
    4531 }
    4532 
    4533 template<typename T>
    4534 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    4535 {
    4536  ItemType* const pNewItem = PushFront();
    4537  pNewItem->Value = value;
    4538  return pNewItem;
    4539 }
    4540 
    4541 template<typename T>
    4542 void VmaRawList<T>::PopBack()
    4543 {
    4544  VMA_HEAVY_ASSERT(m_Count > 0);
    4545  ItemType* const pBackItem = m_pBack;
    4546  ItemType* const pPrevItem = pBackItem->pPrev;
    4547  if(pPrevItem != VMA_NULL)
    4548  {
    4549  pPrevItem->pNext = VMA_NULL;
    4550  }
    4551  m_pBack = pPrevItem;
    4552  m_ItemAllocator.Free(pBackItem);
    4553  --m_Count;
    4554 }
    4555 
    4556 template<typename T>
    4557 void VmaRawList<T>::PopFront()
    4558 {
    4559  VMA_HEAVY_ASSERT(m_Count > 0);
    4560  ItemType* const pFrontItem = m_pFront;
    4561  ItemType* const pNextItem = pFrontItem->pNext;
    4562  if(pNextItem != VMA_NULL)
    4563  {
    4564  pNextItem->pPrev = VMA_NULL;
    4565  }
    4566  m_pFront = pNextItem;
    4567  m_ItemAllocator.Free(pFrontItem);
    4568  --m_Count;
    4569 }
    4570 
    4571 template<typename T>
    4572 void VmaRawList<T>::Remove(ItemType* pItem)
    4573 {
    4574  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4575  VMA_HEAVY_ASSERT(m_Count > 0);
    4576 
    4577  if(pItem->pPrev != VMA_NULL)
    4578  {
    4579  pItem->pPrev->pNext = pItem->pNext;
    4580  }
    4581  else
    4582  {
    4583  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4584  m_pFront = pItem->pNext;
    4585  }
    4586 
    4587  if(pItem->pNext != VMA_NULL)
    4588  {
    4589  pItem->pNext->pPrev = pItem->pPrev;
    4590  }
    4591  else
    4592  {
    4593  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4594  m_pBack = pItem->pPrev;
    4595  }
    4596 
    4597  m_ItemAllocator.Free(pItem);
    4598  --m_Count;
    4599 }
    4600 
    4601 template<typename T>
    4602 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4603 {
    4604  if(pItem != VMA_NULL)
    4605  {
    4606  ItemType* const prevItem = pItem->pPrev;
    4607  ItemType* const newItem = m_ItemAllocator.Alloc();
    4608  newItem->pPrev = prevItem;
    4609  newItem->pNext = pItem;
    4610  pItem->pPrev = newItem;
    4611  if(prevItem != VMA_NULL)
    4612  {
    4613  prevItem->pNext = newItem;
    4614  }
    4615  else
    4616  {
    4617  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4618  m_pFront = newItem;
    4619  }
    4620  ++m_Count;
    4621  return newItem;
    4622  }
    4623  else
    4624  return PushBack();
    4625 }
    4626 
    4627 template<typename T>
    4628 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4629 {
    4630  if(pItem != VMA_NULL)
    4631  {
    4632  ItemType* const nextItem = pItem->pNext;
    4633  ItemType* const newItem = m_ItemAllocator.Alloc();
    4634  newItem->pNext = nextItem;
    4635  newItem->pPrev = pItem;
    4636  pItem->pNext = newItem;
    4637  if(nextItem != VMA_NULL)
    4638  {
    4639  nextItem->pPrev = newItem;
    4640  }
    4641  else
    4642  {
    4643  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4644  m_pBack = newItem;
    4645  }
    4646  ++m_Count;
    4647  return newItem;
    4648  }
    4649  else
    4650  return PushFront();
    4651 }
    4652 
    4653 template<typename T>
    4654 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4655 {
    4656  ItemType* const newItem = InsertBefore(pItem);
    4657  newItem->Value = value;
    4658  return newItem;
    4659 }
    4660 
    4661 template<typename T>
    4662 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4663 {
    4664  ItemType* const newItem = InsertAfter(pItem);
    4665  newItem->Value = value;
    4666  return newItem;
    4667 }
    4668 
    4669 template<typename T, typename AllocatorT>
    4670 class VmaList
    4671 {
    4672  VMA_CLASS_NO_COPY(VmaList)
    4673 public:
    4674  class iterator
    4675  {
    4676  public:
    4677  iterator() :
    4678  m_pList(VMA_NULL),
    4679  m_pItem(VMA_NULL)
    4680  {
    4681  }
    4682 
    4683  T& operator*() const
    4684  {
    4685  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4686  return m_pItem->Value;
    4687  }
    4688  T* operator->() const
    4689  {
    4690  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4691  return &m_pItem->Value;
    4692  }
    4693 
    4694  iterator& operator++()
    4695  {
    4696  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4697  m_pItem = m_pItem->pNext;
    4698  return *this;
    4699  }
    4700  iterator& operator--()
    4701  {
    4702  if(m_pItem != VMA_NULL)
    4703  {
    4704  m_pItem = m_pItem->pPrev;
    4705  }
    4706  else
    4707  {
    4708  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4709  m_pItem = m_pList->Back();
    4710  }
    4711  return *this;
    4712  }
    4713 
    4714  iterator operator++(int)
    4715  {
    4716  iterator result = *this;
    4717  ++*this;
    4718  return result;
    4719  }
    4720  iterator operator--(int)
    4721  {
    4722  iterator result = *this;
    4723  --*this;
    4724  return result;
    4725  }
    4726 
    4727  bool operator==(const iterator& rhs) const
    4728  {
    4729  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4730  return m_pItem == rhs.m_pItem;
    4731  }
    4732  bool operator!=(const iterator& rhs) const
    4733  {
    4734  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4735  return m_pItem != rhs.m_pItem;
    4736  }
    4737 
    4738  private:
    4739  VmaRawList<T>* m_pList;
    4740  VmaListItem<T>* m_pItem;
    4741 
    4742  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    4743  m_pList(pList),
    4744  m_pItem(pItem)
    4745  {
    4746  }
    4747 
    4748  friend class VmaList<T, AllocatorT>;
    4749  };
    4750 
    4751  class const_iterator
    4752  {
    4753  public:
    4754  const_iterator() :
    4755  m_pList(VMA_NULL),
    4756  m_pItem(VMA_NULL)
    4757  {
    4758  }
    4759 
    4760  const_iterator(const iterator& src) :
    4761  m_pList(src.m_pList),
    4762  m_pItem(src.m_pItem)
    4763  {
    4764  }
    4765 
    4766  const T& operator*() const
    4767  {
    4768  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4769  return m_pItem->Value;
    4770  }
    4771  const T* operator->() const
    4772  {
    4773  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4774  return &m_pItem->Value;
    4775  }
    4776 
    4777  const_iterator& operator++()
    4778  {
    4779  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4780  m_pItem = m_pItem->pNext;
    4781  return *this;
    4782  }
    4783  const_iterator& operator--()
    4784  {
    4785  if(m_pItem != VMA_NULL)
    4786  {
    4787  m_pItem = m_pItem->pPrev;
    4788  }
    4789  else
    4790  {
    4791  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4792  m_pItem = m_pList->Back();
    4793  }
    4794  return *this;
    4795  }
    4796 
    4797  const_iterator operator++(int)
    4798  {
    4799  const_iterator result = *this;
    4800  ++*this;
    4801  return result;
    4802  }
    4803  const_iterator operator--(int)
    4804  {
    4805  const_iterator result = *this;
    4806  --*this;
    4807  return result;
    4808  }
    4809 
    4810  bool operator==(const const_iterator& rhs) const
    4811  {
    4812  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4813  return m_pItem == rhs.m_pItem;
    4814  }
    4815  bool operator!=(const const_iterator& rhs) const
    4816  {
    4817  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4818  return m_pItem != rhs.m_pItem;
    4819  }
    4820 
    4821  private:
    4822  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    4823  m_pList(pList),
    4824  m_pItem(pItem)
    4825  {
    4826  }
    4827 
    4828  const VmaRawList<T>* m_pList;
    4829  const VmaListItem<T>* m_pItem;
    4830 
    4831  friend class VmaList<T, AllocatorT>;
    4832  };
    4833 
    4834  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    4835 
    4836  bool empty() const { return m_RawList.IsEmpty(); }
    4837  size_t size() const { return m_RawList.GetCount(); }
    4838 
    4839  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    4840  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    4841 
    4842  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    4843  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    4844 
    4845  void clear() { m_RawList.Clear(); }
    4846  void push_back(const T& value) { m_RawList.PushBack(value); }
    4847  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    4848  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    4849 
    4850 private:
    4851  VmaRawList<T> m_RawList;
    4852 };
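// A usage sketch (hypothetical code): VmaList wraps VmaRawList in an STL-like
// interface, taking the VkAllocationCallbacks from a VmaStlAllocator.
//
//   VmaList<int, VmaStlAllocator<int> > list(VmaStlAllocator<int>(pAllocationCallbacks));
//   list.push_back(42);
//   for(VmaList<int, VmaStlAllocator<int> >::iterator it = list.begin(); it != list.end(); ++it)
//   {
//       int& value = *it; // iterate exactly like over an std::list
//   }
//   list.clear();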
    4853 
    4854 #endif // #if VMA_USE_STL_LIST
    4855 
    4857 // class VmaMap
    4858 
    4859 // Unused in this version.
    4860 #if 0
    4861 
    4862 #if VMA_USE_STL_UNORDERED_MAP
    4863 
    4864 #define VmaPair std::pair
    4865 
    4866 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4867  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4868 
    4869 #else // #if VMA_USE_STL_UNORDERED_MAP
    4870 
    4871 template<typename T1, typename T2>
    4872 struct VmaPair
    4873 {
    4874  T1 first;
    4875  T2 second;
    4876 
    4877  VmaPair() : first(), second() { }
    4878  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    4879 };
    4880 
    4881 /* Class compatible with subset of interface of std::unordered_map.
    4882 KeyT, ValueT must be POD because they will be stored in VmaVector.
    4883 */
    4884 template<typename KeyT, typename ValueT>
    4885 class VmaMap
    4886 {
    4887 public:
    4888  typedef VmaPair<KeyT, ValueT> PairType;
    4889  typedef PairType* iterator;
    4890 
    4891  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    4892 
    4893  iterator begin() { return m_Vector.begin(); }
    4894  iterator end() { return m_Vector.end(); }
    4895 
    4896  void insert(const PairType& pair);
    4897  iterator find(const KeyT& key);
    4898  void erase(iterator it);
    4899 
    4900 private:
    4901  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    4902 };
    4903 
    4904 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4905 
    4906 template<typename FirstT, typename SecondT>
    4907 struct VmaPairFirstLess
    4908 {
    4909  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    4910  {
    4911  return lhs.first < rhs.first;
    4912  }
    4913  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    4914  {
    4915  return lhs.first < rhsFirst;
    4916  }
    4917 };
    4918 
    4919 template<typename KeyT, typename ValueT>
    4920 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4921 {
    4922  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4923  m_Vector.data(),
    4924  m_Vector.data() + m_Vector.size(),
    4925  pair,
    4926  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4927  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4928 }
    4929 
    4930 template<typename KeyT, typename ValueT>
    4931 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4932 {
    4933  PairType* it = VmaBinaryFindFirstNotLess(
    4934  m_Vector.data(),
    4935  m_Vector.data() + m_Vector.size(),
    4936  key,
    4937  VmaPairFirstLess<KeyT, ValueT>());
    4938  if((it != m_Vector.end()) && (it->first == key))
    4939  {
    4940  return it;
    4941  }
    4942  else
    4943  {
    4944  return m_Vector.end();
    4945  }
    4946 }
    4947 
    4948 template<typename KeyT, typename ValueT>
    4949 void VmaMap<KeyT, ValueT>::erase(iterator it)
    4950 {
    4951  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    4952 }
    4953 
    4954 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4955 
    4956 #endif // #if 0
    4957 
    4959 
    4960 class VmaDeviceMemoryBlock;
    4961 
    4962 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4963 
    4964 struct VmaAllocation_T
    4965 {
    4966 private:
    4967  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    4968 
    4969  enum FLAGS
    4970  {
    4971  FLAG_USER_DATA_STRING = 0x01,
    4972  };
    4973 
    4974 public:
    4975  enum ALLOCATION_TYPE
    4976  {
    4977  ALLOCATION_TYPE_NONE,
    4978  ALLOCATION_TYPE_BLOCK,
    4979  ALLOCATION_TYPE_DEDICATED,
    4980  };
    4981 
    4982  /*
    4983  This struct cannot have a constructor or destructor. It must be POD because it is
    4984  allocated using VmaPoolAllocator.
    4985  */
    4986 
    4987  void Ctor(uint32_t currentFrameIndex, bool userDataString)
    4988  {
    4989  m_Alignment = 1;
    4990  m_Size = 0;
    4991  m_pUserData = VMA_NULL;
    4992  m_LastUseFrameIndex = currentFrameIndex;
    4993  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
    4994  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
    4995  m_MapCount = 0;
    4996  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
    4997 
    4998 #if VMA_STATS_STRING_ENABLED
    4999  m_CreationFrameIndex = currentFrameIndex;
    5000  m_BufferImageUsage = 0;
    5001 #endif
    5002  }
    5003 
    5004  void Dtor()
    5005  {
    5006  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
    5007 
    5008  // Check if owned string was freed.
    5009  VMA_ASSERT(m_pUserData == VMA_NULL);
    5010  }
    5011 
    5012  void InitBlockAllocation(
    5013  VmaDeviceMemoryBlock* block,
    5014  VkDeviceSize offset,
    5015  VkDeviceSize alignment,
    5016  VkDeviceSize size,
    5017  VmaSuballocationType suballocationType,
    5018  bool mapped,
    5019  bool canBecomeLost)
    5020  {
    5021  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    5022  VMA_ASSERT(block != VMA_NULL);
    5023  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    5024  m_Alignment = alignment;
    5025  m_Size = size;
    5026  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    5027  m_SuballocationType = (uint8_t)suballocationType;
    5028  m_BlockAllocation.m_Block = block;
    5029  m_BlockAllocation.m_Offset = offset;
    5030  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    5031  }
    5032 
    5033  void InitLost()
    5034  {
    5035  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    5036  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    5037  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    5038  m_BlockAllocation.m_Block = VMA_NULL;
    5039  m_BlockAllocation.m_Offset = 0;
    5040  m_BlockAllocation.m_CanBecomeLost = true;
    5041  }
    5042 
    5043  void ChangeBlockAllocation(
    5044  VmaAllocator hAllocator,
    5045  VmaDeviceMemoryBlock* block,
    5046  VkDeviceSize offset);
    5047 
    5048  void ChangeSize(VkDeviceSize newSize);
    5049  void ChangeOffset(VkDeviceSize newOffset);
    5050 
    5051  // pMappedData not null means the allocation is created with the MAPPED flag.
    5052  void InitDedicatedAllocation(
    5053  uint32_t memoryTypeIndex,
    5054  VkDeviceMemory hMemory,
    5055  VmaSuballocationType suballocationType,
    5056  void* pMappedData,
    5057  VkDeviceSize size)
    5058  {
    5059  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    5060  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    5061  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
    5062  m_Alignment = 0;
    5063  m_Size = size;
    5064  m_SuballocationType = (uint8_t)suballocationType;
    5065  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    5066  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    5067  m_DedicatedAllocation.m_hMemory = hMemory;
    5068  m_DedicatedAllocation.m_pMappedData = pMappedData;
    5069  }
    5070 
    5071  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    5072  VkDeviceSize GetAlignment() const { return m_Alignment; }
    5073  VkDeviceSize GetSize() const { return m_Size; }
    5074  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    5075  void* GetUserData() const { return m_pUserData; }
    5076  void SetUserData(VmaAllocator hAllocator, void* pUserData);
    5077  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
    5078 
    5079  VmaDeviceMemoryBlock* GetBlock() const
    5080  {
    5081  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    5082  return m_BlockAllocation.m_Block;
    5083  }
    5084  VkDeviceSize GetOffset() const;
    5085  VkDeviceMemory GetMemory() const;
    5086  uint32_t GetMemoryTypeIndex() const;
    5087  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    5088  void* GetMappedData() const;
    5089  bool CanBecomeLost() const;
    5090 
    5091  uint32_t GetLastUseFrameIndex() const
    5092  {
    5093  return m_LastUseFrameIndex.load();
    5094  }
    5095  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    5096  {
    5097  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    5098  }
    5099  /*
    5100  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    5101  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    5102  - Else, returns false.
    5103 
    5104  If hAllocation is already lost, assert - you should not call it then.
    5105  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    5106  */
    5107  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5108 
    5109  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    5110  {
    5111  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    5112  outInfo.blockCount = 1;
    5113  outInfo.allocationCount = 1;
    5114  outInfo.unusedRangeCount = 0;
    5115  outInfo.usedBytes = m_Size;
    5116  outInfo.unusedBytes = 0;
    5117  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    5118  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5119  outInfo.unusedRangeSizeMax = 0;
    5120  }
    5121 
    5122  void BlockAllocMap();
    5123  void BlockAllocUnmap();
    5124  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    5125  void DedicatedAllocUnmap(VmaAllocator hAllocator);
    5126 
    5127 #if VMA_STATS_STRING_ENABLED
    5128  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    5129  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
    5130 
    5131  void InitBufferImageUsage(uint32_t bufferImageUsage)
    5132  {
    5133  VMA_ASSERT(m_BufferImageUsage == 0);
    5134  m_BufferImageUsage = bufferImageUsage;
    5135  }
    5136 
    5137  void PrintParameters(class VmaJsonWriter& json) const;
    5138 #endif
    5139 
    5140 private:
    5141  VkDeviceSize m_Alignment;
    5142  VkDeviceSize m_Size;
    5143  void* m_pUserData;
    5144  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    5145  uint8_t m_Type; // ALLOCATION_TYPE
    5146  uint8_t m_SuballocationType; // VmaSuballocationType
    5147  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    5148  // Bits with mask 0x7F are a reference counter for vmaMapMemory()/vmaUnmapMemory() - see the worked example after this struct.
    5149  uint8_t m_MapCount;
    5150  uint8_t m_Flags; // enum FLAGS
    5151 
    5152  // Allocation out of VmaDeviceMemoryBlock.
    5153  struct BlockAllocation
    5154  {
    5155  VmaDeviceMemoryBlock* m_Block;
    5156  VkDeviceSize m_Offset;
    5157  bool m_CanBecomeLost;
    5158  };
    5159 
    5160  // Allocation for an object that has its own private VkDeviceMemory.
    5161  struct DedicatedAllocation
    5162  {
    5163  uint32_t m_MemoryTypeIndex;
    5164  VkDeviceMemory m_hMemory;
    5165  void* m_pMappedData; // Not null means memory is mapped.
    5166  };
    5167 
    5168  union
    5169  {
    5170  // Allocation out of VmaDeviceMemoryBlock.
    5171  BlockAllocation m_BlockAllocation;
    5172  // Allocation for an object that has its own private VkDeviceMemory.
    5173  DedicatedAllocation m_DedicatedAllocation;
    5174  };
    5175 
    5176 #if VMA_STATS_STRING_ENABLED
    5177  uint32_t m_CreationFrameIndex;
    5178  uint32_t m_BufferImageUsage; // 0 if unknown.
    5179 #endif
    5180 
    5181  void FreeUserDataString(VmaAllocator hAllocator);
    5182 };
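// Worked example of the m_MapCount encoding in VmaAllocation_T above
// (hypothetical values): an allocation created with
// VMA_ALLOCATION_CREATE_MAPPED_BIT that also has 2 outstanding vmaMapMemory()
// calls stores m_MapCount == (0x80 | 2) == 0x82. IsPersistentMap() then returns
// (0x82 & 0x80) != 0 == true, and the assert in Dtor() would fire because
// (0x82 & ~0x80) == 2 != 0 until both maps are balanced by unmaps.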
    5183 
    5184 /*
    5185 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    5186 allocated memory block or free.
    5187 */
    5188 struct VmaSuballocation
    5189 {
    5190  VkDeviceSize offset;
    5191  VkDeviceSize size;
    5192  VmaAllocation hAllocation;
    5193  VmaSuballocationType type;
    5194 };
    5195 
    5196 // Comparator for offsets.
    5197 struct VmaSuballocationOffsetLess
    5198 {
    5199  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5200  {
    5201  return lhs.offset < rhs.offset;
    5202  }
    5203 };
    5204 struct VmaSuballocationOffsetGreater
    5205 {
    5206  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5207  {
    5208  return lhs.offset > rhs.offset;
    5209  }
    5210 };
    5211 
    5212 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    5213 
    5214 // Cost of making one additional allocation lost, expressed as an equivalent in bytes.
    5215 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    5216 
    5217 enum class VmaAllocationRequestType
    5218 {
    5219  Normal,
    5220  // Used by "Linear" algorithm.
    5221  UpperAddress,
    5222  EndOf1st,
    5223  EndOf2nd,
    5224 };
    5225 
    5226 /*
    5227 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    5228 
    5229 If canMakeOtherLost was false:
    5230 - item points to a FREE suballocation.
    5231 - itemsToMakeLostCount is 0.
    5232 
    5233 If canMakeOtherLost was true:
    5234 - item points to the first of a sequence of suballocations, which are either FREE,
    5235  or point to VmaAllocations that can become lost.
    5236 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    5237  the requested allocation to succeed.
    5238 */
    5239 struct VmaAllocationRequest
    5240 {
    5241  VkDeviceSize offset;
    5242  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    5243  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    5244  VmaSuballocationList::iterator item;
    5245  size_t itemsToMakeLostCount;
    5246  void* customData;
    5247  VmaAllocationRequestType type;
    5248 
    5249  VkDeviceSize CalcCost() const
    5250  {
    5251  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    5252  }
    5253 };
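// Worked example (hypothetical numbers): a request whose items to make lost
// overlap 131072 bytes and with itemsToMakeLostCount == 2 has
//   CalcCost() == 131072 + 2 * VMA_LOST_ALLOCATION_COST
//              == 131072 + 2 * 1048576 == 2228224
// bytes-equivalent, so candidates with lower cost are preferred.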
    5254 
    5255 /*
    5256 Data structure used for bookkeeping of allocations and unused ranges of memory
    5257 in a single VkDeviceMemory block.
    5258 */
    5259 class VmaBlockMetadata
    5260 {
    5261 public:
    5262  VmaBlockMetadata(VmaAllocator hAllocator);
    5263  virtual ~VmaBlockMetadata() { }
    5264  virtual void Init(VkDeviceSize size) { m_Size = size; }
    5265 
    5266  // Validates all data structures inside this object. If not valid, returns false.
    5267  virtual bool Validate() const = 0;
    5268  VkDeviceSize GetSize() const { return m_Size; }
    5269  virtual size_t GetAllocationCount() const = 0;
    5270  virtual VkDeviceSize GetSumFreeSize() const = 0;
    5271  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    5272  // Returns true if this block is empty - contains only a single free suballocation.
    5273  virtual bool IsEmpty() const = 0;
    5274 
    5275  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    5276  // Shouldn't modify blockCount.
    5277  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
    5278 
    5279 #if VMA_STATS_STRING_ENABLED
    5280  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
    5281 #endif
    5282 
    5283  // Tries to find a place for a suballocation with the given parameters inside this block.
    5284  // If succeeded, fills pAllocationRequest and returns true.
    5285  // If failed, returns false.
    5286  virtual bool CreateAllocationRequest(
    5287  uint32_t currentFrameIndex,
    5288  uint32_t frameInUseCount,
    5289  VkDeviceSize bufferImageGranularity,
    5290  VkDeviceSize allocSize,
    5291  VkDeviceSize allocAlignment,
    5292  bool upperAddress,
    5293  VmaSuballocationType allocType,
    5294  bool canMakeOtherLost,
    5295  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
    5296  uint32_t strategy,
    5297  VmaAllocationRequest* pAllocationRequest) = 0;
    5298 
    5299  virtual bool MakeRequestedAllocationsLost(
    5300  uint32_t currentFrameIndex,
    5301  uint32_t frameInUseCount,
    5302  VmaAllocationRequest* pAllocationRequest) = 0;
    5303 
    5304  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
    5305 
    5306  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
    5307 
    5308  // Makes actual allocation based on request. Request must already be checked and valid.
    5309  virtual void Alloc(
    5310  const VmaAllocationRequest& request,
    5311  VmaSuballocationType type,
    5312  VkDeviceSize allocSize,
    5313  VmaAllocation hAllocation) = 0;
    5314 
    5315  // Frees suballocation assigned to given memory region.
    5316  virtual void Free(const VmaAllocation allocation) = 0;
    5317  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
    5318 
    5319  // Tries to resize (grow or shrink) space for given allocation, in place.
    5320  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
    5321 
    5322 protected:
    5323  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
    5324 
    5325 #if VMA_STATS_STRING_ENABLED
    5326  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
    5327  VkDeviceSize unusedBytes,
    5328  size_t allocationCount,
    5329  size_t unusedRangeCount) const;
    5330  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    5331  VkDeviceSize offset,
    5332  VmaAllocation hAllocation) const;
    5333  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    5334  VkDeviceSize offset,
    5335  VkDeviceSize size) const;
    5336  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
    5337 #endif
    5338 
    5339 private:
    5340  VkDeviceSize m_Size;
    5341  const VkAllocationCallbacks* m_pAllocationCallbacks;
    5342 };
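// A sketch of the intended call sequence for this interface (assuming valid
// pMetadata, allocation parameters, and hAllocation; not a complete
// implementation):
//
//   VmaAllocationRequest request;
//   if(pMetadata->CreateAllocationRequest(currentFrameIndex, frameInUseCount,
//          bufferImageGranularity, allocSize, allocAlignment,
//          false, // upperAddress
//          allocType, canMakeOtherLost, strategy, &request) &&
//      pMetadata->MakeRequestedAllocationsLost(currentFrameIndex, frameInUseCount, &request))
//   {
//       pMetadata->Alloc(request, allocType, allocSize, hAllocation);
//   }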
    5343 
    5344 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
    5345  VMA_ASSERT(0 && "Validation failed: " #cond); \
    5346  return false; \
    5347  } } while(false)
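// Example use inside a Validate() implementation:
//   VMA_VALIDATE(m_SumFreeSize <= GetSize());
// asserts with the text of the failed condition and makes the enclosing
// function return false when the condition does not hold.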
    5348 
    5349 class VmaBlockMetadata_Generic : public VmaBlockMetadata
    5350 {
    5351  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
    5352 public:
    5353  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    5354  virtual ~VmaBlockMetadata_Generic();
    5355  virtual void Init(VkDeviceSize size);
    5356 
    5357  virtual bool Validate() const;
    5358  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    5359  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5360  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5361  virtual bool IsEmpty() const;
    5362 
    5363  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5364  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5365 
    5366 #if VMA_STATS_STRING_ENABLED
    5367  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5368 #endif
    5369 
    5370  virtual bool CreateAllocationRequest(
    5371  uint32_t currentFrameIndex,
    5372  uint32_t frameInUseCount,
    5373  VkDeviceSize bufferImageGranularity,
    5374  VkDeviceSize allocSize,
    5375  VkDeviceSize allocAlignment,
    5376  bool upperAddress,
    5377  VmaSuballocationType allocType,
    5378  bool canMakeOtherLost,
    5379  uint32_t strategy,
    5380  VmaAllocationRequest* pAllocationRequest);
    5381 
    5382  virtual bool MakeRequestedAllocationsLost(
    5383  uint32_t currentFrameIndex,
    5384  uint32_t frameInUseCount,
    5385  VmaAllocationRequest* pAllocationRequest);
    5386 
    5387  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5388 
    5389  virtual VkResult CheckCorruption(const void* pBlockData);
    5390 
    5391  virtual void Alloc(
    5392  const VmaAllocationRequest& request,
    5393  VmaSuballocationType type,
    5394  VkDeviceSize allocSize,
    5395  VmaAllocation hAllocation);
    5396 
    5397  virtual void Free(const VmaAllocation allocation);
    5398  virtual void FreeAtOffset(VkDeviceSize offset);
    5399 
    5400  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
    5401 
    5403  // For defragmentation
    5404 
    5405  bool IsBufferImageGranularityConflictPossible(
    5406  VkDeviceSize bufferImageGranularity,
    5407  VmaSuballocationType& inOutPrevSuballocType) const;
    5408 
    5409 private:
    5410  friend class VmaDefragmentationAlgorithm_Generic;
    5411  friend class VmaDefragmentationAlgorithm_Fast;
    5412 
    5413  uint32_t m_FreeCount;
    5414  VkDeviceSize m_SumFreeSize;
    5415  VmaSuballocationList m_Suballocations;
    5416  // Suballocations that are free and have size greater than a certain threshold.
    5417  // Sorted by size, ascending.
    5418  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    5419 
    5420  bool ValidateFreeSuballocationList() const;
    5421 
    5422  // Checks if a requested suballocation with the given parameters can be placed in the given suballocItem.
    5423  // If yes, fills pOffset and returns true. If no, returns false.
    5424  bool CheckAllocation(
    5425  uint32_t currentFrameIndex,
    5426  uint32_t frameInUseCount,
    5427  VkDeviceSize bufferImageGranularity,
    5428  VkDeviceSize allocSize,
    5429  VkDeviceSize allocAlignment,
    5430  VmaSuballocationType allocType,
    5431  VmaSuballocationList::const_iterator suballocItem,
    5432  bool canMakeOtherLost,
    5433  VkDeviceSize* pOffset,
    5434  size_t* itemsToMakeLostCount,
    5435  VkDeviceSize* pSumFreeSize,
    5436  VkDeviceSize* pSumItemSize) const;
    5437  // Given a free suballocation, merges it with the following one, which must also be free.
    5438  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    5439  // Releases given suballocation, making it free.
    5440  // Merges it with adjacent free suballocations if applicable.
    5441  // Returns iterator to new free suballocation at this place.
    5442  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    5443  // Given a free suballocation, inserts it into the sorted list
    5444  // m_FreeSuballocationsBySize if it is suitable.
    5445  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    5446  // Given a free suballocation, removes it from the sorted list
    5447  // m_FreeSuballocationsBySize if it is suitable.
    5448  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    5449 };
    5450 
    5451 /*
    5452 Allocations and their references in internal data structure look like this:
    5453 
    5454 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    5455 
    5456  0 +-------+
    5457  | |
    5458  | |
    5459  | |
    5460  +-------+
    5461  | Alloc | 1st[m_1stNullItemsBeginCount]
    5462  +-------+
    5463  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5464  +-------+
    5465  | ... |
    5466  +-------+
    5467  | Alloc | 1st[1st.size() - 1]
    5468  +-------+
    5469  | |
    5470  | |
    5471  | |
    5472 GetSize() +-------+
    5473 
    5474 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    5475 
    5476  0 +-------+
    5477  | Alloc | 2nd[0]
    5478  +-------+
    5479  | Alloc | 2nd[1]
    5480  +-------+
    5481  | ... |
    5482  +-------+
    5483  | Alloc | 2nd[2nd.size() - 1]
    5484  +-------+
    5485  | |
    5486  | |
    5487  | |
    5488  +-------+
    5489  | Alloc | 1st[m_1stNullItemsBeginCount]
    5490  +-------+
    5491  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5492  +-------+
    5493  | ... |
    5494  +-------+
    5495  | Alloc | 1st[1st.size() - 1]
    5496  +-------+
    5497  | |
    5498 GetSize() +-------+
    5499 
    5500 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    5501 
    5502  0 +-------+
    5503  | |
    5504  | |
    5505  | |
    5506  +-------+
    5507  | Alloc | 1st[m_1stNullItemsBeginCount]
    5508  +-------+
    5509  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5510  +-------+
    5511  | ... |
    5512  +-------+
    5513  | Alloc | 1st[1st.size() - 1]
    5514  +-------+
    5515  | |
    5516  | |
    5517  | |
    5518  +-------+
    5519  | Alloc | 2nd[2nd.size() - 1]
    5520  +-------+
    5521  | ... |
    5522  +-------+
    5523  | Alloc | 2nd[1]
    5524  +-------+
    5525  | Alloc | 2nd[0]
    5526 GetSize() +-------+
    5527 
    5528 */
    5529 class VmaBlockMetadata_Linear : public VmaBlockMetadata
    5530 {
    5531  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
    5532 public:
    5533  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    5534  virtual ~VmaBlockMetadata_Linear();
    5535  virtual void Init(VkDeviceSize size);
    5536 
    5537  virtual bool Validate() const;
    5538  virtual size_t GetAllocationCount() const;
    5539  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5540  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5541  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
    5542 
    5543  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5544  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5545 
    5546 #if VMA_STATS_STRING_ENABLED
    5547  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5548 #endif
    5549 
    5550  virtual bool CreateAllocationRequest(
    5551  uint32_t currentFrameIndex,
    5552  uint32_t frameInUseCount,
    5553  VkDeviceSize bufferImageGranularity,
    5554  VkDeviceSize allocSize,
    5555  VkDeviceSize allocAlignment,
    5556  bool upperAddress,
    5557  VmaSuballocationType allocType,
    5558  bool canMakeOtherLost,
    5559  uint32_t strategy,
    5560  VmaAllocationRequest* pAllocationRequest);
    5561 
    5562  virtual bool MakeRequestedAllocationsLost(
    5563  uint32_t currentFrameIndex,
    5564  uint32_t frameInUseCount,
    5565  VmaAllocationRequest* pAllocationRequest);
    5566 
    5567  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5568 
    5569  virtual VkResult CheckCorruption(const void* pBlockData);
    5570 
    5571  virtual void Alloc(
    5572  const VmaAllocationRequest& request,
    5573  VmaSuballocationType type,
    5574  VkDeviceSize allocSize,
    5575  VmaAllocation hAllocation);
    5576 
    5577  virtual void Free(const VmaAllocation allocation);
    5578  virtual void FreeAtOffset(VkDeviceSize offset);
    5579 
    5580 private:
    5581  /*
    5582  There are two suballocation vectors, used in ping-pong way.
    5583  The one with index m_1stVectorIndex is called 1st.
    5584  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    5585  2nd can be non-empty only when 1st is not empty.
    5586  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation - see the example after this class.
    5587  */
    5588  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
    5589 
    5590  enum SECOND_VECTOR_MODE
    5591  {
    5592  SECOND_VECTOR_EMPTY,
    5593  /*
    5594  Suballocations in 2nd vector are created later than the ones in 1st, but they
    5595  all have smaller offsets.
    5596  */
    5597  SECOND_VECTOR_RING_BUFFER,
    5598  /*
    5599  Suballocations in 2nd vector are upper side of double stack.
    5600  They all have offsets higher than those in 1st vector.
    5601  Top of this stack means smaller offsets, but higher indices in this vector.
    5602  */
    5603  SECOND_VECTOR_DOUBLE_STACK,
    5604  };
    5605 
    5606  VkDeviceSize m_SumFreeSize;
    5607  SuballocationVectorType m_Suballocations0, m_Suballocations1;
    5608  uint32_t m_1stVectorIndex;
    5609  SECOND_VECTOR_MODE m_2ndVectorMode;
    5610 
    5611  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5612  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5613  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5614  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5615 
    5616  // Number of items in 1st vector with hAllocation = null at the beginning.
    5617  size_t m_1stNullItemsBeginCount;
    5618  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    5619  size_t m_1stNullItemsMiddleCount;
    5620  // Number of items in 2nd vector with hAllocation = null.
    5621  size_t m_2ndNullItemsCount;
    5622 
    5623  bool ShouldCompact1st() const;
    5624  void CleanupAfterFree();
    5625 
    5626  bool CreateAllocationRequest_LowerAddress(
    5627  uint32_t currentFrameIndex,
    5628  uint32_t frameInUseCount,
    5629  VkDeviceSize bufferImageGranularity,
    5630  VkDeviceSize allocSize,
    5631  VkDeviceSize allocAlignment,
    5632  VmaSuballocationType allocType,
    5633  bool canMakeOtherLost,
    5634  uint32_t strategy,
    5635  VmaAllocationRequest* pAllocationRequest);
    5636  bool CreateAllocationRequest_UpperAddress(
    5637  uint32_t currentFrameIndex,
    5638  uint32_t frameInUseCount,
    5639  VkDeviceSize bufferImageGranularity,
    5640  VkDeviceSize allocSize,
    5641  VkDeviceSize allocAlignment,
    5642  VmaSuballocationType allocType,
    5643  bool canMakeOtherLost,
    5644  uint32_t strategy,
    5645  VmaAllocationRequest* pAllocationRequest);
    5646 };
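// Example of the ping-pong indexing above (hypothetical state): with
// m_1stVectorIndex == 0, AccessSuballocations1st() returns m_Suballocations0
// and AccessSuballocations2nd() returns m_Suballocations1; flipping
// m_1stVectorIndex to 1 swaps the two roles without moving any data.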
    5647 
    5648 /*
    5649 - GetSize() is the original size of the allocated memory block.
    5650 - m_UsableSize is this size aligned down to a power of two.
    5651  All allocations and calculations happen relative to m_UsableSize.
    5652 - GetUnusableSize() is the difference between them.
    5653  It is reported as a separate, unused range, not available for allocations.
    5654 
    5655 Node at level 0 has size = m_UsableSize.
    5656 Each subsequent level contains nodes half the size of those on the previous level.
    5657 m_LevelCount is the maximum number of levels to use in the current object.
    5658 */
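// Worked example of the sizes above (hypothetical block): for a 10 MiB block,
// m_UsableSize = 8 MiB (10 MiB aligned down to a power of two), so
// GetUnusableSize() = 2 MiB. LevelToNodeSize(0) = 8 MiB,
// LevelToNodeSize(1) = 4 MiB, and so on, halving per level down to MIN_NODE_SIZE.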
    5659 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
    5660 {
    5661  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
    5662 public:
    5663  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    5664  virtual ~VmaBlockMetadata_Buddy();
    5665  virtual void Init(VkDeviceSize size);
    5666 
    5667  virtual bool Validate() const;
    5668  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    5669  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    5670  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5671  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
    5672 
    5673  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5674  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5675 
    5676 #if VMA_STATS_STRING_ENABLED
    5677  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5678 #endif
    5679 
    5680  virtual bool CreateAllocationRequest(
    5681  uint32_t currentFrameIndex,
    5682  uint32_t frameInUseCount,
    5683  VkDeviceSize bufferImageGranularity,
    5684  VkDeviceSize allocSize,
    5685  VkDeviceSize allocAlignment,
    5686  bool upperAddress,
    5687  VmaSuballocationType allocType,
    5688  bool canMakeOtherLost,
    5689  uint32_t strategy,
    5690  VmaAllocationRequest* pAllocationRequest);
    5691 
    5692  virtual bool MakeRequestedAllocationsLost(
    5693  uint32_t currentFrameIndex,
    5694  uint32_t frameInUseCount,
    5695  VmaAllocationRequest* pAllocationRequest);
    5696 
    5697  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5698 
    5699  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
    5700 
    5701  virtual void Alloc(
    5702  const VmaAllocationRequest& request,
    5703  VmaSuballocationType type,
    5704  VkDeviceSize allocSize,
    5705  VmaAllocation hAllocation);
    5706 
    5707  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    5708  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
    5709 
    5710 private:
    5711  static const VkDeviceSize MIN_NODE_SIZE = 32;
    5712  static const size_t MAX_LEVELS = 30;
    5713 
    5714  struct ValidationContext
    5715  {
    5716  size_t calculatedAllocationCount;
    5717  size_t calculatedFreeCount;
    5718  VkDeviceSize calculatedSumFreeSize;
    5719 
    5720  ValidationContext() :
    5721  calculatedAllocationCount(0),
    5722  calculatedFreeCount(0),
    5723  calculatedSumFreeSize(0) { }
    5724  };
    5725 
    5726  struct Node
    5727  {
    5728  VkDeviceSize offset;
    5729  enum TYPE
    5730  {
    5731  TYPE_FREE,
    5732  TYPE_ALLOCATION,
    5733  TYPE_SPLIT,
    5734  TYPE_COUNT
    5735  } type;
    5736  Node* parent;
    5737  Node* buddy;
    5738 
    5739  union
    5740  {
    5741  struct
    5742  {
    5743  Node* prev;
    5744  Node* next;
    5745  } free;
    5746  struct
    5747  {
    5748  VmaAllocation alloc;
    5749  } allocation;
    5750  struct
    5751  {
    5752  Node* leftChild;
    5753  } split;
    5754  };
    5755  };
    5756 
    5757  // Size of the memory block aligned down to a power of two.
    5758  VkDeviceSize m_UsableSize;
    5759  uint32_t m_LevelCount;
    5760 
    5761  Node* m_Root;
    5762  struct {
    5763  Node* front;
    5764  Node* back;
    5765  } m_FreeList[MAX_LEVELS];
    5766  // Number of nodes in the tree with type == TYPE_ALLOCATION.
    5767  size_t m_AllocationCount;
    5768  // Number of nodes in the tree with type == TYPE_FREE.
    5769  size_t m_FreeCount;
    5770  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    5771  VkDeviceSize m_SumFreeSize;
    5772 
    5773  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    5774  void DeleteNode(Node* node);
    5775  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    5776  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    5777  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    5778  // Alloc passed just for validation. Can be null.
    5779  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    5780  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    5781  // Adds node to the front of FreeList at given level.
    5782  // node->type must be FREE.
    5783  // node->free.prev, next can be undefined.
    5784  void AddToFreeListFront(uint32_t level, Node* node);
    5785  // Removes node from FreeList at given level.
    5786  // node->type must be FREE.
    5787  // node->free.prev, next stay untouched.
    5788  void RemoveFromFreeList(uint32_t level, Node* node);
    5789 
    5790 #if VMA_STATS_STRING_ENABLED
    5791  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
    5792 #endif
    5793 };
    5794 
    5795 /*
    5796 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5797 data about its regions (aka suballocations, #VmaAllocation), both assigned and free.
    5798 
    5799 Thread-safety: This class must be externally synchronized.
    5800 */
    5801 class VmaDeviceMemoryBlock
    5802 {
    5803  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
    5804 public:
    5805  VmaBlockMetadata* m_pMetadata;
    5806 
    5807  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    5808 
    5809  ~VmaDeviceMemoryBlock()
    5810  {
    5811  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    5812  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    5813  }
    5814 
    5815  // Always call after construction.
    5816  void Init(
    5817  VmaAllocator hAllocator,
    5818  VmaPool hParentPool,
    5819  uint32_t newMemoryTypeIndex,
    5820  VkDeviceMemory newMemory,
    5821  VkDeviceSize newSize,
    5822  uint32_t id,
    5823  uint32_t algorithm);
    5824  // Always call before destruction.
    5825  void Destroy(VmaAllocator allocator);
    5826 
    5827  VmaPool GetParentPool() const { return m_hParentPool; }
    5828  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    5829  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5830  uint32_t GetId() const { return m_Id; }
    5831  void* GetMappedData() const { return m_pMappedData; }
    5832 
    5833  // Validates all data structures inside this object. If not valid, returns false.
    5834  bool Validate() const;
    5835 
    5836  VkResult CheckCorruption(VmaAllocator hAllocator);
    5837 
    5838  // ppData can be null.
    5839  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    5840  void Unmap(VmaAllocator hAllocator, uint32_t count);
    5841 
    5842  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5843  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5844 
    5845  VkResult BindBufferMemory(
    5846  const VmaAllocator hAllocator,
    5847  const VmaAllocation hAllocation,
    5848  VkBuffer hBuffer);
    5849  VkResult BindImageMemory(
    5850  const VmaAllocator hAllocator,
    5851  const VmaAllocation hAllocation,
    5852  VkImage hImage);
    5853 
    5854 private:
    5855  VmaPool m_hParentPool; // VK_NULL_HANDLE if it does not belong to a custom pool.
    5856  uint32_t m_MemoryTypeIndex;
    5857  uint32_t m_Id;
    5858  VkDeviceMemory m_hMemory;
    5859 
    5860  /*
    5861  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    5862  Also protects m_MapCount, m_pMappedData.
    5863  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
    5864  */
    5865  VMA_MUTEX m_Mutex;
    5866  uint32_t m_MapCount;
    5867  void* m_pMappedData;
    5868 };
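// A sketch of the reference-counted mapping above (hypothetical handles):
//
//   void* pData = VMA_NULL;
//   VkResult res = block.Map(hAllocator, 1, &pData); // maps the VkDeviceMemory when the count goes 0 -> 1
//   if(res == VK_SUCCESS)
//   {
//       // ... read or write through pData ...
//       block.Unmap(hAllocator, 1); // unmaps again when the count returns to 0
//   }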
    5869 
    5870 struct VmaPointerLess
    5871 {
    5872  bool operator()(const void* lhs, const void* rhs) const
    5873  {
    5874  return lhs < rhs;
    5875  }
    5876 };
    5877 
    5878 struct VmaDefragmentationMove
    5879 {
    5880  size_t srcBlockIndex;
    5881  size_t dstBlockIndex;
    5882  VkDeviceSize srcOffset;
    5883  VkDeviceSize dstOffset;
    5884  VkDeviceSize size;
    5885 };
    5886 
    5887 class VmaDefragmentationAlgorithm;
    5888 
    5889 /*
    5890 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5891 Vulkan memory type.
    5892 
    5893 Synchronized internally with a mutex.
    5894 */
    5895 struct VmaBlockVector
    5896 {
    5897  VMA_CLASS_NO_COPY(VmaBlockVector)
    5898 public:
    5899  VmaBlockVector(
    5900  VmaAllocator hAllocator,
    5901  VmaPool hParentPool,
    5902  uint32_t memoryTypeIndex,
    5903  VkDeviceSize preferredBlockSize,
    5904  size_t minBlockCount,
    5905  size_t maxBlockCount,
    5906  VkDeviceSize bufferImageGranularity,
    5907  uint32_t frameInUseCount,
    5908  bool isCustomPool,
    5909  bool explicitBlockSize,
    5910  uint32_t algorithm);
    5911  ~VmaBlockVector();
    5912 
    5913  VkResult CreateMinBlocks();
    5914 
    5915  VmaPool GetParentPool() const { return m_hParentPool; }
    5916  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5917  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    5918  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    5919  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    5920  uint32_t GetAlgorithm() const { return m_Algorithm; }
    5921 
    5922  void GetPoolStats(VmaPoolStats* pStats);
    5923 
    5924  bool IsEmpty() const { return m_Blocks.empty(); }
    5925  bool IsCorruptionDetectionEnabled() const;
    5926 
    5927  VkResult Allocate(
    5928  uint32_t currentFrameIndex,
    5929  VkDeviceSize size,
    5930  VkDeviceSize alignment,
    5931  const VmaAllocationCreateInfo& createInfo,
    5932  VmaSuballocationType suballocType,
    5933  size_t allocationCount,
    5934  VmaAllocation* pAllocations);
    5935 
    5936  void Free(
    5937  VmaAllocation hAllocation);
    5938 
    5939  // Adds statistics of this BlockVector to pStats.
    5940  void AddStats(VmaStats* pStats);
    5941 
    5942 #if VMA_STATS_STRING_ENABLED
    5943  void PrintDetailedMap(class VmaJsonWriter& json);
    5944 #endif
    5945 
    5946  void MakePoolAllocationsLost(
    5947  uint32_t currentFrameIndex,
    5948  size_t* pLostAllocationCount);
    5949  VkResult CheckCorruption();
    5950 
    5951  // Saves results in pCtx->res.
    5952  void Defragment(
    5953  class VmaBlockVectorDefragmentationContext* pCtx,
    5954  VmaDefragmentationStats* pStats,
    5955  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    5956  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    5957  VkCommandBuffer commandBuffer);
    5958  void DefragmentationEnd(
    5959  class VmaBlockVectorDefragmentationContext* pCtx,
    5960  VmaDefragmentationStats* pStats);
    5961 
    5963  // To be used only while the m_Mutex is locked. Used during defragmentation.
    5964 
    5965  size_t GetBlockCount() const { return m_Blocks.size(); }
    5966  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    5967  size_t CalcAllocationCount() const;
    5968  bool IsBufferImageGranularityConflictPossible() const;
    5969 
    5970 private:
    5971  friend class VmaDefragmentationAlgorithm_Generic;
    5972 
    5973  const VmaAllocator m_hAllocator;
    5974  const VmaPool m_hParentPool;
    5975  const uint32_t m_MemoryTypeIndex;
    5976  const VkDeviceSize m_PreferredBlockSize;
    5977  const size_t m_MinBlockCount;
    5978  const size_t m_MaxBlockCount;
    5979  const VkDeviceSize m_BufferImageGranularity;
    5980  const uint32_t m_FrameInUseCount;
    5981  const bool m_IsCustomPool;
    5982  const bool m_ExplicitBlockSize;
    5983  const uint32_t m_Algorithm;
    5984  /* There can be at most one allocation that is completely empty - a
    5985  hysteresis to avoid pessimistic case of alternating creation and destruction
    5986  of a VkDeviceMemory. */
    5987  bool m_HasEmptyBlock;
    5988  VMA_RW_MUTEX m_Mutex;
    5989  // Incrementally sorted by sumFreeSize, ascending.
    5990  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    5991  uint32_t m_NextBlockId;
    5992 
    5993  VkDeviceSize CalcMaxBlockSize() const;
    5994 
    5995  // Finds and removes given block from vector.
    5996  void Remove(VmaDeviceMemoryBlock* pBlock);
    5997 
    5998  // Performs single step in sorting m_Blocks. They may not be fully sorted
    5999  // after this call.
    6000  void IncrementallySortBlocks();
    6001 
    6002  VkResult AllocatePage(
    6003  uint32_t currentFrameIndex,
    6004  VkDeviceSize size,
    6005  VkDeviceSize alignment,
    6006  const VmaAllocationCreateInfo& createInfo,
    6007  VmaSuballocationType suballocType,
    6008  VmaAllocation* pAllocation);
    6009 
    6010  // To be used only without CAN_MAKE_OTHER_LOST flag.
    6011  VkResult AllocateFromBlock(
    6012  VmaDeviceMemoryBlock* pBlock,
    6013  uint32_t currentFrameIndex,
    6014  VkDeviceSize size,
    6015  VkDeviceSize alignment,
    6016  VmaAllocationCreateFlags allocFlags,
    6017  void* pUserData,
    6018  VmaSuballocationType suballocType,
    6019  uint32_t strategy,
    6020  VmaAllocation* pAllocation);
    6021 
    6022  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    6023 
    6024  // Saves result to pCtx->res.
    6025  void ApplyDefragmentationMovesCpu(
    6026  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    6027  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    6028  // Saves result to pCtx->res.
    6029  void ApplyDefragmentationMovesGpu(
    6030  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    6031  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6032  VkCommandBuffer commandBuffer);
    6033 
    6034  /*
    6035  Used during defragmentation. pDefragmentationStats is optional. It's in/out
    6036  - updated with new data.
    6037  */
    6038  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
    6039 };
    6040 
    6041 struct VmaPool_T
    6042 {
    6043  VMA_CLASS_NO_COPY(VmaPool_T)
    6044 public:
    6045  VmaBlockVector m_BlockVector;
    6046 
    6047  VmaPool_T(
    6048  VmaAllocator hAllocator,
    6049  const VmaPoolCreateInfo& createInfo,
    6050  VkDeviceSize preferredBlockSize);
    6051  ~VmaPool_T();
    6052 
    6053  uint32_t GetId() const { return m_Id; }
    6054  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
    6055 
    6056 #if VMA_STATS_STRING_ENABLED
    6057  //void PrintDetailedMap(class VmaStringBuilder& sb);
    6058 #endif
    6059 
    6060 private:
    6061  uint32_t m_Id;
    6062 };
    6063 
    6064 /*
    6065 Performs defragmentation:
    6066 
    6067 - Updates `pBlockVector->m_pMetadata`.
    6068 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
    6069 - Does not move actual data, only returns requested moves as `moves`.
    6070 */
    6071 class VmaDefragmentationAlgorithm
    6072 {
    6073  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
    6074 public:
    6075  VmaDefragmentationAlgorithm(
    6076  VmaAllocator hAllocator,
    6077  VmaBlockVector* pBlockVector,
    6078  uint32_t currentFrameIndex) :
    6079  m_hAllocator(hAllocator),
    6080  m_pBlockVector(pBlockVector),
    6081  m_CurrentFrameIndex(currentFrameIndex)
    6082  {
    6083  }
    6084  virtual ~VmaDefragmentationAlgorithm()
    6085  {
    6086  }
    6087 
    6088  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    6089  virtual void AddAll() = 0;
    6090 
    6091  virtual VkResult Defragment(
    6092  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6093  VkDeviceSize maxBytesToMove,
    6094  uint32_t maxAllocationsToMove) = 0;
    6095 
    6096  virtual VkDeviceSize GetBytesMoved() const = 0;
    6097  virtual uint32_t GetAllocationsMoved() const = 0;
    6098 
    6099 protected:
    6100  VmaAllocator const m_hAllocator;
    6101  VmaBlockVector* const m_pBlockVector;
    6102  const uint32_t m_CurrentFrameIndex;
    6103 
    6104  struct AllocationInfo
    6105  {
    6106  VmaAllocation m_hAllocation;
    6107  VkBool32* m_pChanged;
    6108 
    6109  AllocationInfo() :
    6110  m_hAllocation(VK_NULL_HANDLE),
    6111  m_pChanged(VMA_NULL)
    6112  {
    6113  }
    6114  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
    6115  m_hAllocation(hAlloc),
    6116  m_pChanged(pChanged)
    6117  {
    6118  }
    6119  };
    6120 };
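// Example (illustrative sketch of how a concrete algorithm is driven; the
// real call site is VmaBlockVectorDefragmentationContext below):
//
//   pAlgorithm->AddAll(); // or AddAllocation() per selected allocation
//   VkResult res = pAlgorithm->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
//   // On success `moves` holds the requested relocations. No data has been
//   // copied yet - ApplyDefragmentationMovesCpu/Gpu performs the transfers.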
    6121 
    6122 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
    6123 {
    6124  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
    6125 public:
    6126  VmaDefragmentationAlgorithm_Generic(
    6127  VmaAllocator hAllocator,
    6128  VmaBlockVector* pBlockVector,
    6129  uint32_t currentFrameIndex,
    6130  bool overlappingMoveSupported);
    6131  virtual ~VmaDefragmentationAlgorithm_Generic();
    6132 
    6133  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    6134  virtual void AddAll() { m_AllAllocations = true; }
    6135 
    6136  virtual VkResult Defragment(
    6137  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6138  VkDeviceSize maxBytesToMove,
    6139  uint32_t maxAllocationsToMove);
    6140 
    6141  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    6142  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    6143 
    6144 private:
    6145  uint32_t m_AllocationCount;
    6146  bool m_AllAllocations;
    6147 
    6148  VkDeviceSize m_BytesMoved;
    6149  uint32_t m_AllocationsMoved;
    6150 
    6151  struct AllocationInfoSizeGreater
    6152  {
    6153  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    6154  {
    6155  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    6156  }
    6157  };
    6158 
    6159  struct AllocationInfoOffsetGreater
    6160  {
    6161  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    6162  {
    6163  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
    6164  }
    6165  };
    6166 
    6167  struct BlockInfo
    6168  {
    6169  size_t m_OriginalBlockIndex;
    6170  VmaDeviceMemoryBlock* m_pBlock;
    6171  bool m_HasNonMovableAllocations;
    6172  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    6173 
    6174  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    6175  m_OriginalBlockIndex(SIZE_MAX),
    6176  m_pBlock(VMA_NULL),
    6177  m_HasNonMovableAllocations(true),
    6178  m_Allocations(pAllocationCallbacks)
    6179  {
    6180  }
    6181 
    6182  void CalcHasNonMovableAllocations()
    6183  {
    6184  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
    6185  const size_t defragmentAllocCount = m_Allocations.size();
    6186  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    6187  }
    6188 
    6189  void SortAllocationsBySizeDescending()
    6190  {
    6191  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    6192  }
    6193 
    6194  void SortAllocationsByOffsetDescending()
    6195  {
    6196  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
    6197  }
    6198  };
    6199 
    6200  struct BlockPointerLess
    6201  {
    6202  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    6203  {
    6204  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    6205  }
    6206  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    6207  {
    6208  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    6209  }
    6210  };
    6211 
    6212  // 1. Blocks with some non-movable allocations go first.
    6213  // 2. Blocks with smaller sumFreeSize go first.
    6214  struct BlockInfoCompareMoveDestination
    6215  {
    6216  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    6217  {
    6218  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    6219  {
    6220  return true;
    6221  }
    6222  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    6223  {
    6224  return false;
    6225  }
    6226  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
    6227  {
    6228  return true;
    6229  }
    6230  return false;
    6231  }
    6232  };
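// Worked example: for blocks A (has non-movable allocations, 1 MB free),
// B (all movable, 512 KB free) and C (all movable, 2 MB free), this
// comparator orders them A, B, C. Destinations are filled starting with
// blocks that can never become empty, then with the fullest movable blocks,
// so the emptiest blocks keep the best chance of being freed entirely.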
    6233 
    6234  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    6235  BlockInfoVector m_Blocks;
    6236 
    6237  VkResult DefragmentRound(
    6238  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6239  VkDeviceSize maxBytesToMove,
    6240  uint32_t maxAllocationsToMove);
    6241 
    6242  size_t CalcBlocksWithNonMovableCount() const;
    6243 
    6244  static bool MoveMakesSense(
    6245  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6246  size_t srcBlockIndex, VkDeviceSize srcOffset);
    6247 };
    6248 
    6249 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
    6250 {
    6251  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
    6252 public:
    6253  VmaDefragmentationAlgorithm_Fast(
    6254  VmaAllocator hAllocator,
    6255  VmaBlockVector* pBlockVector,
    6256  uint32_t currentFrameIndex,
    6257  bool overlappingMoveSupported);
    6258  virtual ~VmaDefragmentationAlgorithm_Fast();
    6259 
    6260  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
    6261  virtual void AddAll() { m_AllAllocations = true; }
    6262 
    6263  virtual VkResult Defragment(
    6264  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6265  VkDeviceSize maxBytesToMove,
    6266  uint32_t maxAllocationsToMove);
    6267 
    6268  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    6269  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    6270 
    6271 private:
    6272  struct BlockInfo
    6273  {
    6274  size_t origBlockIndex;
    6275  };
    6276 
    6277  class FreeSpaceDatabase
    6278  {
    6279  public:
    6280  FreeSpaceDatabase()
    6281  {
    6282  FreeSpace s = {};
    6283  s.blockInfoIndex = SIZE_MAX;
    6284  for(size_t i = 0; i < MAX_COUNT; ++i)
    6285  {
    6286  m_FreeSpaces[i] = s;
    6287  }
    6288  }
    6289 
    6290  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
    6291  {
    6292  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6293  {
    6294  return;
    6295  }
    6296 
    6298  // Find the first unused slot or, failing that, the smallest cached space smaller than the new one.
    6298  size_t bestIndex = SIZE_MAX;
    6299  for(size_t i = 0; i < MAX_COUNT; ++i)
    6300  {
    6301  // Empty structure.
    6302  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
    6303  {
    6304  bestIndex = i;
    6305  break;
    6306  }
    6307  if(m_FreeSpaces[i].size < size &&
    6308  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
    6309  {
    6310  bestIndex = i;
    6311  }
    6312  }
    6313 
    6314  if(bestIndex != SIZE_MAX)
    6315  {
    6316  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
    6317  m_FreeSpaces[bestIndex].offset = offset;
    6318  m_FreeSpaces[bestIndex].size = size;
    6319  }
    6320  }
    6321 
    6322  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
    6323  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
    6324  {
    6325  size_t bestIndex = SIZE_MAX;
    6326  VkDeviceSize bestFreeSpaceAfter = 0;
    6327  for(size_t i = 0; i < MAX_COUNT; ++i)
    6328  {
    6329  // Structure is valid.
    6330  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
    6331  {
    6332  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
    6333  // Allocation fits into this structure.
    6334  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
    6335  {
    6336  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
    6337  (dstOffset + size);
    6338  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
    6339  {
    6340  bestIndex = i;
    6341  bestFreeSpaceAfter = freeSpaceAfter;
    6342  }
    6343  }
    6344  }
    6345  }
    6346 
    6347  if(bestIndex != SIZE_MAX)
    6348  {
    6349  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
    6350  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
    6351 
    6352  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6353  {
    6354  // Leave this structure for remaining empty space.
    6355  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
    6356  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
    6357  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
    6358  }
    6359  else
    6360  {
    6361  // This structure becomes invalid.
    6362  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
    6363  }
    6364 
    6365  return true;
    6366  }
    6367 
    6368  return false;
    6369  }
    6370 
    6371  private:
    6372  static const size_t MAX_COUNT = 4;
    6373 
    6374  struct FreeSpace
    6375  {
    6376  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
    6377  VkDeviceSize offset;
    6378  VkDeviceSize size;
    6379  } m_FreeSpaces[MAX_COUNT];
    6380  };
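// Example (illustrative): after a 1 MiB range at offset 0x10000 becomes free
// in block 2, Register(2, 0x10000, 0x100000) caches it in one of the
// MAX_COUNT slots. A later Fetch(/*alignment*/ 256, /*size*/ 0x8000,
// outBlock, outOffset) picks the cached space that leaves the most room
// after the request, returns block 2 with an aligned offset, and shrinks the
// slot to the remainder - or invalidates it if the remainder drops below
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER.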
    6381 
    6382  const bool m_OverlappingMoveSupported;
    6383 
    6384  uint32_t m_AllocationCount;
    6385  bool m_AllAllocations;
    6386 
    6387  VkDeviceSize m_BytesMoved;
    6388  uint32_t m_AllocationsMoved;
    6389 
    6390  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
    6391 
    6392  void PreprocessMetadata();
    6393  void PostprocessMetadata();
    6394  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
    6395 };
    6396 
    6397 struct VmaBlockDefragmentationContext
    6398 {
    6399  enum BLOCK_FLAG
    6400  {
    6401  BLOCK_FLAG_USED = 0x00000001,
    6402  };
    6403  uint32_t flags;
    6404  VkBuffer hBuffer;
    6405 };
    6406 
    6407 class VmaBlockVectorDefragmentationContext
    6408 {
    6409  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
    6410 public:
    6411  VkResult res;
    6412  bool mutexLocked;
    6413  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
    6414 
    6415  VmaBlockVectorDefragmentationContext(
    6416  VmaAllocator hAllocator,
    6417  VmaPool hCustomPool, // Optional.
    6418  VmaBlockVector* pBlockVector,
    6419  uint32_t currFrameIndex);
    6420  ~VmaBlockVectorDefragmentationContext();
    6421 
    6422  VmaPool GetCustomPool() const { return m_hCustomPool; }
    6423  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    6424  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
    6425 
    6426  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    6427  void AddAll() { m_AllAllocations = true; }
    6428 
    6429  void Begin(bool overlappingMoveSupported);
    6430 
    6431 private:
    6432  const VmaAllocator m_hAllocator;
    6433  // Null if not from custom pool.
    6434  const VmaPool m_hCustomPool;
    6435  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    6436  VmaBlockVector* const m_pBlockVector;
    6437  const uint32_t m_CurrFrameIndex;
    6438  // Owner of this object.
    6439  VmaDefragmentationAlgorithm* m_pAlgorithm;
    6440 
    6441  struct AllocInfo
    6442  {
    6443  VmaAllocation hAlloc;
    6444  VkBool32* pChanged;
    6445  };
    6446  // Used between constructor and Begin.
    6447  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    6448  bool m_AllAllocations;
    6449 };
    6450 
    6451 struct VmaDefragmentationContext_T
    6452 {
    6453 private:
    6454  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
    6455 public:
    6456  VmaDefragmentationContext_T(
    6457  VmaAllocator hAllocator,
    6458  uint32_t currFrameIndex,
    6459  uint32_t flags,
    6460  VmaDefragmentationStats* pStats);
    6461  ~VmaDefragmentationContext_T();
    6462 
    6463  void AddPools(uint32_t poolCount, VmaPool* pPools);
    6464  void AddAllocations(
    6465  uint32_t allocationCount,
    6466  VmaAllocation* pAllocations,
    6467  VkBool32* pAllocationsChanged);
    6468 
    6469  /*
    6470  Returns:
    6471  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
    6472  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
    6473  - Negative value if error occurred and object can be destroyed immediately.
    6474  */
    6475  VkResult Defragment(
    6476  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    6477  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    6478  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
    6479 
    6480 private:
    6481  const VmaAllocator m_hAllocator;
    6482  const uint32_t m_CurrFrameIndex;
    6483  const uint32_t m_Flags;
    6484  VmaDefragmentationStats* const m_pStats;
    6485  // Owner of these objects.
    6486  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    6487  // Owner of these objects.
    6488  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
    6489 };
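// Example (illustrative sketch of the public API this context backs; names
// follow the library's public interface):
//
//   VmaDefragmentationInfo2 info = {};
//   info.allocationCount = allocCount; // caller-provided allocations
//   info.pAllocations = pAllocs;
//   VmaDefragmentationContext ctx = VK_NULL_HANDLE;
//   VkResult res = vmaDefragmentationBegin(allocator, &info, VMA_NULL, &ctx);
//   // VK_NOT_READY: GPU copies were recorded into info.commandBuffer and
//   // must complete before the context is released with:
//   vmaDefragmentationEnd(allocator, ctx);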
    6490 
    6491 #if VMA_RECORDING_ENABLED
    6492 
    6493 class VmaRecorder
    6494 {
    6495 public:
    6496  VmaRecorder();
    6497  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    6498  void WriteConfiguration(
    6499  const VkPhysicalDeviceProperties& devProps,
    6500  const VkPhysicalDeviceMemoryProperties& memProps,
    6501  bool dedicatedAllocationExtensionEnabled);
    6502  ~VmaRecorder();
    6503 
    6504  void RecordCreateAllocator(uint32_t frameIndex);
    6505  void RecordDestroyAllocator(uint32_t frameIndex);
    6506  void RecordCreatePool(uint32_t frameIndex,
    6507  const VmaPoolCreateInfo& createInfo,
    6508  VmaPool pool);
    6509  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    6510  void RecordAllocateMemory(uint32_t frameIndex,
    6511  const VkMemoryRequirements& vkMemReq,
    6512  const VmaAllocationCreateInfo& createInfo,
    6513  VmaAllocation allocation);
    6514  void RecordAllocateMemoryPages(uint32_t frameIndex,
    6515  const VkMemoryRequirements& vkMemReq,
    6516  const VmaAllocationCreateInfo& createInfo,
    6517  uint64_t allocationCount,
    6518  const VmaAllocation* pAllocations);
    6519  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    6520  const VkMemoryRequirements& vkMemReq,
    6521  bool requiresDedicatedAllocation,
    6522  bool prefersDedicatedAllocation,
    6523  const VmaAllocationCreateInfo& createInfo,
    6524  VmaAllocation allocation);
    6525  void RecordAllocateMemoryForImage(uint32_t frameIndex,
    6526  const VkMemoryRequirements& vkMemReq,
    6527  bool requiresDedicatedAllocation,
    6528  bool prefersDedicatedAllocation,
    6529  const VmaAllocationCreateInfo& createInfo,
    6530  VmaAllocation allocation);
    6531  void RecordFreeMemory(uint32_t frameIndex,
    6532  VmaAllocation allocation);
    6533  void RecordFreeMemoryPages(uint32_t frameIndex,
    6534  uint64_t allocationCount,
    6535  const VmaAllocation* pAllocations);
    6536  void RecordResizeAllocation(
    6537  uint32_t frameIndex,
    6538  VmaAllocation allocation,
    6539  VkDeviceSize newSize);
    6540  void RecordSetAllocationUserData(uint32_t frameIndex,
    6541  VmaAllocation allocation,
    6542  const void* pUserData);
    6543  void RecordCreateLostAllocation(uint32_t frameIndex,
    6544  VmaAllocation allocation);
    6545  void RecordMapMemory(uint32_t frameIndex,
    6546  VmaAllocation allocation);
    6547  void RecordUnmapMemory(uint32_t frameIndex,
    6548  VmaAllocation allocation);
    6549  void RecordFlushAllocation(uint32_t frameIndex,
    6550  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6551  void RecordInvalidateAllocation(uint32_t frameIndex,
    6552  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6553  void RecordCreateBuffer(uint32_t frameIndex,
    6554  const VkBufferCreateInfo& bufCreateInfo,
    6555  const VmaAllocationCreateInfo& allocCreateInfo,
    6556  VmaAllocation allocation);
    6557  void RecordCreateImage(uint32_t frameIndex,
    6558  const VkImageCreateInfo& imageCreateInfo,
    6559  const VmaAllocationCreateInfo& allocCreateInfo,
    6560  VmaAllocation allocation);
    6561  void RecordDestroyBuffer(uint32_t frameIndex,
    6562  VmaAllocation allocation);
    6563  void RecordDestroyImage(uint32_t frameIndex,
    6564  VmaAllocation allocation);
    6565  void RecordTouchAllocation(uint32_t frameIndex,
    6566  VmaAllocation allocation);
    6567  void RecordGetAllocationInfo(uint32_t frameIndex,
    6568  VmaAllocation allocation);
    6569  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
    6570  VmaPool pool);
    6571  void RecordDefragmentationBegin(uint32_t frameIndex,
    6572  const VmaDefragmentationInfo2& info,
    6573  VmaDefragmentationContext ctx);
    6574  void RecordDefragmentationEnd(uint32_t frameIndex,
    6575  VmaDefragmentationContext ctx);
    6576 
    6577 private:
    6578  struct CallParams
    6579  {
    6580  uint32_t threadId;
    6581  double time;
    6582  };
    6583 
    6584  class UserDataString
    6585  {
    6586  public:
    6587  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
    6588  const char* GetString() const { return m_Str; }
    6589 
    6590  private:
    6591  char m_PtrStr[17];
    6592  const char* m_Str;
    6593  };
    6594 
    6595  bool m_UseMutex;
    6596  VmaRecordFlags m_Flags;
    6597  FILE* m_File;
    6598  VMA_MUTEX m_FileMutex;
    6599  int64_t m_Freq;
    6600  int64_t m_StartCounter;
    6601 
    6602  void GetBasicParams(CallParams& outParams);
    6603 
    6604  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
    6605  template<typename T>
    6606  void PrintPointerList(uint64_t count, const T* pItems)
    6607  {
    6608  if(count)
    6609  {
    6610  fprintf(m_File, "%p", pItems[0]);
    6611  for(uint64_t i = 1; i < count; ++i)
    6612  {
    6613  fprintf(m_File, " %p", pItems[i]);
    6614  }
    6615  }
    6616  }
    6617 
    6618  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
    6619  void Flush();
    6620 };
    6621 
    6622 #endif // #if VMA_RECORDING_ENABLED
    6623 
    6624 /*
    6625 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
    6626 */
    6627 class VmaAllocationObjectAllocator
    6628 {
    6629  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
    6630 public:
    6631  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
    6632 
    6633  VmaAllocation Allocate();
    6634  void Free(VmaAllocation hAlloc);
    6635 
    6636 private:
    6637  VMA_MUTEX m_Mutex;
    6638  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
    6639 };
    6640 
    6641 // Main allocator object.
    6642 struct VmaAllocator_T
    6643 {
    6644  VMA_CLASS_NO_COPY(VmaAllocator_T)
    6645 public:
    6646  bool m_UseMutex;
    6647  bool m_UseKhrDedicatedAllocation;
    6648  VkDevice m_hDevice;
    6649  bool m_AllocationCallbacksSpecified;
    6650  VkAllocationCallbacks m_AllocationCallbacks;
    6651  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    6652  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
    6653 
    6654  // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
    6655  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    6656  VMA_MUTEX m_HeapSizeLimitMutex;
    6657 
    6658  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    6659  VkPhysicalDeviceMemoryProperties m_MemProps;
    6660 
    6661  // Default pools.
    6662  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    6663 
    6664  // Each vector is sorted by memory (handle value).
    6665  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    6666  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    6667  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    6668 
    6669  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    6670  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    6671  ~VmaAllocator_T();
    6672 
    6673  const VkAllocationCallbacks* GetAllocationCallbacks() const
    6674  {
    6675  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    6676  }
    6677  const VmaVulkanFunctions& GetVulkanFunctions() const
    6678  {
    6679  return m_VulkanFunctions;
    6680  }
    6681 
    6682  VkDeviceSize GetBufferImageGranularity() const
    6683  {
    6684  return VMA_MAX(
    6685  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    6686  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    6687  }
    6688 
    6689  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    6690  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    6691 
    6692  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    6693  {
    6694  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    6695  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    6696  }
    6697  // True when the given memory type is HOST_VISIBLE but not HOST_COHERENT.
    6698  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    6699  {
    6700  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
    6701  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    6702  }
    6703  // Minimum alignment for all allocations in the given memory type.
    6704  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    6705  {
    6706  return IsMemoryTypeNonCoherent(memTypeIndex) ?
    6707  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
    6708  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    6709  }
    6710 
    6711  bool IsIntegratedGpu() const
    6712  {
    6713  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    6714  }
    6715 
    6716 #if VMA_RECORDING_ENABLED
    6717  VmaRecorder* GetRecorder() const { return m_pRecorder; }
    6718 #endif
    6719 
    6720  void GetBufferMemoryRequirements(
    6721  VkBuffer hBuffer,
    6722  VkMemoryRequirements& memReq,
    6723  bool& requiresDedicatedAllocation,
    6724  bool& prefersDedicatedAllocation) const;
    6725  void GetImageMemoryRequirements(
    6726  VkImage hImage,
    6727  VkMemoryRequirements& memReq,
    6728  bool& requiresDedicatedAllocation,
    6729  bool& prefersDedicatedAllocation) const;
    6730 
    6731  // Main allocation function.
    6732  VkResult AllocateMemory(
    6733  const VkMemoryRequirements& vkMemReq,
    6734  bool requiresDedicatedAllocation,
    6735  bool prefersDedicatedAllocation,
    6736  VkBuffer dedicatedBuffer,
    6737  VkImage dedicatedImage,
    6738  const VmaAllocationCreateInfo& createInfo,
    6739  VmaSuballocationType suballocType,
    6740  size_t allocationCount,
    6741  VmaAllocation* pAllocations);
    6742 
    6743  // Main deallocation function.
    6744  void FreeMemory(
    6745  size_t allocationCount,
    6746  const VmaAllocation* pAllocations);
    6747 
    6748  VkResult ResizeAllocation(
    6749  const VmaAllocation alloc,
    6750  VkDeviceSize newSize);
    6751 
    6752  void CalculateStats(VmaStats* pStats);
    6753 
    6754 #if VMA_STATS_STRING_ENABLED
    6755  void PrintDetailedMap(class VmaJsonWriter& json);
    6756 #endif
    6757 
    6758  VkResult DefragmentationBegin(
    6759  const VmaDefragmentationInfo2& info,
    6760  VmaDefragmentationStats* pStats,
    6761  VmaDefragmentationContext* pContext);
    6762  VkResult DefragmentationEnd(
    6763  VmaDefragmentationContext context);
    6764 
    6765  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    6766  bool TouchAllocation(VmaAllocation hAllocation);
    6767 
    6768  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    6769  void DestroyPool(VmaPool pool);
    6770  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    6771 
    6772  void SetCurrentFrameIndex(uint32_t frameIndex);
    6773  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
    6774 
    6775  void MakePoolAllocationsLost(
    6776  VmaPool hPool,
    6777  size_t* pLostAllocationCount);
    6778  VkResult CheckPoolCorruption(VmaPool hPool);
    6779  VkResult CheckCorruption(uint32_t memoryTypeBits);
    6780 
    6781  void CreateLostAllocation(VmaAllocation* pAllocation);
    6782 
    6783  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    6784  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    6785 
    6786  VkResult Map(VmaAllocation hAllocation, void** ppData);
    6787  void Unmap(VmaAllocation hAllocation);
    6788 
    6789  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    6790  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
    6791 
    6792  void FlushOrInvalidateAllocation(
    6793  VmaAllocation hAllocation,
    6794  VkDeviceSize offset, VkDeviceSize size,
    6795  VMA_CACHE_OPERATION op);
    6796 
    6797  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
    6798 
    6799  /*
    6800  Returns bit mask of memory types that can support defragmentation on GPU as
    6801  they support creation of required buffer for copy operations.
    6802  */
    6803  uint32_t GetGpuDefragmentationMemoryTypeBits();
    6804 
    6805 private:
    6806  VkDeviceSize m_PreferredLargeHeapBlockSize;
    6807 
    6808  VkPhysicalDevice m_PhysicalDevice;
    6809  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    6810  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
    6811 
    6812  VMA_RW_MUTEX m_PoolsMutex;
    6813  // Protected by m_PoolsMutex. Sorted by pointer value.
    6814  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    6815  uint32_t m_NextPoolId;
    6816 
    6817  VmaVulkanFunctions m_VulkanFunctions;
    6818 
    6819 #if VMA_RECORDING_ENABLED
    6820  VmaRecorder* m_pRecorder;
    6821 #endif
    6822 
    6823  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    6824 
    6825  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    6826 
    6827  VkResult AllocateMemoryOfType(
    6828  VkDeviceSize size,
    6829  VkDeviceSize alignment,
    6830  bool dedicatedAllocation,
    6831  VkBuffer dedicatedBuffer,
    6832  VkImage dedicatedImage,
    6833  const VmaAllocationCreateInfo& createInfo,
    6834  uint32_t memTypeIndex,
    6835  VmaSuballocationType suballocType,
    6836  size_t allocationCount,
    6837  VmaAllocation* pAllocations);
    6838 
    6839  // Helper function only to be used inside AllocateDedicatedMemory.
    6840  VkResult AllocateDedicatedMemoryPage(
    6841  VkDeviceSize size,
    6842  VmaSuballocationType suballocType,
    6843  uint32_t memTypeIndex,
    6844  const VkMemoryAllocateInfo& allocInfo,
    6845  bool map,
    6846  bool isUserDataString,
    6847  void* pUserData,
    6848  VmaAllocation* pAllocation);
    6849 
    6850  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
    6851  VkResult AllocateDedicatedMemory(
    6852  VkDeviceSize size,
    6853  VmaSuballocationType suballocType,
    6854  uint32_t memTypeIndex,
    6855  bool map,
    6856  bool isUserDataString,
    6857  void* pUserData,
    6858  VkBuffer dedicatedBuffer,
    6859  VkImage dedicatedImage,
    6860  size_t allocationCount,
    6861  VmaAllocation* pAllocations);
    6862 
    6863  void FreeDedicatedMemory(VmaAllocation allocation);
    6864 
    6865  /*
    6866  Calculates and returns bit mask of memory types that can support defragmentation
    6867  on GPU as they support creation of required buffer for copy operations.
    6868  */
    6869  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
    6870 };
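// Worked example for the alignment logic above: on a device with
// nonCoherentAtomSize = 64, a memory type that is HOST_VISIBLE but not
// HOST_COHERENT yields GetMemoryTypeMinAlignment() =
// max(VMA_DEBUG_ALIGNMENT, 64) = 64 (with the default VMA_DEBUG_ALIGNMENT),
// so mapped ranges can always be flushed/invalidated on the atom-size
// boundaries that vkFlushMappedMemoryRanges requires.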
    6871 
    6873 // Memory allocation #2 after VmaAllocator_T definition
    6874 
    6875 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    6876 {
    6877  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    6878 }
    6879 
    6880 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    6881 {
    6882  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    6883 }
    6884 
    6885 template<typename T>
    6886 static T* VmaAllocate(VmaAllocator hAllocator)
    6887 {
    6888  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    6889 }
    6890 
    6891 template<typename T>
    6892 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    6893 {
    6894  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    6895 }
    6896 
    6897 template<typename T>
    6898 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    6899 {
    6900  if(ptr != VMA_NULL)
    6901  {
    6902  ptr->~T();
    6903  VmaFree(hAllocator, ptr);
    6904  }
    6905 }
    6906 
    6907 template<typename T>
    6908 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    6909 {
    6910  if(ptr != VMA_NULL)
    6911  {
    6912  for(size_t i = count; i--; )
    6913  ptr[i].~T();
    6914  VmaFree(hAllocator, ptr);
    6915  }
    6916 }
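// Example (illustrative): typical pairing of these helpers - VmaMalloc only
// reserves raw storage, so construction is done with placement new. MyType
// here is hypothetical:
//
//   MyType* p = VmaAllocate<MyType>(hAllocator);
//   new(p) MyType();
//   // ...
//   vma_delete(hAllocator, p); // runs ~MyType(), then VmaFree()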
    6917 
    6919 // VmaStringBuilder
    6920 
    6921 #if VMA_STATS_STRING_ENABLED
    6922 
    6923 class VmaStringBuilder
    6924 {
    6925 public:
    6926  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    6927  size_t GetLength() const { return m_Data.size(); }
    6928  const char* GetData() const { return m_Data.data(); }
    6929 
    6930  void Add(char ch) { m_Data.push_back(ch); }
    6931  void Add(const char* pStr);
    6932  void AddNewLine() { Add('\n'); }
    6933  void AddNumber(uint32_t num);
    6934  void AddNumber(uint64_t num);
    6935  void AddPointer(const void* ptr);
    6936 
    6937 private:
    6938  VmaVector< char, VmaStlAllocator<char> > m_Data;
    6939 };
    6940 
    6941 void VmaStringBuilder::Add(const char* pStr)
    6942 {
    6943  const size_t strLen = strlen(pStr);
    6944  if(strLen > 0)
    6945  {
    6946  const size_t oldCount = m_Data.size();
    6947  m_Data.resize(oldCount + strLen);
    6948  memcpy(m_Data.data() + oldCount, pStr, strLen);
    6949  }
    6950 }
    6951 
    6952 void VmaStringBuilder::AddNumber(uint32_t num)
    6953 {
    6954  char buf[11];
    6955  VmaUint32ToStr(buf, sizeof(buf), num);
    6956  Add(buf);
    6957 }
    6958 
    6959 void VmaStringBuilder::AddNumber(uint64_t num)
    6960 {
    6961  char buf[21];
    6962  VmaUint64ToStr(buf, sizeof(buf), num);
    6963  Add(buf);
    6964 }
    6965 
    6966 void VmaStringBuilder::AddPointer(const void* ptr)
    6967 {
    6968  char buf[21];
    6969  VmaPtrToStr(buf, sizeof(buf), ptr);
    6970  Add(buf);
    6971 }
    6972 
    6973 #endif // #if VMA_STATS_STRING_ENABLED
    6974 
    6976 // VmaJsonWriter
    6977 
    6978 #if VMA_STATS_STRING_ENABLED
    6979 
    6980 class VmaJsonWriter
    6981 {
    6982  VMA_CLASS_NO_COPY(VmaJsonWriter)
    6983 public:
    6984  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    6985  ~VmaJsonWriter();
    6986 
    6987  void BeginObject(bool singleLine = false);
    6988  void EndObject();
    6989 
    6990  void BeginArray(bool singleLine = false);
    6991  void EndArray();
    6992 
    6993  void WriteString(const char* pStr);
    6994  void BeginString(const char* pStr = VMA_NULL);
    6995  void ContinueString(const char* pStr);
    6996  void ContinueString(uint32_t n);
    6997  void ContinueString(uint64_t n);
    6998  void ContinueString_Pointer(const void* ptr);
    6999  void EndString(const char* pStr = VMA_NULL);
    7000 
    7001  void WriteNumber(uint32_t n);
    7002  void WriteNumber(uint64_t n);
    7003  void WriteBool(bool b);
    7004  void WriteNull();
    7005 
    7006 private:
    7007  static const char* const INDENT;
    7008 
    7009  enum COLLECTION_TYPE
    7010  {
    7011  COLLECTION_TYPE_OBJECT,
    7012  COLLECTION_TYPE_ARRAY,
    7013  };
    7014  struct StackItem
    7015  {
    7016  COLLECTION_TYPE type;
    7017  uint32_t valueCount;
    7018  bool singleLineMode;
    7019  };
    7020 
    7021  VmaStringBuilder& m_SB;
    7022  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    7023  bool m_InsideString;
    7024 
    7025  void BeginValue(bool isString);
    7026  void WriteIndent(bool oneLess = false);
    7027 };
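// Example (illustrative): inside an object, writes alternate name/value -
// BeginValue() enforces this via the valueCount parity assert below:
//
//   VmaJsonWriter json(pAllocationCallbacks, sb);
//   json.BeginObject();
//   json.WriteString("Size");  // name (must be a string)
//   json.WriteNumber(size);    // value
//   json.WriteString("Type");
//   json.WriteString("BUFFER");
//   json.EndObject();          // -> { "Size": ..., "Type": "BUFFER" }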
    7028 
    7029 const char* const VmaJsonWriter::INDENT = " ";
    7030 
    7031 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    7032  m_SB(sb),
    7033  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    7034  m_InsideString(false)
    7035 {
    7036 }
    7037 
    7038 VmaJsonWriter::~VmaJsonWriter()
    7039 {
    7040  VMA_ASSERT(!m_InsideString);
    7041  VMA_ASSERT(m_Stack.empty());
    7042 }
    7043 
    7044 void VmaJsonWriter::BeginObject(bool singleLine)
    7045 {
    7046  VMA_ASSERT(!m_InsideString);
    7047 
    7048  BeginValue(false);
    7049  m_SB.Add('{');
    7050 
    7051  StackItem item;
    7052  item.type = COLLECTION_TYPE_OBJECT;
    7053  item.valueCount = 0;
    7054  item.singleLineMode = singleLine;
    7055  m_Stack.push_back(item);
    7056 }
    7057 
    7058 void VmaJsonWriter::EndObject()
    7059 {
    7060  VMA_ASSERT(!m_InsideString);
    7061 
    7062  WriteIndent(true);
    7063  m_SB.Add('}');
    7064 
    7065  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    7066  m_Stack.pop_back();
    7067 }
    7068 
    7069 void VmaJsonWriter::BeginArray(bool singleLine)
    7070 {
    7071  VMA_ASSERT(!m_InsideString);
    7072 
    7073  BeginValue(false);
    7074  m_SB.Add('[');
    7075 
    7076  StackItem item;
    7077  item.type = COLLECTION_TYPE_ARRAY;
    7078  item.valueCount = 0;
    7079  item.singleLineMode = singleLine;
    7080  m_Stack.push_back(item);
    7081 }
    7082 
    7083 void VmaJsonWriter::EndArray()
    7084 {
    7085  VMA_ASSERT(!m_InsideString);
    7086 
    7087  WriteIndent(true);
    7088  m_SB.Add(']');
    7089 
    7090  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    7091  m_Stack.pop_back();
    7092 }
    7093 
    7094 void VmaJsonWriter::WriteString(const char* pStr)
    7095 {
    7096  BeginString(pStr);
    7097  EndString();
    7098 }
    7099 
    7100 void VmaJsonWriter::BeginString(const char* pStr)
    7101 {
    7102  VMA_ASSERT(!m_InsideString);
    7103 
    7104  BeginValue(true);
    7105  m_SB.Add('"');
    7106  m_InsideString = true;
    7107  if(pStr != VMA_NULL && pStr[0] != '\0')
    7108  {
    7109  ContinueString(pStr);
    7110  }
    7111 }
    7112 
    7113 void VmaJsonWriter::ContinueString(const char* pStr)
    7114 {
    7115  VMA_ASSERT(m_InsideString);
    7116 
    7117  const size_t strLen = strlen(pStr);
    7118  for(size_t i = 0; i < strLen; ++i)
    7119  {
    7120  char ch = pStr[i];
    7121  if(ch == '\\')
    7122  {
    7123  m_SB.Add("\\\\");
    7124  }
    7125  else if(ch == '"')
    7126  {
    7127  m_SB.Add("\\\"");
    7128  }
    7129  else if(ch >= 32)
    7130  {
    7131  m_SB.Add(ch);
    7132  }
    7133  else switch(ch)
    7134  {
    7135  case '\b':
    7136  m_SB.Add("\\b");
    7137  break;
    7138  case '\f':
    7139  m_SB.Add("\\f");
    7140  break;
    7141  case '\n':
    7142  m_SB.Add("\\n");
    7143  break;
    7144  case '\r':
    7145  m_SB.Add("\\r");
    7146  break;
    7147  case '\t':
    7148  m_SB.Add("\\t");
    7149  break;
    7150  default:
    7151  VMA_ASSERT(0 && "Character not currently supported.");
    7152  break;
    7153  }
    7154  }
    7155 }
    7156 
    7157 void VmaJsonWriter::ContinueString(uint32_t n)
    7158 {
    7159  VMA_ASSERT(m_InsideString);
    7160  m_SB.AddNumber(n);
    7161 }
    7162 
    7163 void VmaJsonWriter::ContinueString(uint64_t n)
    7164 {
    7165  VMA_ASSERT(m_InsideString);
    7166  m_SB.AddNumber(n);
    7167 }
    7168 
    7169 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    7170 {
    7171  VMA_ASSERT(m_InsideString);
    7172  m_SB.AddPointer(ptr);
    7173 }
    7174 
    7175 void VmaJsonWriter::EndString(const char* pStr)
    7176 {
    7177  VMA_ASSERT(m_InsideString);
    7178  if(pStr != VMA_NULL && pStr[0] != '\0')
    7179  {
    7180  ContinueString(pStr);
    7181  }
    7182  m_SB.Add('"');
    7183  m_InsideString = false;
    7184 }
    7185 
    7186 void VmaJsonWriter::WriteNumber(uint32_t n)
    7187 {
    7188  VMA_ASSERT(!m_InsideString);
    7189  BeginValue(false);
    7190  m_SB.AddNumber(n);
    7191 }
    7192 
    7193 void VmaJsonWriter::WriteNumber(uint64_t n)
    7194 {
    7195  VMA_ASSERT(!m_InsideString);
    7196  BeginValue(false);
    7197  m_SB.AddNumber(n);
    7198 }
    7199 
    7200 void VmaJsonWriter::WriteBool(bool b)
    7201 {
    7202  VMA_ASSERT(!m_InsideString);
    7203  BeginValue(false);
    7204  m_SB.Add(b ? "true" : "false");
    7205 }
    7206 
    7207 void VmaJsonWriter::WriteNull()
    7208 {
    7209  VMA_ASSERT(!m_InsideString);
    7210  BeginValue(false);
    7211  m_SB.Add("null");
    7212 }
    7213 
    7214 void VmaJsonWriter::BeginValue(bool isString)
    7215 {
    7216  if(!m_Stack.empty())
    7217  {
    7218  StackItem& currItem = m_Stack.back();
    7219  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7220  currItem.valueCount % 2 == 0)
    7221  {
    7222  VMA_ASSERT(isString);
    7223  }
    7224 
    7225  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7226  currItem.valueCount % 2 != 0)
    7227  {
    7228  m_SB.Add(": ");
    7229  }
    7230  else if(currItem.valueCount > 0)
    7231  {
    7232  m_SB.Add(", ");
    7233  WriteIndent();
    7234  }
    7235  else
    7236  {
    7237  WriteIndent();
    7238  }
    7239  ++currItem.valueCount;
    7240  }
    7241 }
    7242 
    7243 void VmaJsonWriter::WriteIndent(bool oneLess)
    7244 {
    7245  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    7246  {
    7247  m_SB.AddNewLine();
    7248 
    7249  size_t count = m_Stack.size();
    7250  if(count > 0 && oneLess)
    7251  {
    7252  --count;
    7253  }
    7254  for(size_t i = 0; i < count; ++i)
    7255  {
    7256  m_SB.Add(INDENT);
    7257  }
    7258  }
    7259 }
    7260 
    7261 #endif // #if VMA_STATS_STRING_ENABLED
    7262 
    7264 
    7265 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    7266 {
    7267  if(IsUserDataString())
    7268  {
    7269  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    7270 
    7271  FreeUserDataString(hAllocator);
    7272 
    7273  if(pUserData != VMA_NULL)
    7274  {
    7275  const char* const newStrSrc = (char*)pUserData;
    7276  const size_t newStrLen = strlen(newStrSrc);
    7277  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    7278  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    7279  m_pUserData = newStrDst;
    7280  }
    7281  }
    7282  else
    7283  {
    7284  m_pUserData = pUserData;
    7285  }
    7286 }
    7287 
    7288 void VmaAllocation_T::ChangeBlockAllocation(
    7289  VmaAllocator hAllocator,
    7290  VmaDeviceMemoryBlock* block,
    7291  VkDeviceSize offset)
    7292 {
    7293  VMA_ASSERT(block != VMA_NULL);
    7294  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    7295 
    7296  // Move mapping reference counter from old block to new block.
    7297  if(block != m_BlockAllocation.m_Block)
    7298  {
    7299  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    7300  if(IsPersistentMap())
    7301  ++mapRefCount;
    7302  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    7303  block->Map(hAllocator, mapRefCount, VMA_NULL);
    7304  }
    7305 
    7306  m_BlockAllocation.m_Block = block;
    7307  m_BlockAllocation.m_Offset = offset;
    7308 }
    7309 
    7310 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
    7311 {
    7312  VMA_ASSERT(newSize > 0);
    7313  m_Size = newSize;
    7314 }
    7315 
    7316 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
    7317 {
    7318  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    7319  m_BlockAllocation.m_Offset = newOffset;
    7320 }
    7321 
    7322 VkDeviceSize VmaAllocation_T::GetOffset() const
    7323 {
    7324  switch(m_Type)
    7325  {
    7326  case ALLOCATION_TYPE_BLOCK:
    7327  return m_BlockAllocation.m_Offset;
    7328  case ALLOCATION_TYPE_DEDICATED:
    7329  return 0;
    7330  default:
    7331  VMA_ASSERT(0);
    7332  return 0;
    7333  }
    7334 }
    7335 
    7336 VkDeviceMemory VmaAllocation_T::GetMemory() const
    7337 {
    7338  switch(m_Type)
    7339  {
    7340  case ALLOCATION_TYPE_BLOCK:
    7341  return m_BlockAllocation.m_Block->GetDeviceMemory();
    7342  case ALLOCATION_TYPE_DEDICATED:
    7343  return m_DedicatedAllocation.m_hMemory;
    7344  default:
    7345  VMA_ASSERT(0);
    7346  return VK_NULL_HANDLE;
    7347  }
    7348 }
    7349 
    7350 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    7351 {
    7352  switch(m_Type)
    7353  {
    7354  case ALLOCATION_TYPE_BLOCK:
    7355  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    7356  case ALLOCATION_TYPE_DEDICATED:
    7357  return m_DedicatedAllocation.m_MemoryTypeIndex;
    7358  default:
    7359  VMA_ASSERT(0);
    7360  return UINT32_MAX;
    7361  }
    7362 }
    7363 
    7364 void* VmaAllocation_T::GetMappedData() const
    7365 {
    7366  switch(m_Type)
    7367  {
    7368  case ALLOCATION_TYPE_BLOCK:
    7369  if(m_MapCount != 0)
    7370  {
    7371  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    7372  VMA_ASSERT(pBlockData != VMA_NULL);
    7373  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    7374  }
    7375  else
    7376  {
    7377  return VMA_NULL;
    7378  }
    7379  break;
    7380  case ALLOCATION_TYPE_DEDICATED:
    7381  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    7382  return m_DedicatedAllocation.m_pMappedData;
    7383  default:
    7384  VMA_ASSERT(0);
    7385  return VMA_NULL;
    7386  }
    7387 }
    7388 
    7389 bool VmaAllocation_T::CanBecomeLost() const
    7390 {
    7391  switch(m_Type)
    7392  {
    7393  case ALLOCATION_TYPE_BLOCK:
    7394  return m_BlockAllocation.m_CanBecomeLost;
    7395  case ALLOCATION_TYPE_DEDICATED:
    7396  return false;
    7397  default:
    7398  VMA_ASSERT(0);
    7399  return false;
    7400  }
    7401 }
    7402 
    7403 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7404 {
    7405  VMA_ASSERT(CanBecomeLost());
    7406 
    7407  /*
    7408  Warning: This is a carefully designed algorithm.
    7409  Do not modify unless you really know what you're doing :)
    7410  */
    7411  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    7412  for(;;)
    7413  {
    7414  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    7415  {
    7416  VMA_ASSERT(0);
    7417  return false;
    7418  }
    7419  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    7420  {
    7421  return false;
    7422  }
    7423  else // Last use time earlier than current time.
    7424  {
    7425  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    7426  {
    7427  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    7428  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    7429  return true;
    7430  }
    7431  }
    7432  }
    7433 }
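// Worked example: with frameInUseCount = 2 and an allocation last used in
// frame 10, MakeLost() fails through frame 12 (10 + 2 >= currentFrameIndex)
// and can first succeed in frame 13. The compare-exchange loop makes the
// transition to VMA_FRAME_INDEX_LOST race-free when several threads attempt
// it concurrently.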
    7434 
    7435 #if VMA_STATS_STRING_ENABLED
    7436 
    7437 // Correspond to values of enum VmaSuballocationType.
    7438 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    7439  "FREE",
    7440  "UNKNOWN",
    7441  "BUFFER",
    7442  "IMAGE_UNKNOWN",
    7443  "IMAGE_LINEAR",
    7444  "IMAGE_OPTIMAL",
    7445 };
    7446 
    7447 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
    7448 {
    7449  json.WriteString("Type");
    7450  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
    7451 
    7452  json.WriteString("Size");
    7453  json.WriteNumber(m_Size);
    7454 
    7455  if(m_pUserData != VMA_NULL)
    7456  {
    7457  json.WriteString("UserData");
    7458  if(IsUserDataString())
    7459  {
    7460  json.WriteString((const char*)m_pUserData);
    7461  }
    7462  else
    7463  {
    7464  json.BeginString();
    7465  json.ContinueString_Pointer(m_pUserData);
    7466  json.EndString();
    7467  }
    7468  }
    7469 
    7470  json.WriteString("CreationFrameIndex");
    7471  json.WriteNumber(m_CreationFrameIndex);
    7472 
    7473  json.WriteString("LastUseFrameIndex");
    7474  json.WriteNumber(GetLastUseFrameIndex());
    7475 
    7476  if(m_BufferImageUsage != 0)
    7477  {
    7478  json.WriteString("Usage");
    7479  json.WriteNumber(m_BufferImageUsage);
    7480  }
    7481 }
    7482 
    7483 #endif
    7484 
    7485 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    7486 {
    7487  VMA_ASSERT(IsUserDataString());
    7488  if(m_pUserData != VMA_NULL)
    7489  {
    7490  char* const oldStr = (char*)m_pUserData;
    7491  const size_t oldStrLen = strlen(oldStr);
    7492  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    7493  m_pUserData = VMA_NULL;
    7494  }
    7495 }
    7496 
    7497 void VmaAllocation_T::BlockAllocMap()
    7498 {
    7499  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7500 
    7501  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7502  {
    7503  ++m_MapCount;
    7504  }
    7505  else
    7506  {
    7507  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    7508  }
    7509 }
    7510 
    7511 void VmaAllocation_T::BlockAllocUnmap()
    7512 {
    7513  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7514 
    7515  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7516  {
    7517  --m_MapCount;
    7518  }
    7519  else
    7520  {
    7521  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    7522  }
    7523 }
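// Example: m_MapCount packs two pieces of state - the low bits are the plain
// map reference count (capped at 0x7F above) and MAP_COUNT_FLAG_PERSISTENT_MAP
// marks allocations created persistently mapped, e.g.:
//
//   persistently mapped + Map() called twice:  MAP_COUNT_FLAG_PERSISTENT_MAP | 2
//   not persistent, Map() called once:         1
//
// which is why the flag is masked out before every count check.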
    7524 
    7525 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    7526 {
    7527  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7528 
    7529  if(m_MapCount != 0)
    7530  {
    7531  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7532  {
    7533  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    7534  *ppData = m_DedicatedAllocation.m_pMappedData;
    7535  ++m_MapCount;
    7536  return VK_SUCCESS;
    7537  }
    7538  else
    7539  {
    7540  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    7541  return VK_ERROR_MEMORY_MAP_FAILED;
    7542  }
    7543  }
    7544  else
    7545  {
    7546  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    7547  hAllocator->m_hDevice,
    7548  m_DedicatedAllocation.m_hMemory,
    7549  0, // offset
    7550  VK_WHOLE_SIZE,
    7551  0, // flags
    7552  ppData);
    7553  if(result == VK_SUCCESS)
    7554  {
    7555  m_DedicatedAllocation.m_pMappedData = *ppData;
    7556  m_MapCount = 1;
    7557  }
    7558  return result;
    7559  }
    7560 }
    7561 
    7562 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    7563 {
    7564  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7565 
    7566  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7567  {
    7568  --m_MapCount;
    7569  if(m_MapCount == 0)
    7570  {
    7571  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    7572  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    7573  hAllocator->m_hDevice,
    7574  m_DedicatedAllocation.m_hMemory);
    7575  }
    7576  }
    7577  else
    7578  {
    7579  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    7580  }
    7581 }
    7582 
    7583 #if VMA_STATS_STRING_ENABLED
    7584 
    7585 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    7586 {
    7587  json.BeginObject();
    7588 
    7589  json.WriteString("Blocks");
    7590  json.WriteNumber(stat.blockCount);
    7591 
    7592  json.WriteString("Allocations");
    7593  json.WriteNumber(stat.allocationCount);
    7594 
    7595  json.WriteString("UnusedRanges");
    7596  json.WriteNumber(stat.unusedRangeCount);
    7597 
    7598  json.WriteString("UsedBytes");
    7599  json.WriteNumber(stat.usedBytes);
    7600 
    7601  json.WriteString("UnusedBytes");
    7602  json.WriteNumber(stat.unusedBytes);
    7603 
    7604  if(stat.allocationCount > 1)
    7605  {
    7606  json.WriteString("AllocationSize");
    7607  json.BeginObject(true);
    7608  json.WriteString("Min");
    7609  json.WriteNumber(stat.allocationSizeMin);
    7610  json.WriteString("Avg");
    7611  json.WriteNumber(stat.allocationSizeAvg);
    7612  json.WriteString("Max");
    7613  json.WriteNumber(stat.allocationSizeMax);
    7614  json.EndObject();
    7615  }
    7616 
    7617  if(stat.unusedRangeCount > 1)
    7618  {
    7619  json.WriteString("UnusedRangeSize");
    7620  json.BeginObject(true);
    7621  json.WriteString("Min");
    7622  json.WriteNumber(stat.unusedRangeSizeMin);
    7623  json.WriteString("Avg");
    7624  json.WriteNumber(stat.unusedRangeSizeAvg);
    7625  json.WriteString("Max");
    7626  json.WriteNumber(stat.unusedRangeSizeMax);
    7627  json.EndObject();
    7628  }
    7629 
    7630  json.EndObject();
    7631 }
    7632 
    7633 #endif // #if VMA_STATS_STRING_ENABLED
    7634 
    7635 struct VmaSuballocationItemSizeLess
    7636 {
    7637  bool operator()(
    7638  const VmaSuballocationList::iterator lhs,
    7639  const VmaSuballocationList::iterator rhs) const
    7640  {
    7641  return lhs->size < rhs->size;
    7642  }
    7643  bool operator()(
    7644  const VmaSuballocationList::iterator lhs,
    7645  VkDeviceSize rhsSize) const
    7646  {
    7647  return lhs->size < rhsSize;
    7648  }
    7649 };
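// Example (illustrative): the second operator() overload compares an
// iterator against a raw VkDeviceSize, which lets a size-sorted vector of
// iterators such as m_FreeSuballocationsBySize be binary-searched by size
// alone, along the lines of:
//
//   VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
//       m_FreeSuballocationsBySize.data(),
//       m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
//       requiredSize,
//       VmaSuballocationItemSizeLess());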
    7650 
    7651 
    7653 // class VmaBlockMetadata
    7654 
    7655 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    7656  m_Size(0),
    7657  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
    7658 {
    7659 }
    7660 
    7661 #if VMA_STATS_STRING_ENABLED
    7662 
    7663 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    7664  VkDeviceSize unusedBytes,
    7665  size_t allocationCount,
    7666  size_t unusedRangeCount) const
    7667 {
    7668  json.BeginObject();
    7669 
    7670  json.WriteString("TotalBytes");
    7671  json.WriteNumber(GetSize());
    7672 
    7673  json.WriteString("UnusedBytes");
    7674  json.WriteNumber(unusedBytes);
    7675 
    7676  json.WriteString("Allocations");
    7677  json.WriteNumber((uint64_t)allocationCount);
    7678 
    7679  json.WriteString("UnusedRanges");
    7680  json.WriteNumber((uint64_t)unusedRangeCount);
    7681 
    7682  json.WriteString("Suballocations");
    7683  json.BeginArray();
    7684 }
    7685 
    7686 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    7687  VkDeviceSize offset,
    7688  VmaAllocation hAllocation) const
    7689 {
    7690  json.BeginObject(true);
    7691 
    7692  json.WriteString("Offset");
    7693  json.WriteNumber(offset);
    7694 
    7695  hAllocation->PrintParameters(json);
    7696 
    7697  json.EndObject();
    7698 }
    7699 
    7700 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    7701  VkDeviceSize offset,
    7702  VkDeviceSize size) const
    7703 {
    7704  json.BeginObject(true);
    7705 
    7706  json.WriteString("Offset");
    7707  json.WriteNumber(offset);
    7708 
    7709  json.WriteString("Type");
    7710  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    7711 
    7712  json.WriteString("Size");
    7713  json.WriteNumber(size);
    7714 
    7715  json.EndObject();
    7716 }
    7717 
    7718 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
    7719 {
    7720  json.EndArray();
    7721  json.EndObject();
    7722 }
    7723 
    7724 #endif // #if VMA_STATS_STRING_ENABLED
    7725 
    7726 ////////////////////////////////////////////////////////////////////////////////
    7727 // class VmaBlockMetadata_Generic
    7728 
    7729 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    7730  VmaBlockMetadata(hAllocator),
    7731  m_FreeCount(0),
    7732  m_SumFreeSize(0),
    7733  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    7734  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    7735 {
    7736 }
    7737 
    7738 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
    7739 {
    7740 }
    7741 
    7742 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    7743 {
    7744  VmaBlockMetadata::Init(size);
    7745 
    7746  m_FreeCount = 1;
    7747  m_SumFreeSize = size;
    7748 
    7749  VmaSuballocation suballoc = {};
    7750  suballoc.offset = 0;
    7751  suballoc.size = size;
    7752  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7753  suballoc.hAllocation = VK_NULL_HANDLE;
    7754 
    7755  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7756  m_Suballocations.push_back(suballoc);
    7757  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    7758  --suballocItem;
    7759  m_FreeSuballocationsBySize.push_back(suballocItem);
    7760 }
    7761 
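// Validate() recomputes everything from the suballocation list and checks the
// invariants of this metadata: offsets are contiguous, no two free ranges are
// adjacent (they should have been merged), m_FreeSuballocationsBySize contains
// exactly the registered free ranges sorted by size, and the cached totals
// m_FreeCount and m_SumFreeSize match the recomputed values.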
    7762 bool VmaBlockMetadata_Generic::Validate() const
    7763 {
    7764  VMA_VALIDATE(!m_Suballocations.empty());
    7765 
    7766  // Expected offset of new suballocation as calculated from previous ones.
    7767  VkDeviceSize calculatedOffset = 0;
    7768  // Expected number of free suballocations as calculated from traversing their list.
    7769  uint32_t calculatedFreeCount = 0;
    7770  // Expected sum size of free suballocations as calculated from traversing their list.
    7771  VkDeviceSize calculatedSumFreeSize = 0;
    7772  // Expected number of free suballocations that should be registered in
    7773  // m_FreeSuballocationsBySize calculated from traversing their list.
    7774  size_t freeSuballocationsToRegister = 0;
    7775  // True if previous visited suballocation was free.
    7776  bool prevFree = false;
    7777 
    7778  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7779  suballocItem != m_Suballocations.cend();
    7780  ++suballocItem)
    7781  {
    7782  const VmaSuballocation& subAlloc = *suballocItem;
    7783 
    7784  // Actual offset of this suballocation doesn't match expected one.
    7785  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
    7786 
    7787  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7788  // Two adjacent free suballocations are invalid. They should be merged.
    7789  VMA_VALIDATE(!prevFree || !currFree);
    7790 
    7791  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
    7792 
    7793  if(currFree)
    7794  {
    7795  calculatedSumFreeSize += subAlloc.size;
    7796  ++calculatedFreeCount;
    7797  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7798  {
    7799  ++freeSuballocationsToRegister;
    7800  }
    7801 
    7802  // Margin required between allocations - every free space must be at least that large.
    7803  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
    7804  }
    7805  else
    7806  {
    7807  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
    7808  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
    7809 
    7810  // Margin required between allocations - previous allocation must be free.
    7811  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
    7812  }
    7813 
    7814  calculatedOffset += subAlloc.size;
    7815  prevFree = currFree;
    7816  }
    7817 
    7818  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    7819  // match expected one.
    7820  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
    7821 
    7822  VkDeviceSize lastSize = 0;
    7823  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    7824  {
    7825  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    7826 
    7827  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    7828  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7829  // They must be sorted by size ascending.
    7830  VMA_VALIDATE(suballocItem->size >= lastSize);
    7831 
    7832  lastSize = suballocItem->size;
    7833  }
    7834 
    7835  // Check if totals match calculated values.
    7836  VMA_VALIDATE(ValidateFreeSuballocationList());
    7837  VMA_VALIDATE(calculatedOffset == GetSize());
    7838  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    7839  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
    7840 
    7841  return true;
    7842 }
    7843 
    7844 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    7845 {
    7846  if(!m_FreeSuballocationsBySize.empty())
    7847  {
    7848  return m_FreeSuballocationsBySize.back()->size;
    7849  }
    7850  else
    7851  {
    7852  return 0;
    7853  }
    7854 }
    7855 
    7856 bool VmaBlockMetadata_Generic::IsEmpty() const
    7857 {
    7858  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    7859 }
    7860 
    7861 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7862 {
    7863  outInfo.blockCount = 1;
    7864 
    7865  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7866  outInfo.allocationCount = rangeCount - m_FreeCount;
    7867  outInfo.unusedRangeCount = m_FreeCount;
    7868 
    7869  outInfo.unusedBytes = m_SumFreeSize;
    7870  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    7871 
    7872  outInfo.allocationSizeMin = UINT64_MAX;
    7873  outInfo.allocationSizeMax = 0;
    7874  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7875  outInfo.unusedRangeSizeMax = 0;
    7876 
    7877  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7878  suballocItem != m_Suballocations.cend();
    7879  ++suballocItem)
    7880  {
    7881  const VmaSuballocation& suballoc = *suballocItem;
    7882  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    7883  {
    7884  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7885  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    7886  }
    7887  else
    7888  {
    7889  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    7890  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    7891  }
    7892  }
    7893 }
    7894 
    7895 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    7896 {
    7897  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7898 
    7899  inoutStats.size += GetSize();
    7900  inoutStats.unusedSize += m_SumFreeSize;
    7901  inoutStats.allocationCount += rangeCount - m_FreeCount;
    7902  inoutStats.unusedRangeCount += m_FreeCount;
    7903  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    7904 }
    7905 
    7906 #if VMA_STATS_STRING_ENABLED
    7907 
    7908 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    7909 {
    7910  PrintDetailedMap_Begin(json,
    7911  m_SumFreeSize, // unusedBytes
    7912  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    7913  m_FreeCount); // unusedRangeCount
    7914 
    7915  size_t i = 0;
    7916  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7917  suballocItem != m_Suballocations.cend();
    7918  ++suballocItem, ++i)
    7919  {
    7920  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7921  {
    7922  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    7923  }
    7924  else
    7925  {
    7926  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    7927  }
    7928  }
    7929 
    7930  PrintDetailedMap_End(json);
    7931 }
    7932 
    7933 #endif // #if VMA_STATS_STRING_ENABLED
    7934 
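// Tries to find a place for a new allocation inside this block:
// - BEST_FIT: binary search in m_FreeSuballocationsBySize for the smallest
//   free range that is large enough.
// - internal MIN_OFFSET: linear scan of the whole suballocation list.
// - WORST_FIT / FIRST_FIT: scan of registered free ranges from the biggest.
// With canMakeOtherLost, a brute-force scan may additionally claim
// allocations that are allowed to become lost.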
    7935 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    7936  uint32_t currentFrameIndex,
    7937  uint32_t frameInUseCount,
    7938  VkDeviceSize bufferImageGranularity,
    7939  VkDeviceSize allocSize,
    7940  VkDeviceSize allocAlignment,
    7941  bool upperAddress,
    7942  VmaSuballocationType allocType,
    7943  bool canMakeOtherLost,
    7944  uint32_t strategy,
    7945  VmaAllocationRequest* pAllocationRequest)
    7946 {
    7947  VMA_ASSERT(allocSize > 0);
    7948  VMA_ASSERT(!upperAddress);
    7949  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    7950  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    7951  VMA_HEAVY_ASSERT(Validate());
    7952 
    7953  pAllocationRequest->type = VmaAllocationRequestType::Normal;
    7954 
    7955  // There is not enough total free space in this block to fulfill the request: Early return.
    7956  if(canMakeOtherLost == false &&
    7957  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    7958  {
    7959  return false;
    7960  }
    7961 
    7962  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
    7963  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    7964  if(freeSuballocCount > 0)
    7965  {
    7966  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    7967  {
    7968  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    7969  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7970  m_FreeSuballocationsBySize.data(),
    7971  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7972  allocSize + 2 * VMA_DEBUG_MARGIN,
    7973  VmaSuballocationItemSizeLess());
    7974  size_t index = it - m_FreeSuballocationsBySize.data();
    7975  for(; index < freeSuballocCount; ++index)
    7976  {
    7977  if(CheckAllocation(
    7978  currentFrameIndex,
    7979  frameInUseCount,
    7980  bufferImageGranularity,
    7981  allocSize,
    7982  allocAlignment,
    7983  allocType,
    7984  m_FreeSuballocationsBySize[index],
    7985  false, // canMakeOtherLost
    7986  &pAllocationRequest->offset,
    7987  &pAllocationRequest->itemsToMakeLostCount,
    7988  &pAllocationRequest->sumFreeSize,
    7989  &pAllocationRequest->sumItemSize))
    7990  {
    7991  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7992  return true;
    7993  }
    7994  }
    7995  }
    7996  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
    7997  {
    7998  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7999  it != m_Suballocations.end();
    8000  ++it)
    8001  {
    8002  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
    8003  currentFrameIndex,
    8004  frameInUseCount,
    8005  bufferImageGranularity,
    8006  allocSize,
    8007  allocAlignment,
    8008  allocType,
    8009  it,
    8010  false, // canMakeOtherLost
    8011  &pAllocationRequest->offset,
    8012  &pAllocationRequest->itemsToMakeLostCount,
    8013  &pAllocationRequest->sumFreeSize,
    8014  &pAllocationRequest->sumItemSize))
    8015  {
    8016  pAllocationRequest->item = it;
    8017  return true;
    8018  }
    8019  }
    8020  }
    8021  else // WORST_FIT, FIRST_FIT
    8022  {
    8023  // Search starting from the biggest suballocations.
    8024  for(size_t index = freeSuballocCount; index--; )
    8025  {
    8026  if(CheckAllocation(
    8027  currentFrameIndex,
    8028  frameInUseCount,
    8029  bufferImageGranularity,
    8030  allocSize,
    8031  allocAlignment,
    8032  allocType,
    8033  m_FreeSuballocationsBySize[index],
    8034  false, // canMakeOtherLost
    8035  &pAllocationRequest->offset,
    8036  &pAllocationRequest->itemsToMakeLostCount,
    8037  &pAllocationRequest->sumFreeSize,
    8038  &pAllocationRequest->sumItemSize))
    8039  {
    8040  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    8041  return true;
    8042  }
    8043  }
    8044  }
    8045  }
    8046 
    8047  if(canMakeOtherLost)
    8048  {
    8049  // Brute-force algorithm. TODO: Come up with something better.
    8050 
    8051  bool found = false;
    8052  VmaAllocationRequest tmpAllocRequest = {};
    8053  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
    8054  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    8055  suballocIt != m_Suballocations.end();
    8056  ++suballocIt)
    8057  {
    8058  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    8059  suballocIt->hAllocation->CanBecomeLost())
    8060  {
    8061  if(CheckAllocation(
    8062  currentFrameIndex,
    8063  frameInUseCount,
    8064  bufferImageGranularity,
    8065  allocSize,
    8066  allocAlignment,
    8067  allocType,
    8068  suballocIt,
    8069  canMakeOtherLost,
    8070  &tmpAllocRequest.offset,
    8071  &tmpAllocRequest.itemsToMakeLostCount,
    8072  &tmpAllocRequest.sumFreeSize,
    8073  &tmpAllocRequest.sumItemSize))
    8074  {
    8075  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    8076  {
    8077  *pAllocationRequest = tmpAllocRequest;
    8078  pAllocationRequest->item = suballocIt;
    8079  break;
    8080  }
    8081  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    8082  {
    8083  *pAllocationRequest = tmpAllocRequest;
    8084  pAllocationRequest->item = suballocIt;
    8085  found = true;
    8086  }
    8087  }
    8088  }
    8089  }
    8090 
    8091  return found;
    8092  }
    8093 
    8094  return false;
    8095 }
    8096 
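// Makes lost the itemsToMakeLostCount allocations counted by
// CreateAllocationRequest, freeing their ranges so that the returned request
// can be satisfied. Returns false if any of them can no longer be made lost.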
    8097 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    8098  uint32_t currentFrameIndex,
    8099  uint32_t frameInUseCount,
    8100  VmaAllocationRequest* pAllocationRequest)
    8101 {
    8102  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
    8103 
    8104  while(pAllocationRequest->itemsToMakeLostCount > 0)
    8105  {
    8106  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    8107  {
    8108  ++pAllocationRequest->item;
    8109  }
    8110  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    8111  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    8112  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    8113  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8114  {
    8115  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    8116  --pAllocationRequest->itemsToMakeLostCount;
    8117  }
    8118  else
    8119  {
    8120  return false;
    8121  }
    8122  }
    8123 
    8124  VMA_HEAVY_ASSERT(Validate());
    8125  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    8126  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8127 
    8128  return true;
    8129 }
    8130 
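// Makes lost every allocation in this block that is allowed to become lost
// and whose last use is at least frameInUseCount frames old; returns the
// number of allocations freed this way.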
    8131 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8132 {
    8133  uint32_t lostAllocationCount = 0;
    8134  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8135  it != m_Suballocations.end();
    8136  ++it)
    8137  {
    8138  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    8139  it->hAllocation->CanBecomeLost() &&
    8140  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8141  {
    8142  it = FreeSuballocation(it);
    8143  ++lostAllocationCount;
    8144  }
    8145  }
    8146  return lostAllocationCount;
    8147 }
    8148 
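// Scans the VMA_DEBUG_MARGIN region before and after every used range for the
// magic value written there, reporting corruption if it was overwritten.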
    8149 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    8150 {
    8151  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8152  it != m_Suballocations.end();
    8153  ++it)
    8154  {
    8155  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    8156  {
    8157  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    8158  {
    8159  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8160  return VK_ERROR_VALIDATION_FAILED_EXT;
    8161  }
    8162  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    8163  {
    8164  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    8165  return VK_ERROR_VALIDATION_FAILED_EXT;
    8166  }
    8167  }
    8168  }
    8169 
    8170  return VK_SUCCESS;
    8171 }
    8172 
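// Carves the requested range out of the chosen free suballocation: the item
// itself becomes used, and any leftover bytes before or after it are
// re-inserted as new free suballocations.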
    8173 void VmaBlockMetadata_Generic::Alloc(
    8174  const VmaAllocationRequest& request,
    8175  VmaSuballocationType type,
    8176  VkDeviceSize allocSize,
    8177  VmaAllocation hAllocation)
    8178 {
    8179  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    8180  VMA_ASSERT(request.item != m_Suballocations.end());
    8181  VmaSuballocation& suballoc = *request.item;
    8182  // Given suballocation is a free block.
    8183  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8184  // Given offset is inside this suballocation.
    8185  VMA_ASSERT(request.offset >= suballoc.offset);
    8186  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    8187  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    8188  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    8189 
    8190  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    8191  // it to become used.
    8192  UnregisterFreeSuballocation(request.item);
    8193 
    8194  suballoc.offset = request.offset;
    8195  suballoc.size = allocSize;
    8196  suballoc.type = type;
    8197  suballoc.hAllocation = hAllocation;
    8198 
    8199  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    8200  if(paddingEnd)
    8201  {
    8202  VmaSuballocation paddingSuballoc = {};
    8203  paddingSuballoc.offset = request.offset + allocSize;
    8204  paddingSuballoc.size = paddingEnd;
    8205  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8206  VmaSuballocationList::iterator next = request.item;
    8207  ++next;
    8208  const VmaSuballocationList::iterator paddingEndItem =
    8209  m_Suballocations.insert(next, paddingSuballoc);
    8210  RegisterFreeSuballocation(paddingEndItem);
    8211  }
    8212 
    8213  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    8214  if(paddingBegin)
    8215  {
    8216  VmaSuballocation paddingSuballoc = {};
    8217  paddingSuballoc.offset = request.offset - paddingBegin;
    8218  paddingSuballoc.size = paddingBegin;
    8219  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8220  const VmaSuballocationList::iterator paddingBeginItem =
    8221  m_Suballocations.insert(request.item, paddingSuballoc);
    8222  RegisterFreeSuballocation(paddingBeginItem);
    8223  }
    8224 
    8225  // Update totals.
    8226  m_FreeCount = m_FreeCount - 1;
    8227  if(paddingBegin > 0)
    8228  {
    8229  ++m_FreeCount;
    8230  }
    8231  if(paddingEnd > 0)
    8232  {
    8233  ++m_FreeCount;
    8234  }
    8235  m_SumFreeSize -= allocSize;
    8236 }
    8237 
    8238 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    8239 {
    8240  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8241  suballocItem != m_Suballocations.end();
    8242  ++suballocItem)
    8243  {
    8244  VmaSuballocation& suballoc = *suballocItem;
    8245  if(suballoc.hAllocation == allocation)
    8246  {
    8247  FreeSuballocation(suballocItem);
    8248  VMA_HEAVY_ASSERT(Validate());
    8249  return;
    8250  }
    8251  }
    8252  VMA_ASSERT(0 && "Not found!");
    8253 }
    8254 
    8255 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    8256 {
    8257  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8258  suballocItem != m_Suballocations.end();
    8259  ++suballocItem)
    8260  {
    8261  VmaSuballocation& suballoc = *suballocItem;
    8262  if(suballoc.offset == offset)
    8263  {
    8264  FreeSuballocation(suballocItem);
    8265  return;
    8266  }
    8267  }
    8268  VMA_ASSERT(0 && "Not found!");
    8269 }
    8270 
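// In-place resize: shrinking donates the freed tail to the following free
// range (or creates a new one), while growing succeeds only if the following
// range is free and large enough, including VMA_DEBUG_MARGIN.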
    8271 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
    8272 {
    8273  typedef VmaSuballocationList::iterator iter_type;
    8274  for(iter_type suballocItem = m_Suballocations.begin();
    8275  suballocItem != m_Suballocations.end();
    8276  ++suballocItem)
    8277  {
    8278  VmaSuballocation& suballoc = *suballocItem;
    8279  if(suballoc.hAllocation == alloc)
    8280  {
    8281  iter_type nextItem = suballocItem;
    8282  ++nextItem;
    8283 
    8284  // Should have been ensured on higher level.
    8285  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
    8286 
    8287  // Shrinking.
    8288  if(newSize < alloc->GetSize())
    8289  {
    8290  const VkDeviceSize sizeDiff = suballoc.size - newSize;
    8291 
    8292  // There is next item.
    8293  if(nextItem != m_Suballocations.end())
    8294  {
    8295  // Next item is free.
    8296  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8297  {
    8298  // Grow this next item backward.
    8299  UnregisterFreeSuballocation(nextItem);
    8300  nextItem->offset -= sizeDiff;
    8301  nextItem->size += sizeDiff;
    8302  RegisterFreeSuballocation(nextItem);
    8303  }
    8304  // Next item is not free.
    8305  else
    8306  {
    8307  // Create free item after current one.
    8308  VmaSuballocation newFreeSuballoc;
    8309  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
    8310  newFreeSuballoc.offset = suballoc.offset + newSize;
    8311  newFreeSuballoc.size = sizeDiff;
    8312  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8313  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
    8314  RegisterFreeSuballocation(newFreeSuballocIt);
    8315 
    8316  ++m_FreeCount;
    8317  }
    8318  }
    8319  // This is the last item.
    8320  else
    8321  {
    8322  // Create free item at the end.
    8323  VmaSuballocation newFreeSuballoc;
    8324  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
    8325  newFreeSuballoc.offset = suballoc.offset + newSize;
    8326  newFreeSuballoc.size = sizeDiff;
    8327  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8328  m_Suballocations.push_back(newFreeSuballoc);
    8329 
    8330  iter_type newFreeSuballocIt = m_Suballocations.end();
    8331  RegisterFreeSuballocation(--newFreeSuballocIt);
    8332 
    8333  ++m_FreeCount;
    8334  }
    8335 
    8336  suballoc.size = newSize;
    8337  m_SumFreeSize += sizeDiff;
    8338  }
    8339  // Growing.
    8340  else
    8341  {
    8342  const VkDeviceSize sizeDiff = newSize - suballoc.size;
    8343 
    8344  // There is next item.
    8345  if(nextItem != m_Suballocations.end())
    8346  {
    8347  // Next item is free.
    8348  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8349  {
    8350  // There is not enough free space, including margin.
    8351  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
    8352  {
    8353  return false;
    8354  }
    8355 
    8356  // There is more free space than required.
    8357  if(nextItem->size > sizeDiff)
    8358  {
    8359  // Move and shrink this next item.
    8360  UnregisterFreeSuballocation(nextItem);
    8361  nextItem->offset += sizeDiff;
    8362  nextItem->size -= sizeDiff;
    8363  RegisterFreeSuballocation(nextItem);
    8364  }
    8365  // There is exactly the amount of free space required.
    8366  else
    8367  {
    8368  // Remove this next free item.
    8369  UnregisterFreeSuballocation(nextItem);
    8370  m_Suballocations.erase(nextItem);
    8371  --m_FreeCount;
    8372  }
    8373  }
    8374  // Next item is not free - there is no space to grow.
    8375  else
    8376  {
    8377  return false;
    8378  }
    8379  }
    8380  // This is the last item - there is no space to grow.
    8381  else
    8382  {
    8383  return false;
    8384  }
    8385 
    8386  suballoc.size = newSize;
    8387  m_SumFreeSize -= sizeDiff;
    8388  }
    8389 
    8390  // We cannot call Validate() here because the alloc object is updated to the new size outside of this call.
    8391  return true;
    8392  }
    8393  }
    8394  VMA_ASSERT(0 && "Not found!");
    8395  return false;
    8396 }
    8397 
    8398 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    8399 {
    8400  VkDeviceSize lastSize = 0;
    8401  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    8402  {
    8403  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    8404 
    8405  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    8406  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    8407  VMA_VALIDATE(it->size >= lastSize);
    8408  lastSize = it->size;
    8409  }
    8410  return true;
    8411 }
    8412 
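// Checks whether an allocation of allocSize and allocAlignment can start
// inside the given suballocation: applies VMA_DEBUG_MARGIN and alignment,
// raises the alignment to bufferImageGranularity on a conflict with
// neighboring pages, and with canMakeOtherLost also counts how many following
// allocations would have to be made lost to make room.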
    8413 bool VmaBlockMetadata_Generic::CheckAllocation(
    8414  uint32_t currentFrameIndex,
    8415  uint32_t frameInUseCount,
    8416  VkDeviceSize bufferImageGranularity,
    8417  VkDeviceSize allocSize,
    8418  VkDeviceSize allocAlignment,
    8419  VmaSuballocationType allocType,
    8420  VmaSuballocationList::const_iterator suballocItem,
    8421  bool canMakeOtherLost,
    8422  VkDeviceSize* pOffset,
    8423  size_t* itemsToMakeLostCount,
    8424  VkDeviceSize* pSumFreeSize,
    8425  VkDeviceSize* pSumItemSize) const
    8426 {
    8427  VMA_ASSERT(allocSize > 0);
    8428  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8429  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    8430  VMA_ASSERT(pOffset != VMA_NULL);
    8431 
    8432  *itemsToMakeLostCount = 0;
    8433  *pSumFreeSize = 0;
    8434  *pSumItemSize = 0;
    8435 
    8436  if(canMakeOtherLost)
    8437  {
    8438  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8439  {
    8440  *pSumFreeSize = suballocItem->size;
    8441  }
    8442  else
    8443  {
    8444  if(suballocItem->hAllocation->CanBecomeLost() &&
    8445  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8446  {
    8447  ++*itemsToMakeLostCount;
    8448  *pSumItemSize = suballocItem->size;
    8449  }
    8450  else
    8451  {
    8452  return false;
    8453  }
    8454  }
    8455 
    8456  // Remaining size is too small for this request: Early return.
    8457  if(GetSize() - suballocItem->offset < allocSize)
    8458  {
    8459  return false;
    8460  }
    8461 
    8462  // Start from offset equal to beginning of this suballocation.
    8463  *pOffset = suballocItem->offset;
    8464 
    8465  // Apply VMA_DEBUG_MARGIN at the beginning.
    8466  if(VMA_DEBUG_MARGIN > 0)
    8467  {
    8468  *pOffset += VMA_DEBUG_MARGIN;
    8469  }
    8470 
    8471  // Apply alignment.
    8472  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    8473 
    8474  // Check previous suballocations for BufferImageGranularity conflicts.
    8475  // Make bigger alignment if necessary.
    8476  if(bufferImageGranularity > 1)
    8477  {
    8478  bool bufferImageGranularityConflict = false;
    8479  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    8480  while(prevSuballocItem != m_Suballocations.cbegin())
    8481  {
    8482  --prevSuballocItem;
    8483  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    8484  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    8485  {
    8486  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8487  {
    8488  bufferImageGranularityConflict = true;
    8489  break;
    8490  }
    8491  }
    8492  else
    8493  // Already on previous page.
    8494  break;
    8495  }
    8496  if(bufferImageGranularityConflict)
    8497  {
    8498  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    8499  }
    8500  }
    8501 
    8502  // Now that we have final *pOffset, check if we are past suballocItem.
    8503  // If yes, return false - this function should be called for another suballocItem as starting point.
    8504  if(*pOffset >= suballocItem->offset + suballocItem->size)
    8505  {
    8506  return false;
    8507  }
    8508 
    8509  // Calculate padding at the beginning based on current offset.
    8510  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    8511 
    8512  // Calculate required margin at the end.
    8513  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    8514 
    8515  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    8516  // Another early return check.
    8517  if(suballocItem->offset + totalSize > GetSize())
    8518  {
    8519  return false;
    8520  }
    8521 
    8522  // Advance lastSuballocItem until desired size is reached.
    8523  // Update itemsToMakeLostCount.
    8524  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    8525  if(totalSize > suballocItem->size)
    8526  {
    8527  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    8528  while(remainingSize > 0)
    8529  {
    8530  ++lastSuballocItem;
    8531  if(lastSuballocItem == m_Suballocations.cend())
    8532  {
    8533  return false;
    8534  }
    8535  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8536  {
    8537  *pSumFreeSize += lastSuballocItem->size;
    8538  }
    8539  else
    8540  {
    8541  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    8542  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    8543  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8544  {
    8545  ++*itemsToMakeLostCount;
    8546  *pSumItemSize += lastSuballocItem->size;
    8547  }
    8548  else
    8549  {
    8550  return false;
    8551  }
    8552  }
    8553  remainingSize = (lastSuballocItem->size < remainingSize) ?
    8554  remainingSize - lastSuballocItem->size : 0;
    8555  }
    8556  }
    8557 
    8558  // Check next suballocations for BufferImageGranularity conflicts.
    8559  // If conflict exists, we must mark more allocations lost or fail.
    8560  if(bufferImageGranularity > 1)
    8561  {
    8562  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    8563  ++nextSuballocItem;
    8564  while(nextSuballocItem != m_Suballocations.cend())
    8565  {
    8566  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    8567  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8568  {
    8569  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8570  {
    8571  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    8572  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    8573  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8574  {
    8575  ++*itemsToMakeLostCount;
    8576  }
    8577  else
    8578  {
    8579  return false;
    8580  }
    8581  }
    8582  }
    8583  else
    8584  {
    8585  // Already on next page.
    8586  break;
    8587  }
    8588  ++nextSuballocItem;
    8589  }
    8590  }
    8591  }
    8592  else
    8593  {
    8594  const VmaSuballocation& suballoc = *suballocItem;
    8595  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8596 
    8597  *pSumFreeSize = suballoc.size;
    8598 
    8599  // Size of this suballocation is too small for this request: Early return.
    8600  if(suballoc.size < allocSize)
    8601  {
    8602  return false;
    8603  }
    8604 
    8605  // Start from offset equal to beginning of this suballocation.
    8606  *pOffset = suballoc.offset;
    8607 
    8608  // Apply VMA_DEBUG_MARGIN at the beginning.
    8609  if(VMA_DEBUG_MARGIN > 0)
    8610  {
    8611  *pOffset += VMA_DEBUG_MARGIN;
    8612  }
    8613 
    8614  // Apply alignment.
    8615  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    8616 
    8617  // Check previous suballocations for BufferImageGranularity conflicts.
    8618  // Make bigger alignment if necessary.
    8619  if(bufferImageGranularity > 1)
    8620  {
    8621  bool bufferImageGranularityConflict = false;
    8622  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    8623  while(prevSuballocItem != m_Suballocations.cbegin())
    8624  {
    8625  --prevSuballocItem;
    8626  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    8627  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    8628  {
    8629  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8630  {
    8631  bufferImageGranularityConflict = true;
    8632  break;
    8633  }
    8634  }
    8635  else
    8636  // Already on previous page.
    8637  break;
    8638  }
    8639  if(bufferImageGranularityConflict)
    8640  {
    8641  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    8642  }
    8643  }
    8644 
    8645  // Calculate padding at the beginning based on current offset.
    8646  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    8647 
    8648  // Calculate required margin at the end.
    8649  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    8650 
    8651  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    8652  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    8653  {
    8654  return false;
    8655  }
    8656 
    8657  // Check next suballocations for BufferImageGranularity conflicts.
    8658  // If conflict exists, allocation cannot be made here.
    8659  if(bufferImageGranularity > 1)
    8660  {
    8661  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    8662  ++nextSuballocItem;
    8663  while(nextSuballocItem != m_Suballocations.cend())
    8664  {
    8665  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    8666  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8667  {
    8668  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8669  {
    8670  return false;
    8671  }
    8672  }
    8673  else
    8674  {
    8675  // Already on next page.
    8676  break;
    8677  }
    8678  ++nextSuballocItem;
    8679  }
    8680  }
    8681  }
    8682 
    8683  // All tests passed: Success. pOffset is already filled.
    8684  return true;
    8685 }
    8686 
    8687 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    8688 {
    8689  VMA_ASSERT(item != m_Suballocations.end());
    8690  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8691 
    8692  VmaSuballocationList::iterator nextItem = item;
    8693  ++nextItem;
    8694  VMA_ASSERT(nextItem != m_Suballocations.end());
    8695  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    8696 
    8697  item->size += nextItem->size;
    8698  --m_FreeCount;
    8699  m_Suballocations.erase(nextItem);
    8700 }
    8701 
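// Marks the given suballocation as free, updates the totals, and merges it
// with a free predecessor and/or successor so that two free ranges are never
// adjacent. Returns an iterator to the resulting free range.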
    8702 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    8703 {
    8704  // Change this suballocation to be marked as free.
    8705  VmaSuballocation& suballoc = *suballocItem;
    8706  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8707  suballoc.hAllocation = VK_NULL_HANDLE;
    8708 
    8709  // Update totals.
    8710  ++m_FreeCount;
    8711  m_SumFreeSize += suballoc.size;
    8712 
    8713  // Merge with previous and/or next suballocation if it's also free.
    8714  bool mergeWithNext = false;
    8715  bool mergeWithPrev = false;
    8716 
    8717  VmaSuballocationList::iterator nextItem = suballocItem;
    8718  ++nextItem;
    8719  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    8720  {
    8721  mergeWithNext = true;
    8722  }
    8723 
    8724  VmaSuballocationList::iterator prevItem = suballocItem;
    8725  if(suballocItem != m_Suballocations.begin())
    8726  {
    8727  --prevItem;
    8728  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8729  {
    8730  mergeWithPrev = true;
    8731  }
    8732  }
    8733 
    8734  if(mergeWithNext)
    8735  {
    8736  UnregisterFreeSuballocation(nextItem);
    8737  MergeFreeWithNext(suballocItem);
    8738  }
    8739 
    8740  if(mergeWithPrev)
    8741  {
    8742  UnregisterFreeSuballocation(prevItem);
    8743  MergeFreeWithNext(prevItem);
    8744  RegisterFreeSuballocation(prevItem);
    8745  return prevItem;
    8746  }
    8747  else
    8748  {
    8749  RegisterFreeSuballocation(suballocItem);
    8750  return suballocItem;
    8751  }
    8752 }
    8753 
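// RegisterFreeSuballocation and UnregisterFreeSuballocation maintain
// m_FreeSuballocationsBySize: only free ranges of at least
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER bytes are tracked there, kept
// sorted by size.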
    8754 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    8755 {
    8756  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8757  VMA_ASSERT(item->size > 0);
    8758 
    8759  // You may want to enable this validation at the beginning or at the end of
    8760  // this function, depending on what you want to check.
    8761  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8762 
    8763  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8764  {
    8765  if(m_FreeSuballocationsBySize.empty())
    8766  {
    8767  m_FreeSuballocationsBySize.push_back(item);
    8768  }
    8769  else
    8770  {
    8771  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    8772  }
    8773  }
    8774 
    8775  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8776 }
    8777 
    8778 
    8779 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    8780 {
    8781  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8782  VMA_ASSERT(item->size > 0);
    8783 
    8784  // You may want to enable this validation at the beginning or at the end of
    8785  // this function, depending on what you want to check.
    8786  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8787 
    8788  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8789  {
    8790  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    8791  m_FreeSuballocationsBySize.data(),
    8792  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    8793  item,
    8794  VmaSuballocationItemSizeLess());
    8795  for(size_t index = it - m_FreeSuballocationsBySize.data();
    8796  index < m_FreeSuballocationsBySize.size();
    8797  ++index)
    8798  {
    8799  if(m_FreeSuballocationsBySize[index] == item)
    8800  {
    8801  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    8802  return;
    8803  }
    8804  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    8805  }
    8806  VMA_ASSERT(0 && "Not found.");
    8807  }
    8808 
    8809  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8810 }
    8811 
    8812 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
    8813  VkDeviceSize bufferImageGranularity,
    8814  VmaSuballocationType& inOutPrevSuballocType) const
    8815 {
    8816  if(bufferImageGranularity == 1 || IsEmpty())
    8817  {
    8818  return false;
    8819  }
    8820 
    8821  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
    8822  bool typeConflictFound = false;
    8823  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
    8824  it != m_Suballocations.cend();
    8825  ++it)
    8826  {
    8827  const VmaSuballocationType suballocType = it->type;
    8828  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
    8829  {
    8830  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
    8831  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
    8832  {
    8833  typeConflictFound = true;
    8834  }
    8835  inOutPrevSuballocType = suballocType;
    8836  }
    8837  }
    8838 
    8839  return typeConflictFound || minAlignment >= bufferImageGranularity;
    8840 }
    8841 
    8842 ////////////////////////////////////////////////////////////////////////////////
    8843 // class VmaBlockMetadata_Linear
    8844 
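// Linear metadata keeps suballocations in two vectors instead of a list.
// Depending on m_2ndVectorMode the block behaves as a plain linear allocator
// (SECOND_VECTOR_EMPTY), as a ring buffer where the 2nd vector occupies lower
// offsets before the beginning of the 1st (SECOND_VECTOR_RING_BUFFER), or as
// a double stack growing from both ends of the block
// (SECOND_VECTOR_DOUBLE_STACK).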
    8845 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    8846  VmaBlockMetadata(hAllocator),
    8847  m_SumFreeSize(0),
    8848  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    8849  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    8850  m_1stVectorIndex(0),
    8851  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    8852  m_1stNullItemsBeginCount(0),
    8853  m_1stNullItemsMiddleCount(0),
    8854  m_2ndNullItemsCount(0)
    8855 {
    8856 }
    8857 
    8858 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
    8859 {
    8860 }
    8861 
    8862 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
    8863 {
    8864  VmaBlockMetadata::Init(size);
    8865  m_SumFreeSize = size;
    8866 }
    8867 
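// Validates both suballocation vectors: null items match the cached counters
// (m_1stNullItemsBeginCount, m_1stNullItemsMiddleCount, m_2ndNullItemsCount),
// offsets are non-decreasing with at least VMA_DEBUG_MARGIN between
// neighbors, and m_SumFreeSize agrees with the recomputed used size.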
    8868 bool VmaBlockMetadata_Linear::Validate() const
    8869 {
    8870  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8871  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8872 
    8873  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    8874  VMA_VALIDATE(!suballocations1st.empty() ||
    8875  suballocations2nd.empty() ||
    8876  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
    8877 
    8878  if(!suballocations1st.empty())
    8879  {
    8880  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
    8881  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
    8882  // Null item at the end should be just pop_back().
    8883  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    8884  }
    8885  if(!suballocations2nd.empty())
    8886  {
    8887  // Null item at the end should be just pop_back().
    8888  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    8889  }
    8890 
    8891  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    8892  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
    8893 
    8894  VkDeviceSize sumUsedSize = 0;
    8895  const size_t suballoc1stCount = suballocations1st.size();
    8896  VkDeviceSize offset = VMA_DEBUG_MARGIN;
    8897 
    8898  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8899  {
    8900  const size_t suballoc2ndCount = suballocations2nd.size();
    8901  size_t nullItem2ndCount = 0;
    8902  for(size_t i = 0; i < suballoc2ndCount; ++i)
    8903  {
    8904  const VmaSuballocation& suballoc = suballocations2nd[i];
    8905  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8906 
    8907  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8908  VMA_VALIDATE(suballoc.offset >= offset);
    8909 
    8910  if(!currFree)
    8911  {
    8912  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8913  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8914  sumUsedSize += suballoc.size;
    8915  }
    8916  else
    8917  {
    8918  ++nullItem2ndCount;
    8919  }
    8920 
    8921  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8922  }
    8923 
    8924  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    8925  }
    8926 
    8927  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    8928  {
    8929  const VmaSuballocation& suballoc = suballocations1st[i];
    8930  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
    8931  suballoc.hAllocation == VK_NULL_HANDLE);
    8932  }
    8933 
    8934  size_t nullItem1stCount = m_1stNullItemsBeginCount;
    8935 
    8936  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    8937  {
    8938  const VmaSuballocation& suballoc = suballocations1st[i];
    8939  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8940 
    8941  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8942  VMA_VALIDATE(suballoc.offset >= offset);
    8943  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
    8944 
    8945  if(!currFree)
    8946  {
    8947  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8948  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8949  sumUsedSize += suballoc.size;
    8950  }
    8951  else
    8952  {
    8953  ++nullItem1stCount;
    8954  }
    8955 
    8956  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8957  }
    8958  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
    8959 
    8960  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8961  {
    8962  const size_t suballoc2ndCount = suballocations2nd.size();
    8963  size_t nullItem2ndCount = 0;
    8964  for(size_t i = suballoc2ndCount; i--; )
    8965  {
    8966  const VmaSuballocation& suballoc = suballocations2nd[i];
    8967  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8968 
    8969  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8970  VMA_VALIDATE(suballoc.offset >= offset);
    8971 
    8972  if(!currFree)
    8973  {
    8974  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8975  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8976  sumUsedSize += suballoc.size;
    8977  }
    8978  else
    8979  {
    8980  ++nullItem2ndCount;
    8981  }
    8982 
    8983  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8984  }
    8985 
    8986  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    8987  }
    8988 
    8989  VMA_VALIDATE(offset <= GetSize());
    8990  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
    8991 
    8992  return true;
    8993 }
    8994 
    8995 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    8996 {
    8997  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    8998  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    8999 }
    9000 
    9001 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    9002 {
    9003  const VkDeviceSize size = GetSize();
    9004 
    9005  /*
    9006  We don't consider gaps inside allocation vectors with freed allocations because
    9007  they are not suitable for reuse in linear allocator. We consider only space that
    9008  is available for new allocations.
    9009  */
    9010  if(IsEmpty())
    9011  {
    9012  return size;
    9013  }
    9014 
    9015  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9016 
    9017  switch(m_2ndVectorMode)
    9018  {
    9019  case SECOND_VECTOR_EMPTY:
    9020  /*
    9021  Available space is after end of 1st, as well as before beginning of 1st (which
    9022  would make it a ring buffer).
    9023  */
    9024  {
    9025  const size_t suballocations1stCount = suballocations1st.size();
    9026  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    9027  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    9028  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    9029  return VMA_MAX(
    9030  firstSuballoc.offset,
    9031  size - (lastSuballoc.offset + lastSuballoc.size));
    9032  }
    9033  break;
    9034 
    9035  case SECOND_VECTOR_RING_BUFFER:
    9036  /*
    9037  Available space is only between end of 2nd and beginning of 1st.
    9038  */
    9039  {
    9040  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9041  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    9042  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    9043  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    9044  }
    9045  break;
    9046 
    9047  case SECOND_VECTOR_DOUBLE_STACK:
    9048  /*
    9049  Available space is only between end of 1st and top of 2nd.
    9050  */
    9051  {
    9052  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9053  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    9054  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    9055  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    9056  }
    9057  break;
    9058 
    9059  default:
    9060  VMA_ASSERT(0);
    9061  return 0;
    9062  }
    9063 }
    9064 
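// Walks the block in address order in up to three phases: the part of the 2nd
// vector used in ring-buffer mode, then the 1st vector, then the top of the
// 2nd vector in double-stack mode, accumulating used and unused ranges into
// outInfo.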
    9065 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9066 {
    9067  const VkDeviceSize size = GetSize();
    9068  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9069  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9070  const size_t suballoc1stCount = suballocations1st.size();
    9071  const size_t suballoc2ndCount = suballocations2nd.size();
    9072 
    9073  outInfo.blockCount = 1;
    9074  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    9075  outInfo.unusedRangeCount = 0;
    9076  outInfo.usedBytes = 0;
    9077  outInfo.allocationSizeMin = UINT64_MAX;
    9078  outInfo.allocationSizeMax = 0;
    9079  outInfo.unusedRangeSizeMin = UINT64_MAX;
    9080  outInfo.unusedRangeSizeMax = 0;
    9081 
    9082  VkDeviceSize lastOffset = 0;
    9083 
    9084  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9085  {
    9086  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9087  size_t nextAlloc2ndIndex = 0;
    9088  while(lastOffset < freeSpace2ndTo1stEnd)
    9089  {
    9090  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9091  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9092  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9093  {
    9094  ++nextAlloc2ndIndex;
    9095  }
    9096 
    9097  // Found non-null allocation.
    9098  if(nextAlloc2ndIndex < suballoc2ndCount)
    9099  {
    9100  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9101 
    9102  // 1. Process free space before this allocation.
    9103  if(lastOffset < suballoc.offset)
    9104  {
    9105  // There is free space from lastOffset to suballoc.offset.
    9106  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9107  ++outInfo.unusedRangeCount;
    9108  outInfo.unusedBytes += unusedRangeSize;
    9109  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9110  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9111  }
    9112 
    9113  // 2. Process this allocation.
    9114  // There is allocation with suballoc.offset, suballoc.size.
    9115  outInfo.usedBytes += suballoc.size;
    9116  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9117  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9118 
    9119  // 3. Prepare for next iteration.
    9120  lastOffset = suballoc.offset + suballoc.size;
    9121  ++nextAlloc2ndIndex;
    9122  }
    9123  // We are at the end.
    9124  else
    9125  {
    9126  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9127  if(lastOffset < freeSpace2ndTo1stEnd)
    9128  {
    9129  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9130  ++outInfo.unusedRangeCount;
    9131  outInfo.unusedBytes += unusedRangeSize;
    9132  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9133  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9134  }
    9135 
    9136  // End of loop.
    9137  lastOffset = freeSpace2ndTo1stEnd;
    9138  }
    9139  }
    9140  }
    9141 
    9142  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9143  const VkDeviceSize freeSpace1stTo2ndEnd =
    9144  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9145  while(lastOffset < freeSpace1stTo2ndEnd)
    9146  {
    9147  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9148  while(nextAlloc1stIndex < suballoc1stCount &&
    9149  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9150  {
    9151  ++nextAlloc1stIndex;
    9152  }
    9153 
    9154  // Found non-null allocation.
    9155  if(nextAlloc1stIndex < suballoc1stCount)
    9156  {
    9157  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9158 
    9159  // 1. Process free space before this allocation.
    9160  if(lastOffset < suballoc.offset)
    9161  {
    9162  // There is free space from lastOffset to suballoc.offset.
    9163  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9164  ++outInfo.unusedRangeCount;
    9165  outInfo.unusedBytes += unusedRangeSize;
    9166  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9167  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9168  }
    9169 
    9170  // 2. Process this allocation.
    9171  // There is allocation with suballoc.offset, suballoc.size.
    9172  outInfo.usedBytes += suballoc.size;
    9173  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9174  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9175 
    9176  // 3. Prepare for next iteration.
    9177  lastOffset = suballoc.offset + suballoc.size;
    9178  ++nextAlloc1stIndex;
    9179  }
    9180  // We are at the end.
    9181  else
    9182  {
    9183  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9184  if(lastOffset < freeSpace1stTo2ndEnd)
    9185  {
    9186  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9187  ++outInfo.unusedRangeCount;
    9188  outInfo.unusedBytes += unusedRangeSize;
    9189  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
     9190  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9191  }
    9192 
    9193  // End of loop.
    9194  lastOffset = freeSpace1stTo2ndEnd;
    9195  }
    9196  }
    9197 
    9198  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9199  {
    9200  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9201  while(lastOffset < size)
    9202  {
     9203  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9204  while(nextAlloc2ndIndex != SIZE_MAX &&
    9205  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9206  {
    9207  --nextAlloc2ndIndex;
    9208  }
    9209 
    9210  // Found non-null allocation.
    9211  if(nextAlloc2ndIndex != SIZE_MAX)
    9212  {
    9213  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9214 
    9215  // 1. Process free space before this allocation.
    9216  if(lastOffset < suballoc.offset)
    9217  {
    9218  // There is free space from lastOffset to suballoc.offset.
    9219  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9220  ++outInfo.unusedRangeCount;
    9221  outInfo.unusedBytes += unusedRangeSize;
    9222  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
     9223  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9224  }
    9225 
    9226  // 2. Process this allocation.
    9227  // There is allocation with suballoc.offset, suballoc.size.
    9228  outInfo.usedBytes += suballoc.size;
    9229  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
     9230  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9231 
    9232  // 3. Prepare for next iteration.
    9233  lastOffset = suballoc.offset + suballoc.size;
    9234  --nextAlloc2ndIndex;
    9235  }
    9236  // We are at the end.
    9237  else
    9238  {
    9239  // There is free space from lastOffset to size.
    9240  if(lastOffset < size)
    9241  {
    9242  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9243  ++outInfo.unusedRangeCount;
    9244  outInfo.unusedBytes += unusedRangeSize;
    9245  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
     9246  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9247  }
    9248 
    9249  // End of loop.
    9250  lastOffset = size;
    9251  }
    9252  }
    9253  }
    9254 
    9255  outInfo.unusedBytes = size - outInfo.usedBytes;
    9256 }
    9257 
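           // AddPoolStats below walks the same three regions as CalcAllocationStatInfo above:
           // (1) in ring-buffer mode, 2nd-vector allocations that lie before the 1st vector,
           // (2) the 1st vector itself, and (3) in double-stack mode, 2nd-vector allocations
           // growing down from the end of the block. It only accumulates aggregate pool
           // counters (size, allocationCount, unusedSize, unusedRangeCount, unusedRangeSizeMax).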
    9258 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    9259 {
    9260  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9261  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9262  const VkDeviceSize size = GetSize();
    9263  const size_t suballoc1stCount = suballocations1st.size();
    9264  const size_t suballoc2ndCount = suballocations2nd.size();
    9265 
    9266  inoutStats.size += size;
    9267 
    9268  VkDeviceSize lastOffset = 0;
    9269 
    9270  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9271  {
    9272  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9273  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    9274  while(lastOffset < freeSpace2ndTo1stEnd)
    9275  {
    9276  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9277  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9278  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9279  {
    9280  ++nextAlloc2ndIndex;
    9281  }
    9282 
    9283  // Found non-null allocation.
    9284  if(nextAlloc2ndIndex < suballoc2ndCount)
    9285  {
    9286  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9287 
    9288  // 1. Process free space before this allocation.
    9289  if(lastOffset < suballoc.offset)
    9290  {
    9291  // There is free space from lastOffset to suballoc.offset.
    9292  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9293  inoutStats.unusedSize += unusedRangeSize;
    9294  ++inoutStats.unusedRangeCount;
    9295  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9296  }
    9297 
    9298  // 2. Process this allocation.
    9299  // There is allocation with suballoc.offset, suballoc.size.
    9300  ++inoutStats.allocationCount;
    9301 
    9302  // 3. Prepare for next iteration.
    9303  lastOffset = suballoc.offset + suballoc.size;
    9304  ++nextAlloc2ndIndex;
    9305  }
    9306  // We are at the end.
    9307  else
    9308  {
    9309  if(lastOffset < freeSpace2ndTo1stEnd)
    9310  {
    9311  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9312  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9313  inoutStats.unusedSize += unusedRangeSize;
    9314  ++inoutStats.unusedRangeCount;
    9315  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9316  }
    9317 
    9318  // End of loop.
    9319  lastOffset = freeSpace2ndTo1stEnd;
    9320  }
    9321  }
    9322  }
    9323 
    9324  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9325  const VkDeviceSize freeSpace1stTo2ndEnd =
    9326  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9327  while(lastOffset < freeSpace1stTo2ndEnd)
    9328  {
     9329  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9330  while(nextAlloc1stIndex < suballoc1stCount &&
    9331  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9332  {
    9333  ++nextAlloc1stIndex;
    9334  }
    9335 
    9336  // Found non-null allocation.
    9337  if(nextAlloc1stIndex < suballoc1stCount)
    9338  {
    9339  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9340 
    9341  // 1. Process free space before this allocation.
    9342  if(lastOffset < suballoc.offset)
    9343  {
    9344  // There is free space from lastOffset to suballoc.offset.
    9345  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9346  inoutStats.unusedSize += unusedRangeSize;
    9347  ++inoutStats.unusedRangeCount;
    9348  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9349  }
    9350 
    9351  // 2. Process this allocation.
    9352  // There is allocation with suballoc.offset, suballoc.size.
    9353  ++inoutStats.allocationCount;
    9354 
    9355  // 3. Prepare for next iteration.
    9356  lastOffset = suballoc.offset + suballoc.size;
    9357  ++nextAlloc1stIndex;
    9358  }
    9359  // We are at the end.
    9360  else
    9361  {
    9362  if(lastOffset < freeSpace1stTo2ndEnd)
    9363  {
    9364  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9365  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9366  inoutStats.unusedSize += unusedRangeSize;
    9367  ++inoutStats.unusedRangeCount;
    9368  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9369  }
    9370 
    9371  // End of loop.
    9372  lastOffset = freeSpace1stTo2ndEnd;
    9373  }
    9374  }
    9375 
    9376  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9377  {
    9378  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9379  while(lastOffset < size)
    9380  {
    9381  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9382  while(nextAlloc2ndIndex != SIZE_MAX &&
    9383  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9384  {
    9385  --nextAlloc2ndIndex;
    9386  }
    9387 
    9388  // Found non-null allocation.
    9389  if(nextAlloc2ndIndex != SIZE_MAX)
    9390  {
    9391  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9392 
    9393  // 1. Process free space before this allocation.
    9394  if(lastOffset < suballoc.offset)
    9395  {
    9396  // There is free space from lastOffset to suballoc.offset.
    9397  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9398  inoutStats.unusedSize += unusedRangeSize;
    9399  ++inoutStats.unusedRangeCount;
    9400  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9401  }
    9402 
    9403  // 2. Process this allocation.
    9404  // There is allocation with suballoc.offset, suballoc.size.
    9405  ++inoutStats.allocationCount;
    9406 
    9407  // 3. Prepare for next iteration.
    9408  lastOffset = suballoc.offset + suballoc.size;
    9409  --nextAlloc2ndIndex;
    9410  }
    9411  // We are at the end.
    9412  else
    9413  {
    9414  if(lastOffset < size)
    9415  {
    9416  // There is free space from lastOffset to size.
    9417  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9418  inoutStats.unusedSize += unusedRangeSize;
    9419  ++inoutStats.unusedRangeCount;
    9420  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9421  }
    9422 
    9423  // End of loop.
    9424  lastOffset = size;
    9425  }
    9426  }
    9427  }
    9428 }
    9429 
    9430 #if VMA_STATS_STRING_ENABLED
    9431 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    9432 {
    9433  const VkDeviceSize size = GetSize();
    9434  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9435  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9436  const size_t suballoc1stCount = suballocations1st.size();
    9437  const size_t suballoc2ndCount = suballocations2nd.size();
    9438 
    9439  // FIRST PASS
    9440 
    9441  size_t unusedRangeCount = 0;
    9442  VkDeviceSize usedBytes = 0;
    9443 
    9444  VkDeviceSize lastOffset = 0;
    9445 
    9446  size_t alloc2ndCount = 0;
    9447  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9448  {
    9449  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9450  size_t nextAlloc2ndIndex = 0;
    9451  while(lastOffset < freeSpace2ndTo1stEnd)
    9452  {
    9453  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9454  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9455  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9456  {
    9457  ++nextAlloc2ndIndex;
    9458  }
    9459 
    9460  // Found non-null allocation.
    9461  if(nextAlloc2ndIndex < suballoc2ndCount)
    9462  {
    9463  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9464 
    9465  // 1. Process free space before this allocation.
    9466  if(lastOffset < suballoc.offset)
    9467  {
    9468  // There is free space from lastOffset to suballoc.offset.
    9469  ++unusedRangeCount;
    9470  }
    9471 
    9472  // 2. Process this allocation.
    9473  // There is allocation with suballoc.offset, suballoc.size.
    9474  ++alloc2ndCount;
    9475  usedBytes += suballoc.size;
    9476 
    9477  // 3. Prepare for next iteration.
    9478  lastOffset = suballoc.offset + suballoc.size;
    9479  ++nextAlloc2ndIndex;
    9480  }
    9481  // We are at the end.
    9482  else
    9483  {
    9484  if(lastOffset < freeSpace2ndTo1stEnd)
    9485  {
    9486  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9487  ++unusedRangeCount;
    9488  }
    9489 
    9490  // End of loop.
    9491  lastOffset = freeSpace2ndTo1stEnd;
    9492  }
    9493  }
    9494  }
    9495 
    9496  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9497  size_t alloc1stCount = 0;
    9498  const VkDeviceSize freeSpace1stTo2ndEnd =
    9499  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9500  while(lastOffset < freeSpace1stTo2ndEnd)
    9501  {
     9502  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9503  while(nextAlloc1stIndex < suballoc1stCount &&
    9504  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9505  {
    9506  ++nextAlloc1stIndex;
    9507  }
    9508 
    9509  // Found non-null allocation.
    9510  if(nextAlloc1stIndex < suballoc1stCount)
    9511  {
    9512  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9513 
    9514  // 1. Process free space before this allocation.
    9515  if(lastOffset < suballoc.offset)
    9516  {
    9517  // There is free space from lastOffset to suballoc.offset.
    9518  ++unusedRangeCount;
    9519  }
    9520 
    9521  // 2. Process this allocation.
    9522  // There is allocation with suballoc.offset, suballoc.size.
    9523  ++alloc1stCount;
    9524  usedBytes += suballoc.size;
    9525 
    9526  // 3. Prepare for next iteration.
    9527  lastOffset = suballoc.offset + suballoc.size;
    9528  ++nextAlloc1stIndex;
    9529  }
    9530  // We are at the end.
    9531  else
    9532  {
     9533  if(lastOffset < freeSpace1stTo2ndEnd)
    9534  {
    9535  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9536  ++unusedRangeCount;
    9537  }
    9538 
    9539  // End of loop.
    9540  lastOffset = freeSpace1stTo2ndEnd;
    9541  }
    9542  }
    9543 
    9544  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9545  {
    9546  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9547  while(lastOffset < size)
    9548  {
    9549  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9550  while(nextAlloc2ndIndex != SIZE_MAX &&
    9551  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9552  {
    9553  --nextAlloc2ndIndex;
    9554  }
    9555 
    9556  // Found non-null allocation.
    9557  if(nextAlloc2ndIndex != SIZE_MAX)
    9558  {
    9559  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9560 
    9561  // 1. Process free space before this allocation.
    9562  if(lastOffset < suballoc.offset)
    9563  {
    9564  // There is free space from lastOffset to suballoc.offset.
    9565  ++unusedRangeCount;
    9566  }
    9567 
    9568  // 2. Process this allocation.
    9569  // There is allocation with suballoc.offset, suballoc.size.
    9570  ++alloc2ndCount;
    9571  usedBytes += suballoc.size;
    9572 
    9573  // 3. Prepare for next iteration.
    9574  lastOffset = suballoc.offset + suballoc.size;
    9575  --nextAlloc2ndIndex;
    9576  }
    9577  // We are at the end.
    9578  else
    9579  {
    9580  if(lastOffset < size)
    9581  {
    9582  // There is free space from lastOffset to size.
    9583  ++unusedRangeCount;
    9584  }
    9585 
    9586  // End of loop.
    9587  lastOffset = size;
    9588  }
    9589  }
    9590  }
    9591 
    9592  const VkDeviceSize unusedBytes = size - usedBytes;
    9593  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    9594 
    9595  // SECOND PASS
    9596  lastOffset = 0;
    9597 
    9598  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9599  {
    9600  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9601  size_t nextAlloc2ndIndex = 0;
    9602  while(lastOffset < freeSpace2ndTo1stEnd)
    9603  {
    9604  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9605  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9606  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9607  {
    9608  ++nextAlloc2ndIndex;
    9609  }
    9610 
    9611  // Found non-null allocation.
    9612  if(nextAlloc2ndIndex < suballoc2ndCount)
    9613  {
    9614  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9615 
    9616  // 1. Process free space before this allocation.
    9617  if(lastOffset < suballoc.offset)
    9618  {
    9619  // There is free space from lastOffset to suballoc.offset.
    9620  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9621  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9622  }
    9623 
    9624  // 2. Process this allocation.
    9625  // There is allocation with suballoc.offset, suballoc.size.
    9626  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9627 
    9628  // 3. Prepare for next iteration.
    9629  lastOffset = suballoc.offset + suballoc.size;
    9630  ++nextAlloc2ndIndex;
    9631  }
    9632  // We are at the end.
    9633  else
    9634  {
    9635  if(lastOffset < freeSpace2ndTo1stEnd)
    9636  {
    9637  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9638  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9639  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9640  }
    9641 
    9642  // End of loop.
    9643  lastOffset = freeSpace2ndTo1stEnd;
    9644  }
    9645  }
    9646  }
    9647 
    9648  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9649  while(lastOffset < freeSpace1stTo2ndEnd)
    9650  {
     9651  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9652  while(nextAlloc1stIndex < suballoc1stCount &&
    9653  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9654  {
    9655  ++nextAlloc1stIndex;
    9656  }
    9657 
    9658  // Found non-null allocation.
    9659  if(nextAlloc1stIndex < suballoc1stCount)
    9660  {
    9661  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9662 
    9663  // 1. Process free space before this allocation.
    9664  if(lastOffset < suballoc.offset)
    9665  {
    9666  // There is free space from lastOffset to suballoc.offset.
    9667  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9668  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9669  }
    9670 
    9671  // 2. Process this allocation.
    9672  // There is allocation with suballoc.offset, suballoc.size.
    9673  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9674 
    9675  // 3. Prepare for next iteration.
    9676  lastOffset = suballoc.offset + suballoc.size;
    9677  ++nextAlloc1stIndex;
    9678  }
    9679  // We are at the end.
    9680  else
    9681  {
    9682  if(lastOffset < freeSpace1stTo2ndEnd)
    9683  {
    9684  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9685  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9686  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9687  }
    9688 
    9689  // End of loop.
    9690  lastOffset = freeSpace1stTo2ndEnd;
    9691  }
    9692  }
    9693 
    9694  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9695  {
    9696  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9697  while(lastOffset < size)
    9698  {
    9699  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9700  while(nextAlloc2ndIndex != SIZE_MAX &&
    9701  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9702  {
    9703  --nextAlloc2ndIndex;
    9704  }
    9705 
    9706  // Found non-null allocation.
    9707  if(nextAlloc2ndIndex != SIZE_MAX)
    9708  {
    9709  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9710 
    9711  // 1. Process free space before this allocation.
    9712  if(lastOffset < suballoc.offset)
    9713  {
    9714  // There is free space from lastOffset to suballoc.offset.
    9715  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9716  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9717  }
    9718 
    9719  // 2. Process this allocation.
    9720  // There is allocation with suballoc.offset, suballoc.size.
    9721  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9722 
    9723  // 3. Prepare for next iteration.
    9724  lastOffset = suballoc.offset + suballoc.size;
    9725  --nextAlloc2ndIndex;
    9726  }
    9727  // We are at the end.
    9728  else
    9729  {
    9730  if(lastOffset < size)
    9731  {
    9732  // There is free space from lastOffset to size.
    9733  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9734  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9735  }
    9736 
    9737  // End of loop.
    9738  lastOffset = size;
    9739  }
    9740  }
    9741  }
    9742 
    9743  PrintDetailedMap_End(json);
    9744 }
    9745 #endif // #if VMA_STATS_STRING_ENABLED
    9746 
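           // Entry point for allocation requests in the linear algorithm. For illustration
           // (usage sketch, not part of this file): a double stack is obtained by creating a
           // custom pool with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT and passing
           // VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT for allocations that should grow from
           // the end of the block; such requests take the upperAddress == true path below.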
    9747 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    9748  uint32_t currentFrameIndex,
    9749  uint32_t frameInUseCount,
    9750  VkDeviceSize bufferImageGranularity,
    9751  VkDeviceSize allocSize,
    9752  VkDeviceSize allocAlignment,
    9753  bool upperAddress,
    9754  VmaSuballocationType allocType,
    9755  bool canMakeOtherLost,
    9756  uint32_t strategy,
    9757  VmaAllocationRequest* pAllocationRequest)
    9758 {
    9759  VMA_ASSERT(allocSize > 0);
    9760  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    9761  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    9762  VMA_HEAVY_ASSERT(Validate());
    9763  return upperAddress ?
    9764  CreateAllocationRequest_UpperAddress(
    9765  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9766  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
    9767  CreateAllocationRequest_LowerAddress(
    9768  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9769  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
    9770 }
    9771 
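           // Upper-address case: the allocation is placed at the top of the free space,
           // below 2nd.back() (or at the end of the block when 2nd is empty). Offsets are
           // computed downwards, so the debug margin and alignment are applied by
           // subtracting and aligning down rather than adding and aligning up.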
    9772 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    9773  uint32_t currentFrameIndex,
    9774  uint32_t frameInUseCount,
    9775  VkDeviceSize bufferImageGranularity,
    9776  VkDeviceSize allocSize,
    9777  VkDeviceSize allocAlignment,
    9778  VmaSuballocationType allocType,
    9779  bool canMakeOtherLost,
    9780  uint32_t strategy,
    9781  VmaAllocationRequest* pAllocationRequest)
    9782 {
    9783  const VkDeviceSize size = GetSize();
    9784  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9785  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9786 
    9787  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9788  {
    9789  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    9790  return false;
    9791  }
    9792 
    9793  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    9794  if(allocSize > size)
    9795  {
    9796  return false;
    9797  }
    9798  VkDeviceSize resultBaseOffset = size - allocSize;
    9799  if(!suballocations2nd.empty())
    9800  {
    9801  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    9802  resultBaseOffset = lastSuballoc.offset - allocSize;
    9803  if(allocSize > lastSuballoc.offset)
    9804  {
    9805  return false;
    9806  }
    9807  }
    9808 
    9809  // Start from offset equal to end of free space.
    9810  VkDeviceSize resultOffset = resultBaseOffset;
    9811 
    9812  // Apply VMA_DEBUG_MARGIN at the end.
    9813  if(VMA_DEBUG_MARGIN > 0)
    9814  {
    9815  if(resultOffset < VMA_DEBUG_MARGIN)
    9816  {
    9817  return false;
    9818  }
    9819  resultOffset -= VMA_DEBUG_MARGIN;
    9820  }
    9821 
    9822  // Apply alignment.
    9823  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    9824 
    9825  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    9826  // Make bigger alignment if necessary.
    9827  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    9828  {
    9829  bool bufferImageGranularityConflict = false;
    9830  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    9831  {
    9832  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    9833  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9834  {
    9835  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    9836  {
    9837  bufferImageGranularityConflict = true;
    9838  break;
    9839  }
    9840  }
    9841  else
    9842  // Already on previous page.
    9843  break;
    9844  }
    9845  if(bufferImageGranularityConflict)
    9846  {
    9847  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    9848  }
    9849  }
    9850 
    9851  // There is enough free space.
    9852  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    9853  suballocations1st.back().offset + suballocations1st.back().size :
    9854  0;
    9855  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    9856  {
    9857  // Check previous suballocations for BufferImageGranularity conflicts.
    9858  // If conflict exists, allocation cannot be made here.
    9859  if(bufferImageGranularity > 1)
    9860  {
    9861  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    9862  {
    9863  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    9864  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9865  {
    9866  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    9867  {
    9868  return false;
    9869  }
    9870  }
    9871  else
    9872  {
    9873  // Already on next page.
    9874  break;
    9875  }
    9876  }
    9877  }
    9878 
    9879  // All tests passed: Success.
    9880  pAllocationRequest->offset = resultOffset;
    9881  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    9882  pAllocationRequest->sumItemSize = 0;
    9883  // pAllocationRequest->item unused.
    9884  pAllocationRequest->itemsToMakeLostCount = 0;
    9885  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
    9886  return true;
    9887  }
    9888 
    9889  return false;
    9890 }
    9891 
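           // Lower-address case, with two sub-cases: append at the end of the 1st vector
           // (possible in EMPTY or DOUBLE_STACK mode), or wrap around and append at the end
           // of the 2nd vector (EMPTY or RING_BUFFER mode), where the beginning of the 1st
           // vector limits the free space and colliding allocations may be made lost when
           // canMakeOtherLost allows it.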
    9892 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
    9893  uint32_t currentFrameIndex,
    9894  uint32_t frameInUseCount,
    9895  VkDeviceSize bufferImageGranularity,
    9896  VkDeviceSize allocSize,
    9897  VkDeviceSize allocAlignment,
    9898  VmaSuballocationType allocType,
    9899  bool canMakeOtherLost,
    9900  uint32_t strategy,
    9901  VmaAllocationRequest* pAllocationRequest)
    9902 {
    9903  const VkDeviceSize size = GetSize();
    9904  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9905  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9906 
    9907  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9908  {
    9909  // Try to allocate at the end of 1st vector.
    9910 
    9911  VkDeviceSize resultBaseOffset = 0;
    9912  if(!suballocations1st.empty())
    9913  {
    9914  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    9915  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    9916  }
    9917 
    9918  // Start from offset equal to beginning of free space.
    9919  VkDeviceSize resultOffset = resultBaseOffset;
    9920 
    9921  // Apply VMA_DEBUG_MARGIN at the beginning.
    9922  if(VMA_DEBUG_MARGIN > 0)
    9923  {
    9924  resultOffset += VMA_DEBUG_MARGIN;
    9925  }
    9926 
    9927  // Apply alignment.
    9928  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    9929 
    9930  // Check previous suballocations for BufferImageGranularity conflicts.
    9931  // Make bigger alignment if necessary.
    9932  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    9933  {
    9934  bool bufferImageGranularityConflict = false;
    9935  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    9936  {
    9937  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    9938  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9939  {
    9940  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    9941  {
    9942  bufferImageGranularityConflict = true;
    9943  break;
    9944  }
    9945  }
    9946  else
    9947  // Already on previous page.
    9948  break;
    9949  }
    9950  if(bufferImageGranularityConflict)
    9951  {
    9952  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    9953  }
    9954  }
    9955 
    9956  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    9957  suballocations2nd.back().offset : size;
    9958 
    9959  // There is enough free space at the end after alignment.
    9960  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    9961  {
    9962  // Check next suballocations for BufferImageGranularity conflicts.
    9963  // If conflict exists, allocation cannot be made here.
    9964  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9965  {
    9966  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    9967  {
    9968  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    9969  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9970  {
    9971  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    9972  {
    9973  return false;
    9974  }
    9975  }
    9976  else
    9977  {
    9978  // Already on previous page.
    9979  break;
    9980  }
    9981  }
    9982  }
    9983 
    9984  // All tests passed: Success.
    9985  pAllocationRequest->offset = resultOffset;
    9986  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    9987  pAllocationRequest->sumItemSize = 0;
    9988  // pAllocationRequest->item, customData unused.
    9989  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
    9990  pAllocationRequest->itemsToMakeLostCount = 0;
    9991  return true;
    9992  }
    9993  }
    9994 
     9995  // Wrap-around to the end of 2nd vector. Try to allocate there, treating the
     9996  // beginning of 1st vector as the end of free space.
    9997  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9998  {
    9999  VMA_ASSERT(!suballocations1st.empty());
    10000 
    10001  VkDeviceSize resultBaseOffset = 0;
    10002  if(!suballocations2nd.empty())
    10003  {
    10004  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    10005  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    10006  }
    10007 
    10008  // Start from offset equal to beginning of free space.
    10009  VkDeviceSize resultOffset = resultBaseOffset;
    10010 
    10011  // Apply VMA_DEBUG_MARGIN at the beginning.
    10012  if(VMA_DEBUG_MARGIN > 0)
    10013  {
    10014  resultOffset += VMA_DEBUG_MARGIN;
    10015  }
    10016 
    10017  // Apply alignment.
    10018  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    10019 
    10020  // Check previous suballocations for BufferImageGranularity conflicts.
    10021  // Make bigger alignment if necessary.
    10022  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    10023  {
    10024  bool bufferImageGranularityConflict = false;
    10025  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    10026  {
    10027  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    10028  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    10029  {
    10030  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    10031  {
    10032  bufferImageGranularityConflict = true;
    10033  break;
    10034  }
    10035  }
    10036  else
    10037  // Already on previous page.
    10038  break;
    10039  }
    10040  if(bufferImageGranularityConflict)
    10041  {
    10042  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    10043  }
    10044  }
    10045 
    10046  pAllocationRequest->itemsToMakeLostCount = 0;
    10047  pAllocationRequest->sumItemSize = 0;
    10048  size_t index1st = m_1stNullItemsBeginCount;
    10049 
    10050  if(canMakeOtherLost)
    10051  {
    10052  while(index1st < suballocations1st.size() &&
    10053  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    10054  {
    10055  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    10056  const VmaSuballocation& suballoc = suballocations1st[index1st];
    10057  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    10058  {
    10059  // No problem.
    10060  }
    10061  else
    10062  {
    10063  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    10064  if(suballoc.hAllocation->CanBecomeLost() &&
    10065  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    10066  {
    10067  ++pAllocationRequest->itemsToMakeLostCount;
    10068  pAllocationRequest->sumItemSize += suballoc.size;
    10069  }
    10070  else
    10071  {
    10072  return false;
    10073  }
    10074  }
    10075  ++index1st;
    10076  }
    10077 
    10078  // Check next suballocations for BufferImageGranularity conflicts.
    10079  // If conflict exists, we must mark more allocations lost or fail.
    10080  if(bufferImageGranularity > 1)
    10081  {
    10082  while(index1st < suballocations1st.size())
    10083  {
    10084  const VmaSuballocation& suballoc = suballocations1st[index1st];
    10085  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    10086  {
    10087  if(suballoc.hAllocation != VK_NULL_HANDLE)
    10088  {
    10089  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    10090  if(suballoc.hAllocation->CanBecomeLost() &&
    10091  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    10092  {
    10093  ++pAllocationRequest->itemsToMakeLostCount;
    10094  pAllocationRequest->sumItemSize += suballoc.size;
    10095  }
    10096  else
    10097  {
    10098  return false;
    10099  }
    10100  }
    10101  }
    10102  else
    10103  {
    10104  // Already on next page.
    10105  break;
    10106  }
    10107  ++index1st;
    10108  }
    10109  }
    10110 
     10111  // Special case: There is not enough room at the end for this allocation, even after making all allocations from the 1st vector lost.
    10112  if(index1st == suballocations1st.size() &&
    10113  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
    10114  {
     10115  // TODO: Known limitation - wrapping around past the end of the block is not implemented yet, so the allocation fails in this case.
    10116  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
    10117  }
    10118  }
    10119 
    10120  // There is enough free space at the end after alignment.
    10121  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
    10122  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    10123  {
    10124  // Check next suballocations for BufferImageGranularity conflicts.
    10125  // If conflict exists, allocation cannot be made here.
    10126  if(bufferImageGranularity > 1)
    10127  {
    10128  for(size_t nextSuballocIndex = index1st;
    10129  nextSuballocIndex < suballocations1st.size();
    10130  nextSuballocIndex++)
    10131  {
    10132  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    10133  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    10134  {
    10135  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    10136  {
    10137  return false;
    10138  }
    10139  }
    10140  else
    10141  {
    10142  // Already on next page.
    10143  break;
    10144  }
    10145  }
    10146  }
    10147 
    10148  // All tests passed: Success.
    10149  pAllocationRequest->offset = resultOffset;
    10150  pAllocationRequest->sumFreeSize =
    10151  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    10152  - resultBaseOffset
    10153  - pAllocationRequest->sumItemSize;
    10154  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
    10155  // pAllocationRequest->item, customData unused.
    10156  return true;
    10157  }
    10158  }
    10159 
    10160  return false;
    10161 }
    10162 
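           // Fulfills the itemsToMakeLostCount computed by CreateAllocationRequest: walks
           // allocations starting at the beginning of the 1st vector (wrapping into the 2nd
           // vector in ring-buffer mode) and calls MakeLost() on each until the requested
           // number of items has been freed, then compacts via CleanupAfterFree().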
    10163 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    10164  uint32_t currentFrameIndex,
    10165  uint32_t frameInUseCount,
    10166  VmaAllocationRequest* pAllocationRequest)
    10167 {
    10168  if(pAllocationRequest->itemsToMakeLostCount == 0)
    10169  {
    10170  return true;
    10171  }
    10172 
    10173  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    10174 
    10175  // We always start from 1st.
    10176  SuballocationVectorType* suballocations = &AccessSuballocations1st();
    10177  size_t index = m_1stNullItemsBeginCount;
    10178  size_t madeLostCount = 0;
    10179  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    10180  {
    10181  if(index == suballocations->size())
    10182  {
    10183  index = 0;
     10184  // If we get to the end of 1st, we wrap around to the beginning of 2nd.
    10185  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    10186  {
    10187  suballocations = &AccessSuballocations2nd();
    10188  }
    10189  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
    10190  // suballocations continues pointing at AccessSuballocations1st().
    10191  VMA_ASSERT(!suballocations->empty());
    10192  }
    10193  VmaSuballocation& suballoc = (*suballocations)[index];
    10194  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10195  {
    10196  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    10197  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    10198  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10199  {
    10200  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10201  suballoc.hAllocation = VK_NULL_HANDLE;
    10202  m_SumFreeSize += suballoc.size;
    10203  if(suballocations == &AccessSuballocations1st())
    10204  {
    10205  ++m_1stNullItemsMiddleCount;
    10206  }
    10207  else
    10208  {
    10209  ++m_2ndNullItemsCount;
    10210  }
    10211  ++madeLostCount;
    10212  }
    10213  else
    10214  {
    10215  return false;
    10216  }
    10217  }
    10218  ++index;
    10219  }
    10220 
    10221  CleanupAfterFree();
     10222  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
    10223 
    10224  return true;
    10225 }
    10226 
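           // Unconditional variant: makes lost every allocation in both vectors that is old
           // enough (judged from currentFrameIndex and frameInUseCount) and returns how many
           // allocations were freed this way.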
    10227 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10228 {
    10229  uint32_t lostAllocationCount = 0;
    10230 
    10231  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10232  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10233  {
    10234  VmaSuballocation& suballoc = suballocations1st[i];
    10235  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10236  suballoc.hAllocation->CanBecomeLost() &&
    10237  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10238  {
    10239  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10240  suballoc.hAllocation = VK_NULL_HANDLE;
    10241  ++m_1stNullItemsMiddleCount;
    10242  m_SumFreeSize += suballoc.size;
    10243  ++lostAllocationCount;
    10244  }
    10245  }
    10246 
    10247  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10248  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10249  {
    10250  VmaSuballocation& suballoc = suballocations2nd[i];
    10251  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10252  suballoc.hAllocation->CanBecomeLost() &&
    10253  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10254  {
    10255  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10256  suballoc.hAllocation = VK_NULL_HANDLE;
    10257  ++m_2ndNullItemsCount;
    10258  m_SumFreeSize += suballoc.size;
    10259  ++lostAllocationCount;
    10260  }
    10261  }
    10262 
    10263  if(lostAllocationCount)
    10264  {
    10265  CleanupAfterFree();
    10266  }
    10267 
    10268  return lostAllocationCount;
    10269 }
    10270 
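           // Validates the magic values written into the VMA_DEBUG_MARGIN regions before and
           // after every live allocation. For illustration (usage sketch, not part of this
           // file), this is typically driven from user code roughly like:
           //
           //   // In the translation unit that defines VMA_IMPLEMENTATION:
           //   #define VMA_DEBUG_MARGIN 16
           //   #define VMA_DEBUG_DETECT_CORRUPTION 1
           //   #include "vk_mem_alloc.h"
           //   ...
           //   VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
           //   // or vmaCheckPoolCorruption(allocator, pool) for a single custom pool.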
    10271 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    10272 {
    10273  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10274  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10275  {
    10276  const VmaSuballocation& suballoc = suballocations1st[i];
    10277  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10278  {
    10279  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10280  {
    10281  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10282  return VK_ERROR_VALIDATION_FAILED_EXT;
    10283  }
    10284  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10285  {
    10286  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10287  return VK_ERROR_VALIDATION_FAILED_EXT;
    10288  }
    10289  }
    10290  }
    10291 
    10292  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10293  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10294  {
    10295  const VmaSuballocation& suballoc = suballocations2nd[i];
    10296  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10297  {
    10298  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10299  {
    10300  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10301  return VK_ERROR_VALIDATION_FAILED_EXT;
    10302  }
    10303  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10304  {
    10305  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10306  return VK_ERROR_VALIDATION_FAILED_EXT;
    10307  }
    10308  }
    10309  }
    10310 
    10311  return VK_SUCCESS;
    10312 }
    10313 
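           // Commits a successful allocation request: appends the new suballocation to the
           // vector selected by request.type and updates m_2ndVectorMode (a block that starts
           // a double stack or a ring buffer stays in that mode until it becomes empty again).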
    10314 void VmaBlockMetadata_Linear::Alloc(
    10315  const VmaAllocationRequest& request,
    10316  VmaSuballocationType type,
    10317  VkDeviceSize allocSize,
    10318  VmaAllocation hAllocation)
    10319 {
    10320  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
    10321 
    10322  switch(request.type)
    10323  {
    10324  case VmaAllocationRequestType::UpperAddress:
    10325  {
    10326  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
    10327  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
    10328  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10329  suballocations2nd.push_back(newSuballoc);
    10330  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    10331  }
    10332  break;
    10333  case VmaAllocationRequestType::EndOf1st:
    10334  {
    10335  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10336 
    10337  VMA_ASSERT(suballocations1st.empty() ||
    10338  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
    10339  // Check if it fits before the end of the block.
    10340  VMA_ASSERT(request.offset + allocSize <= GetSize());
    10341 
    10342  suballocations1st.push_back(newSuballoc);
    10343  }
    10344  break;
    10345  case VmaAllocationRequestType::EndOf2nd:
    10346  {
    10347  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10348  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
    10349  VMA_ASSERT(!suballocations1st.empty() &&
    10350  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
    10351  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10352 
    10353  switch(m_2ndVectorMode)
    10354  {
    10355  case SECOND_VECTOR_EMPTY:
    10356  // First allocation from second part ring buffer.
    10357  VMA_ASSERT(suballocations2nd.empty());
    10358  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
    10359  break;
    10360  case SECOND_VECTOR_RING_BUFFER:
    10361  // 2-part ring buffer is already started.
    10362  VMA_ASSERT(!suballocations2nd.empty());
    10363  break;
    10364  case SECOND_VECTOR_DOUBLE_STACK:
    10365  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
    10366  break;
    10367  default:
    10368  VMA_ASSERT(0);
    10369  }
    10370 
    10371  suballocations2nd.push_back(newSuballoc);
    10372  }
    10373  break;
    10374  default:
    10375  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    10376  }
    10377 
    10378  m_SumFreeSize -= newSuballoc.size;
    10379 }
    10380 
    10381 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    10382 {
    10383  FreeAtOffset(allocation->GetOffset());
    10384 }
    10385 
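           // Frees by offset, trying the cheap cases first: the first live item of the 1st
           // vector, then the last item of the active vector(s); otherwise a binary search
           // (VmaBinaryFindSorted) locates the item in the middle. Both vectors are kept
           // sorted by offset: ascending in 1st (and in 2nd for ring-buffer mode),
           // descending in 2nd for double-stack mode, hence the two comparators below.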
    10386 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
    10387 {
    10388  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10389  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10390 
    10391  if(!suballocations1st.empty())
    10392  {
     10393  // First allocation in 1st vector: mark it free and extend the null region at the beginning.
    10394  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    10395  if(firstSuballoc.offset == offset)
    10396  {
    10397  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10398  firstSuballoc.hAllocation = VK_NULL_HANDLE;
    10399  m_SumFreeSize += firstSuballoc.size;
    10400  ++m_1stNullItemsBeginCount;
    10401  CleanupAfterFree();
    10402  return;
    10403  }
    10404  }
    10405 
    10406  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    10407  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
    10408  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    10409  {
    10410  VmaSuballocation& lastSuballoc = suballocations2nd.back();
    10411  if(lastSuballoc.offset == offset)
    10412  {
    10413  m_SumFreeSize += lastSuballoc.size;
    10414  suballocations2nd.pop_back();
    10415  CleanupAfterFree();
    10416  return;
    10417  }
    10418  }
    10419  // Last allocation in 1st vector.
    10420  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    10421  {
    10422  VmaSuballocation& lastSuballoc = suballocations1st.back();
    10423  if(lastSuballoc.offset == offset)
    10424  {
    10425  m_SumFreeSize += lastSuballoc.size;
    10426  suballocations1st.pop_back();
    10427  CleanupAfterFree();
    10428  return;
    10429  }
    10430  }
    10431 
    10432  // Item from the middle of 1st vector.
    10433  {
    10434  VmaSuballocation refSuballoc;
    10435  refSuballoc.offset = offset;
    10436  // Rest of members stays uninitialized intentionally for better performance.
    10437  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
    10438  suballocations1st.begin() + m_1stNullItemsBeginCount,
    10439  suballocations1st.end(),
    10440  refSuballoc,
    10441  VmaSuballocationOffsetLess());
    10442  if(it != suballocations1st.end())
    10443  {
    10444  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    10445  it->hAllocation = VK_NULL_HANDLE;
    10446  ++m_1stNullItemsMiddleCount;
    10447  m_SumFreeSize += it->size;
    10448  CleanupAfterFree();
    10449  return;
    10450  }
    10451  }
    10452 
    10453  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    10454  {
    10455  // Item from the middle of 2nd vector.
    10456  VmaSuballocation refSuballoc;
    10457  refSuballoc.offset = offset;
    10458  // Rest of members stays uninitialized intentionally for better performance.
    10459  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
    10460  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
    10461  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
    10462  if(it != suballocations2nd.end())
    10463  {
    10464  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    10465  it->hAllocation = VK_NULL_HANDLE;
    10466  ++m_2ndNullItemsCount;
    10467  m_SumFreeSize += it->size;
    10468  CleanupAfterFree();
    10469  return;
    10470  }
    10471  }
    10472 
    10473  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
    10474 }
    10475 
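           // Compaction heuristic: rebuild the 1st vector only when it holds more than 32
           // items and null (already freed) items outnumber live items by at least 3:2,
           // i.e. 2 * nullItemCount >= 3 * (suballocCount - nullItemCount).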
    10476 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    10477 {
    10478  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10479  const size_t suballocCount = AccessSuballocations1st().size();
    10480  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    10481 }
    10482 
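           // Housekeeping after any free: strips null items from the edges of both vectors,
           // optionally compacts the 1st vector (see ShouldCompact1st above), and when the
           // 1st vector drains in ring-buffer mode, swaps the roles of the two vectors so
           // the remaining ring-buffer tail becomes the new 1st vector.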
    10483 void VmaBlockMetadata_Linear::CleanupAfterFree()
    10484 {
    10485  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10486  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10487 
    10488  if(IsEmpty())
    10489  {
    10490  suballocations1st.clear();
    10491  suballocations2nd.clear();
    10492  m_1stNullItemsBeginCount = 0;
    10493  m_1stNullItemsMiddleCount = 0;
    10494  m_2ndNullItemsCount = 0;
    10495  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10496  }
    10497  else
    10498  {
    10499  const size_t suballoc1stCount = suballocations1st.size();
    10500  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10501  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
    10502 
    10503  // Find more null items at the beginning of 1st vector.
    10504  while(m_1stNullItemsBeginCount < suballoc1stCount &&
    10505  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    10506  {
    10507  ++m_1stNullItemsBeginCount;
    10508  --m_1stNullItemsMiddleCount;
    10509  }
    10510 
    10511  // Find more null items at the end of 1st vector.
    10512  while(m_1stNullItemsMiddleCount > 0 &&
    10513  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
    10514  {
    10515  --m_1stNullItemsMiddleCount;
    10516  suballocations1st.pop_back();
    10517  }
    10518 
    10519  // Find more null items at the end of 2nd vector.
    10520  while(m_2ndNullItemsCount > 0 &&
    10521  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
    10522  {
    10523  --m_2ndNullItemsCount;
    10524  suballocations2nd.pop_back();
    10525  }
    10526 
    10527  // Find more null items at the beginning of 2nd vector.
    10528  while(m_2ndNullItemsCount > 0 &&
    10529  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
    10530  {
    10531  --m_2ndNullItemsCount;
    10532  VmaVectorRemove(suballocations2nd, 0);
    10533  }
    10534 
    10535  if(ShouldCompact1st())
    10536  {
    10537  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
    10538  size_t srcIndex = m_1stNullItemsBeginCount;
    10539  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
    10540  {
    10541  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
    10542  {
    10543  ++srcIndex;
    10544  }
    10545  if(dstIndex != srcIndex)
    10546  {
    10547  suballocations1st[dstIndex] = suballocations1st[srcIndex];
    10548  }
    10549  ++srcIndex;
    10550  }
    10551  suballocations1st.resize(nonNullItemCount);
    10552  m_1stNullItemsBeginCount = 0;
    10553  m_1stNullItemsMiddleCount = 0;
    10554  }
    10555 
    10556  // 2nd vector became empty.
    10557  if(suballocations2nd.empty())
    10558  {
    10559  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10560  }
    10561 
    10562  // 1st vector became empty.
    10563  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
    10564  {
    10565  suballocations1st.clear();
    10566  m_1stNullItemsBeginCount = 0;
    10567 
    10568  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    10569  {
    10570  // Swap 1st with 2nd. Now 2nd is empty.
    10571  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10572  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
    10573  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
    10574  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    10575  {
    10576  ++m_1stNullItemsBeginCount;
    10577  --m_1stNullItemsMiddleCount;
    10578  }
    10579  m_2ndNullItemsCount = 0;
    10580  m_1stVectorIndex ^= 1;
    10581  }
    10582  }
    10583  }
    10584 
    10585  VMA_HEAVY_ASSERT(Validate());
    10586 }
    10587 
     10588 
     10589 ////////////////////////////////////////////////////////////////////////////////
     10590 // class VmaBlockMetadata_Buddy
    10591 
    10592 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    10593  VmaBlockMetadata(hAllocator),
    10594  m_Root(VMA_NULL),
    10595  m_AllocationCount(0),
    10596  m_FreeCount(1),
    10597  m_SumFreeSize(0)
    10598 {
    10599  memset(m_FreeList, 0, sizeof(m_FreeList));
    10600 }
    10601 
    10602 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
    10603 {
    10604  DeleteNode(m_Root);
    10605 }
    10606 
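           // The buddy allocator only manages the largest power-of-two prefix of the block:
           // m_UsableSize = VmaPrevPow2(size). For illustration, a 100 MiB block would yield
           // a 64 MiB usable size, with the remainder reported as unusable. Each level halves
           // the node size, stopping before MIN_NODE_SIZE is crossed or MAX_LEVELS is reached.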
    10607 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    10608 {
    10609  VmaBlockMetadata::Init(size);
    10610 
    10611  m_UsableSize = VmaPrevPow2(size);
    10612  m_SumFreeSize = m_UsableSize;
    10613 
    10614  // Calculate m_LevelCount.
    10615  m_LevelCount = 1;
    10616  while(m_LevelCount < MAX_LEVELS &&
    10617  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    10618  {
    10619  ++m_LevelCount;
    10620  }
    10621 
    10622  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    10623  rootNode->offset = 0;
    10624  rootNode->type = Node::TYPE_FREE;
    10625  rootNode->parent = VMA_NULL;
    10626  rootNode->buddy = VMA_NULL;
    10627 
    10628  m_Root = rootNode;
    10629  AddToFreeListFront(0, rootNode);
    10630 }
    10631 
    10632 bool VmaBlockMetadata_Buddy::Validate() const
    10633 {
    10634  // Validate tree.
    10635  ValidationContext ctx;
    10636  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    10637  {
    10638  VMA_VALIDATE(false && "ValidateNode failed.");
    10639  }
    10640  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    10641  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
    10642 
    10643  // Validate free node lists.
    10644  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10645  {
    10646  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
    10647  m_FreeList[level].front->free.prev == VMA_NULL);
    10648 
    10649  for(Node* node = m_FreeList[level].front;
    10650  node != VMA_NULL;
    10651  node = node->free.next)
    10652  {
    10653  VMA_VALIDATE(node->type == Node::TYPE_FREE);
    10654 
    10655  if(node->free.next == VMA_NULL)
    10656  {
    10657  VMA_VALIDATE(m_FreeList[level].back == node);
    10658  }
    10659  else
    10660  {
    10661  VMA_VALIDATE(node->free.next->free.prev == node);
    10662  }
    10663  }
    10664  }
    10665 
     10666  // Validate that free lists at higher levels are empty.
    10667  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    10668  {
    10669  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    10670  }
    10671 
    10672  return true;
    10673 }
    10674 
    10675 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    10676 {
    10677  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10678  {
    10679  if(m_FreeList[level].front != VMA_NULL)
    10680  {
    10681  return LevelToNodeSize(level);
    10682  }
    10683  }
    10684  return 0;
    10685 }
    10686 
    10687 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    10688 {
    10689  const VkDeviceSize unusableSize = GetUnusableSize();
    10690 
    10691  outInfo.blockCount = 1;
    10692 
    10693  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    10694  outInfo.usedBytes = outInfo.unusedBytes = 0;
    10695 
    10696  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    10697  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    10698  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    10699 
    10700  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    10701 
    10702  if(unusableSize > 0)
    10703  {
    10704  ++outInfo.unusedRangeCount;
    10705  outInfo.unusedBytes += unusableSize;
    10706  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    10707  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    10708  }
    10709 }
    10710 
    10711 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    10712 {
    10713  const VkDeviceSize unusableSize = GetUnusableSize();
    10714 
    10715  inoutStats.size += GetSize();
    10716  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    10717  inoutStats.allocationCount += m_AllocationCount;
    10718  inoutStats.unusedRangeCount += m_FreeCount;
    10719  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    10720 
    10721  if(unusableSize > 0)
    10722  {
    10723  ++inoutStats.unusedRangeCount;
    10724  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    10725  }
    10726 }
    10727 
    10728 #if VMA_STATS_STRING_ENABLED
    10729 
    10730 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    10731 {
    10732  // TODO optimize
    10733  VmaStatInfo stat;
    10734  CalcAllocationStatInfo(stat);
    10735 
    10736  PrintDetailedMap_Begin(
    10737  json,
    10738  stat.unusedBytes,
    10739  stat.allocationCount,
    10740  stat.unusedRangeCount);
    10741 
    10742  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    10743 
    10744  const VkDeviceSize unusableSize = GetUnusableSize();
    10745  if(unusableSize > 0)
    10746  {
    10747  PrintDetailedMap_UnusedRange(json,
    10748  m_UsableSize, // offset
    10749  unusableSize); // size
    10750  }
    10751 
    10752  PrintDetailedMap_End(json);
    10753 }
    10754 
    10755 #endif // #if VMA_STATS_STRING_ENABLED
    10756 
    10757 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    10758  uint32_t currentFrameIndex,
    10759  uint32_t frameInUseCount,
    10760  VkDeviceSize bufferImageGranularity,
    10761  VkDeviceSize allocSize,
    10762  VkDeviceSize allocAlignment,
    10763  bool upperAddress,
    10764  VmaSuballocationType allocType,
    10765  bool canMakeOtherLost,
    10766  uint32_t strategy,
    10767  VmaAllocationRequest* pAllocationRequest)
    10768 {
    10769  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    10770 
     10771  // Simple way to respect bufferImageGranularity. May be optimized some day.
     10772  // Whenever the allocation might contain an OPTIMAL image, inflate its alignment and size up to bufferImageGranularity.
    10773  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
    10774  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    10775  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    10776  {
    10777  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
    10778  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    10779  }
    10780 
    10781  if(allocSize > m_UsableSize)
    10782  {
    10783  return false;
    10784  }
    10785 
    10786  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    10787  for(uint32_t level = targetLevel + 1; level--; )
    10788  {
    10789  for(Node* freeNode = m_FreeList[level].front;
    10790  freeNode != VMA_NULL;
    10791  freeNode = freeNode->free.next)
    10792  {
    10793  if(freeNode->offset % allocAlignment == 0)
    10794  {
    10795  pAllocationRequest->type = VmaAllocationRequestType::Normal;
    10796  pAllocationRequest->offset = freeNode->offset;
    10797  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
    10798  pAllocationRequest->sumItemSize = 0;
    10799  pAllocationRequest->itemsToMakeLostCount = 0;
    10800  pAllocationRequest->customData = (void*)(uintptr_t)level;
    10801  return true;
    10802  }
    10803  }
    10804  }
    10805 
    10806  return false;
    10807 }
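// How a client reaches this code path - a minimal usage sketch, not part of
// the library, assuming an existing VmaAllocator `allocator` and a
// `memoryTypeIndex` found e.g. with vmaFindMemoryTypeIndex():
/*
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memoryTypeIndex;
poolCreateInfo.blockSize = 128ull * 1024 * 1024; // A power of 2 suits the buddy algorithm best.
poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// Allocations made from `pool` are then served by VmaBlockMetadata_Buddy.
*/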
    10808 
    10809 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    10810  uint32_t currentFrameIndex,
    10811  uint32_t frameInUseCount,
    10812  VmaAllocationRequest* pAllocationRequest)
    10813 {
    10814  /*
    10815  Lost allocations are not supported in buddy allocator at the moment.
    10816  Support might be added in the future.
    10817  */
    10818  return pAllocationRequest->itemsToMakeLostCount == 0;
    10819 }
    10820 
    10821 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10822 {
    10823  /*
    10824  Lost allocations are not supported in buddy allocator at the moment.
    10825  Support might be added in the future.
    10826  */
    10827  return 0;
    10828 }
    10829 
    10830 void VmaBlockMetadata_Buddy::Alloc(
    10831  const VmaAllocationRequest& request,
    10832  VmaSuballocationType type,
    10833  VkDeviceSize allocSize,
    10834  VmaAllocation hAllocation)
    10835 {
    10836  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    10837 
    10838  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    10839  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
    10840 
    10841  Node* currNode = m_FreeList[currLevel].front;
    10842  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    10843  while(currNode->offset != request.offset)
    10844  {
    10845  currNode = currNode->free.next;
    10846  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    10847  }
    10848 
    10849  // Go down, splitting free nodes.
    10850  while(currLevel < targetLevel)
    10851  {
     10852  // currNode is already the first free node at currLevel.
     10853  // Remove it from the list of free nodes at this level.
    10854  RemoveFromFreeList(currLevel, currNode);
    10855 
    10856  const uint32_t childrenLevel = currLevel + 1;
    10857 
    10858  // Create two free sub-nodes.
    10859  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
    10860  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
    10861 
    10862  leftChild->offset = currNode->offset;
    10863  leftChild->type = Node::TYPE_FREE;
    10864  leftChild->parent = currNode;
    10865  leftChild->buddy = rightChild;
    10866 
    10867  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
    10868  rightChild->type = Node::TYPE_FREE;
    10869  rightChild->parent = currNode;
    10870  rightChild->buddy = leftChild;
    10871 
    10872  // Convert current currNode to split type.
    10873  currNode->type = Node::TYPE_SPLIT;
    10874  currNode->split.leftChild = leftChild;
    10875 
    10876  // Add child nodes to free list. Order is important!
    10877  AddToFreeListFront(childrenLevel, rightChild);
    10878  AddToFreeListFront(childrenLevel, leftChild);
    10879 
    10880  ++m_FreeCount;
    10881  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
    10882  ++currLevel;
    10883  currNode = m_FreeList[currLevel].front;
    10884 
    10885  /*
     10886  We can be sure that currNode, as the left child of the node previously split,
     10887  also fulfills the alignment requirement.
    10888  */
    10889  }
    10890 
    10891  // Remove from free list.
    10892  VMA_ASSERT(currLevel == targetLevel &&
    10893  currNode != VMA_NULL &&
    10894  currNode->type == Node::TYPE_FREE);
    10895  RemoveFromFreeList(currLevel, currNode);
    10896 
    10897  // Convert to allocation node.
    10898  currNode->type = Node::TYPE_ALLOCATION;
    10899  currNode->allocation.alloc = hAllocation;
    10900 
    10901  ++m_AllocationCount;
    10902  --m_FreeCount;
    10903  m_SumFreeSize -= allocSize;
    10904 }
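// A trace of the split loop above (illustration only): allocating 10 MiB from
// a fresh 64 MiB block gives targetLevel = 2 (16 MiB nodes). Starting from the
// free root at level 0, the loop splits 64 MiB -> 2 x 32 MiB, then the left
// 32 MiB -> 2 x 16 MiB; the left-most 16 MiB node becomes the allocation and
// its right siblings (32 MiB and 16 MiB) remain on the free lists.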
    10905 
    10906 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    10907 {
    10908  if(node->type == Node::TYPE_SPLIT)
    10909  {
    10910  DeleteNode(node->split.leftChild->buddy);
    10911  DeleteNode(node->split.leftChild);
    10912  }
    10913 
    10914  vma_delete(GetAllocationCallbacks(), node);
    10915 }
    10916 
    10917 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    10918 {
    10919  VMA_VALIDATE(level < m_LevelCount);
    10920  VMA_VALIDATE(curr->parent == parent);
    10921  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    10922  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    10923  switch(curr->type)
    10924  {
    10925  case Node::TYPE_FREE:
    10926  // curr->free.prev, next are validated separately.
    10927  ctx.calculatedSumFreeSize += levelNodeSize;
    10928  ++ctx.calculatedFreeCount;
    10929  break;
    10930  case Node::TYPE_ALLOCATION:
    10931  ++ctx.calculatedAllocationCount;
     10932  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
     10933  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    10934  break;
    10935  case Node::TYPE_SPLIT:
    10936  {
    10937  const uint32_t childrenLevel = level + 1;
    10938  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    10939  const Node* const leftChild = curr->split.leftChild;
    10940  VMA_VALIDATE(leftChild != VMA_NULL);
    10941  VMA_VALIDATE(leftChild->offset == curr->offset);
    10942  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    10943  {
    10944  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    10945  }
    10946  const Node* const rightChild = leftChild->buddy;
    10947  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    10948  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    10949  {
    10950  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    10951  }
    10952  }
    10953  break;
    10954  default:
    10955  return false;
    10956  }
    10957 
    10958  return true;
    10959 }
    10960 
    10961 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    10962 {
    10963  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    10964  uint32_t level = 0;
    10965  VkDeviceSize currLevelNodeSize = m_UsableSize;
    10966  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    10967  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    10968  {
    10969  ++level;
    10970  currLevelNodeSize = nextLevelNodeSize;
    10971  nextLevelNodeSize = currLevelNodeSize >> 1;
    10972  }
    10973  return level;
    10974 }
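// Examples (illustration only), with m_UsableSize = 64 MiB: an allocSize of
// 10 MiB or 16 MiB returns level 2 (16 MiB nodes) - the deepest level whose
// node size still fits the request - while 17 MiB returns level 1 (32 MiB).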
    10975 
    10976 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
    10977 {
    10978  // Find node and level.
    10979  Node* node = m_Root;
    10980  VkDeviceSize nodeOffset = 0;
    10981  uint32_t level = 0;
    10982  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    10983  while(node->type == Node::TYPE_SPLIT)
    10984  {
    10985  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
    10986  if(offset < nodeOffset + nextLevelSize)
    10987  {
    10988  node = node->split.leftChild;
    10989  }
    10990  else
    10991  {
    10992  node = node->split.leftChild->buddy;
    10993  nodeOffset += nextLevelSize;
    10994  }
    10995  ++level;
    10996  levelNodeSize = nextLevelSize;
    10997  }
    10998 
    10999  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    11000  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
    11001 
    11002  ++m_FreeCount;
    11003  --m_AllocationCount;
     11004  m_SumFreeSize += node->allocation.alloc->GetSize(); // Use the node's allocation - the alloc parameter may be VK_NULL_HANDLE.
    11005 
    11006  node->type = Node::TYPE_FREE;
    11007 
    11008  // Join free nodes if possible.
    11009  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    11010  {
    11011  RemoveFromFreeList(level, node->buddy);
    11012  Node* const parent = node->parent;
    11013 
    11014  vma_delete(GetAllocationCallbacks(), node->buddy);
    11015  vma_delete(GetAllocationCallbacks(), node);
    11016  parent->type = Node::TYPE_FREE;
    11017 
    11018  node = parent;
    11019  --level;
    11020  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
    11021  --m_FreeCount;
    11022  }
    11023 
    11024  AddToFreeListFront(level, node);
    11025 }
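// Freeing mirrors the Alloc() example above (illustration only): when a 16 MiB
// allocation is freed and its buddy at the same level is also free, both child
// nodes are deleted and their 32 MiB parent becomes a single free node again;
// the merge repeats up the tree for as long as node->buddy is free.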
    11026 
    11027 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    11028 {
    11029  switch(node->type)
    11030  {
    11031  case Node::TYPE_FREE:
    11032  ++outInfo.unusedRangeCount;
    11033  outInfo.unusedBytes += levelNodeSize;
    11034  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
     11035  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
    11036  break;
    11037  case Node::TYPE_ALLOCATION:
    11038  {
    11039  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    11040  ++outInfo.allocationCount;
    11041  outInfo.usedBytes += allocSize;
    11042  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
     11043  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
    11044 
    11045  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    11046  if(unusedRangeSize > 0)
    11047  {
    11048  ++outInfo.unusedRangeCount;
    11049  outInfo.unusedBytes += unusedRangeSize;
    11050  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
     11051  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    11052  }
    11053  }
    11054  break;
    11055  case Node::TYPE_SPLIT:
    11056  {
    11057  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    11058  const Node* const leftChild = node->split.leftChild;
    11059  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    11060  const Node* const rightChild = leftChild->buddy;
    11061  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    11062  }
    11063  break;
    11064  default:
    11065  VMA_ASSERT(0);
    11066  }
    11067 }
    11068 
    11069 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    11070 {
    11071  VMA_ASSERT(node->type == Node::TYPE_FREE);
    11072 
    11073  // List is empty.
    11074  Node* const frontNode = m_FreeList[level].front;
    11075  if(frontNode == VMA_NULL)
    11076  {
    11077  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    11078  node->free.prev = node->free.next = VMA_NULL;
    11079  m_FreeList[level].front = m_FreeList[level].back = node;
    11080  }
    11081  else
    11082  {
    11083  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    11084  node->free.prev = VMA_NULL;
    11085  node->free.next = frontNode;
    11086  frontNode->free.prev = node;
    11087  m_FreeList[level].front = node;
    11088  }
    11089 }
    11090 
    11091 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    11092 {
    11093  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    11094 
    11095  // It is at the front.
    11096  if(node->free.prev == VMA_NULL)
    11097  {
    11098  VMA_ASSERT(m_FreeList[level].front == node);
    11099  m_FreeList[level].front = node->free.next;
    11100  }
    11101  else
    11102  {
    11103  Node* const prevFreeNode = node->free.prev;
    11104  VMA_ASSERT(prevFreeNode->free.next == node);
    11105  prevFreeNode->free.next = node->free.next;
    11106  }
    11107 
    11108  // It is at the back.
    11109  if(node->free.next == VMA_NULL)
    11110  {
    11111  VMA_ASSERT(m_FreeList[level].back == node);
    11112  m_FreeList[level].back = node->free.prev;
    11113  }
    11114  else
    11115  {
    11116  Node* const nextFreeNode = node->free.next;
    11117  VMA_ASSERT(nextFreeNode->free.prev == node);
    11118  nextFreeNode->free.prev = node->free.prev;
    11119  }
    11120 }
    11121 
    11122 #if VMA_STATS_STRING_ENABLED
    11123 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    11124 {
    11125  switch(node->type)
    11126  {
    11127  case Node::TYPE_FREE:
    11128  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    11129  break;
    11130  case Node::TYPE_ALLOCATION:
    11131  {
    11132  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    11133  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    11134  if(allocSize < levelNodeSize)
    11135  {
    11136  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    11137  }
    11138  }
    11139  break;
    11140  case Node::TYPE_SPLIT:
    11141  {
    11142  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    11143  const Node* const leftChild = node->split.leftChild;
    11144  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    11145  const Node* const rightChild = leftChild->buddy;
    11146  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    11147  }
    11148  break;
    11149  default:
    11150  VMA_ASSERT(0);
    11151  }
    11152 }
    11153 #endif // #if VMA_STATS_STRING_ENABLED
    11154 
    11155 
     11156 ////////////////////////////////////////////////////////////////////////////////
    11157 // class VmaDeviceMemoryBlock
    11158 
    11159 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    11160  m_pMetadata(VMA_NULL),
    11161  m_MemoryTypeIndex(UINT32_MAX),
    11162  m_Id(0),
    11163  m_hMemory(VK_NULL_HANDLE),
    11164  m_MapCount(0),
    11165  m_pMappedData(VMA_NULL)
    11166 {
    11167 }
    11168 
    11169 void VmaDeviceMemoryBlock::Init(
    11170  VmaAllocator hAllocator,
    11171  VmaPool hParentPool,
    11172  uint32_t newMemoryTypeIndex,
    11173  VkDeviceMemory newMemory,
    11174  VkDeviceSize newSize,
    11175  uint32_t id,
    11176  uint32_t algorithm)
    11177 {
    11178  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    11179 
    11180  m_hParentPool = hParentPool;
    11181  m_MemoryTypeIndex = newMemoryTypeIndex;
    11182  m_Id = id;
    11183  m_hMemory = newMemory;
    11184 
    11185  switch(algorithm)
    11186  {
     11187  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
     11188  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    11189  break;
     11190  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
     11191  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    11192  break;
    11193  default:
    11194  VMA_ASSERT(0);
    11195  // Fall-through.
    11196  case 0:
    11197  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    11198  }
    11199  m_pMetadata->Init(newSize);
    11200 }
    11201 
    11202 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    11203 {
    11204  // This is the most important assert in the entire library.
     11205  // Hitting it means you have a memory leak - unreleased VmaAllocation objects.
    11206  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    11207 
    11208  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    11209  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    11210  m_hMemory = VK_NULL_HANDLE;
    11211 
    11212  vma_delete(allocator, m_pMetadata);
    11213  m_pMetadata = VMA_NULL;
    11214 }
    11215 
    11216 bool VmaDeviceMemoryBlock::Validate() const
    11217 {
    11218  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    11219  (m_pMetadata->GetSize() != 0));
    11220 
    11221  return m_pMetadata->Validate();
    11222 }
    11223 
    11224 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    11225 {
    11226  void* pData = nullptr;
    11227  VkResult res = Map(hAllocator, 1, &pData);
    11228  if(res != VK_SUCCESS)
    11229  {
    11230  return res;
    11231  }
    11232 
    11233  res = m_pMetadata->CheckCorruption(pData);
    11234 
    11235  Unmap(hAllocator, 1);
    11236 
    11237  return res;
    11238 }
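// This is the per-block half of corruption checking. A minimal sketch of how a
// client might trigger it, assuming an existing VmaAllocator `allocator`:
/*
// Checks all memory types that support margins. Returns
// VK_ERROR_FEATURE_NOT_PRESENT if corruption detection is not enabled, or
// VK_ERROR_VALIDATION_FAILED_EXT if a corrupted margin was found.
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
*/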
    11239 
    11240 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    11241 {
    11242  if(count == 0)
    11243  {
    11244  return VK_SUCCESS;
    11245  }
    11246 
    11247  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11248  if(m_MapCount != 0)
    11249  {
    11250  m_MapCount += count;
    11251  VMA_ASSERT(m_pMappedData != VMA_NULL);
    11252  if(ppData != VMA_NULL)
    11253  {
    11254  *ppData = m_pMappedData;
    11255  }
    11256  return VK_SUCCESS;
    11257  }
    11258  else
    11259  {
    11260  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    11261  hAllocator->m_hDevice,
    11262  m_hMemory,
    11263  0, // offset
    11264  VK_WHOLE_SIZE,
    11265  0, // flags
    11266  &m_pMappedData);
    11267  if(result == VK_SUCCESS)
    11268  {
    11269  if(ppData != VMA_NULL)
    11270  {
    11271  *ppData = m_pMappedData;
    11272  }
    11273  m_MapCount = count;
    11274  }
    11275  return result;
    11276  }
    11277 }
    11278 
    11279 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    11280 {
    11281  if(count == 0)
    11282  {
    11283  return;
    11284  }
    11285 
    11286  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11287  if(m_MapCount >= count)
    11288  {
    11289  m_MapCount -= count;
    11290  if(m_MapCount == 0)
    11291  {
    11292  m_pMappedData = VMA_NULL;
    11293  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    11294  }
    11295  }
    11296  else
    11297  {
    11298  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    11299  }
    11300 }
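// Map/Unmap are reference-counted per block, so nested mapping is legal and
// cheap. A client-side sketch, assuming `allocator`, a HOST_VISIBLE allocation
// `alloc`, and a `srcData`/`srcSize` buffer to upload:
/*
void* pData;
vmaMapMemory(allocator, alloc, &pData); // vkMapMemory runs at most once per block.
memcpy(pData, srcData, srcSize);
vmaUnmapMemory(allocator, alloc); // vkUnmapMemory runs only when the counter reaches 0.
*/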
    11301 
    11302 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11303 {
    11304  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11305  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11306 
    11307  void* pData;
    11308  VkResult res = Map(hAllocator, 1, &pData);
    11309  if(res != VK_SUCCESS)
    11310  {
    11311  return res;
    11312  }
    11313 
    11314  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    11315  VmaWriteMagicValue(pData, allocOffset + allocSize);
    11316 
    11317  Unmap(hAllocator, 1);
    11318 
    11319  return VK_SUCCESS;
    11320 }
    11321 
    11322 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11323 {
    11324  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11325  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11326 
    11327  void* pData;
    11328  VkResult res = Map(hAllocator, 1, &pData);
    11329  if(res != VK_SUCCESS)
    11330  {
    11331  return res;
    11332  }
    11333 
    11334  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    11335  {
    11336  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    11337  }
    11338  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    11339  {
    11340  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    11341  }
    11342 
    11343  Unmap(hAllocator, 1);
    11344 
    11345  return VK_SUCCESS;
    11346 }
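// The magic values written/validated above exist only when the library is
// compiled with margins and corruption detection enabled - a sketch of the
// client-side configuration, assuming the usual single-file integration:
/*
#define VMA_DEBUG_MARGIN 16            // Margin must be > 0 and a multiple of 4.
#define VMA_DEBUG_DETECT_CORRUPTION 1  // Fill margins with a magic value and validate them.
#define VMA_IMPLEMENTATION
#include "vk_mem_alloc.h"
*/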
    11347 
    11348 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    11349  const VmaAllocator hAllocator,
    11350  const VmaAllocation hAllocation,
    11351  VkBuffer hBuffer)
    11352 {
    11353  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11354  hAllocation->GetBlock() == this);
    11355  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11356  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11357  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    11358  hAllocator->m_hDevice,
    11359  hBuffer,
    11360  m_hMemory,
    11361  hAllocation->GetOffset());
    11362 }
    11363 
    11364 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    11365  const VmaAllocator hAllocator,
    11366  const VmaAllocation hAllocation,
    11367  VkImage hImage)
    11368 {
    11369  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11370  hAllocation->GetBlock() == this);
    11371  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11372  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11373  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    11374  hAllocator->m_hDevice,
    11375  hImage,
    11376  m_hMemory,
    11377  hAllocation->GetOffset());
    11378 }
    11379 
    11380 static void InitStatInfo(VmaStatInfo& outInfo)
    11381 {
    11382  memset(&outInfo, 0, sizeof(outInfo));
    11383  outInfo.allocationSizeMin = UINT64_MAX;
    11384  outInfo.unusedRangeSizeMin = UINT64_MAX;
    11385 }
    11386 
    11387 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    11388 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    11389 {
    11390  inoutInfo.blockCount += srcInfo.blockCount;
    11391  inoutInfo.allocationCount += srcInfo.allocationCount;
    11392  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    11393  inoutInfo.usedBytes += srcInfo.usedBytes;
    11394  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    11395  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    11396  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    11397  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    11398  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    11399 }
    11400 
    11401 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    11402 {
    11403  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    11404  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    11405  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    11406  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    11407 }
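// A small worked example of the aggregation above (illustration only): merging
// two VmaStatInfo with usedBytes 100/300 and allocationCount 1/3 yields
// usedBytes = 400 and allocationCount = 4, and postprocessing computes
// allocationSizeAvg = VmaRoundDiv(400, 4) = 100. The min/max fields keep the
// extremes of both inputs, which is why they are seeded with UINT64_MAX and 0.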
    11408 
    11409 VmaPool_T::VmaPool_T(
    11410  VmaAllocator hAllocator,
    11411  const VmaPoolCreateInfo& createInfo,
    11412  VkDeviceSize preferredBlockSize) :
    11413  m_BlockVector(
    11414  hAllocator,
    11415  this, // hParentPool
    11416  createInfo.memoryTypeIndex,
    11417  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
    11418  createInfo.minBlockCount,
    11419  createInfo.maxBlockCount,
    11420  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    11421  createInfo.frameInUseCount,
    11422  true, // isCustomPool
    11423  createInfo.blockSize != 0, // explicitBlockSize
    11424  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    11425  m_Id(0)
    11426 {
    11427 }
    11428 
    11429 VmaPool_T::~VmaPool_T()
    11430 {
    11431 }
    11432 
    11433 #if VMA_STATS_STRING_ENABLED
    11434 
    11435 #endif // #if VMA_STATS_STRING_ENABLED
    11436 
    11437 VmaBlockVector::VmaBlockVector(
    11438  VmaAllocator hAllocator,
    11439  VmaPool hParentPool,
    11440  uint32_t memoryTypeIndex,
    11441  VkDeviceSize preferredBlockSize,
    11442  size_t minBlockCount,
    11443  size_t maxBlockCount,
    11444  VkDeviceSize bufferImageGranularity,
    11445  uint32_t frameInUseCount,
    11446  bool isCustomPool,
    11447  bool explicitBlockSize,
    11448  uint32_t algorithm) :
    11449  m_hAllocator(hAllocator),
    11450  m_hParentPool(hParentPool),
    11451  m_MemoryTypeIndex(memoryTypeIndex),
    11452  m_PreferredBlockSize(preferredBlockSize),
    11453  m_MinBlockCount(minBlockCount),
    11454  m_MaxBlockCount(maxBlockCount),
    11455  m_BufferImageGranularity(bufferImageGranularity),
    11456  m_FrameInUseCount(frameInUseCount),
    11457  m_IsCustomPool(isCustomPool),
    11458  m_ExplicitBlockSize(explicitBlockSize),
    11459  m_Algorithm(algorithm),
    11460  m_HasEmptyBlock(false),
    11461  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    11462  m_NextBlockId(0)
    11463 {
    11464 }
    11465 
    11466 VmaBlockVector::~VmaBlockVector()
    11467 {
    11468  for(size_t i = m_Blocks.size(); i--; )
    11469  {
    11470  m_Blocks[i]->Destroy(m_hAllocator);
    11471  vma_delete(m_hAllocator, m_Blocks[i]);
    11472  }
    11473 }
    11474 
    11475 VkResult VmaBlockVector::CreateMinBlocks()
    11476 {
    11477  for(size_t i = 0; i < m_MinBlockCount; ++i)
    11478  {
    11479  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    11480  if(res != VK_SUCCESS)
    11481  {
    11482  return res;
    11483  }
    11484  }
    11485  return VK_SUCCESS;
    11486 }
    11487 
    11488 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    11489 {
    11490  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    11491 
    11492  const size_t blockCount = m_Blocks.size();
    11493 
    11494  pStats->size = 0;
    11495  pStats->unusedSize = 0;
    11496  pStats->allocationCount = 0;
    11497  pStats->unusedRangeCount = 0;
    11498  pStats->unusedRangeSizeMax = 0;
    11499  pStats->blockCount = blockCount;
    11500 
    11501  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11502  {
    11503  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11504  VMA_ASSERT(pBlock);
    11505  VMA_HEAVY_ASSERT(pBlock->Validate());
    11506  pBlock->m_pMetadata->AddPoolStats(*pStats);
    11507  }
    11508 }
    11509 
    11510 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    11511 {
    11512  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    11513  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    11514  (VMA_DEBUG_MARGIN > 0) &&
    11515  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
    11516  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    11517 }
    11518 
    11519 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    11520 
    11521 VkResult VmaBlockVector::Allocate(
    11522  uint32_t currentFrameIndex,
    11523  VkDeviceSize size,
    11524  VkDeviceSize alignment,
    11525  const VmaAllocationCreateInfo& createInfo,
    11526  VmaSuballocationType suballocType,
    11527  size_t allocationCount,
    11528  VmaAllocation* pAllocations)
    11529 {
    11530  size_t allocIndex;
    11531  VkResult res = VK_SUCCESS;
    11532 
    11533  if(IsCorruptionDetectionEnabled())
    11534  {
    11535  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    11536  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    11537  }
    11538 
    11539  {
    11540  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11541  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    11542  {
    11543  res = AllocatePage(
    11544  currentFrameIndex,
    11545  size,
    11546  alignment,
    11547  createInfo,
    11548  suballocType,
    11549  pAllocations + allocIndex);
    11550  if(res != VK_SUCCESS)
    11551  {
    11552  break;
    11553  }
    11554  }
    11555  }
    11556 
    11557  if(res != VK_SUCCESS)
    11558  {
    11559  // Free all already created allocations.
    11560  while(allocIndex--)
    11561  {
    11562  Free(pAllocations[allocIndex]);
    11563  }
    11564  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    11565  }
    11566 
    11567  return res;
    11568 }
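// The rollback above (free everything already created on the first failure) is
// what makes multi-allocation atomic. A minimal sketch of the public entry
// point, assuming `allocator`, a filled VkMemoryRequirements `memReq` and
// VmaAllocationCreateInfo `allocCreateInfo`:
/*
VmaAllocation allocations[8];
VkResult res = vmaAllocateMemoryPages(
    allocator, &memReq, &allocCreateInfo,
    8, // allocationCount
    allocations,
    VMA_NULL); // pAllocationInfo is optional.
// On failure, no element of `allocations` remains valid - all were freed and zeroed here.
*/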
    11569 
    11570 VkResult VmaBlockVector::AllocatePage(
    11571  uint32_t currentFrameIndex,
    11572  VkDeviceSize size,
    11573  VkDeviceSize alignment,
    11574  const VmaAllocationCreateInfo& createInfo,
    11575  VmaSuballocationType suballocType,
    11576  VmaAllocation* pAllocation)
    11577 {
    11578  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    11579  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    11580  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    11581  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    11582  const bool canCreateNewBlock =
    11583  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    11584  (m_Blocks.size() < m_MaxBlockCount);
    11585  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    11586 
     11587  // If the linear algorithm is used, canMakeOtherLost is available only when used as a ring buffer,
     11588  // which in turn is available only when maxBlockCount = 1.
    11589  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    11590  {
    11591  canMakeOtherLost = false;
    11592  }
    11593 
     11594  // Upper address can only be used with the linear allocator, and only within a single memory block.
    11595  if(isUpperAddress &&
    11596  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    11597  {
    11598  return VK_ERROR_FEATURE_NOT_PRESENT;
    11599  }
    11600 
    11601  // Validate strategy.
    11602  switch(strategy)
    11603  {
     11604  case 0:
     11605  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
     11606  break;
     11607  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
     11608  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
     11609  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
     11610  break;
    11611  default:
    11612  return VK_ERROR_FEATURE_NOT_PRESENT;
    11613  }
    11614 
     11615  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
    11616  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    11617  {
    11618  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11619  }
    11620 
    11621  /*
     11622  Under certain conditions, this whole section can be skipped for optimization, so
    11623  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    11624  e.g. for custom pools with linear algorithm.
    11625  */
    11626  if(!canMakeOtherLost || canCreateNewBlock)
    11627  {
    11628  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    11629  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
     11630  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
    11631 
    11632  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11633  {
    11634  // Use only last block.
    11635  if(!m_Blocks.empty())
    11636  {
    11637  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    11638  VMA_ASSERT(pCurrBlock);
    11639  VkResult res = AllocateFromBlock(
    11640  pCurrBlock,
    11641  currentFrameIndex,
    11642  size,
    11643  alignment,
    11644  allocFlagsCopy,
    11645  createInfo.pUserData,
    11646  suballocType,
    11647  strategy,
    11648  pAllocation);
    11649  if(res == VK_SUCCESS)
    11650  {
    11651  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    11652  return VK_SUCCESS;
    11653  }
    11654  }
    11655  }
    11656  else
    11657  {
     11658  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    11659  {
    11660  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11661  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11662  {
    11663  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11664  VMA_ASSERT(pCurrBlock);
    11665  VkResult res = AllocateFromBlock(
    11666  pCurrBlock,
    11667  currentFrameIndex,
    11668  size,
    11669  alignment,
    11670  allocFlagsCopy,
    11671  createInfo.pUserData,
    11672  suballocType,
    11673  strategy,
    11674  pAllocation);
    11675  if(res == VK_SUCCESS)
    11676  {
    11677  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11678  return VK_SUCCESS;
    11679  }
    11680  }
    11681  }
    11682  else // WORST_FIT, FIRST_FIT
    11683  {
    11684  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11685  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11686  {
    11687  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11688  VMA_ASSERT(pCurrBlock);
    11689  VkResult res = AllocateFromBlock(
    11690  pCurrBlock,
    11691  currentFrameIndex,
    11692  size,
    11693  alignment,
    11694  allocFlagsCopy,
    11695  createInfo.pUserData,
    11696  suballocType,
    11697  strategy,
    11698  pAllocation);
    11699  if(res == VK_SUCCESS)
    11700  {
    11701  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11702  return VK_SUCCESS;
    11703  }
    11704  }
    11705  }
    11706  }
    11707 
    11708  // 2. Try to create new block.
    11709  if(canCreateNewBlock)
    11710  {
    11711  // Calculate optimal size for new block.
    11712  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    11713  uint32_t newBlockSizeShift = 0;
    11714  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    11715 
    11716  if(!m_ExplicitBlockSize)
    11717  {
    11718  // Allocate 1/8, 1/4, 1/2 as first blocks.
    11719  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    11720  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    11721  {
    11722  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11723  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    11724  {
    11725  newBlockSize = smallerNewBlockSize;
    11726  ++newBlockSizeShift;
    11727  }
    11728  else
    11729  {
    11730  break;
    11731  }
    11732  }
    11733  }
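// Example of the heuristic above (illustration only): with
// m_PreferredBlockSize = 256 MiB, an empty block vector and a 4 MiB request,
// the loop settles on newBlockSize = 32 MiB (1/8). Later blocks then come out
// as 64, 128 and finally the full 256 MiB as maxExistingBlockSize rises.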
    11734 
    11735  size_t newBlockIndex = 0;
    11736  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    11737  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    11738  if(!m_ExplicitBlockSize)
    11739  {
    11740  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    11741  {
    11742  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11743  if(smallerNewBlockSize >= size)
    11744  {
    11745  newBlockSize = smallerNewBlockSize;
    11746  ++newBlockSizeShift;
    11747  res = CreateBlock(newBlockSize, &newBlockIndex);
    11748  }
    11749  else
    11750  {
    11751  break;
    11752  }
    11753  }
    11754  }
    11755 
    11756  if(res == VK_SUCCESS)
    11757  {
    11758  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    11759  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    11760 
    11761  res = AllocateFromBlock(
    11762  pBlock,
    11763  currentFrameIndex,
    11764  size,
    11765  alignment,
    11766  allocFlagsCopy,
    11767  createInfo.pUserData,
    11768  suballocType,
    11769  strategy,
    11770  pAllocation);
    11771  if(res == VK_SUCCESS)
    11772  {
    11773  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    11774  return VK_SUCCESS;
    11775  }
    11776  else
    11777  {
    11778  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    11779  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11780  }
    11781  }
    11782  }
    11783  }
    11784 
    11785  // 3. Try to allocate from existing blocks with making other allocations lost.
    11786  if(canMakeOtherLost)
    11787  {
    11788  uint32_t tryIndex = 0;
    11789  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    11790  {
    11791  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    11792  VmaAllocationRequest bestRequest = {};
    11793  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    11794 
    11795  // 1. Search existing allocations.
     11796  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    11797  {
    11798  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11799  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11800  {
    11801  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11802  VMA_ASSERT(pCurrBlock);
    11803  VmaAllocationRequest currRequest = {};
    11804  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11805  currentFrameIndex,
    11806  m_FrameInUseCount,
    11807  m_BufferImageGranularity,
    11808  size,
    11809  alignment,
    11810  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11811  suballocType,
    11812  canMakeOtherLost,
    11813  strategy,
    11814  &currRequest))
    11815  {
    11816  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11817  if(pBestRequestBlock == VMA_NULL ||
    11818  currRequestCost < bestRequestCost)
    11819  {
    11820  pBestRequestBlock = pCurrBlock;
    11821  bestRequest = currRequest;
    11822  bestRequestCost = currRequestCost;
    11823 
    11824  if(bestRequestCost == 0)
    11825  {
    11826  break;
    11827  }
    11828  }
    11829  }
    11830  }
    11831  }
    11832  else // WORST_FIT, FIRST_FIT
    11833  {
    11834  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11835  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11836  {
    11837  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11838  VMA_ASSERT(pCurrBlock);
    11839  VmaAllocationRequest currRequest = {};
    11840  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11841  currentFrameIndex,
    11842  m_FrameInUseCount,
    11843  m_BufferImageGranularity,
    11844  size,
    11845  alignment,
    11846  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11847  suballocType,
    11848  canMakeOtherLost,
    11849  strategy,
    11850  &currRequest))
    11851  {
    11852  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11853  if(pBestRequestBlock == VMA_NULL ||
    11854  currRequestCost < bestRequestCost ||
     11855  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    11856  {
    11857  pBestRequestBlock = pCurrBlock;
    11858  bestRequest = currRequest;
    11859  bestRequestCost = currRequestCost;
    11860 
    11861  if(bestRequestCost == 0 ||
     11862  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    11863  {
    11864  break;
    11865  }
    11866  }
    11867  }
    11868  }
    11869  }
    11870 
    11871  if(pBestRequestBlock != VMA_NULL)
    11872  {
    11873  if(mapped)
    11874  {
    11875  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    11876  if(res != VK_SUCCESS)
    11877  {
    11878  return res;
    11879  }
    11880  }
    11881 
    11882  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    11883  currentFrameIndex,
    11884  m_FrameInUseCount,
    11885  &bestRequest))
    11886  {
     11887  // We no longer have an empty block.
    11888  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    11889  {
    11890  m_HasEmptyBlock = false;
    11891  }
    11892  // Allocate from this pBlock.
    11893  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
    11894  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
    11895  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
    11896  (*pAllocation)->InitBlockAllocation(
    11897  pBestRequestBlock,
    11898  bestRequest.offset,
    11899  alignment,
    11900  size,
    11901  suballocType,
    11902  mapped,
    11903  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    11904  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    11905  VMA_DEBUG_LOG(" Returned from existing block");
    11906  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    11907  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    11908  {
    11909  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    11910  }
    11911  if(IsCorruptionDetectionEnabled())
    11912  {
    11913  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    11914  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    11915  }
    11916  return VK_SUCCESS;
    11917  }
    11918  // else: Some allocations must have been touched while we are here. Next try.
    11919  }
    11920  else
    11921  {
    11922  // Could not find place in any of the blocks - break outer loop.
    11923  break;
    11924  }
    11925  }
     11926  /* Maximum number of tries exceeded - a very unlikely event when many other
     11927  threads are simultaneously touching allocations, making it impossible to make them
     11928  lost at the same time as we try to allocate. */
    11929  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    11930  {
    11931  return VK_ERROR_TOO_MANY_OBJECTS;
    11932  }
    11933  }
    11934 
    11935  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11936 }
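// The canMakeOtherLost path above is driven by allocation flags. A usage
// sketch (not part of the library), assuming a custom pool `pool` configured
// for lost allocations (frameInUseCount etc.) and an existing `allocator`:
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
// ...allocate, then each frame check whether the allocation was pushed out:
if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
{
    // Lost - the resource must be recreated.
}
*/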
    11937 
    11938 void VmaBlockVector::Free(
    11939  VmaAllocation hAllocation)
    11940 {
    11941  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    11942 
    11943  // Scope for lock.
    11944  {
    11945  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11946 
    11947  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    11948 
    11949  if(IsCorruptionDetectionEnabled())
    11950  {
    11951  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    11952  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    11953  }
    11954 
    11955  if(hAllocation->IsPersistentMap())
    11956  {
    11957  pBlock->Unmap(m_hAllocator, 1);
    11958  }
    11959 
    11960  pBlock->m_pMetadata->Free(hAllocation);
    11961  VMA_HEAVY_ASSERT(pBlock->Validate());
    11962 
    11963  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
    11964 
    11965  // pBlock became empty after this deallocation.
    11966  if(pBlock->m_pMetadata->IsEmpty())
    11967  {
     11968  // We already have an empty block. We don't want two, so delete this one.
    11969  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    11970  {
    11971  pBlockToDelete = pBlock;
    11972  Remove(pBlock);
    11973  }
     11974  // We now have our first empty block.
    11975  else
    11976  {
    11977  m_HasEmptyBlock = true;
    11978  }
    11979  }
    11980  // pBlock didn't become empty, but we have another empty block - find and free that one.
    11981  // (This is optional, heuristics.)
    11982  else if(m_HasEmptyBlock)
    11983  {
    11984  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    11985  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    11986  {
    11987  pBlockToDelete = pLastBlock;
    11988  m_Blocks.pop_back();
    11989  m_HasEmptyBlock = false;
    11990  }
    11991  }
    11992 
    11993  IncrementallySortBlocks();
    11994  }
    11995 
     11996  // Destruction of an empty block. Deferred until this point, outside of the mutex
     11997  // lock, for performance reasons.
    11998  if(pBlockToDelete != VMA_NULL)
    11999  {
    12000  VMA_DEBUG_LOG(" Deleted empty allocation");
    12001  pBlockToDelete->Destroy(m_hAllocator);
    12002  vma_delete(m_hAllocator, pBlockToDelete);
    12003  }
    12004 }
    12005 
    12006 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    12007 {
    12008  VkDeviceSize result = 0;
    12009  for(size_t i = m_Blocks.size(); i--; )
    12010  {
    12011  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    12012  if(result >= m_PreferredBlockSize)
    12013  {
    12014  break;
    12015  }
    12016  }
    12017  return result;
    12018 }
    12019 
    12020 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    12021 {
    12022  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12023  {
    12024  if(m_Blocks[blockIndex] == pBlock)
    12025  {
    12026  VmaVectorRemove(m_Blocks, blockIndex);
    12027  return;
    12028  }
    12029  }
    12030  VMA_ASSERT(0);
    12031 }
    12032 
    12033 void VmaBlockVector::IncrementallySortBlocks()
    12034 {
    12035  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    12036  {
    12037  // Bubble sort only until first swap.
    12038  for(size_t i = 1; i < m_Blocks.size(); ++i)
    12039  {
    12040  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    12041  {
    12042  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    12043  return;
    12044  }
    12045  }
    12046  }
    12047 }
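// Doing at most one swap per call keeps this O(n) in the worst case and
// practically free per allocation/free, while m_Blocks still converges toward
// ascending free-space order - the order the best-fit search above relies on.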
    12048 
    12049 VkResult VmaBlockVector::AllocateFromBlock(
    12050  VmaDeviceMemoryBlock* pBlock,
    12051  uint32_t currentFrameIndex,
    12052  VkDeviceSize size,
    12053  VkDeviceSize alignment,
    12054  VmaAllocationCreateFlags allocFlags,
    12055  void* pUserData,
    12056  VmaSuballocationType suballocType,
    12057  uint32_t strategy,
    12058  VmaAllocation* pAllocation)
    12059 {
    12060  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    12061  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    12062  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    12063  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    12064 
    12065  VmaAllocationRequest currRequest = {};
    12066  if(pBlock->m_pMetadata->CreateAllocationRequest(
    12067  currentFrameIndex,
    12068  m_FrameInUseCount,
    12069  m_BufferImageGranularity,
    12070  size,
    12071  alignment,
    12072  isUpperAddress,
    12073  suballocType,
    12074  false, // canMakeOtherLost
    12075  strategy,
    12076  &currRequest))
    12077  {
    12078  // Allocate from pCurrBlock.
    12079  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    12080 
    12081  if(mapped)
    12082  {
    12083  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    12084  if(res != VK_SUCCESS)
    12085  {
    12086  return res;
    12087  }
    12088  }
    12089 
     12090  // We no longer have an empty block.
    12091  if(pBlock->m_pMetadata->IsEmpty())
    12092  {
    12093  m_HasEmptyBlock = false;
    12094  }
    12095 
    12096  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
    12097  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
    12098  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
    12099  (*pAllocation)->InitBlockAllocation(
    12100  pBlock,
    12101  currRequest.offset,
    12102  alignment,
    12103  size,
    12104  suballocType,
    12105  mapped,
    12106  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    12107  VMA_HEAVY_ASSERT(pBlock->Validate());
    12108  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    12109  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12110  {
    12111  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    12112  }
    12113  if(IsCorruptionDetectionEnabled())
    12114  {
    12115  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
    12116  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    12117  }
    12118  return VK_SUCCESS;
    12119  }
    12120  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12121 }
    12122 
    12123 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    12124 {
    12125  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    12126  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    12127  allocInfo.allocationSize = blockSize;
    12128  VkDeviceMemory mem = VK_NULL_HANDLE;
    12129  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    12130  if(res < 0)
    12131  {
    12132  return res;
    12133  }
    12134 
    12135  // New VkDeviceMemory successfully created.
    12136 
     12137  // Create a new block object for it.
    12138  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    12139  pBlock->Init(
    12140  m_hAllocator,
    12141  m_hParentPool,
    12142  m_MemoryTypeIndex,
    12143  mem,
    12144  allocInfo.allocationSize,
    12145  m_NextBlockId++,
    12146  m_Algorithm);
    12147 
    12148  m_Blocks.push_back(pBlock);
    12149  if(pNewBlockIndex != VMA_NULL)
    12150  {
    12151  *pNewBlockIndex = m_Blocks.size() - 1;
    12152  }
    12153 
    12154  return VK_SUCCESS;
    12155 }
    12156 
    12157 void VmaBlockVector::ApplyDefragmentationMovesCpu(
    12158  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    12159  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
    12160 {
    12161  const size_t blockCount = m_Blocks.size();
    12162  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
    12163 
    12164  enum BLOCK_FLAG
    12165  {
    12166  BLOCK_FLAG_USED = 0x00000001,
    12167  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    12168  };
    12169 
    12170  struct BlockInfo
    12171  {
    12172  uint32_t flags;
    12173  void* pMappedData;
    12174  };
    12175  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
    12176  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    12177  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
    12178 
    12179  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    12180  const size_t moveCount = moves.size();
    12181  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12182  {
    12183  const VmaDefragmentationMove& move = moves[moveIndex];
    12184  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
    12185  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    12186  }
    12187 
    12188  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12189 
    12190  // Go over all blocks. Get mapped pointer or map if necessary.
    12191  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12192  {
    12193  BlockInfo& currBlockInfo = blockInfo[blockIndex];
    12194  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12195  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
    12196  {
    12197  currBlockInfo.pMappedData = pBlock->GetMappedData();
     12198  // If it is not already mapped, map it now.
    12199  if(currBlockInfo.pMappedData == VMA_NULL)
    12200  {
    12201  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
    12202  if(pDefragCtx->res == VK_SUCCESS)
    12203  {
    12204  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
    12205  }
    12206  }
    12207  }
    12208  }
    12209 
    12210  // Go over all moves. Do actual data transfer.
    12211  if(pDefragCtx->res == VK_SUCCESS)
    12212  {
    12213  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    12214  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    12215 
    12216  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12217  {
    12218  const VmaDefragmentationMove& move = moves[moveIndex];
    12219 
    12220  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
    12221  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
    12222 
    12223  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
    12224 
    12225  // Invalidate source.
    12226  if(isNonCoherent)
    12227  {
    12228  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
    12229  memRange.memory = pSrcBlock->GetDeviceMemory();
    12230  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
    12231  memRange.size = VMA_MIN(
    12232  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
    12233  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
    12234  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12235  }
    12236 
    12237  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    12238  memmove(
    12239  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
    12240  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
    12241  static_cast<size_t>(move.size));
    12242 
    12243  if(IsCorruptionDetectionEnabled())
    12244  {
    12245  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
    12246  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
    12247  }
    12248 
    12249  // Flush destination.
    12250  if(isNonCoherent)
    12251  {
    12252  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
    12253  memRange.memory = pDstBlock->GetDeviceMemory();
    12254  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
    12255  memRange.size = VMA_MIN(
    12256  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
    12257  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
    12258  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12259  }
    12260  }
    12261  }
    12262 
    12263  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation,
    12264  // regardless of whether pDefragCtx->res == VK_SUCCESS.
    12265  for(size_t blockIndex = blockCount; blockIndex--; )
    12266  {
    12267  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
    12268  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
    12269  {
    12270  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12271  pBlock->Unmap(m_hAllocator, 1);
    12272  }
    12273  }
    12274 }
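// For illustration only - a minimal sketch of the invalidate -> copy -> flush
// pattern used above, for a single HOST_VISIBLE but non-HOST_COHERENT range.
// `device`, `memory`, `pMapped`, `srcOffset`, `dstOffset`, and `size` are assumed
// names, not part of this file:
//
//   VkMappedMemoryRange range = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
//   range.memory = memory;
//   range.offset = srcOffset; // Really aligned down to nonCoherentAtomSize, as above.
//   range.size   = size;      // Really aligned up and clamped to the block end, as above.
//   vkInvalidateMappedMemoryRanges(device, 1, &range); // Make GPU writes visible to the host.
//   memmove((char*)pMapped + dstOffset, (char*)pMapped + srcOffset, (size_t)size); // Ranges may overlap.
//   range.offset = dstOffset;
//   vkFlushMappedMemoryRanges(device, 1, &range);      // Make host writes visible to the GPU.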
    12275 
    12276 void VmaBlockVector::ApplyDefragmentationMovesGpu(
    12277  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    12278  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12279  VkCommandBuffer commandBuffer)
    12280 {
    12281  const size_t blockCount = m_Blocks.size();
    12282 
    12283  pDefragCtx->blockContexts.resize(blockCount);
    12284  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
    12285 
    12286  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    12287  const size_t moveCount = moves.size();
    12288  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12289  {
    12290  const VmaDefragmentationMove& move = moves[moveIndex];
    12291  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12292  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12293  }
    12294 
    12295  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12296 
    12297  // Go over all blocks. Create and bind buffer for whole block if necessary.
    12298  {
    12299  VkBufferCreateInfo bufCreateInfo;
    12300  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
    12301 
    12302  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12303  {
    12304  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
    12305  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12306  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
    12307  {
    12308  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
    12309  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
    12310  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
    12311  if(pDefragCtx->res == VK_SUCCESS)
    12312  {
    12313  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
    12314  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
    12315  }
    12316  }
    12317  }
    12318  }
    12319 
    12320  // Go over all moves. Post data transfer commands to command buffer.
    12321  if(pDefragCtx->res == VK_SUCCESS)
    12322  {
    12323  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12324  {
    12325  const VmaDefragmentationMove& move = moves[moveIndex];
    12326 
    12327  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
    12328  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
    12329 
    12330  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
    12331 
    12332  VkBufferCopy region = {
    12333  move.srcOffset,
    12334  move.dstOffset,
    12335  move.size };
    12336  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
    12337  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
    12338  }
    12339  }
    12340 
    12341  // Buffers are kept in blockContexts for later destruction. Mark the context as not ready - the copies are only recorded, not yet executed.
    12342  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    12343  {
    12344  pDefragCtx->res = VK_NOT_READY;
    12345  }
    12346 }
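// For illustration only - pDefragCtx->res ends as VK_NOT_READY above because the
// copies were merely recorded into commandBuffer. The whole-block buffers must stay
// alive until the caller has executed it, e.g. (assumed handles `device`, `queue`,
// `fence`):
//
//   vkEndCommandBuffer(commandBuffer);
//   VkSubmitInfo submit = { VK_STRUCTURE_TYPE_SUBMIT_INFO };
//   submit.commandBufferCount = 1;
//   submit.pCommandBuffers = &commandBuffer;
//   vkQueueSubmit(queue, 1, &submit, fence);
//   vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
//   // Only now may defragmentation be ended, which destroys the helper buffers
//   // in DefragmentationEnd() below.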
    12347 
    12348 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
    12349 {
    12350  m_HasEmptyBlock = false;
    12351  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    12352  {
    12353  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12354  if(pBlock->m_pMetadata->IsEmpty())
    12355  {
    12356  if(m_Blocks.size() > m_MinBlockCount)
    12357  {
    12358  if(pDefragmentationStats != VMA_NULL)
    12359  {
    12360  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    12361  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
    12362  }
    12363 
    12364  VmaVectorRemove(m_Blocks, blockIndex);
    12365  pBlock->Destroy(m_hAllocator);
    12366  vma_delete(m_hAllocator, pBlock);
    12367  }
    12368  else
    12369  {
    12370  m_HasEmptyBlock = true;
    12371  }
    12372  }
    12373  }
    12374 }
    12375 
    12376 #if VMA_STATS_STRING_ENABLED
    12377 
    12378 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    12379 {
    12380  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12381 
    12382  json.BeginObject();
    12383 
    12384  if(m_IsCustomPool)
    12385  {
    12386  json.WriteString("MemoryTypeIndex");
    12387  json.WriteNumber(m_MemoryTypeIndex);
    12388 
    12389  json.WriteString("BlockSize");
    12390  json.WriteNumber(m_PreferredBlockSize);
    12391 
    12392  json.WriteString("BlockCount");
    12393  json.BeginObject(true);
    12394  if(m_MinBlockCount > 0)
    12395  {
    12396  json.WriteString("Min");
    12397  json.WriteNumber((uint64_t)m_MinBlockCount);
    12398  }
    12399  if(m_MaxBlockCount < SIZE_MAX)
    12400  {
    12401  json.WriteString("Max");
    12402  json.WriteNumber((uint64_t)m_MaxBlockCount);
    12403  }
    12404  json.WriteString("Cur");
    12405  json.WriteNumber((uint64_t)m_Blocks.size());
    12406  json.EndObject();
    12407 
    12408  if(m_FrameInUseCount > 0)
    12409  {
    12410  json.WriteString("FrameInUseCount");
    12411  json.WriteNumber(m_FrameInUseCount);
    12412  }
    12413 
    12414  if(m_Algorithm != 0)
    12415  {
    12416  json.WriteString("Algorithm");
    12417  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
    12418  }
    12419  }
    12420  else
    12421  {
    12422  json.WriteString("PreferredBlockSize");
    12423  json.WriteNumber(m_PreferredBlockSize);
    12424  }
    12425 
    12426  json.WriteString("Blocks");
    12427  json.BeginObject();
    12428  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12429  {
    12430  json.BeginString();
    12431  json.ContinueString(m_Blocks[i]->GetId());
    12432  json.EndString();
    12433 
    12434  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    12435  }
    12436  json.EndObject();
    12437 
    12438  json.EndObject();
    12439 }
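// For illustration only - approximate shape of the JSON emitted above for a custom
// pool (values invented; for default pools the pool-specific fields are replaced by
// "PreferredBlockSize"):
//
//   {
//     "MemoryTypeIndex": 2,
//     "BlockSize": 268435456,
//     "BlockCount": { "Min": 1, "Max": 16, "Cur": 3 },
//     "FrameInUseCount": 1,        // Written only when > 0.
//     "Algorithm": "Linear",       // Written only for a non-default algorithm.
//     "Blocks": { "<blockId>": { ... }, ... }
//   }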
    12440 
    12441 #endif // #if VMA_STATS_STRING_ENABLED
    12442 
    12443 void VmaBlockVector::Defragment(
    12444  class VmaBlockVectorDefragmentationContext* pCtx,
    12445  VmaDefragmentationStats* pStats,
    12446  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    12447  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    12448  VkCommandBuffer commandBuffer)
    12449 {
    12450  pCtx->res = VK_SUCCESS;
    12451 
    12452  const VkMemoryPropertyFlags memPropFlags =
    12453  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    12454  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    12455 
    12456  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
    12457  isHostVisible;
    12458  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
    12459  !IsCorruptionDetectionEnabled() &&
    12460  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
    12461 
    12462  // There are options to defragment this memory type.
    12463  if(canDefragmentOnCpu || canDefragmentOnGpu)
    12464  {
    12465  bool defragmentOnGpu;
    12466  // There is only one option to defragment this memory type.
    12467  if(canDefragmentOnGpu != canDefragmentOnCpu)
    12468  {
    12469  defragmentOnGpu = canDefragmentOnGpu;
    12470  }
    12471  // Both options are available: Heuristics to choose the best one.
    12472  else
    12473  {
    12474  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
    12475  m_hAllocator->IsIntegratedGpu();
    12476  }
    12477 
    12478  bool overlappingMoveSupported = !defragmentOnGpu;
    12479 
    12480  if(m_hAllocator->m_UseMutex)
    12481  {
    12482  m_Mutex.LockWrite();
    12483  pCtx->mutexLocked = true;
    12484  }
    12485 
    12486  pCtx->Begin(overlappingMoveSupported);
    12487 
    12488  // Defragment.
    12489 
    12490  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
    12491  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
    12492  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
    12493  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
    12494  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
    12495 
    12496  // Accumulate statistics.
    12497  if(pStats != VMA_NULL)
    12498  {
    12499  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
    12500  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
    12501  pStats->bytesMoved += bytesMoved;
    12502  pStats->allocationsMoved += allocationsMoved;
    12503  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    12504  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    12505  if(defragmentOnGpu)
    12506  {
    12507  maxGpuBytesToMove -= bytesMoved;
    12508  maxGpuAllocationsToMove -= allocationsMoved;
    12509  }
    12510  else
    12511  {
    12512  maxCpuBytesToMove -= bytesMoved;
    12513  maxCpuAllocationsToMove -= allocationsMoved;
    12514  }
    12515  }
    12516 
    12517  if(pCtx->res >= VK_SUCCESS)
    12518  {
    12519  if(defragmentOnGpu)
    12520  {
    12521  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
    12522  }
    12523  else
    12524  {
    12525  ApplyDefragmentationMovesCpu(pCtx, moves);
    12526  }
    12527  }
    12528  }
    12529 }
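// For illustration only - a hedged sketch of how the public API declared earlier in
// this header reaches VmaBlockVector::Defragment() above (handle and variable names
// are assumed):
//
//   VmaDefragmentationInfo2 info = {};
//   info.allocationCount = (uint32_t)allocs.size();
//   info.pAllocations = allocs.data();
//   info.maxCpuBytesToMove = VK_WHOLE_SIZE;       // No CPU limit.
//   info.maxCpuAllocationsToMove = UINT32_MAX;
//   info.commandBuffer = cmdBuf;                  // VK_NULL_HANDLE would disable the GPU path.
//
//   VmaDefragmentationStats stats = {};
//   VmaDefragmentationContext defragCtx;
//   vmaDefragmentationBegin(allocator, &info, &stats, &defragCtx);
//   // Submit cmdBuf and wait if the GPU path was used...
//   vmaDefragmentationEnd(allocator, defragCtx);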
    12530 
    12531 void VmaBlockVector::DefragmentationEnd(
    12532  class VmaBlockVectorDefragmentationContext* pCtx,
    12533  VmaDefragmentationStats* pStats)
    12534 {
    12535  // Destroy buffers.
    12536  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
    12537  {
    12538  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
    12539  if(blockCtx.hBuffer)
    12540  {
    12541  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
    12542  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
    12543  }
    12544  }
    12545 
    12546  if(pCtx->res >= VK_SUCCESS)
    12547  {
    12548  FreeEmptyBlocks(pStats);
    12549  }
    12550 
    12551  if(pCtx->mutexLocked)
    12552  {
    12553  VMA_ASSERT(m_hAllocator->m_UseMutex);
    12554  m_Mutex.UnlockWrite();
    12555  }
    12556 }
    12557 
    12558 size_t VmaBlockVector::CalcAllocationCount() const
    12559 {
    12560  size_t result = 0;
    12561  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12562  {
    12563  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
    12564  }
    12565  return result;
    12566 }
    12567 
    12568 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
    12569 {
    12570  if(m_BufferImageGranularity == 1)
    12571  {
    12572  return false;
    12573  }
    12574  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
    12575  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    12576  {
    12577  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
    12578  VMA_ASSERT(m_Algorithm == 0);
    12579  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
    12580  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
    12581  {
    12582  return true;
    12583  }
    12584  }
    12585  return false;
    12586 }
    12587 
    12588 void VmaBlockVector::MakePoolAllocationsLost(
    12589  uint32_t currentFrameIndex,
    12590  size_t* pLostAllocationCount)
    12591 {
    12592  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    12593  size_t lostAllocationCount = 0;
    12594  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12595  {
    12596  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12597  VMA_ASSERT(pBlock);
    12598  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    12599  }
    12600  if(pLostAllocationCount != VMA_NULL)
    12601  {
    12602  *pLostAllocationCount = lostAllocationCount;
    12603  }
    12604 }
    12605 
    12606 VkResult VmaBlockVector::CheckCorruption()
    12607 {
    12608  if(!IsCorruptionDetectionEnabled())
    12609  {
    12610  return VK_ERROR_FEATURE_NOT_PRESENT;
    12611  }
    12612 
    12613  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12614  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12615  {
    12616  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12617  VMA_ASSERT(pBlock);
    12618  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    12619  if(res != VK_SUCCESS)
    12620  {
    12621  return res;
    12622  }
    12623  }
    12624  return VK_SUCCESS;
    12625 }
    12626 
    12627 void VmaBlockVector::AddStats(VmaStats* pStats)
    12628 {
    12629  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    12630  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    12631 
    12632  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12633 
    12634  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12635  {
    12636  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12637  VMA_ASSERT(pBlock);
    12638  VMA_HEAVY_ASSERT(pBlock->Validate());
    12639  VmaStatInfo allocationStatInfo;
    12640  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    12641  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12642  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12643  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12644  }
    12645 }
    12646 
    12647 ////////////////////////////////////////////////////////////////////////////////
    12648 // VmaDefragmentationAlgorithm_Generic members definition
    12649 
    12650 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    12651  VmaAllocator hAllocator,
    12652  VmaBlockVector* pBlockVector,
    12653  uint32_t currentFrameIndex,
    12654  bool overlappingMoveSupported) :
    12655  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12656  m_AllocationCount(0),
    12657  m_AllAllocations(false),
    12658  m_BytesMoved(0),
    12659  m_AllocationsMoved(0),
    12660  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    12661 {
    12662  // Create block info for each block.
    12663  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    12664  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12665  {
    12666  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    12667  pBlockInfo->m_OriginalBlockIndex = blockIndex;
    12668  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    12669  m_Blocks.push_back(pBlockInfo);
    12670  }
    12671 
    12672  // Sort them by m_pBlock pointer value.
    12673  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    12674 }
    12675 
    12676 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
    12677 {
    12678  for(size_t i = m_Blocks.size(); i--; )
    12679  {
    12680  vma_delete(m_hAllocator, m_Blocks[i]);
    12681  }
    12682 }
    12683 
    12684 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    12685 {
    12686  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was not lost.
    12687  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    12688  {
    12689  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
    12690  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    12691  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    12692  {
    12693  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
    12694  (*it)->m_Allocations.push_back(allocInfo);
    12695  }
    12696  else
    12697  {
    12698  VMA_ASSERT(0);
    12699  }
    12700 
    12701  ++m_AllocationCount;
    12702  }
    12703 }
    12704 
    12705 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    12706  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12707  VkDeviceSize maxBytesToMove,
    12708  uint32_t maxAllocationsToMove)
    12709 {
    12710  if(m_Blocks.empty())
    12711  {
    12712  return VK_SUCCESS;
    12713  }
    12714 
    12715  // This is a choice based on research.
    12716  // Option 1:
    12717  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    12718  // Option 2:
    12719  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    12720  // Option 3:
    12721  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
    12722 
    12723  size_t srcBlockMinIndex = 0;
    12724  // When FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
    12725  /*
    12726  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    12727  {
    12728  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
    12729  if(blocksWithNonMovableCount > 0)
    12730  {
    12731  srcBlockMinIndex = blocksWithNonMovableCount - 1;
    12732  }
    12733  }
    12734  */
    12735 
    12736  size_t srcBlockIndex = m_Blocks.size() - 1;
    12737  size_t srcAllocIndex = SIZE_MAX;
    12738  for(;;)
    12739  {
    12740  // 1. Find next allocation to move.
    12741  // 1.1. Walk m_Blocks from last to first - they are sorted from most "destination" to most "source".
    12742  // 1.2. Within each block, walk m_Allocations from last to first.
    12743  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    12744  {
    12745  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    12746  {
    12747  // Finished: no more allocations to process.
    12748  if(srcBlockIndex == srcBlockMinIndex)
    12749  {
    12750  return VK_SUCCESS;
    12751  }
    12752  else
    12753  {
    12754  --srcBlockIndex;
    12755  srcAllocIndex = SIZE_MAX;
    12756  }
    12757  }
    12758  else
    12759  {
    12760  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    12761  }
    12762  }
    12763 
    12764  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    12765  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    12766 
    12767  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    12768  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    12769  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    12770  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    12771 
    12772  // 2. Try to find new place for this allocation in preceding or current block.
    12773  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    12774  {
    12775  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    12776  VmaAllocationRequest dstAllocRequest;
    12777  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
    12778  m_CurrentFrameIndex,
    12779  m_pBlockVector->GetFrameInUseCount(),
    12780  m_pBlockVector->GetBufferImageGranularity(),
    12781  size,
    12782  alignment,
    12783  false, // upperAddress
    12784  suballocType,
    12785  false, // canMakeOtherLost
    12786  strategy,
    12787  &dstAllocRequest) &&
    12788  MoveMakesSense(
    12789  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    12790  {
    12791  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    12792 
    12793  // Reached limit on number of allocations or bytes to move.
    12794  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    12795  (m_BytesMoved + size > maxBytesToMove))
    12796  {
    12797  return VK_SUCCESS;
    12798  }
    12799 
    12800  VmaDefragmentationMove move;
    12801  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
    12802  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
    12803  move.srcOffset = srcOffset;
    12804  move.dstOffset = dstAllocRequest.offset;
    12805  move.size = size;
    12806  moves.push_back(move);
    12807 
    12808  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
    12809  dstAllocRequest,
    12810  suballocType,
    12811  size,
    12812  allocInfo.m_hAllocation);
    12813  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
    12814 
    12815  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    12816 
    12817  if(allocInfo.m_pChanged != VMA_NULL)
    12818  {
    12819  *allocInfo.m_pChanged = VK_TRUE;
    12820  }
    12821 
    12822  ++m_AllocationsMoved;
    12823  m_BytesMoved += size;
    12824 
    12825  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    12826 
    12827  break;
    12828  }
    12829  }
    12830 
    12831  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
    12832 
    12833  if(srcAllocIndex > 0)
    12834  {
    12835  --srcAllocIndex;
    12836  }
    12837  else
    12838  {
    12839  if(srcBlockIndex > 0)
    12840  {
    12841  --srcBlockIndex;
    12842  srcAllocIndex = SIZE_MAX;
    12843  }
    12844  else
    12845  {
    12846  return VK_SUCCESS;
    12847  }
    12848  }
    12849  }
    12850 }
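// For illustration only - the round above scans sources from the back and
// destinations from the front of m_Blocks (sorted most-"destination" first).
// With three blocks A, B, C (A most "destination"):
//
//   src = C: try dst = A, then B, then C itself;
//   src = B: try dst = A, then B;
//   src = A: allocations can only be compacted toward lower offsets within A.
//
// MoveMakesSense() below rejects any candidate that would not land strictly
// earlier, so the net effect is packing allocations toward the front.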
    12851 
    12852 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
    12853 {
    12854  size_t result = 0;
    12855  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12856  {
    12857  if(m_Blocks[i]->m_HasNonMovableAllocations)
    12858  {
    12859  ++result;
    12860  }
    12861  }
    12862  return result;
    12863 }
    12864 
    12865 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    12866  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12867  VkDeviceSize maxBytesToMove,
    12868  uint32_t maxAllocationsToMove)
    12869 {
    12870  if(!m_AllAllocations && m_AllocationCount == 0)
    12871  {
    12872  return VK_SUCCESS;
    12873  }
    12874 
    12875  const size_t blockCount = m_Blocks.size();
    12876  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12877  {
    12878  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    12879 
    12880  if(m_AllAllocations)
    12881  {
    12882  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
    12883  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
    12884  it != pMetadata->m_Suballocations.end();
    12885  ++it)
    12886  {
    12887  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    12888  {
    12889  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
    12890  pBlockInfo->m_Allocations.push_back(allocInfo);
    12891  }
    12892  }
    12893  }
    12894 
    12895  pBlockInfo->CalcHasNonMovableAllocations();
    12896 
    12897  // This is a choice based on research.
    12898  // Option 1:
    12899  pBlockInfo->SortAllocationsByOffsetDescending();
    12900  // Option 2:
    12901  //pBlockInfo->SortAllocationsBySizeDescending();
    12902  }
    12903 
    12904  // Sort m_Blocks this time by the main criterion: from most "destination" to most "source" blocks.
    12905  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    12906 
    12907  // This is a choice based on research.
    12908  const uint32_t roundCount = 2;
    12909 
    12910  // Execute defragmentation rounds (the main part).
    12911  VkResult result = VK_SUCCESS;
    12912  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    12913  {
    12914  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    12915  }
    12916 
    12917  return result;
    12918 }
    12919 
    12920 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
    12921  size_t dstBlockIndex, VkDeviceSize dstOffset,
    12922  size_t srcBlockIndex, VkDeviceSize srcOffset)
    12923 {
    12924  if(dstBlockIndex < srcBlockIndex)
    12925  {
    12926  return true;
    12927  }
    12928  if(dstBlockIndex > srcBlockIndex)
    12929  {
    12930  return false;
    12931  }
    12932  if(dstOffset < srcOffset)
    12933  {
    12934  return true;
    12935  }
    12936  return false;
    12937 }
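// For illustration only - MoveMakesSense() is a lexicographic "<" on the pair
// (blockIndex, offset); a move is worthwhile only if it lands strictly earlier:
//
//   dst (block 0, offset 256) vs src (block 1, offset 0)   -> true  (earlier block)
//   dst (block 1, offset 0)   vs src (block 1, offset 256) -> true  (lower offset)
//   dst (block 1, offset 256) vs src (block 1, offset 256) -> false (no progress)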
    12938 
    12939 ////////////////////////////////////////////////////////////////////////////////
    12940 // VmaDefragmentationAlgorithm_Fast
    12941 
    12942 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    12943  VmaAllocator hAllocator,
    12944  VmaBlockVector* pBlockVector,
    12945  uint32_t currentFrameIndex,
    12946  bool overlappingMoveSupported) :
    12947  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12948  m_OverlappingMoveSupported(overlappingMoveSupported),
    12949  m_AllocationCount(0),
    12950  m_AllAllocations(false),
    12951  m_BytesMoved(0),
    12952  m_AllocationsMoved(0),
    12953  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
    12954 {
    12955  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
    12956 
    12957 }
    12958 
    12959 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
    12960 {
    12961 }
    12962 
    12963 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    12964  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12965  VkDeviceSize maxBytesToMove,
    12966  uint32_t maxAllocationsToMove)
    12967 {
    12968  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
    12969 
    12970  const size_t blockCount = m_pBlockVector->GetBlockCount();
    12971  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    12972  {
    12973  return VK_SUCCESS;
    12974  }
    12975 
    12976  PreprocessMetadata();
    12977 
    12978  // Sort blocks in ascending order of free space - from most "destination" to most "source".
    12979 
    12980  m_BlockInfos.resize(blockCount);
    12981  for(size_t i = 0; i < blockCount; ++i)
    12982  {
    12983  m_BlockInfos[i].origBlockIndex = i;
    12984  }
    12985 
    12986  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
    12987  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
    12988  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    12989  });
    12990 
    12991  // THE MAIN ALGORITHM
    12992 
    12993  FreeSpaceDatabase freeSpaceDb;
    12994 
    12995  size_t dstBlockInfoIndex = 0;
    12996  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    12997  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    12998  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    12999  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    13000  VkDeviceSize dstOffset = 0;
    13001 
    13002  bool end = false;
    13003  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    13004  {
    13005  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
    13006  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
    13007  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
    13008  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
    13009  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
    13010  {
    13011  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
    13012  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
    13013  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
    13014  if(m_AllocationsMoved == maxAllocationsToMove ||
    13015  m_BytesMoved + srcAllocSize > maxBytesToMove)
    13016  {
    13017  end = true;
    13018  break;
    13019  }
    13020  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
    13021 
    13022  // Try to place it in one of free spaces from the database.
    13023  size_t freeSpaceInfoIndex;
    13024  VkDeviceSize dstAllocOffset;
    13025  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
    13026  freeSpaceInfoIndex, dstAllocOffset))
    13027  {
    13028  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
    13029  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
    13030  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
    13031 
    13032  // Same block
    13033  if(freeSpaceInfoIndex == srcBlockInfoIndex)
    13034  {
    13035  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    13036 
    13037  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    13038 
    13039  VmaSuballocation suballoc = *srcSuballocIt;
    13040  suballoc.offset = dstAllocOffset;
    13041  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
    13042  m_BytesMoved += srcAllocSize;
    13043  ++m_AllocationsMoved;
    13044 
    13045  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13046  ++nextSuballocIt;
    13047  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13048  srcSuballocIt = nextSuballocIt;
    13049 
    13050  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    13051 
    13052  VmaDefragmentationMove move = {
    13053  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    13054  srcAllocOffset, dstAllocOffset,
    13055  srcAllocSize };
    13056  moves.push_back(move);
    13057  }
    13058  // Different block
    13059  else
    13060  {
    13061  // MOVE OPTION 2: Move the allocation to a different block.
    13062 
    13063  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
    13064 
    13065  VmaSuballocation suballoc = *srcSuballocIt;
    13066  suballoc.offset = dstAllocOffset;
    13067  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
    13068  m_BytesMoved += srcAllocSize;
    13069  ++m_AllocationsMoved;
    13070 
    13071  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13072  ++nextSuballocIt;
    13073  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13074  srcSuballocIt = nextSuballocIt;
    13075 
    13076  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    13077 
    13078  VmaDefragmentationMove move = {
    13079  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    13080  srcAllocOffset, dstAllocOffset,
    13081  srcAllocSize };
    13082  moves.push_back(move);
    13083  }
    13084  }
    13085  else
    13086  {
    13087  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
    13088 
    13089  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
    13090  while(dstBlockInfoIndex < srcBlockInfoIndex &&
    13091  dstAllocOffset + srcAllocSize > dstBlockSize)
    13092  {
    13093  // But before that, register remaining free space at the end of dst block.
    13094  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
    13095 
    13096  ++dstBlockInfoIndex;
    13097  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    13098  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    13099  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    13100  dstBlockSize = pDstMetadata->GetSize();
    13101  dstOffset = 0;
    13102  dstAllocOffset = 0;
    13103  }
    13104 
    13105  // Same block
    13106  if(dstBlockInfoIndex == srcBlockInfoIndex)
    13107  {
    13108  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    13109 
    13110  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
    13111 
    13112  bool skipOver = overlap;
    13113  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
    13114  {
    13115  // If the destination and source places overlap, skip the move when it would
    13116  // shift the allocation by less than 1/64 of its size.
    13117  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
    13118  }
    13119 
    13120  if(skipOver)
    13121  {
    13122  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
    13123 
    13124  dstOffset = srcAllocOffset + srcAllocSize;
    13125  ++srcSuballocIt;
    13126  }
    13127  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    13128  else
    13129  {
    13130  srcSuballocIt->offset = dstAllocOffset;
    13131  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
    13132  dstOffset = dstAllocOffset + srcAllocSize;
    13133  m_BytesMoved += srcAllocSize;
    13134  ++m_AllocationsMoved;
    13135  ++srcSuballocIt;
    13136  VmaDefragmentationMove move = {
    13137  srcOrigBlockIndex, dstOrigBlockIndex,
    13138  srcAllocOffset, dstAllocOffset,
    13139  srcAllocSize };
    13140  moves.push_back(move);
    13141  }
    13142  }
    13143  // Different block
    13144  else
    13145  {
    13146  // MOVE OPTION 2: Move the allocation to a different block.
    13147 
    13148  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
    13149  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
    13150 
    13151  VmaSuballocation suballoc = *srcSuballocIt;
    13152  suballoc.offset = dstAllocOffset;
    13153  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
    13154  dstOffset = dstAllocOffset + srcAllocSize;
    13155  m_BytesMoved += srcAllocSize;
    13156  ++m_AllocationsMoved;
    13157 
    13158  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13159  ++nextSuballocIt;
    13160  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13161  srcSuballocIt = nextSuballocIt;
    13162 
    13163  pDstMetadata->m_Suballocations.push_back(suballoc);
    13164 
    13165  VmaDefragmentationMove move = {
    13166  srcOrigBlockIndex, dstOrigBlockIndex,
    13167  srcAllocOffset, dstAllocOffset,
    13168  srcAllocSize };
    13169  moves.push_back(move);
    13170  }
    13171  }
    13172  }
    13173  }
    13174 
    13175  m_BlockInfos.clear();
    13176 
    13177  PostprocessMetadata();
    13178 
    13179  return VK_SUCCESS;
    13180 }
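// For illustration only - the single sweep above packs live allocations toward the
// front of the most-"destination" blocks:
//
//   1. Try a previously skipped gap recorded in freeSpaceDb (MOVE OPTION 1 or 2).
//   2. Otherwise place the allocation at the current dstOffset, advancing to the
//      next dst block (and registering the leftover tail in freeSpaceDb) when it
//      does not fit.
//   3. A same-block move that would shift the data by less than 1/64 of its size
//      is skipped, and the gap in front of it is registered instead.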
    13181 
    13182 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
    13183 {
    13184  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13185  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13186  {
    13187  VmaBlockMetadata_Generic* const pMetadata =
    13188  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13189  pMetadata->m_FreeCount = 0;
    13190  pMetadata->m_SumFreeSize = pMetadata->GetSize();
    13191  pMetadata->m_FreeSuballocationsBySize.clear();
    13192  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13193  it != pMetadata->m_Suballocations.end(); )
    13194  {
    13195  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
    13196  {
    13197  VmaSuballocationList::iterator nextIt = it;
    13198  ++nextIt;
    13199  pMetadata->m_Suballocations.erase(it);
    13200  it = nextIt;
    13201  }
    13202  else
    13203  {
    13204  ++it;
    13205  }
    13206  }
    13207  }
    13208 }
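// For illustration only - PreprocessMetadata() above leaves each block's metadata in
// a deliberately inconsistent intermediate state: FREE suballocations, the free
// counters, and the by-size index are dropped so the sweep can treat
// m_Suballocations as a plain offset-sorted list of live allocations.
// PostprocessMetadata() below rebuilds the invariants, e.g. for one 256-byte block:
//
//   before sweep:  [ALLOC 0..64) [FREE 64..128) [ALLOC 128..192) [FREE 192..256)
//   preprocessed:  [ALLOC 0..64) [ALLOC 128..192)   (m_FreeCount = 0, m_SumFreeSize = 256)
//   postprocessed: free ranges reinserted and re-registered around the moved allocations.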
    13209 
    13210 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
    13211 {
    13212  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13213  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13214  {
    13215  VmaBlockMetadata_Generic* const pMetadata =
    13216  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13217  const VkDeviceSize blockSize = pMetadata->GetSize();
    13218 
    13219  // No allocations in this block - entire area is free.
    13220  if(pMetadata->m_Suballocations.empty())
    13221  {
    13222  pMetadata->m_FreeCount = 1;
    13223  //pMetadata->m_SumFreeSize is already set to blockSize.
    13224  VmaSuballocation suballoc = {
    13225  0, // offset
    13226  blockSize, // size
    13227  VMA_NULL, // hAllocation
    13228  VMA_SUBALLOCATION_TYPE_FREE };
    13229  pMetadata->m_Suballocations.push_back(suballoc);
    13230  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
    13231  }
    13232  // There are some allocations in this block.
    13233  else
    13234  {
    13235  VkDeviceSize offset = 0;
    13236  VmaSuballocationList::iterator it;
    13237  for(it = pMetadata->m_Suballocations.begin();
    13238  it != pMetadata->m_Suballocations.end();
    13239  ++it)
    13240  {
    13241  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
    13242  VMA_ASSERT(it->offset >= offset);
    13243 
    13244  // Need to insert preceding free space.
    13245  if(it->offset > offset)
    13246  {
    13247  ++pMetadata->m_FreeCount;
    13248  const VkDeviceSize freeSize = it->offset - offset;
    13249  VmaSuballocation suballoc = {
    13250  offset, // offset
    13251  freeSize, // size
    13252  VMA_NULL, // hAllocation
    13253  VMA_SUBALLOCATION_TYPE_FREE };
    13254  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13255  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13256  {
    13257  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
    13258  }
    13259  }
    13260 
    13261  pMetadata->m_SumFreeSize -= it->size;
    13262  offset = it->offset + it->size;
    13263  }
    13264 
    13265  // Need to insert trailing free space.
    13266  if(offset < blockSize)
    13267  {
    13268  ++pMetadata->m_FreeCount;
    13269  const VkDeviceSize freeSize = blockSize - offset;
    13270  VmaSuballocation suballoc = {
    13271  offset, // offset
    13272  freeSize, // size
    13273  VMA_NULL, // hAllocation
    13274  VMA_SUBALLOCATION_TYPE_FREE };
    13275  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
    13276  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13277  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) // ">=" for consistency with the preceding-free-space case above.
    13278  {
    13279  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
    13280  }
    13281  }
    13282 
    13283  VMA_SORT(
    13284  pMetadata->m_FreeSuballocationsBySize.begin(),
    13285  pMetadata->m_FreeSuballocationsBySize.end(),
    13286  VmaSuballocationItemSizeLess());
    13287  }
    13288 
    13289  VMA_HEAVY_ASSERT(pMetadata->Validate());
    13290  }
    13291 }
    13292 
    13293 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
    13294 {
    13295  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
    13296  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13297  // Advance to the first suballocation placed at or beyond suballoc.offset and
    13298  // insert before it, keeping the list sorted by offset.
    13299  while(it != pMetadata->m_Suballocations.end() &&
    13300  it->offset < suballoc.offset)
    13301  {
    13302  ++it;
    13303  }
    13304  pMetadata->m_Suballocations.insert(it, suballoc);
    13305 }
    13306 
    13307 ////////////////////////////////////////////////////////////////////////////////
    13308 // VmaBlockVectorDefragmentationContext
    13309 
    13310 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    13311  VmaAllocator hAllocator,
    13312  VmaPool hCustomPool,
    13313  VmaBlockVector* pBlockVector,
    13314  uint32_t currFrameIndex) :
    13315  res(VK_SUCCESS),
    13316  mutexLocked(false),
    13317  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    13318  m_hAllocator(hAllocator),
    13319  m_hCustomPool(hCustomPool),
    13320  m_pBlockVector(pBlockVector),
    13321  m_CurrFrameIndex(currFrameIndex),
    13322  m_pAlgorithm(VMA_NULL),
    13323  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    13324  m_AllAllocations(false)
    13325 {
    13326 }
    13327 
    13328 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
    13329 {
    13330  vma_delete(m_hAllocator, m_pAlgorithm);
    13331 }
    13332 
    13333 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    13334 {
    13335  AllocInfo info = { hAlloc, pChanged };
    13336  m_Allocations.push_back(info);
    13337 }
    13338 
    13339 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
    13340 {
    13341  const bool allAllocations = m_AllAllocations ||
    13342  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
    13343 
    13344  /********************************
    13345  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
    13346  ********************************/
    13347 
    13348  /*
    13349  Fast algorithm is supported only when certain criteria are met:
    13350  - VMA_DEBUG_MARGIN is 0.
    13351  - All allocations in this block vector are moveable.
    13352  - There is no possibility of image/buffer granularity conflict.
    13353  */
    13354  if(VMA_DEBUG_MARGIN == 0 &&
    13355  allAllocations &&
    13356  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
    13357  {
    13358  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
    13359  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13360  }
    13361  else
    13362  {
    13363  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
    13364  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13365  }
    13366 
    13367  if(allAllocations)
    13368  {
    13369  m_pAlgorithm->AddAll();
    13370  }
    13371  else
    13372  {
    13373  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
    13374  {
    13375  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
    13376  }
    13377  }
    13378 }
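// For illustration only - the decision above reduces to: pick the fast algorithm
// only when all three conditions hold, otherwise fall back to the generic one:
//
//   VMA_DEBUG_MARGIN == 0                            // No debug margins to preserve.
//   allAllocations                                   // Every allocation may be moved.
//   !IsBufferImageGranularityConflictPossible()      // No buffer/image mixing hazard.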
    13379 
    13380 ////////////////////////////////////////////////////////////////////////////////
    13381 // VmaDefragmentationContext
    13382 
    13383 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    13384  VmaAllocator hAllocator,
    13385  uint32_t currFrameIndex,
    13386  uint32_t flags,
    13387  VmaDefragmentationStats* pStats) :
    13388  m_hAllocator(hAllocator),
    13389  m_CurrFrameIndex(currFrameIndex),
    13390  m_Flags(flags),
    13391  m_pStats(pStats),
    13392  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
    13393 {
    13394  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
    13395 }
    13396 
    13397 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
    13398 {
    13399  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13400  {
    13401  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
    13402  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13403  vma_delete(m_hAllocator, pBlockVectorCtx);
    13404  }
    13405  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
    13406  {
    13407  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
    13408  if(pBlockVectorCtx)
    13409  {
    13410  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13411  vma_delete(m_hAllocator, pBlockVectorCtx);
    13412  }
    13413  }
    13414 }
    13415 
    13416 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
    13417 {
    13418  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    13419  {
    13420  VmaPool pool = pPools[poolIndex];
    13421  VMA_ASSERT(pool);
    13422  // Pools with algorithm other than default are not defragmented.
    13423  if(pool->m_BlockVector.GetAlgorithm() == 0)
    13424  {
    13425  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13426 
    13427  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13428  {
    13429  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
    13430  {
    13431  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13432  break;
    13433  }
    13434  }
    13435 
    13436  if(!pBlockVectorDefragCtx)
    13437  {
    13438  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13439  m_hAllocator,
    13440  pool,
    13441  &pool->m_BlockVector,
    13442  m_CurrFrameIndex);
    13443  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13444  }
    13445 
    13446  pBlockVectorDefragCtx->AddAll();
    13447  }
    13448  }
    13449 }
    13450 
    13451 void VmaDefragmentationContext_T::AddAllocations(
    13452  uint32_t allocationCount,
    13453  VmaAllocation* pAllocations,
    13454  VkBool32* pAllocationsChanged)
    13455 {
    13456  // Dispatch pAllocations among per-pool defragmentation contexts. Create them when necessary.
    13457  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    13458  {
    13459  const VmaAllocation hAlloc = pAllocations[allocIndex];
    13460  VMA_ASSERT(hAlloc);
    13461  // DedicatedAlloc cannot be defragmented.
    13462  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    13463  // Lost allocation cannot be defragmented.
    13464  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    13465  {
    13466  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13467 
    13468  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
    13469  // This allocation belongs to a custom pool.
    13470  if(hAllocPool != VK_NULL_HANDLE)
    13471  {
    13472  // Pools with algorithm other than default are not defragmented.
    13473  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    13474  {
    13475  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13476  {
    13477  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
    13478  {
    13479  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13480  break;
    13481  }
    13482  }
    13483  if(!pBlockVectorDefragCtx)
    13484  {
    13485  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13486  m_hAllocator,
    13487  hAllocPool,
    13488  &hAllocPool->m_BlockVector,
    13489  m_CurrFrameIndex);
    13490  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13491  }
    13492  }
    13493  }
    13494  // This allocation belongs to the default pool.
    13495  else
    13496  {
    13497  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    13498  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
    13499  if(!pBlockVectorDefragCtx)
    13500  {
    13501  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13502  m_hAllocator,
    13503  VMA_NULL, // hCustomPool
    13504  m_hAllocator->m_pBlockVectors[memTypeIndex],
    13505  m_CurrFrameIndex);
    13506  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
    13507  }
    13508  }
    13509 
    13510  if(pBlockVectorDefragCtx)
    13511  {
    13512  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    13513  &pAllocationsChanged[allocIndex] : VMA_NULL;
    13514  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
    13515  }
    13516  }
    13517  }
    13518 }
    13519 
    13520 VkResult VmaDefragmentationContext_T::Defragment(
    13521  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    13522  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    13523  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
    13524 {
    13525  if(pStats)
    13526  {
    13527  memset(pStats, 0, sizeof(VmaDefragmentationStats));
    13528  }
    13529 
    13530  if(commandBuffer == VK_NULL_HANDLE)
    13531  {
    13532  maxGpuBytesToMove = 0;
    13533  maxGpuAllocationsToMove = 0;
    13534  }
    13535 
    13536  VkResult res = VK_SUCCESS;
    13537 
    13538  // Process default pools.
    13539  for(uint32_t memTypeIndex = 0;
    13540  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
    13541  ++memTypeIndex)
    13542  {
    13543  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
    13544  if(pBlockVectorCtx)
    13545  {
    13546  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
    13547  pBlockVectorCtx->GetBlockVector()->Defragment(
    13548  pBlockVectorCtx,
    13549  pStats,
    13550  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13551  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13552  commandBuffer);
    13553  if(pBlockVectorCtx->res != VK_SUCCESS)
    13554  {
    13555  res = pBlockVectorCtx->res;
    13556  }
    13557  }
    13558  }
    13559 
    13560  // Process custom pools.
    13561  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
    13562  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
    13563  ++customCtxIndex)
    13564  {
    13565  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
    13566  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
    13567  pBlockVectorCtx->GetBlockVector()->Defragment(
    13568  pBlockVectorCtx,
    13569  pStats,
    13570  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13571  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13572  commandBuffer);
    13573  if(pBlockVectorCtx->res != VK_SUCCESS)
    13574  {
    13575  res = pBlockVectorCtx->res;
    13576  }
    13577  }
    13578 
    13579  return res;
    13580 }
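// For illustration only - note how res is aggregated above: both loops keep going
// while res >= VK_SUCCESS, so a positive status such as VK_NOT_READY from the GPU
// path does not abort the remaining pools; it only tells the caller that the
// recorded command buffer still has to execute before vmaDefragmentationEnd().
// A genuine error (res < 0) from any block vector stops further processing.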
    13581 
    13582 ////////////////////////////////////////////////////////////////////////////////
    13583 // VmaRecorder
    13584 
    13585 #if VMA_RECORDING_ENABLED
    13586 
    13587 VmaRecorder::VmaRecorder() :
    13588  m_UseMutex(true),
    13589  m_Flags(0),
    13590  m_File(VMA_NULL),
    13591  m_Freq(INT64_MAX),
    13592  m_StartCounter(INT64_MAX)
    13593 {
    13594 }
    13595 
    13596 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    13597 {
    13598  m_UseMutex = useMutex;
    13599  m_Flags = settings.flags;
    13600 
    13601  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    13602  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    13603 
    13604  // Open file for writing.
    13605  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    13606  if(err != 0)
    13607  {
    13608  return VK_ERROR_INITIALIZATION_FAILED;
    13609  }
    13610 
    13611  // Write header.
    13612  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    13613  fprintf(m_File, "%s\n", "1,5");
    13614 
    13615  return VK_SUCCESS;
    13616 }
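// For illustration only - the first lines of a recording produced by the fprintf
// calls above and below might look like this (values invented):
//
//   Vulkan Memory Allocator,Calls recording
//   1,5
//   12345,0.002,0,vmaCreateAllocator
//   12345,0.141,0,vmaCreatePool,2,0,268435456,1,16,0,000001C8A2F4B2E0
//
// i.e. a CSV stream of "threadId,seconds,frameIndex,functionName,args...", where
// "1,5" appears to be the file format version.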
    13617 
    13618 VmaRecorder::~VmaRecorder()
    13619 {
    13620  if(m_File != VMA_NULL)
    13621  {
    13622  fclose(m_File);
    13623  }
    13624 }
    13625 
    13626 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    13627 {
    13628  CallParams callParams;
    13629  GetBasicParams(callParams);
    13630 
    13631  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13632  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13633  Flush();
    13634 }
    13635 
    13636 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    13637 {
    13638  CallParams callParams;
    13639  GetBasicParams(callParams);
    13640 
    13641  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13642  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13643  Flush();
    13644 }
    13645 
    13646 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    13647 {
    13648  CallParams callParams;
    13649  GetBasicParams(callParams);
    13650 
    13651  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13652  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    13653  createInfo.memoryTypeIndex,
    13654  createInfo.flags,
    13655  createInfo.blockSize,
    13656  (uint64_t)createInfo.minBlockCount,
    13657  (uint64_t)createInfo.maxBlockCount,
    13658  createInfo.frameInUseCount,
    13659  pool);
    13660  Flush();
    13661 }
    13662 
    13663 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    13664 {
    13665  CallParams callParams;
    13666  GetBasicParams(callParams);
    13667 
    13668  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13669  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    13670  pool);
    13671  Flush();
    13672 }
    13673 
    13674 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    13675  const VkMemoryRequirements& vkMemReq,
    13676  const VmaAllocationCreateInfo& createInfo,
    13677  VmaAllocation allocation)
    13678 {
    13679  CallParams callParams;
    13680  GetBasicParams(callParams);
    13681 
    13682  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13683  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13684  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13685  vkMemReq.size,
    13686  vkMemReq.alignment,
    13687  vkMemReq.memoryTypeBits,
    13688  createInfo.flags,
    13689  createInfo.usage,
    13690  createInfo.requiredFlags,
    13691  createInfo.preferredFlags,
    13692  createInfo.memoryTypeBits,
    13693  createInfo.pool,
    13694  allocation,
    13695  userDataStr.GetString());
    13696  Flush();
    13697 }
    13698 
    13699 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    13700  const VkMemoryRequirements& vkMemReq,
    13701  const VmaAllocationCreateInfo& createInfo,
    13702  uint64_t allocationCount,
    13703  const VmaAllocation* pAllocations)
    13704 {
    13705  CallParams callParams;
    13706  GetBasicParams(callParams);
    13707 
    13708  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13709  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13710  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
    13711  vkMemReq.size,
    13712  vkMemReq.alignment,
    13713  vkMemReq.memoryTypeBits,
    13714  createInfo.flags,
    13715  createInfo.usage,
    13716  createInfo.requiredFlags,
    13717  createInfo.preferredFlags,
    13718  createInfo.memoryTypeBits,
    13719  createInfo.pool);
    13720  PrintPointerList(allocationCount, pAllocations);
    13721  fprintf(m_File, ",%s\n", userDataStr.GetString());
    13722  Flush();
    13723 }
    13724 
    13725 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    13726  const VkMemoryRequirements& vkMemReq,
    13727  bool requiresDedicatedAllocation,
    13728  bool prefersDedicatedAllocation,
    13729  const VmaAllocationCreateInfo& createInfo,
    13730  VmaAllocation allocation)
    13731 {
    13732  CallParams callParams;
    13733  GetBasicParams(callParams);
    13734 
    13735  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13736  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13737  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13738  vkMemReq.size,
    13739  vkMemReq.alignment,
    13740  vkMemReq.memoryTypeBits,
    13741  requiresDedicatedAllocation ? 1 : 0,
    13742  prefersDedicatedAllocation ? 1 : 0,
    13743  createInfo.flags,
    13744  createInfo.usage,
    13745  createInfo.requiredFlags,
    13746  createInfo.preferredFlags,
    13747  createInfo.memoryTypeBits,
    13748  createInfo.pool,
    13749  allocation,
    13750  userDataStr.GetString());
    13751  Flush();
    13752 }
    13753 
    13754 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    13755  const VkMemoryRequirements& vkMemReq,
    13756  bool requiresDedicatedAllocation,
    13757  bool prefersDedicatedAllocation,
    13758  const VmaAllocationCreateInfo& createInfo,
    13759  VmaAllocation allocation)
    13760 {
    13761  CallParams callParams;
    13762  GetBasicParams(callParams);
    13763 
    13764  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13765  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13766  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13767  vkMemReq.size,
    13768  vkMemReq.alignment,
    13769  vkMemReq.memoryTypeBits,
    13770  requiresDedicatedAllocation ? 1 : 0,
    13771  prefersDedicatedAllocation ? 1 : 0,
    13772  createInfo.flags,
    13773  createInfo.usage,
    13774  createInfo.requiredFlags,
    13775  createInfo.preferredFlags,
    13776  createInfo.memoryTypeBits,
    13777  createInfo.pool,
    13778  allocation,
    13779  userDataStr.GetString());
    13780  Flush();
    13781 }
    13782 
    13783 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    13784  VmaAllocation allocation)
    13785 {
    13786  CallParams callParams;
    13787  GetBasicParams(callParams);
    13788 
    13789  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13790  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13791  allocation);
    13792  Flush();
    13793 }
    13794 
    13795 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
    13796  uint64_t allocationCount,
    13797  const VmaAllocation* pAllocations)
    13798 {
    13799  CallParams callParams;
    13800  GetBasicParams(callParams);
    13801 
    13802  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13803  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
    13804  PrintPointerList(allocationCount, pAllocations);
    13805  fprintf(m_File, "\n");
    13806  Flush();
    13807 }
    13808 
    13809 void VmaRecorder::RecordResizeAllocation(
    13810  uint32_t frameIndex,
    13811  VmaAllocation allocation,
    13812  VkDeviceSize newSize)
    13813 {
    13814  CallParams callParams;
    13815  GetBasicParams(callParams);
    13816 
    13817  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13818  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13819  allocation, newSize);
    13820  Flush();
    13821 }
    13822 
    13823 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    13824  VmaAllocation allocation,
    13825  const void* pUserData)
    13826 {
    13827  CallParams callParams;
    13828  GetBasicParams(callParams);
    13829 
    13830  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13831  UserDataString userDataStr(
    13832  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    13833  pUserData);
    13834  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13835  allocation,
    13836  userDataStr.GetString());
    13837  Flush();
    13838 }
    13839 
    13840 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    13841  VmaAllocation allocation)
    13842 {
    13843  CallParams callParams;
    13844  GetBasicParams(callParams);
    13845 
    13846  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13847  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13848  allocation);
    13849  Flush();
    13850 }
    13851 
    13852 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    13853  VmaAllocation allocation)
    13854 {
    13855  CallParams callParams;
    13856  GetBasicParams(callParams);
    13857 
    13858  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13859  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13860  allocation);
    13861  Flush();
    13862 }
    13863 
    13864 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    13865  VmaAllocation allocation)
    13866 {
    13867  CallParams callParams;
    13868  GetBasicParams(callParams);
    13869 
    13870  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13871  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13872  allocation);
    13873  Flush();
    13874 }
    13875 
    13876 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    13877  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13878 {
    13879  CallParams callParams;
    13880  GetBasicParams(callParams);
    13881 
    13882  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13883  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13884  allocation,
    13885  offset,
    13886  size);
    13887  Flush();
    13888 }
    13889 
    13890 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    13891  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13892 {
    13893  CallParams callParams;
    13894  GetBasicParams(callParams);
    13895 
    13896  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13897  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13898  allocation,
    13899  offset,
    13900  size);
    13901  Flush();
    13902 }
    13903 
    13904 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    13905  const VkBufferCreateInfo& bufCreateInfo,
    13906  const VmaAllocationCreateInfo& allocCreateInfo,
    13907  VmaAllocation allocation)
    13908 {
    13909  CallParams callParams;
    13910  GetBasicParams(callParams);
    13911 
    13912  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13913  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13914  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13915  bufCreateInfo.flags,
    13916  bufCreateInfo.size,
    13917  bufCreateInfo.usage,
    13918  bufCreateInfo.sharingMode,
    13919  allocCreateInfo.flags,
    13920  allocCreateInfo.usage,
    13921  allocCreateInfo.requiredFlags,
    13922  allocCreateInfo.preferredFlags,
    13923  allocCreateInfo.memoryTypeBits,
    13924  allocCreateInfo.pool,
    13925  allocation,
    13926  userDataStr.GetString());
    13927  Flush();
    13928 }
    13929 
    13930 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    13931  const VkImageCreateInfo& imageCreateInfo,
    13932  const VmaAllocationCreateInfo& allocCreateInfo,
    13933  VmaAllocation allocation)
    13934 {
    13935  CallParams callParams;
    13936  GetBasicParams(callParams);
    13937 
    13938  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13939  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13940  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13941  imageCreateInfo.flags,
    13942  imageCreateInfo.imageType,
    13943  imageCreateInfo.format,
    13944  imageCreateInfo.extent.width,
    13945  imageCreateInfo.extent.height,
    13946  imageCreateInfo.extent.depth,
    13947  imageCreateInfo.mipLevels,
    13948  imageCreateInfo.arrayLayers,
    13949  imageCreateInfo.samples,
    13950  imageCreateInfo.tiling,
    13951  imageCreateInfo.usage,
    13952  imageCreateInfo.sharingMode,
    13953  imageCreateInfo.initialLayout,
    13954  allocCreateInfo.flags,
    13955  allocCreateInfo.usage,
    13956  allocCreateInfo.requiredFlags,
    13957  allocCreateInfo.preferredFlags,
    13958  allocCreateInfo.memoryTypeBits,
    13959  allocCreateInfo.pool,
    13960  allocation,
    13961  userDataStr.GetString());
    13962  Flush();
    13963 }
    13964 
    13965 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    13966  VmaAllocation allocation)
    13967 {
    13968  CallParams callParams;
    13969  GetBasicParams(callParams);
    13970 
    13971  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13972  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    13973  allocation);
    13974  Flush();
    13975 }
    13976 
    13977 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    13978  VmaAllocation allocation)
    13979 {
    13980  CallParams callParams;
    13981  GetBasicParams(callParams);
    13982 
    13983  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13984  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    13985  allocation);
    13986  Flush();
    13987 }
    13988 
    13989 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    13990  VmaAllocation allocation)
    13991 {
    13992  CallParams callParams;
    13993  GetBasicParams(callParams);
    13994 
    13995  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13996  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13997  allocation);
    13998  Flush();
    13999 }
    14000 
    14001 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    14002  VmaAllocation allocation)
    14003 {
    14004  CallParams callParams;
    14005  GetBasicParams(callParams);
    14006 
    14007  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14008  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    14009  allocation);
    14010  Flush();
    14011 }
    14012 
    14013 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    14014  VmaPool pool)
    14015 {
    14016  CallParams callParams;
    14017  GetBasicParams(callParams);
    14018 
    14019  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14020  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    14021  pool);
    14022  Flush();
    14023 }
    14024 
    14025 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
    14026  const VmaDefragmentationInfo2& info,
    14027  VmaDefragmentationContext ctx)
    14028 {
    14029  CallParams callParams;
    14030  GetBasicParams(callParams);
    14031 
    14032  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14033  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
    14034  info.flags);
    14035  PrintPointerList(info.allocationCount, info.pAllocations);
    14036  fprintf(m_File, ",");
    14037  PrintPointerList(info.poolCount, info.pPools);
    14038  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
    14039  info.maxCpuBytesToMove,
    14040  info.maxCpuAllocationsToMove,
    14041  info.maxGpuBytesToMove,
    14042  info.maxGpuAllocationsToMove,
    14043  info.commandBuffer,
    14044  ctx);
    14045  Flush();
    14046 }
    14047 
    14048 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
    14049  VmaDefragmentationContext ctx)
    14050 {
    14051  CallParams callParams;
    14052  GetBasicParams(callParams);
    14053 
    14054  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14055  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
    14056  ctx);
    14057  Flush();
    14058 }
    14059 
    14060 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    14061 {
    14062  if(pUserData != VMA_NULL)
    14063  {
    14064  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    14065  {
    14066  m_Str = (const char*)pUserData;
    14067  }
    14068  else
    14069  {
    14070  sprintf_s(m_PtrStr, "%p", pUserData);
    14071  m_Str = m_PtrStr;
    14072  }
    14073  }
    14074  else
    14075  {
    14076  m_Str = "";
    14077  }
    14078 }
    14079 
    14080 void VmaRecorder::WriteConfiguration(
    14081  const VkPhysicalDeviceProperties& devProps,
    14082  const VkPhysicalDeviceMemoryProperties& memProps,
    14083  bool dedicatedAllocationExtensionEnabled)
    14084 {
    14085  fprintf(m_File, "Config,Begin\n");
    14086 
    14087  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    14088  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    14089  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    14090  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    14091  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    14092  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
    14093 
    14094  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    14095  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    14096  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
    14097 
    14098  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    14099  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    14100  {
    14101  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
    14102  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    14103  }
    14104  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    14105  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    14106  {
    14107  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
    14108  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    14109  }
    14110 
    14111  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
    14112 
    14113  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    14114  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    14115  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    14116  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    14117  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    14118  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    14119  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    14120  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    14121  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    14122 
    14123  fprintf(m_File, "Config,End\n");
    14124 }
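// For illustration, a written configuration section looks roughly like this
// (values depend on the device and build options; these are made up):
//
//   Config,Begin
//   PhysicalDevice,apiVersion,4198400
//   PhysicalDeviceMemory,HeapCount,2
//   PhysicalDeviceMemory,Heap,0,size,8589934592
//   Macro,VMA_DEBUG_MARGIN,0
//   Config,End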
    14125 
    14126 void VmaRecorder::GetBasicParams(CallParams& outParams)
    14127 {
    14128  outParams.threadId = GetCurrentThreadId();
    14129 
    14130  LARGE_INTEGER counter;
    14131  QueryPerformanceCounter(&counter);
    14132  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    14133 }
    14134 
    14135 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
    14136 {
    14137  if(count)
    14138  {
    14139  fprintf(m_File, "%p", pItems[0]);
    14140  for(uint64_t i = 1; i < count; ++i)
    14141  {
    14142  fprintf(m_File, " %p", pItems[i]);
    14143  }
    14144  }
    14145 }
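// E.g. for three allocations this emits a single space-separated CSV field
// such as "0x1A0 0x1B0 0x1C0" (addresses hypothetical); for an empty list it
// prints nothing, leaving the field empty.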
    14146 
    14147 void VmaRecorder::Flush()
    14148 {
    14149  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    14150  {
    14151  fflush(m_File);
    14152  }
    14153 }
    14154 
    14155 #endif // #if VMA_RECORDING_ENABLED
    14156 
    14157 ////////////////////////////////////////////////////////////////////////////////
    14158 // VmaAllocationObjectAllocator
    14159 
    14160 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
    14161  m_Allocator(pAllocationCallbacks, 1024)
    14162 {
    14163 }
    14164 
    14165 VmaAllocation VmaAllocationObjectAllocator::Allocate()
    14166 {
    14167  VmaMutexLock mutexLock(m_Mutex);
    14168  return m_Allocator.Alloc();
    14169 }
    14170 
    14171 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
    14172 {
    14173  VmaMutexLock mutexLock(m_Mutex);
    14174  m_Allocator.Free(hAlloc);
    14175 }
    14176 
    14177 ////////////////////////////////////////////////////////////////////////////////
    14178 // VmaAllocator_T
    14179 
    14180 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    14181  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    14182  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    14183  m_hDevice(pCreateInfo->device),
    14184  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    14185  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    14186  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    14187  m_AllocationObjectAllocator(&m_AllocationCallbacks),
    14188  m_PreferredLargeHeapBlockSize(0),
    14189  m_PhysicalDevice(pCreateInfo->physicalDevice),
    14190  m_CurrentFrameIndex(0),
    14191  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
    14192  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    14193  m_NextPoolId(0)
    14194 #if VMA_RECORDING_ENABLED
    14195  ,m_pRecorder(VMA_NULL)
    14196 #endif
    14197 {
    14198  if(VMA_DEBUG_DETECT_CORRUPTION)
    14199  {
    14200  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    14201  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    14202  }
    14203 
    14204  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    14205 
    14206 #if !(VMA_DEDICATED_ALLOCATION)
    14207  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
    14208  {
    14209  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    14210  }
    14211 #endif
    14212 
    14213  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    14214  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    14215  memset(&m_MemProps, 0, sizeof(m_MemProps));
    14216 
    14217  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    14218  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    14219 
    14220  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14221  {
    14222  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    14223  }
    14224 
    14225  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    14226  {
    14227  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    14228  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    14229  }
    14230 
    14231  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    14232 
    14233  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    14234  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    14235 
    14236  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    14237  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    14238  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    14239  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    14240 
    14241  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    14242  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    14243 
    14244  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    14245  {
    14246  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    14247  {
    14248  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    14249  if(limit != VK_WHOLE_SIZE)
    14250  {
    14251  m_HeapSizeLimit[heapIndex] = limit;
    14252  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    14253  {
    14254  m_MemProps.memoryHeaps[heapIndex].size = limit;
    14255  }
    14256  }
    14257  }
    14258  }
    14259 
    14260  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14261  {
    14262  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    14263 
    14264  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    14265  this,
    14266  VK_NULL_HANDLE, // hParentPool
    14267  memTypeIndex,
    14268  preferredBlockSize,
    14269  0,
    14270  SIZE_MAX,
    14271  GetBufferImageGranularity(),
    14272  pCreateInfo->frameInUseCount,
    14273  false, // isCustomPool
    14274  false, // explicitBlockSize
    14275  false); // linearAlgorithm
    14276  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
    14277  // because minBlockCount is 0.
    14278  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    14279 
    14280  }
    14281 }
    14282 
    14283 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    14284 {
    14285  VkResult res = VK_SUCCESS;
    14286 
    14287  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    14288  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    14289  {
    14290 #if VMA_RECORDING_ENABLED
    14291  m_pRecorder = vma_new(this, VmaRecorder)();
    14292  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    14293  if(res != VK_SUCCESS)
    14294  {
    14295  return res;
    14296  }
    14297  m_pRecorder->WriteConfiguration(
    14298  m_PhysicalDeviceProperties,
    14299  m_MemProps,
    14300  m_UseKhrDedicatedAllocation);
    14301  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    14302 #else
    14303  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    14304  return VK_ERROR_FEATURE_NOT_PRESENT;
    14305 #endif
    14306  }
    14307 
    14308  return res;
    14309 }
    14310 
    14311 VmaAllocator_T::~VmaAllocator_T()
    14312 {
    14313 #if VMA_RECORDING_ENABLED
    14314  if(m_pRecorder != VMA_NULL)
    14315  {
    14316  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    14317  vma_delete(this, m_pRecorder);
    14318  }
    14319 #endif
    14320 
    14321  VMA_ASSERT(m_Pools.empty());
    14322 
    14323  for(size_t i = GetMemoryTypeCount(); i--; )
    14324  {
    14325  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
    14326  {
    14327  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
    14328  }
    14329 
    14330  vma_delete(this, m_pDedicatedAllocations[i]);
    14331  vma_delete(this, m_pBlockVectors[i]);
    14332  }
    14333 }
    14334 
    14335 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    14336 {
    14337 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    14338  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
    14339  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
    14340  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
    14341  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
    14342  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
    14343  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
    14344  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
    14345  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
    14346  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
    14347  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
    14348  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
    14349  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
    14350  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
    14351  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
    14352  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
    14353  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
    14354  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
    14355 #if VMA_DEDICATED_ALLOCATION
    14356  if(m_UseKhrDedicatedAllocation)
    14357  {
    14358  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
    14359  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
    14360  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
    14361  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    14362  }
    14363 #endif // #if VMA_DEDICATED_ALLOCATION
    14364 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    14365 
    14366 #define VMA_COPY_IF_NOT_NULL(funcName) \
    14367  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    14368 
    14369  if(pVulkanFunctions != VMA_NULL)
    14370  {
    14371  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    14372  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    14373  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    14374  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    14375  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    14376  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    14377  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    14378  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    14379  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    14380  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    14381  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    14382  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    14383  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    14384  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    14385  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    14386  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    14387  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
    14388 #if VMA_DEDICATED_ALLOCATION
    14389  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    14390  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    14391 #endif
    14392  }
    14393 
    14394 #undef VMA_COPY_IF_NOT_NULL
    14395 
    14396  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    14397  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    14398  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    14399  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    14400  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    14401  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    14402  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    14403  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    14404  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    14405  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    14406  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    14407  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    14408  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    14409  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    14410  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    14411  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    14412  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    14413  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    14414  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
    14415 #if VMA_DEDICATED_ALLOCATION
    14416  if(m_UseKhrDedicatedAllocation)
    14417  {
    14418  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    14419  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    14420  }
    14421 #endif
    14422 }
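// Usage sketch (application-side code, not part of the library): when
// VMA_STATIC_VULKAN_FUNCTIONS is defined to 0, the application is expected to
// fill VmaVulkanFunctions itself, e.g.:
//
//   VmaVulkanFunctions funcs = {};
//   funcs.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
//   funcs.vkAllocateMemory = vkAllocateMemory;
//   // ...assign every remaining member listed above the same way...
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.pVulkanFunctions = &funcs;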
    14423 
    14424 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    14425 {
    14426  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14427  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    14428  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    14429  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    14430 }
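// Worked example, assuming the default VMA_SMALL_HEAP_MAX_SIZE of 1 GiB and
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE of 256 MiB: a 256 MiB heap counts as
// small, so its preferred block size is 256 MiB / 8 = 32 MiB, while an
// 8 GiB heap uses the full 256 MiB default.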
    14431 
    14432 VkResult VmaAllocator_T::AllocateMemoryOfType(
    14433  VkDeviceSize size,
    14434  VkDeviceSize alignment,
    14435  bool dedicatedAllocation,
    14436  VkBuffer dedicatedBuffer,
    14437  VkImage dedicatedImage,
    14438  const VmaAllocationCreateInfo& createInfo,
    14439  uint32_t memTypeIndex,
    14440  VmaSuballocationType suballocType,
    14441  size_t allocationCount,
    14442  VmaAllocation* pAllocations)
    14443 {
    14444  VMA_ASSERT(pAllocations != VMA_NULL);
    14445  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
    14446 
    14447  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    14448 
    14449  // If memory type is not HOST_VISIBLE, disable MAPPED.
    14450  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14451  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    14452  {
    14453  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    14454  }
    14455 
    14456  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    14457  VMA_ASSERT(blockVector);
    14458 
    14459  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    14460  bool preferDedicatedMemory =
    14461  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    14462  dedicatedAllocation ||
    14463  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
    14464  size > preferredBlockSize / 2;
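 // E.g. with a 256 MiB preferred block size, any single request larger than
 // 128 MiB prefers a dedicated VkDeviceMemory allocation rather than being
 // sub-allocated from a shared block.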
    14465 
    14466  if(preferDedicatedMemory &&
    14467  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    14468  finalCreateInfo.pool == VK_NULL_HANDLE)
    14469  {
    14470  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    14471  }
    14472 
    14473  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    14474  {
    14475  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14476  {
    14477  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14478  }
    14479  else
    14480  {
    14481  return AllocateDedicatedMemory(
    14482  size,
    14483  suballocType,
    14484  memTypeIndex,
    14485  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14486  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14487  finalCreateInfo.pUserData,
    14488  dedicatedBuffer,
    14489  dedicatedImage,
    14490  allocationCount,
    14491  pAllocations);
    14492  }
    14493  }
    14494  else
    14495  {
    14496  VkResult res = blockVector->Allocate(
    14497  m_CurrentFrameIndex.load(),
    14498  size,
    14499  alignment,
    14500  finalCreateInfo,
    14501  suballocType,
    14502  allocationCount,
    14503  pAllocations);
    14504  if(res == VK_SUCCESS)
    14505  {
    14506  return res;
    14507  }
    14508 
    14509  // Try dedicated memory.
    14510  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14511  {
    14512  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14513  }
    14514  else
    14515  {
    14516  res = AllocateDedicatedMemory(
    14517  size,
    14518  suballocType,
    14519  memTypeIndex,
    14520  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14521  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14522  finalCreateInfo.pUserData,
    14523  dedicatedBuffer,
    14524  dedicatedImage,
    14525  allocationCount,
    14526  pAllocations);
    14527  if(res == VK_SUCCESS)
    14528  {
    14529  // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here.
    14530  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    14531  return VK_SUCCESS;
    14532  }
    14533  else
    14534  {
    14535  // Everything failed: Return error code.
    14536  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14537  return res;
    14538  }
    14539  }
    14540  }
    14541 }
    14542 
    14543 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    14544  VkDeviceSize size,
    14545  VmaSuballocationType suballocType,
    14546  uint32_t memTypeIndex,
    14547  bool map,
    14548  bool isUserDataString,
    14549  void* pUserData,
    14550  VkBuffer dedicatedBuffer,
    14551  VkImage dedicatedImage,
    14552  size_t allocationCount,
    14553  VmaAllocation* pAllocations)
    14554 {
    14555  VMA_ASSERT(allocationCount > 0 && pAllocations);
    14556 
    14557  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    14558  allocInfo.memoryTypeIndex = memTypeIndex;
    14559  allocInfo.allocationSize = size;
    14560 
    14561 #if VMA_DEDICATED_ALLOCATION
    14562  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    14563  if(m_UseKhrDedicatedAllocation)
    14564  {
    14565  if(dedicatedBuffer != VK_NULL_HANDLE)
    14566  {
    14567  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    14568  dedicatedAllocInfo.buffer = dedicatedBuffer;
    14569  allocInfo.pNext = &dedicatedAllocInfo;
    14570  }
    14571  else if(dedicatedImage != VK_NULL_HANDLE)
    14572  {
    14573  dedicatedAllocInfo.image = dedicatedImage;
    14574  allocInfo.pNext = &dedicatedAllocInfo;
    14575  }
    14576  }
    14577 #endif // #if VMA_DEDICATED_ALLOCATION
    14578 
    14579  size_t allocIndex;
    14580  VkResult res = VK_SUCCESS;
    14581  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14582  {
    14583  res = AllocateDedicatedMemoryPage(
    14584  size,
    14585  suballocType,
    14586  memTypeIndex,
    14587  allocInfo,
    14588  map,
    14589  isUserDataString,
    14590  pUserData,
    14591  pAllocations + allocIndex);
    14592  if(res != VK_SUCCESS)
    14593  {
    14594  break;
    14595  }
    14596  }
    14597 
    14598  if(res == VK_SUCCESS)
    14599  {
    14600  // Register them in m_pDedicatedAllocations.
    14601  {
    14602  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    14603  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    14604  VMA_ASSERT(pDedicatedAllocations);
    14605  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14606  {
    14607  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
    14608  }
    14609  }
    14610 
    14611  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    14612  }
    14613  else
    14614  {
    14615  // Free all already created allocations.
    14616  while(allocIndex--)
    14617  {
    14618  VmaAllocation currAlloc = pAllocations[allocIndex];
    14619  VkDeviceMemory hMemory = currAlloc->GetMemory();
    14620 
    14621  /*
    14622  There is no need to call this, because the Vulkan spec allows skipping
    14623  vkUnmapMemory before vkFreeMemory.
    14624 
    14625  if(currAlloc->GetMappedData() != VMA_NULL)
    14626  {
    14627  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    14628  }
    14629  */
    14630 
    14631  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
    14632 
    14633  currAlloc->SetUserData(this, VMA_NULL);
    14634  currAlloc->Dtor();
    14635  m_AllocationObjectAllocator.Free(currAlloc);
    14636  }
    14637 
    14638  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14639  }
    14640 
    14641  return res;
    14642 }
    14643 
    14644 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
    14645  VkDeviceSize size,
    14646  VmaSuballocationType suballocType,
    14647  uint32_t memTypeIndex,
    14648  const VkMemoryAllocateInfo& allocInfo,
    14649  bool map,
    14650  bool isUserDataString,
    14651  void* pUserData,
    14652  VmaAllocation* pAllocation)
    14653 {
    14654  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    14655  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    14656  if(res < 0)
    14657  {
    14658  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14659  return res;
    14660  }
    14661 
    14662  void* pMappedData = VMA_NULL;
    14663  if(map)
    14664  {
    14665  res = (*m_VulkanFunctions.vkMapMemory)(
    14666  m_hDevice,
    14667  hMemory,
    14668  0,
    14669  VK_WHOLE_SIZE,
    14670  0,
    14671  &pMappedData);
    14672  if(res < 0)
    14673  {
    14674  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    14675  FreeVulkanMemory(memTypeIndex, size, hMemory);
    14676  return res;
    14677  }
    14678  }
    14679 
    14680  *pAllocation = m_AllocationObjectAllocator.Allocate();
    14681  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
    14682  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    14683  (*pAllocation)->SetUserData(this, pUserData);
    14684  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14685  {
    14686  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    14687  }
    14688 
    14689  return VK_SUCCESS;
    14690 }
    14691 
    14692 void VmaAllocator_T::GetBufferMemoryRequirements(
    14693  VkBuffer hBuffer,
    14694  VkMemoryRequirements& memReq,
    14695  bool& requiresDedicatedAllocation,
    14696  bool& prefersDedicatedAllocation) const
    14697 {
    14698 #if VMA_DEDICATED_ALLOCATION
    14699  if(m_UseKhrDedicatedAllocation)
    14700  {
    14701  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14702  memReqInfo.buffer = hBuffer;
    14703 
    14704  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14705 
    14706  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14707  memReq2.pNext = &memDedicatedReq;
    14708 
    14709  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14710 
    14711  memReq = memReq2.memoryRequirements;
    14712  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14713  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14714  }
    14715  else
    14716 #endif // #if VMA_DEDICATED_ALLOCATION
    14717  {
    14718  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    14719  requiresDedicatedAllocation = false;
    14720  prefersDedicatedAllocation = false;
    14721  }
    14722 }
    14723 
    14724 void VmaAllocator_T::GetImageMemoryRequirements(
    14725  VkImage hImage,
    14726  VkMemoryRequirements& memReq,
    14727  bool& requiresDedicatedAllocation,
    14728  bool& prefersDedicatedAllocation) const
    14729 {
    14730 #if VMA_DEDICATED_ALLOCATION
    14731  if(m_UseKhrDedicatedAllocation)
    14732  {
    14733  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14734  memReqInfo.image = hImage;
    14735 
    14736  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14737 
    14738  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14739  memReq2.pNext = &memDedicatedReq;
    14740 
    14741  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14742 
    14743  memReq = memReq2.memoryRequirements;
    14744  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14745  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14746  }
    14747  else
    14748 #endif // #if VMA_DEDICATED_ALLOCATION
    14749  {
    14750  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    14751  requiresDedicatedAllocation = false;
    14752  prefersDedicatedAllocation = false;
    14753  }
    14754 }
    14755 
    14756 VkResult VmaAllocator_T::AllocateMemory(
    14757  const VkMemoryRequirements& vkMemReq,
    14758  bool requiresDedicatedAllocation,
    14759  bool prefersDedicatedAllocation,
    14760  VkBuffer dedicatedBuffer,
    14761  VkImage dedicatedImage,
    14762  const VmaAllocationCreateInfo& createInfo,
    14763  VmaSuballocationType suballocType,
    14764  size_t allocationCount,
    14765  VmaAllocation* pAllocations)
    14766 {
    14767  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14768 
    14769  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    14770 
    14771  if(vkMemReq.size == 0)
    14772  {
    14773  return VK_ERROR_VALIDATION_FAILED_EXT;
    14774  }
    14775  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    14776  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14777  {
    14778  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    14779  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14780  }
    14781  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14782  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
    14783  {
    14784  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    14785  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14786  }
    14787  if(requiresDedicatedAllocation)
    14788  {
    14789  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14790  {
    14791  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    14792  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14793  }
    14794  if(createInfo.pool != VK_NULL_HANDLE)
    14795  {
    14796  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    14797  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14798  }
    14799  }
    14800  if((createInfo.pool != VK_NULL_HANDLE) &&
    14801  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    14802  {
    14803  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    14804  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14805  }
    14806 
    14807  if(createInfo.pool != VK_NULL_HANDLE)
    14808  {
    14809  const VkDeviceSize alignmentForPool = VMA_MAX(
    14810  vkMemReq.alignment,
    14811  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    14812 
    14813  VmaAllocationCreateInfo createInfoForPool = createInfo;
    14814  // If memory type is not HOST_VISIBLE, disable MAPPED.
    14815  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14816  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    14817  {
    14818  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    14819  }
    14820 
    14821  return createInfo.pool->m_BlockVector.Allocate(
    14822  m_CurrentFrameIndex.load(),
    14823  vkMemReq.size,
    14824  alignmentForPool,
    14825  createInfoForPool,
    14826  suballocType,
    14827  allocationCount,
    14828  pAllocations);
    14829  }
    14830  else
    14831  {
    14832  // Bit mask of Vulkan memory types acceptable for this allocation.
    14833  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    14834  uint32_t memTypeIndex = UINT32_MAX;
    14835  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14836  if(res == VK_SUCCESS)
    14837  {
    14838  VkDeviceSize alignmentForMemType = VMA_MAX(
    14839  vkMemReq.alignment,
    14840  GetMemoryTypeMinAlignment(memTypeIndex));
    14841 
    14842  res = AllocateMemoryOfType(
    14843  vkMemReq.size,
    14844  alignmentForMemType,
    14845  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14846  dedicatedBuffer,
    14847  dedicatedImage,
    14848  createInfo,
    14849  memTypeIndex,
    14850  suballocType,
    14851  allocationCount,
    14852  pAllocations);
    14853  // Succeeded on first try.
    14854  if(res == VK_SUCCESS)
    14855  {
    14856  return res;
    14857  }
    14858  // Allocation from this memory type failed. Try other compatible memory types.
    14859  else
    14860  {
    14861  for(;;)
    14862  {
    14863  // Remove old memTypeIndex from list of possibilities.
    14864  memoryTypeBits &= ~(1u << memTypeIndex);
    14865  // Find alternative memTypeIndex.
    14866  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14867  if(res == VK_SUCCESS)
    14868  {
    14869  alignmentForMemType = VMA_MAX(
    14870  vkMemReq.alignment,
    14871  GetMemoryTypeMinAlignment(memTypeIndex));
    14872 
    14873  res = AllocateMemoryOfType(
    14874  vkMemReq.size,
    14875  alignmentForMemType,
    14876  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14877  dedicatedBuffer,
    14878  dedicatedImage,
    14879  createInfo,
    14880  memTypeIndex,
    14881  suballocType,
    14882  allocationCount,
    14883  pAllocations);
    14884  // Allocation from this alternative memory type succeeded.
    14885  if(res == VK_SUCCESS)
    14886  {
    14887  return res;
    14888  }
    14889  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    14890  }
    14891  // No other matching memory type index could be found.
    14892  else
    14893  {
    14894  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    14895  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14896  }
    14897  }
    14898  }
    14899  }
    14900  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    14901  else
    14902  return res;
    14903  }
    14904 }
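// Illustration of the fallback loop above: if vkMemReq.memoryTypeBits is
// 0b0111 and vmaFindMemoryTypeIndex first picks index 0, a failed allocation
// clears that bit (0b0111 & ~(1u << 0) == 0b0110) and the search repeats over
// indices 1 and 2 until an allocation succeeds or no candidate type remains.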
    14905 
    14906 void VmaAllocator_T::FreeMemory(
    14907  size_t allocationCount,
    14908  const VmaAllocation* pAllocations)
    14909 {
    14910  VMA_ASSERT(pAllocations);
    14911 
    14912  for(size_t allocIndex = allocationCount; allocIndex--; )
    14913  {
    14914  VmaAllocation allocation = pAllocations[allocIndex];
    14915 
    14916  if(allocation != VK_NULL_HANDLE)
    14917  {
    14918  if(TouchAllocation(allocation))
    14919  {
    14920  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14921  {
    14922  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    14923  }
    14924 
    14925  switch(allocation->GetType())
    14926  {
    14927  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14928  {
    14929  VmaBlockVector* pBlockVector = VMA_NULL;
    14930  VmaPool hPool = allocation->GetBlock()->GetParentPool();
    14931  if(hPool != VK_NULL_HANDLE)
    14932  {
    14933  pBlockVector = &hPool->m_BlockVector;
    14934  }
    14935  else
    14936  {
    14937  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    14938  pBlockVector = m_pBlockVectors[memTypeIndex];
    14939  }
    14940  pBlockVector->Free(allocation);
    14941  }
    14942  break;
    14943  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14944  FreeDedicatedMemory(allocation);
    14945  break;
    14946  default:
    14947  VMA_ASSERT(0);
    14948  }
    14949  }
    14950 
    14951  allocation->SetUserData(this, VMA_NULL);
    14952  allocation->Dtor();
    14953  m_AllocationObjectAllocator.Free(allocation);
    14954  }
    14955  }
    14956 }
    14957 
    14958 VkResult VmaAllocator_T::ResizeAllocation(
    14959  const VmaAllocation alloc,
    14960  VkDeviceSize newSize)
    14961 {
    14962  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    14963  {
    14964  return VK_ERROR_VALIDATION_FAILED_EXT;
    14965  }
    14966  if(newSize == alloc->GetSize())
    14967  {
    14968  return VK_SUCCESS;
    14969  }
    14970 
    14971  switch(alloc->GetType())
    14972  {
    14973  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14974  return VK_ERROR_FEATURE_NOT_PRESENT;
    14975  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14976  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    14977  {
    14978  alloc->ChangeSize(newSize);
    14979  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    14980  return VK_SUCCESS;
    14981  }
    14982  else
    14983  {
    14984  return VK_ERROR_OUT_OF_POOL_MEMORY;
    14985  }
    14986  default:
    14987  VMA_ASSERT(0);
    14988  return VK_ERROR_VALIDATION_FAILED_EXT;
    14989  }
    14990 }
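// Note: only ALLOCATION_TYPE_BLOCK allocations can be resized, and only when
// the block's metadata can change the suballocation size in place; dedicated
// allocations always return VK_ERROR_FEATURE_NOT_PRESENT here.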
    14991 
    14992 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    14993 {
    14994  // Initialize.
    14995  InitStatInfo(pStats->total);
    14996  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    14997  InitStatInfo(pStats->memoryType[i]);
    14998  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14999  InitStatInfo(pStats->memoryHeap[i]);
    15000 
    15001  // Process default pools.
    15002  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15003  {
    15004  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    15005  VMA_ASSERT(pBlockVector);
    15006  pBlockVector->AddStats(pStats);
    15007  }
    15008 
    15009  // Process custom pools.
    15010  {
    15011  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15012  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    15013  {
    15014  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    15015  }
    15016  }
    15017 
    15018  // Process dedicated allocations.
    15019  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15020  {
    15021  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    15022  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15023  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    15024  VMA_ASSERT(pDedicatedAllocVector);
    15025  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    15026  {
    15027  VmaStatInfo allocationStatInfo;
    15028  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    15029  VmaAddStatInfo(pStats->total, allocationStatInfo);
    15030  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    15031  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    15032  }
    15033  }
    15034 
    15035  // Postprocess.
    15036  VmaPostprocessCalcStatInfo(pStats->total);
    15037  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    15038  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    15039  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    15040  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    15041 }
    15042 
    15043 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    15044 
    15045 VkResult VmaAllocator_T::DefragmentationBegin(
    15046  const VmaDefragmentationInfo2& info,
    15047  VmaDefragmentationStats* pStats,
    15048  VmaDefragmentationContext* pContext)
    15049 {
    15050  if(info.pAllocationsChanged != VMA_NULL)
    15051  {
    15052  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
    15053  }
    15054 
    15055  *pContext = vma_new(this, VmaDefragmentationContext_T)(
    15056  this, m_CurrentFrameIndex.load(), info.flags, pStats);
    15057 
    15058  (*pContext)->AddPools(info.poolCount, info.pPools);
    15059  (*pContext)->AddAllocations(
    15060  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
    15061 
    15062  VkResult res = (*pContext)->Defragment(
    15063  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
    15064  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
    15065  info.commandBuffer, pStats);
    15066 
    15067  if(res != VK_NOT_READY)
    15068  {
    15069  vma_delete(this, *pContext);
    15070  *pContext = VMA_NULL;
    15071  }
    15072 
    15073  return res;
    15074 }
    15075 
    15076 VkResult VmaAllocator_T::DefragmentationEnd(
    15077  VmaDefragmentationContext context)
    15078 {
    15079  vma_delete(this, context);
    15080  return VK_SUCCESS;
    15081 }
    15082 
    15083 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    15084 {
    15085  if(hAllocation->CanBecomeLost())
    15086  {
    15087  /*
    15088  Warning: This is a carefully designed algorithm.
    15089  Do not modify unless you really know what you're doing :)
    15090  */
    15091  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15092  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15093  for(;;)
    15094  {
    15095  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    15096  {
    15097  pAllocationInfo->memoryType = UINT32_MAX;
    15098  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    15099  pAllocationInfo->offset = 0;
    15100  pAllocationInfo->size = hAllocation->GetSize();
    15101  pAllocationInfo->pMappedData = VMA_NULL;
    15102  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15103  return;
    15104  }
    15105  else if(localLastUseFrameIndex == localCurrFrameIndex)
    15106  {
    15107  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    15108  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    15109  pAllocationInfo->offset = hAllocation->GetOffset();
    15110  pAllocationInfo->size = hAllocation->GetSize();
    15111  pAllocationInfo->pMappedData = VMA_NULL;
    15112  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15113  return;
    15114  }
    15115  else // Last use time earlier than current time.
    15116  {
    15117  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15118  {
    15119  localLastUseFrameIndex = localCurrFrameIndex;
    15120  }
    15121  }
    15122  }
    15123  }
    15124  else
    15125  {
    15126 #if VMA_STATS_STRING_ENABLED
    15127  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15128  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15129  for(;;)
    15130  {
    15131  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    15132  if(localLastUseFrameIndex == localCurrFrameIndex)
    15133  {
    15134  break;
    15135  }
    15136  else // Last use time earlier than current time.
    15137  {
    15138  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15139  {
    15140  localLastUseFrameIndex = localCurrFrameIndex;
    15141  }
    15142  }
    15143  }
    15144 #endif
    15145 
    15146  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    15147  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    15148  pAllocationInfo->offset = hAllocation->GetOffset();
    15149  pAllocationInfo->size = hAllocation->GetSize();
    15150  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    15151  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15152  }
    15153 }
    15154 
    15155 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
    15156 {
    15157  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    15158  if(hAllocation->CanBecomeLost())
    15159  {
    15160  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15161  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15162  for(;;)
    15163  {
    15164  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    15165  {
    15166  return false;
    15167  }
    15168  else if(localLastUseFrameIndex == localCurrFrameIndex)
    15169  {
    15170  return true;
    15171  }
    15172  else // Last use time earlier than current time.
    15173  {
    15174  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15175  {
    15176  localLastUseFrameIndex = localCurrFrameIndex;
    15177  }
    15178  }
    15179  }
    15180  }
    15181  else
    15182  {
    15183 #if VMA_STATS_STRING_ENABLED
    15184  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15185  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15186  for(;;)
    15187  {
    15188  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    15189  if(localLastUseFrameIndex == localCurrFrameIndex)
    15190  {
    15191  break;
    15192  }
    15193  else // Last use time earlier than current time.
    15194  {
    15195  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15196  {
    15197  localLastUseFrameIndex = localCurrFrameIndex;
    15198  }
    15199  }
    15200  }
    15201 #endif
    15202 
    15203  return true;
    15204  }
    15205 }
    15206 
    15207 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    15208 {
    15209  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    15210 
    15211  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    15212 
    15213  if(newCreateInfo.maxBlockCount == 0)
    15214  {
    15215  newCreateInfo.maxBlockCount = SIZE_MAX;
    15216  }
    15217  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    15218  {
    15219  return VK_ERROR_INITIALIZATION_FAILED;
    15220  }
    15221 
    15222  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    15223 
    15224  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    15225 
    15226  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    15227  if(res != VK_SUCCESS)
    15228  {
    15229  vma_delete(this, *pPool);
    15230  *pPool = VMA_NULL;
    15231  return res;
    15232  }
    15233 
    15234  // Add to m_Pools.
    15235  {
    15236  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15237  (*pPool)->SetId(m_NextPoolId++);
    15238  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    15239  }
    15240 
    15241  return VK_SUCCESS;
    15242 }
    15243 
    15244 void VmaAllocator_T::DestroyPool(VmaPool pool)
    15245 {
    15246  // Remove from m_Pools.
    15247  {
    15248  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15249  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    15250  VMA_ASSERT(success && "Pool not found in Allocator.");
    15251  }
    15252 
    15253  vma_delete(this, pool);
    15254 }
    15255 
    15256 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    15257 {
    15258  pool->m_BlockVector.GetPoolStats(pPoolStats);
    15259 }
    15260 
    15261 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    15262 {
    15263  m_CurrentFrameIndex.store(frameIndex);
    15264 }
    15265 
    15266 void VmaAllocator_T::MakePoolAllocationsLost(
    15267  VmaPool hPool,
    15268  size_t* pLostAllocationCount)
    15269 {
    15270  hPool->m_BlockVector.MakePoolAllocationsLost(
    15271  m_CurrentFrameIndex.load(),
    15272  pLostAllocationCount);
    15273 }
    15274 
    15275 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
    15276 {
    15277  return hPool->m_BlockVector.CheckCorruption();
    15278 }
    15279 
    15280 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    15281 {
    15282  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    15283 
    15284  // Process default pools.
    15285  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15286  {
    15287  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    15288  {
    15289  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    15290  VMA_ASSERT(pBlockVector);
    15291  VkResult localRes = pBlockVector->CheckCorruption();
    15292  switch(localRes)
    15293  {
    15294  case VK_ERROR_FEATURE_NOT_PRESENT:
    15295  break;
    15296  case VK_SUCCESS:
    15297  finalRes = VK_SUCCESS;
    15298  break;
    15299  default:
    15300  return localRes;
    15301  }
    15302  }
    15303  }
    15304 
    15305  // Process custom pools.
    15306  {
    15307  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15308  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    15309  {
    15310  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    15311  {
    15312  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    15313  switch(localRes)
    15314  {
    15315  case VK_ERROR_FEATURE_NOT_PRESENT:
    15316  break;
    15317  case VK_SUCCESS:
    15318  finalRes = VK_SUCCESS;
    15319  break;
    15320  default:
    15321  return localRes;
    15322  }
    15323  }
    15324  }
    15325  }
    15326 
    15327  return finalRes;
    15328 }
    15329 
    15330 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    15331 {
    15332  *pAllocation = m_AllocationObjectAllocator.Allocate();
    15333  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
    15334  (*pAllocation)->InitLost();
    15335 }
    15336 
    15337 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    15338 {
    15339  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    15340 
    15341  VkResult res;
    15342  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15343  {
    15344  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15345  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    15346  {
    15347  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15348  if(res == VK_SUCCESS)
    15349  {
    15350  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    15351  }
    15352  }
    15353  else
    15354  {
    15355  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    15356  }
    15357  }
    15358  else
    15359  {
    15360  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15361  }
    15362 
    15363  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    15364  {
    15365  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    15366  }
    15367 
    15368  return res;
    15369 }
    15370 
    15371 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    15372 {
    15373  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    15374  {
    15375  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    15376  }
    15377 
    15378  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    15379 
    15380  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    15381  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15382  {
    15383  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15384  m_HeapSizeLimit[heapIndex] += size;
    15385  }
    15386 }
    15387 
    15388 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    15389 {
    15390  if(hAllocation->CanBecomeLost())
    15391  {
    15392  return VK_ERROR_MEMORY_MAP_FAILED;
    15393  }
    15394 
    15395  switch(hAllocation->GetType())
    15396  {
    15397  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15398  {
    15399  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15400  char *pBytes = VMA_NULL;
    15401  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    15402  if(res == VK_SUCCESS)
    15403  {
    15404  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    15405  hAllocation->BlockAllocMap();
    15406  }
    15407  return res;
    15408  }
    15409  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15410  return hAllocation->DedicatedAllocMap(this, ppData);
    15411  default:
    15412  VMA_ASSERT(0);
    15413  return VK_ERROR_MEMORY_MAP_FAILED;
    15414  }
    15415 }
    15416 
    15417 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    15418 {
    15419  switch(hAllocation->GetType())
    15420  {
    15421  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15422  {
    15423  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15424  hAllocation->BlockAllocUnmap();
    15425  pBlock->Unmap(this, 1);
    15426  }
    15427  break;
    15428  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15429  hAllocation->DedicatedAllocUnmap(this);
    15430  break;
    15431  default:
    15432  VMA_ASSERT(0);
    15433  }
    15434 }
    15435 
    15436 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    15437 {
    15438  VkResult res = VK_SUCCESS;
    15439  switch(hAllocation->GetType())
    15440  {
    15441  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15442  res = GetVulkanFunctions().vkBindBufferMemory(
    15443  m_hDevice,
    15444  hBuffer,
    15445  hAllocation->GetMemory(),
    15446  0); //memoryOffset
    15447  break;
    15448  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15449  {
    15450  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15451  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    15452  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    15453  break;
    15454  }
    15455  default:
    15456  VMA_ASSERT(0);
    15457  }
    15458  return res;
    15459 }
    15460 
    15461 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    15462 {
    15463  VkResult res = VK_SUCCESS;
    15464  switch(hAllocation->GetType())
    15465  {
    15466  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15467  res = GetVulkanFunctions().vkBindImageMemory(
    15468  m_hDevice,
    15469  hImage,
    15470  hAllocation->GetMemory(),
    15471  0); //memoryOffset
    15472  break;
    15473  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15474  {
    15475  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15476  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    15477  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    15478  break;
    15479  }
    15480  default:
    15481  VMA_ASSERT(0);
    15482  }
    15483  return res;
    15484 }
    15485 
    15486 void VmaAllocator_T::FlushOrInvalidateAllocation(
    15487  VmaAllocation hAllocation,
    15488  VkDeviceSize offset, VkDeviceSize size,
    15489  VMA_CACHE_OPERATION op)
    15490 {
    15491  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    15492  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    15493  {
    15494  const VkDeviceSize allocationSize = hAllocation->GetSize();
    15495  VMA_ASSERT(offset <= allocationSize);
    15496 
    15497  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    15498 
    15499  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    15500  memRange.memory = hAllocation->GetMemory();
    15501 
    15502  switch(hAllocation->GetType())
    15503  {
    15504  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15505  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15506  if(size == VK_WHOLE_SIZE)
    15507  {
    15508  memRange.size = allocationSize - memRange.offset;
    15509  }
    15510  else
    15511  {
    15512  VMA_ASSERT(offset + size <= allocationSize);
    15513  memRange.size = VMA_MIN(
    15514  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
    15515  allocationSize - memRange.offset);
    15516  }
    15517  break;
    15518 
    15519  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15520  {
    15521  // 1. Still within this allocation.
    15522  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15523  if(size == VK_WHOLE_SIZE)
    15524  {
    15525  size = allocationSize - offset;
    15526  }
    15527  else
    15528  {
    15529  VMA_ASSERT(offset + size <= allocationSize);
    15530  }
    15531  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
    15532 
    15533  // 2. Adjust to whole block.
    15534  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
    15535  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
    15536  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
    15537  memRange.offset += allocationOffset;
    15538  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
    15539 
    15540  break;
    15541  }
    15542 
    15543  default:
    15544  VMA_ASSERT(0);
    15545  }
    15546 
    15547  switch(op)
    15548  {
    15549  case VMA_CACHE_FLUSH:
    15550  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15551  break;
    15552  case VMA_CACHE_INVALIDATE:
    15553  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15554  break;
    15555  default:
    15556  VMA_ASSERT(0);
    15557  }
    15558  }
    15559  // else: Just ignore this call.
    15560 }
    15561 
    15562 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    15563 {
    15564  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    15565 
    15566  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    15567  {
    15568  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15569  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    15570  VMA_ASSERT(pDedicatedAllocations);
    15571  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    15572  VMA_ASSERT(success);
    15573  }
    15574 
    15575  VkDeviceMemory hMemory = allocation->GetMemory();
    15576 
    15577  /*
    15578  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
    15579  before vkFreeMemory.
    15580 
    15581  if(allocation->GetMappedData() != VMA_NULL)
    15582  {
    15583  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    15584  }
    15585  */
    15586 
    15587  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    15588 
    15589  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    15590 }
    15591 
    15592 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
    15593 {
    15594  VkBufferCreateInfo dummyBufCreateInfo;
    15595  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
    15596 
    15597  uint32_t memoryTypeBits = 0;
    15598 
    15599  // Create buffer.
    15600  VkBuffer buf = VK_NULL_HANDLE;
    15601  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
    15602  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
    15603  if(res == VK_SUCCESS)
    15604  {
    15605  // Query for supported memory types.
    15606  VkMemoryRequirements memReq;
    15607  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
    15608  memoryTypeBits = memReq.memoryTypeBits;
    15609 
    15610  // Destroy buffer.
    15611  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
    15612  }
    15613 
    15614  return memoryTypeBits;
    15615 }
    15616 
    15617 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    15618 {
    15619  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    15620  !hAllocation->CanBecomeLost() &&
    15621  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15622  {
    15623  void* pData = VMA_NULL;
    15624  VkResult res = Map(hAllocation, &pData);
    15625  if(res == VK_SUCCESS)
    15626  {
    15627  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    15628  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    15629  Unmap(hAllocation);
    15630  }
    15631  else
    15632  {
    15633  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    15634  }
    15635  }
    15636 }
    15637 
    15638 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
    15639 {
    15640  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
    15641  if(memoryTypeBits == UINT32_MAX)
    15642  {
    15643  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
    15644  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
    15645  }
    15646  return memoryTypeBits;
    15647 }
    15648 
    15649 #if VMA_STATS_STRING_ENABLED
    15650 
    15651 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    15652 {
    15653  bool dedicatedAllocationsStarted = false;
    15654  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15655  {
    15656  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15657  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    15658  VMA_ASSERT(pDedicatedAllocVector);
    15659  if(pDedicatedAllocVector->empty() == false)
    15660  {
    15661  if(dedicatedAllocationsStarted == false)
    15662  {
    15663  dedicatedAllocationsStarted = true;
    15664  json.WriteString("DedicatedAllocations");
    15665  json.BeginObject();
    15666  }
    15667 
    15668  json.BeginString("Type ");
    15669  json.ContinueString(memTypeIndex);
    15670  json.EndString();
    15671 
    15672  json.BeginArray();
    15673 
    15674  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    15675  {
    15676  json.BeginObject(true);
    15677  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    15678  hAlloc->PrintParameters(json);
    15679  json.EndObject();
    15680  }
    15681 
    15682  json.EndArray();
    15683  }
    15684  }
    15685  if(dedicatedAllocationsStarted)
    15686  {
    15687  json.EndObject();
    15688  }
    15689 
    15690  {
    15691  bool allocationsStarted = false;
    15692  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15693  {
    15694  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    15695  {
    15696  if(allocationsStarted == false)
    15697  {
    15698  allocationsStarted = true;
    15699  json.WriteString("DefaultPools");
    15700  json.BeginObject();
    15701  }
    15702 
    15703  json.BeginString("Type ");
    15704  json.ContinueString(memTypeIndex);
    15705  json.EndString();
    15706 
    15707  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    15708  }
    15709  }
    15710  if(allocationsStarted)
    15711  {
    15712  json.EndObject();
    15713  }
    15714  }
    15715 
    15716  // Custom pools
    15717  {
    15718  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15719  const size_t poolCount = m_Pools.size();
    15720  if(poolCount > 0)
    15721  {
    15722  json.WriteString("Pools");
    15723  json.BeginObject();
    15724  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    15725  {
    15726  json.BeginString();
    15727  json.ContinueString(m_Pools[poolIndex]->GetId());
    15728  json.EndString();
    15729 
    15730  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    15731  }
    15732  json.EndObject();
    15733  }
    15734  }
    15735 }
    15736 
    15737 #endif // #if VMA_STATS_STRING_ENABLED
    15738 
    15739 ////////////////////////////////////////////////////////////////////////////////
    15740 // Public interface
    15741 
    15742 VkResult vmaCreateAllocator(
    15743  const VmaAllocatorCreateInfo* pCreateInfo,
    15744  VmaAllocator* pAllocator)
    15745 {
    15746  VMA_ASSERT(pCreateInfo && pAllocator);
    15747  VMA_DEBUG_LOG("vmaCreateAllocator");
    15748  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    15749  return (*pAllocator)->Init(pCreateInfo);
    15750 }
    15751 
    15752 void vmaDestroyAllocator(
    15753  VmaAllocator allocator)
    15754 {
    15755  if(allocator != VK_NULL_HANDLE)
    15756  {
    15757  VMA_DEBUG_LOG("vmaDestroyAllocator");
    15758  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    15759  vma_delete(&allocationCallbacks, allocator);
    15760  }
    15761 }
    15762 
    15763 void vmaGetPhysicalDeviceProperties(
    15764  VmaAllocator allocator,
    15765  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    15766 {
    15767  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    15768  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    15769 }
    15770 
    15771 void vmaGetMemoryProperties(
    15772  VmaAllocator allocator,
    15773  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    15774 {
    15775  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    15776  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    15777 }
    15778 
    15779 void vmaGetMemoryTypeProperties(
    15780  VmaAllocator allocator,
    15781  uint32_t memoryTypeIndex,
    15782  VkMemoryPropertyFlags* pFlags)
    15783 {
    15784  VMA_ASSERT(allocator && pFlags);
    15785  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    15786  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    15787 }
    15788 
    15789 void vmaSetCurrentFrameIndex(
    15790  VmaAllocator allocator,
    15791  uint32_t frameIndex)
    15792 {
    15793  VMA_ASSERT(allocator);
    15794  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    15795 
    15796  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15797 
    15798  allocator->SetCurrentFrameIndex(frameIndex);
    15799 }
    15800 
    15801 void vmaCalculateStats(
    15802  VmaAllocator allocator,
    15803  VmaStats* pStats)
    15804 {
    15805  VMA_ASSERT(allocator && pStats);
    15806  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15807  allocator->CalculateStats(pStats);
    15808 }
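
// Illustrative usage sketch, not part of vk_mem_alloc.h: polling global
// statistics, e.g. once per frame for an on-screen memory HUD. Assumes a
// valid VmaAllocator named "allocator" created elsewhere.
/*
VmaStats stats;
vmaCalculateStats(allocator, &stats);
printf("Used: %llu B in %u allocations, unused: %llu B\n",
    (unsigned long long)stats.total.usedBytes,
    stats.total.allocationCount,
    (unsigned long long)stats.total.unusedBytes);
*/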
    15809 
    15810 #if VMA_STATS_STRING_ENABLED
    15811 
    15812 void vmaBuildStatsString(
    15813  VmaAllocator allocator,
    15814  char** ppStatsString,
    15815  VkBool32 detailedMap)
    15816 {
    15817  VMA_ASSERT(allocator && ppStatsString);
    15818  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15819 
    15820  VmaStringBuilder sb(allocator);
    15821  {
    15822  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    15823  json.BeginObject();
    15824 
    15825  VmaStats stats;
    15826  allocator->CalculateStats(&stats);
    15827 
    15828  json.WriteString("Total");
    15829  VmaPrintStatInfo(json, stats.total);
    15830 
    15831  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    15832  {
    15833  json.BeginString("Heap ");
    15834  json.ContinueString(heapIndex);
    15835  json.EndString();
    15836  json.BeginObject();
    15837 
    15838  json.WriteString("Size");
    15839  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    15840 
    15841  json.WriteString("Flags");
    15842  json.BeginArray(true);
    15843  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    15844  {
    15845  json.WriteString("DEVICE_LOCAL");
    15846  }
    15847  json.EndArray();
    15848 
    15849  if(stats.memoryHeap[heapIndex].blockCount > 0)
    15850  {
    15851  json.WriteString("Stats");
    15852  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    15853  }
    15854 
    15855  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    15856  {
    15857  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    15858  {
    15859  json.BeginString("Type ");
    15860  json.ContinueString(typeIndex);
    15861  json.EndString();
    15862 
    15863  json.BeginObject();
    15864 
    15865  json.WriteString("Flags");
    15866  json.BeginArray(true);
    15867  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    15868  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    15869  {
    15870  json.WriteString("DEVICE_LOCAL");
    15871  }
    15872  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15873  {
    15874  json.WriteString("HOST_VISIBLE");
    15875  }
    15876  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    15877  {
    15878  json.WriteString("HOST_COHERENT");
    15879  }
    15880  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    15881  {
    15882  json.WriteString("HOST_CACHED");
    15883  }
    15884  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    15885  {
    15886  json.WriteString("LAZILY_ALLOCATED");
    15887  }
    15888  json.EndArray();
    15889 
    15890  if(stats.memoryType[typeIndex].blockCount > 0)
    15891  {
    15892  json.WriteString("Stats");
    15893  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    15894  }
    15895 
    15896  json.EndObject();
    15897  }
    15898  }
    15899 
    15900  json.EndObject();
    15901  }
    15902  if(detailedMap == VK_TRUE)
    15903  {
    15904  allocator->PrintDetailedMap(json);
    15905  }
    15906 
    15907  json.EndObject();
    15908  }
    15909 
    15910  const size_t len = sb.GetLength();
    15911  char* const pChars = vma_new_array(allocator, char, len + 1);
    15912  if(len > 0)
    15913  {
    15914  memcpy(pChars, sb.GetData(), len);
    15915  }
    15916  pChars[len] = '\0';
    15917  *ppStatsString = pChars;
    15918 }
    15919 
    15920 void vmaFreeStatsString(
    15921  VmaAllocator allocator,
    15922  char* pStatsString)
    15923 {
    15924  if(pStatsString != VMA_NULL)
    15925  {
    15926  VMA_ASSERT(allocator);
    15927  size_t len = strlen(pStatsString);
    15928  vma_delete_array(allocator, pStatsString, len + 1);
    15929  }
    15930 }
    15931 
    15932 #endif // #if VMA_STATS_STRING_ENABLED
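
// Illustrative usage sketch, not part of vk_mem_alloc.h: dumping the JSON
// statistics built by vmaBuildStatsString(). Requires VMA_STATS_STRING_ENABLED.
// Assumes a valid "allocator"; writing the string out is left to the caller.
/*
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
// ... write statsString to a log or file here ...
vmaFreeStatsString(allocator, statsString);
*/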
    15933 
    15934 /*
    15935 This function is not protected by any mutex because it just reads immutable data.
    15936 */
    15937 VkResult vmaFindMemoryTypeIndex(
    15938  VmaAllocator allocator,
    15939  uint32_t memoryTypeBits,
    15940  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15941  uint32_t* pMemoryTypeIndex)
    15942 {
    15943  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15944  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15945  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15946 
    15947  if(pAllocationCreateInfo->memoryTypeBits != 0)
    15948  {
    15949  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    15950  }
    15951 
    15952  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    15953  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    15954 
    15955  // Convert usage to requiredFlags and preferredFlags.
    15956  switch(pAllocationCreateInfo->usage)
    15957  {
    15958  case VMA_MEMORY_USAGE_UNKNOWN:
    15959  break;
    15960  case VMA_MEMORY_USAGE_GPU_ONLY:
    15961  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15962  {
    15963  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15964  }
    15965  break;
    15966  case VMA_MEMORY_USAGE_CPU_ONLY:
    15967  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    15968  break;
    15969  case VMA_MEMORY_USAGE_CPU_TO_GPU:
    15970  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15971  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15972  {
    15973  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15974  }
    15975  break;
    15976  case VMA_MEMORY_USAGE_GPU_TO_CPU:
    15977  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15978  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    15979  break;
    15980  default:
    15981  break;
    15982  }
    15983 
    15984  *pMemoryTypeIndex = UINT32_MAX;
    15985  uint32_t minCost = UINT32_MAX;
    15986  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    15987  memTypeIndex < allocator->GetMemoryTypeCount();
    15988  ++memTypeIndex, memTypeBit <<= 1)
    15989  {
    15990  // This memory type is acceptable according to memoryTypeBits bitmask.
    15991  if((memTypeBit & memoryTypeBits) != 0)
    15992  {
    15993  const VkMemoryPropertyFlags currFlags =
    15994  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    15995  // This memory type contains requiredFlags.
    15996  if((requiredFlags & ~currFlags) == 0)
    15997  {
    15998  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    15999  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    16000  // Remember memory type with lowest cost.
    16001  if(currCost < minCost)
    16002  {
    16003  *pMemoryTypeIndex = memTypeIndex;
    16004  if(currCost == 0)
    16005  {
    16006  return VK_SUCCESS;
    16007  }
    16008  minCost = currCost;
    16009  }
    16010  }
    16011  }
    16012  }
    16013  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    16014 }
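
// Illustrative usage sketch, not part of vk_mem_alloc.h: finding a memory type
// for a CPU-side staging resource. Passing UINT32_MAX as memoryTypeBits means
// "no external restriction"; a real caller would usually pass the bits from
// VkMemoryRequirements. Assumes a valid "allocator".
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
*/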
    16015 
    16016 VkResult vmaFindMemoryTypeIndexForBufferInfo(
    16017  VmaAllocator allocator,
    16018  const VkBufferCreateInfo* pBufferCreateInfo,
    16019  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16020  uint32_t* pMemoryTypeIndex)
    16021 {
    16022  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    16023  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    16024  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    16025  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    16026 
    16027  const VkDevice hDev = allocator->m_hDevice;
    16028  VkBuffer hBuffer = VK_NULL_HANDLE;
    16029  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    16030  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    16031  if(res == VK_SUCCESS)
    16032  {
    16033  VkMemoryRequirements memReq = {};
    16034  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    16035  hDev, hBuffer, &memReq);
    16036 
    16037  res = vmaFindMemoryTypeIndex(
    16038  allocator,
    16039  memReq.memoryTypeBits,
    16040  pAllocationCreateInfo,
    16041  pMemoryTypeIndex);
    16042 
    16043  allocator->GetVulkanFunctions().vkDestroyBuffer(
    16044  hDev, hBuffer, allocator->GetAllocationCallbacks());
    16045  }
    16046  return res;
    16047 }
    16048 
    16049 VkResult vmaFindMemoryTypeIndexForImageInfo(
    16050  VmaAllocator allocator,
    16051  const VkImageCreateInfo* pImageCreateInfo,
    16052  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16053  uint32_t* pMemoryTypeIndex)
    16054 {
    16055  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    16056  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    16057  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    16058  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    16059 
    16060  const VkDevice hDev = allocator->m_hDevice;
    16061  VkImage hImage = VK_NULL_HANDLE;
    16062  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    16063  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    16064  if(res == VK_SUCCESS)
    16065  {
    16066  VkMemoryRequirements memReq = {};
    16067  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    16068  hDev, hImage, &memReq);
    16069 
    16070  res = vmaFindMemoryTypeIndex(
    16071  allocator,
    16072  memReq.memoryTypeBits,
    16073  pAllocationCreateInfo,
    16074  pMemoryTypeIndex);
    16075 
    16076  allocator->GetVulkanFunctions().vkDestroyImage(
    16077  hDev, hImage, allocator->GetAllocationCallbacks());
    16078  }
    16079  return res;
    16080 }
    16081 
    16082 VkResult vmaCreatePool(
    16083  VmaAllocator allocator,
    16084  const VmaPoolCreateInfo* pCreateInfo,
    16085  VmaPool* pPool)
    16086 {
    16087  VMA_ASSERT(allocator && pCreateInfo && pPool);
    16088 
    16089  VMA_DEBUG_LOG("vmaCreatePool");
    16090 
    16091  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16092 
    16093  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    16094 
    16095 #if VMA_RECORDING_ENABLED
    16096  if(allocator->GetRecorder() != VMA_NULL)
    16097  {
    16098  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    16099  }
    16100 #endif
    16101 
    16102  return res;
    16103 }
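
// Illustrative usage sketch, not part of vk_mem_alloc.h: creating a custom pool
// for one kind of buffer, with the memory type chosen via
// vmaFindMemoryTypeIndexForBufferInfo(). Assumes a valid "allocator"; sizes are
// arbitrary example values.
/*
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 1024;
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
    allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per block
poolCreateInfo.maxBlockCount = 4;               // cap the pool at 64 MiB

VmaPool pool;
res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/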
    16104 
    16105 void vmaDestroyPool(
    16106  VmaAllocator allocator,
    16107  VmaPool pool)
    16108 {
    16109  VMA_ASSERT(allocator);
    16110 
    16111  if(pool == VK_NULL_HANDLE)
    16112  {
    16113  return;
    16114  }
    16115 
    16116  VMA_DEBUG_LOG("vmaDestroyPool");
    16117 
    16118  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16119 
    16120 #if VMA_RECORDING_ENABLED
    16121  if(allocator->GetRecorder() != VMA_NULL)
    16122  {
    16123  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    16124  }
    16125 #endif
    16126 
    16127  allocator->DestroyPool(pool);
    16128 }
    16129 
    16130 void vmaGetPoolStats(
    16131  VmaAllocator allocator,
    16132  VmaPool pool,
    16133  VmaPoolStats* pPoolStats)
    16134 {
    16135  VMA_ASSERT(allocator && pool && pPoolStats);
    16136 
    16137  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16138 
    16139  allocator->GetPoolStats(pool, pPoolStats);
    16140 }
    16141 
    16142 void vmaMakePoolAllocationsLost(
    16143  VmaAllocator allocator,
    16144  VmaPool pool,
    16145  size_t* pLostAllocationCount)
    16146 {
    16147  VMA_ASSERT(allocator && pool);
    16148 
    16149  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16150 
    16151 #if VMA_RECORDING_ENABLED
    16152  if(allocator->GetRecorder() != VMA_NULL)
    16153  {
    16154  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    16155  }
    16156 #endif
    16157 
    16158  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    16159 }
    16160 
    16161 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    16162 {
    16163  VMA_ASSERT(allocator && pool);
    16164 
    16165  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16166 
    16167  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    16168 
    16169  return allocator->CheckPoolCorruption(pool);
    16170 }
    16171 
    16172 VkResult vmaAllocateMemory(
    16173  VmaAllocator allocator,
    16174  const VkMemoryRequirements* pVkMemoryRequirements,
    16175  const VmaAllocationCreateInfo* pCreateInfo,
    16176  VmaAllocation* pAllocation,
    16177  VmaAllocationInfo* pAllocationInfo)
    16178 {
    16179  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    16180 
    16181  VMA_DEBUG_LOG("vmaAllocateMemory");
    16182 
    16183  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16184 
    16185  VkResult result = allocator->AllocateMemory(
    16186  *pVkMemoryRequirements,
    16187  false, // requiresDedicatedAllocation
    16188  false, // prefersDedicatedAllocation
    16189  VK_NULL_HANDLE, // dedicatedBuffer
    16190  VK_NULL_HANDLE, // dedicatedImage
    16191  *pCreateInfo,
    16192  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16193  1, // allocationCount
    16194  pAllocation);
    16195 
    16196 #if VMA_RECORDING_ENABLED
    16197  if(allocator->GetRecorder() != VMA_NULL)
    16198  {
    16199  allocator->GetRecorder()->RecordAllocateMemory(
    16200  allocator->GetCurrentFrameIndex(),
    16201  *pVkMemoryRequirements,
    16202  *pCreateInfo,
    16203  *pAllocation);
    16204  }
    16205 #endif
    16206 
    16207  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16208  {
    16209  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16210  }
    16211 
    16212  return result;
    16213 }
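
// Illustrative usage sketch, not part of vk_mem_alloc.h: allocating raw memory
// for a buffer created outside of VMA and binding it afterwards. "device" and
// "buffer" are assumed to be valid handles owned by the caller.
/*
VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buffer, &memReq);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation alloc;
VmaAllocationInfo allocInfo;
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &alloc, &allocInfo);
if(res == VK_SUCCESS)
{
    res = vmaBindBufferMemory(allocator, alloc, buffer);
}
*/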
    16214 
    16215 VkResult vmaAllocateMemoryPages(
    16216  VmaAllocator allocator,
    16217  const VkMemoryRequirements* pVkMemoryRequirements,
    16218  const VmaAllocationCreateInfo* pCreateInfo,
    16219  size_t allocationCount,
    16220  VmaAllocation* pAllocations,
    16221  VmaAllocationInfo* pAllocationInfo)
    16222 {
    16223  if(allocationCount == 0)
    16224  {
    16225  return VK_SUCCESS;
    16226  }
    16227 
    16228  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
    16229 
    16230  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
    16231 
    16232  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16233 
    16234  VkResult result = allocator->AllocateMemory(
    16235  *pVkMemoryRequirements,
    16236  false, // requiresDedicatedAllocation
    16237  false, // prefersDedicatedAllocation
    16238  VK_NULL_HANDLE, // dedicatedBuffer
    16239  VK_NULL_HANDLE, // dedicatedImage
    16240  *pCreateInfo,
    16241  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16242  allocationCount,
    16243  pAllocations);
    16244 
    16245 #if VMA_RECORDING_ENABLED
    16246  if(allocator->GetRecorder() != VMA_NULL)
    16247  {
    16248  allocator->GetRecorder()->RecordAllocateMemoryPages(
    16249  allocator->GetCurrentFrameIndex(),
    16250  *pVkMemoryRequirements,
    16251  *pCreateInfo,
    16252  (uint64_t)allocationCount,
    16253  pAllocations);
    16254  }
    16255 #endif
    16256 
    16257  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16258  {
    16259  for(size_t i = 0; i < allocationCount; ++i)
    16260  {
    16261  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
    16262  }
    16263  }
    16264 
    16265  return result;
    16266 }
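
// Illustrative usage sketch, not part of vk_mem_alloc.h: allocating several
// "pages" with identical parameters in one call. If any allocation fails, the
// function frees the ones it already made and fails as a whole. "memReq" and
// "allocCreateInfo" are assumed to be filled as in the previous sketch.
/*
VmaAllocation allocs[8] = {};
VmaAllocationInfo allocInfos[8] = {};
VkResult res = vmaAllocateMemoryPages(
    allocator, &memReq, &allocCreateInfo, 8, allocs, allocInfos);
*/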
    16267 
    16268 VkResult vmaAllocateMemoryForBuffer(
    16269  VmaAllocator allocator,
    16270  VkBuffer buffer,
    16271  const VmaAllocationCreateInfo* pCreateInfo,
    16272  VmaAllocation* pAllocation,
    16273  VmaAllocationInfo* pAllocationInfo)
    16274 {
    16275  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16276 
    16277  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    16278 
    16279  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16280 
    16281  VkMemoryRequirements vkMemReq = {};
    16282  bool requiresDedicatedAllocation = false;
    16283  bool prefersDedicatedAllocation = false;
    16284  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    16285  requiresDedicatedAllocation,
    16286  prefersDedicatedAllocation);
    16287 
    16288  VkResult result = allocator->AllocateMemory(
    16289  vkMemReq,
    16290  requiresDedicatedAllocation,
    16291  prefersDedicatedAllocation,
    16292  buffer, // dedicatedBuffer
    16293  VK_NULL_HANDLE, // dedicatedImage
    16294  *pCreateInfo,
    16295  VMA_SUBALLOCATION_TYPE_BUFFER,
    16296  1, // allocationCount
    16297  pAllocation);
    16298 
    16299 #if VMA_RECORDING_ENABLED
    16300  if(allocator->GetRecorder() != VMA_NULL)
    16301  {
    16302  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    16303  allocator->GetCurrentFrameIndex(),
    16304  vkMemReq,
    16305  requiresDedicatedAllocation,
    16306  prefersDedicatedAllocation,
    16307  *pCreateInfo,
    16308  *pAllocation);
    16309  }
    16310 #endif
    16311 
    16312  if(pAllocationInfo && result == VK_SUCCESS)
    16313  {
    16314  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16315  }
    16316 
    16317  return result;
    16318 }
    16319 
    16320 VkResult vmaAllocateMemoryForImage(
    16321  VmaAllocator allocator,
    16322  VkImage image,
    16323  const VmaAllocationCreateInfo* pCreateInfo,
    16324  VmaAllocation* pAllocation,
    16325  VmaAllocationInfo* pAllocationInfo)
    16326 {
    16327  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16328 
    16329  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    16330 
    16331  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16332 
    16333  VkMemoryRequirements vkMemReq = {};
    16334  bool requiresDedicatedAllocation = false;
    16335  bool prefersDedicatedAllocation = false;
    16336  allocator->GetImageMemoryRequirements(image, vkMemReq,
    16337  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16338 
    16339  VkResult result = allocator->AllocateMemory(
    16340  vkMemReq,
    16341  requiresDedicatedAllocation,
    16342  prefersDedicatedAllocation,
    16343  VK_NULL_HANDLE, // dedicatedBuffer
    16344  image, // dedicatedImage
    16345  *pCreateInfo,
    16346  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    16347  1, // allocationCount
    16348  pAllocation);
    16349 
    16350 #if VMA_RECORDING_ENABLED
    16351  if(allocator->GetRecorder() != VMA_NULL)
    16352  {
    16353  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    16354  allocator->GetCurrentFrameIndex(),
    16355  vkMemReq,
    16356  requiresDedicatedAllocation,
    16357  prefersDedicatedAllocation,
    16358  *pCreateInfo,
    16359  *pAllocation);
    16360  }
    16361 #endif
    16362 
    16363  if(pAllocationInfo && result == VK_SUCCESS)
    16364  {
    16365  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16366  }
    16367 
    16368  return result;
    16369 }
    16370 
    16371 void vmaFreeMemory(
    16372  VmaAllocator allocator,
    16373  VmaAllocation allocation)
    16374 {
    16375  VMA_ASSERT(allocator);
    16376 
    16377  if(allocation == VK_NULL_HANDLE)
    16378  {
    16379  return;
    16380  }
    16381 
    16382  VMA_DEBUG_LOG("vmaFreeMemory");
    16383 
    16384  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16385 
    16386 #if VMA_RECORDING_ENABLED
    16387  if(allocator->GetRecorder() != VMA_NULL)
    16388  {
    16389  allocator->GetRecorder()->RecordFreeMemory(
    16390  allocator->GetCurrentFrameIndex(),
    16391  allocation);
    16392  }
    16393 #endif
    16394 
    16395  allocator->FreeMemory(
    16396  1, // allocationCount
    16397  &allocation);
    16398 }
    16399 
    16400 void vmaFreeMemoryPages(
    16401  VmaAllocator allocator,
    16402  size_t allocationCount,
    16403  VmaAllocation* pAllocations)
    16404 {
    16405  if(allocationCount == 0)
    16406  {
    16407  return;
    16408  }
    16409 
    16410  VMA_ASSERT(allocator);
    16411 
    16412  VMA_DEBUG_LOG("vmaFreeMemoryPages");
    16413 
    16414  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16415 
    16416 #if VMA_RECORDING_ENABLED
    16417  if(allocator->GetRecorder() != VMA_NULL)
    16418  {
    16419  allocator->GetRecorder()->RecordFreeMemoryPages(
    16420  allocator->GetCurrentFrameIndex(),
    16421  (uint64_t)allocationCount,
    16422  pAllocations);
    16423  }
    16424 #endif
    16425 
    16426  allocator->FreeMemory(allocationCount, pAllocations);
    16427 }
    16428 
    16429 VkResult vmaResizeAllocation(
    16430  VmaAllocator allocator,
    16431  VmaAllocation allocation,
    16432  VkDeviceSize newSize)
    16433 {
    16434  VMA_ASSERT(allocator && allocation);
    16435 
    16436  VMA_DEBUG_LOG("vmaResizeAllocation");
    16437 
    16438  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16439 
    16440 #if VMA_RECORDING_ENABLED
    16441  if(allocator->GetRecorder() != VMA_NULL)
    16442  {
    16443  allocator->GetRecorder()->RecordResizeAllocation(
    16444  allocator->GetCurrentFrameIndex(),
    16445  allocation,
    16446  newSize);
    16447  }
    16448 #endif
    16449 
    16450  return allocator->ResizeAllocation(allocation, newSize);
    16451 }
    16452 
    16453 void vmaGetAllocationInfo(
    16454  VmaAllocator allocator,
    16455  VmaAllocation allocation,
    16456  VmaAllocationInfo* pAllocationInfo)
    16457 {
    16458  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    16459 
    16460  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16461 
    16462 #if VMA_RECORDING_ENABLED
    16463  if(allocator->GetRecorder() != VMA_NULL)
    16464  {
    16465  allocator->GetRecorder()->RecordGetAllocationInfo(
    16466  allocator->GetCurrentFrameIndex(),
    16467  allocation);
    16468  }
    16469 #endif
    16470 
    16471  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    16472 }
    16473 
    16474 VkBool32 vmaTouchAllocation(
    16475  VmaAllocator allocator,
    16476  VmaAllocation allocation)
    16477 {
    16478  VMA_ASSERT(allocator && allocation);
    16479 
    16480  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16481 
    16482 #if VMA_RECORDING_ENABLED
    16483  if(allocator->GetRecorder() != VMA_NULL)
    16484  {
    16485  allocator->GetRecorder()->RecordTouchAllocation(
    16486  allocator->GetCurrentFrameIndex(),
    16487  allocation);
    16488  }
    16489 #endif
    16490 
    16491  return allocator->TouchAllocation(allocation);
    16492 }
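
// Illustrative usage sketch, not part of vk_mem_alloc.h: the per-frame pattern
// for allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
// "frameIndex" is assumed to be the application's monotonically increasing
// frame counter and "alloc" an allocation that can become lost.
/*
vmaSetCurrentFrameIndex(allocator, frameIndex);
if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
{
    // The allocation is lost: destroy the resource bound to it and recreate both.
}
*/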
    16493 
    16494 void vmaSetAllocationUserData(
    16495  VmaAllocator allocator,
    16496  VmaAllocation allocation,
    16497  void* pUserData)
    16498 {
    16499  VMA_ASSERT(allocator && allocation);
    16500 
    16501  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16502 
    16503  allocation->SetUserData(allocator, pUserData);
    16504 
    16505 #if VMA_RECORDING_ENABLED
    16506  if(allocator->GetRecorder() != VMA_NULL)
    16507  {
    16508  allocator->GetRecorder()->RecordSetAllocationUserData(
    16509  allocator->GetCurrentFrameIndex(),
    16510  allocation,
    16511  pUserData);
    16512  }
    16513 #endif
    16514 }
    16515 
    16516 void vmaCreateLostAllocation(
    16517  VmaAllocator allocator,
    16518  VmaAllocation* pAllocation)
    16519 {
    16520  VMA_ASSERT(allocator && pAllocation);
    16521 
    16522  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    16523 
    16524  allocator->CreateLostAllocation(pAllocation);
    16525 
    16526 #if VMA_RECORDING_ENABLED
    16527  if(allocator->GetRecorder() != VMA_NULL)
    16528  {
    16529  allocator->GetRecorder()->RecordCreateLostAllocation(
    16530  allocator->GetCurrentFrameIndex(),
    16531  *pAllocation);
    16532  }
    16533 #endif
    16534 }
    16535 
    16536 VkResult vmaMapMemory(
    16537  VmaAllocator allocator,
    16538  VmaAllocation allocation,
    16539  void** ppData)
    16540 {
    16541  VMA_ASSERT(allocator && allocation && ppData);
    16542 
    16543  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16544 
    16545  VkResult res = allocator->Map(allocation, ppData);
    16546 
    16547 #if VMA_RECORDING_ENABLED
    16548  if(allocator->GetRecorder() != VMA_NULL)
    16549  {
    16550  allocator->GetRecorder()->RecordMapMemory(
    16551  allocator->GetCurrentFrameIndex(),
    16552  allocation);
    16553  }
    16554 #endif
    16555 
    16556  return res;
    16557 }
    16558 
    16559 void vmaUnmapMemory(
    16560  VmaAllocator allocator,
    16561  VmaAllocation allocation)
    16562 {
    16563  VMA_ASSERT(allocator && allocation);
    16564 
    16565  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16566 
    16567 #if VMA_RECORDING_ENABLED
    16568  if(allocator->GetRecorder() != VMA_NULL)
    16569  {
    16570  allocator->GetRecorder()->RecordUnmapMemory(
    16571  allocator->GetCurrentFrameIndex(),
    16572  allocation);
    16573  }
    16574 #endif
    16575 
    16576  allocator->Unmap(allocation);
    16577 }
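
// Illustrative usage sketch, not part of vk_mem_alloc.h: writing to a
// host-visible allocation. Map/Unmap are reference-counted internally, so this
// also works for allocations created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
// "srcData" and "srcSize" are assumed caller-provided.
/*
void* pData = VMA_NULL;
VkResult res = vmaMapMemory(allocator, alloc, &pData);
if(res == VK_SUCCESS)
{
    memcpy(pData, srcData, srcSize);
    vmaUnmapMemory(allocator, alloc);
}
*/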
    16578 
    16579 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16580 {
    16581  VMA_ASSERT(allocator && allocation);
    16582 
    16583  VMA_DEBUG_LOG("vmaFlushAllocation");
    16584 
    16585  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16586 
    16587  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    16588 
    16589 #if VMA_RECORDING_ENABLED
    16590  if(allocator->GetRecorder() != VMA_NULL)
    16591  {
    16592  allocator->GetRecorder()->RecordFlushAllocation(
    16593  allocator->GetCurrentFrameIndex(),
    16594  allocation, offset, size);
    16595  }
    16596 #endif
    16597 }
    16598 
    16599 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16600 {
    16601  VMA_ASSERT(allocator && allocation);
    16602 
    16603  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    16604 
    16605  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16606 
    16607  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    16608 
    16609 #if VMA_RECORDING_ENABLED
    16610  if(allocator->GetRecorder() != VMA_NULL)
    16611  {
    16612  allocator->GetRecorder()->RecordInvalidateAllocation(
    16613  allocator->GetCurrentFrameIndex(),
    16614  allocation, offset, size);
    16615  }
    16616 #endif
    16617 }
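
// Illustrative usage sketch, not part of vk_mem_alloc.h: flushing after a CPU
// write in case the memory type lacks HOST_COHERENT. As seen in
// FlushOrInvalidateAllocation() above, the call degenerates to a no-op on
// coherent memory, so calling it unconditionally is harmless.
/*
memcpy(pData, srcData, srcSize);
vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
*/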
    16618 
    16619 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    16620 {
    16621  VMA_ASSERT(allocator);
    16622 
    16623  VMA_DEBUG_LOG("vmaCheckCorruption");
    16624 
    16625  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16626 
    16627  return allocator->CheckCorruption(memoryTypeBits);
    16628 }
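
// Illustrative usage sketch, not part of vk_mem_alloc.h: a periodic corruption
// check in a debug build. It only does real work when corruption detection was
// enabled by defining VMA_DEBUG_DETECT_CORRUPTION (with a nonzero
// VMA_DEBUG_MARGIN) before including this file; otherwise it reports
// VK_ERROR_FEATURE_NOT_PRESENT.
/*
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
VMA_ASSERT(res == VK_SUCCESS || res == VK_ERROR_FEATURE_NOT_PRESENT);
*/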
    16629 
    16630 VkResult vmaDefragment(
    16631  VmaAllocator allocator,
    16632  VmaAllocation* pAllocations,
    16633  size_t allocationCount,
    16634  VkBool32* pAllocationsChanged,
    16635  const VmaDefragmentationInfo *pDefragmentationInfo,
    16636  VmaDefragmentationStats* pDefragmentationStats)
    16637 {
    16638  // Deprecated interface, reimplemented using the new one.
    16639 
    16640  VmaDefragmentationInfo2 info2 = {};
    16641  info2.allocationCount = (uint32_t)allocationCount;
    16642  info2.pAllocations = pAllocations;
    16643  info2.pAllocationsChanged = pAllocationsChanged;
    16644  if(pDefragmentationInfo != VMA_NULL)
    16645  {
    16646  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    16647  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
    16648  }
    16649  else
    16650  {
    16651  info2.maxCpuAllocationsToMove = UINT32_MAX;
    16652  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
    16653  }
    16654  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
    16655 
    16656  VmaDefragmentationContext ctx;
    16657  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
    16658  if(res == VK_NOT_READY)
    16659  {
    16660  res = vmaDefragmentationEnd(allocator, ctx);
    16661  }
    16662  return res;
    16663 }
    16664 
    16665 VkResult vmaDefragmentationBegin(
    16666  VmaAllocator allocator,
    16667  const VmaDefragmentationInfo2* pInfo,
    16668  VmaDefragmentationStats* pStats,
    16669  VmaDefragmentationContext *pContext)
    16670 {
    16671  VMA_ASSERT(allocator && pInfo && pContext);
    16672 
    16673  // Degenerate case: Nothing to defragment.
    16674  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
    16675  {
    16676  return VK_SUCCESS;
    16677  }
    16678 
    16679  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
    16680  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
    16681  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
    16682  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
    16683 
    16684  VMA_DEBUG_LOG("vmaDefragmentationBegin");
    16685 
    16686  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16687 
    16688  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
    16689 
    16690 #if VMA_RECORDING_ENABLED
    16691  if(allocator->GetRecorder() != VMA_NULL)
    16692  {
    16693  allocator->GetRecorder()->RecordDefragmentationBegin(
    16694  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
    16695  }
    16696 #endif
    16697 
    16698  return res;
    16699 }
    16700 
    16701 VkResult vmaDefragmentationEnd(
    16702  VmaAllocator allocator,
    16703  VmaDefragmentationContext context)
    16704 {
    16705  VMA_ASSERT(allocator);
    16706 
    16707  VMA_DEBUG_LOG("vmaDefragmentationEnd");
    16708 
    16709  if(context != VK_NULL_HANDLE)
    16710  {
    16711  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16712 
    16713 #if VMA_RECORDING_ENABLED
    16714  if(allocator->GetRecorder() != VMA_NULL)
    16715  {
    16716  allocator->GetRecorder()->RecordDefragmentationEnd(
    16717  allocator->GetCurrentFrameIndex(), context);
    16718  }
    16719 #endif
    16720 
    16721  return allocator->DefragmentationEnd(context);
    16722  }
    16723  else
    16724  {
    16725  return VK_SUCCESS;
    16726  }
    16727 }
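
// Illustrative usage sketch, not part of vk_mem_alloc.h: CPU-side
// defragmentation of a set of allocations. Leaving commandBuffer as
// VK_NULL_HANDLE restricts moves to host-visible memory (done with memcpy);
// resources bound to moved allocations must be destroyed, recreated, and
// re-bound by the caller afterwards. "allocs"/"allocCount" are assumed
// caller-provided.
/*
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = (uint32_t)allocCount;
defragInfo.pAllocations = allocs;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
if(res >= 0) // VK_SUCCESS or VK_NOT_READY
{
    res = vmaDefragmentationEnd(allocator, defragCtx);
}
*/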
    16728 
    16729 VkResult vmaBindBufferMemory(
    16730  VmaAllocator allocator,
    16731  VmaAllocation allocation,
    16732  VkBuffer buffer)
    16733 {
    16734  VMA_ASSERT(allocator && allocation && buffer);
    16735 
    16736  VMA_DEBUG_LOG("vmaBindBufferMemory");
    16737 
    16738  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16739 
    16740  return allocator->BindBufferMemory(allocation, buffer);
    16741 }
    16742 
    16743 VkResult vmaBindImageMemory(
    16744  VmaAllocator allocator,
    16745  VmaAllocation allocation,
    16746  VkImage image)
    16747 {
    16748  VMA_ASSERT(allocator && allocation && image);
    16749 
    16750  VMA_DEBUG_LOG("vmaBindImageMemory");
    16751 
    16752  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16753 
    16754  return allocator->BindImageMemory(allocation, image);
    16755 }
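// ---------------------------------------------------------------------------
// Editor's note: a hedged sketch, not part of vk_mem_alloc.h. The Bind
// functions above matter when memory is allocated separately from resource
// creation. This hypothetical helper allocates for an existing VkBuffer and
// binds it through VMA, which applies the allocation's offset within its
// VkDeviceMemory block automatically.
static VkResult ExampleAllocateAndBindExistingBuffer(
    VmaAllocator allocator,
    VkBuffer buffer, // created earlier with vkCreateBuffer
    VmaAllocation* pAllocation)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkResult res = vmaAllocateMemoryForBuffer(
        allocator, buffer, &allocCreateInfo, pAllocation, VMA_NULL);
    if(res == VK_SUCCESS)
    {
        res = vmaBindBufferMemory(allocator, *pAllocation, buffer);
    }
    return res;
}
// ---------------------------------------------------------------------------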
    16756 
    16757 VkResult vmaCreateBuffer(
    16758  VmaAllocator allocator,
    16759  const VkBufferCreateInfo* pBufferCreateInfo,
    16760  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16761  VkBuffer* pBuffer,
    16762  VmaAllocation* pAllocation,
    16763  VmaAllocationInfo* pAllocationInfo)
    16764 {
    16765  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    16766 
    16767  if(pBufferCreateInfo->size == 0)
    16768  {
    16769  return VK_ERROR_VALIDATION_FAILED_EXT;
    16770  }
    16771 
    16772  VMA_DEBUG_LOG("vmaCreateBuffer");
    16773 
    16774  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16775 
    16776  *pBuffer = VK_NULL_HANDLE;
    16777  *pAllocation = VK_NULL_HANDLE;
    16778 
    16779  // 1. Create VkBuffer.
    16780  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    16781  allocator->m_hDevice,
    16782  pBufferCreateInfo,
    16783  allocator->GetAllocationCallbacks(),
    16784  pBuffer);
    16785  if(res >= 0)
    16786  {
    16787  // 2. vkGetBufferMemoryRequirements.
    16788  VkMemoryRequirements vkMemReq = {};
    16789  bool requiresDedicatedAllocation = false;
    16790  bool prefersDedicatedAllocation = false;
    16791  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    16792  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16793 
    16794  // Make sure alignment requirements for specific buffer usages reported
    16795  // in Physical Device Properties are included in alignment reported by memory requirements.
    16796  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    16797  {
    16798  VMA_ASSERT(vkMemReq.alignment %
    16799  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    16800  }
    16801  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    16802  {
    16803  VMA_ASSERT(vkMemReq.alignment %
    16804  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    16805  }
    16806  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    16807  {
    16808  VMA_ASSERT(vkMemReq.alignment %
    16809  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    16810  }
    16811 
    16812  // 3. Allocate memory using allocator.
    16813  res = allocator->AllocateMemory(
    16814  vkMemReq,
    16815  requiresDedicatedAllocation,
    16816  prefersDedicatedAllocation,
    16817  *pBuffer, // dedicatedBuffer
    16818  VK_NULL_HANDLE, // dedicatedImage
    16819  *pAllocationCreateInfo,
    16820  VMA_SUBALLOCATION_TYPE_BUFFER,
    16821  1, // allocationCount
    16822  pAllocation);
    16823 
    16824 #if VMA_RECORDING_ENABLED
    16825  if(allocator->GetRecorder() != VMA_NULL)
    16826  {
    16827  allocator->GetRecorder()->RecordCreateBuffer(
    16828  allocator->GetCurrentFrameIndex(),
    16829  *pBufferCreateInfo,
    16830  *pAllocationCreateInfo,
    16831  *pAllocation);
    16832  }
    16833 #endif
    16834 
    16835  if(res >= 0)
    16836  {
    16837  // 4. Bind buffer with memory.
    16838  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
    16839  {
    16840  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    16841  }
    16842  if(res >= 0)
    16843  {
    16844  // All steps succeeded.
    16845  #if VMA_STATS_STRING_ENABLED
    16846  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    16847  #endif
    16848  if(pAllocationInfo != VMA_NULL)
    16849  {
    16850  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16851  }
    16852 
    16853  return VK_SUCCESS;
    16854  }
    16855  allocator->FreeMemory(
    16856  1, // allocationCount
    16857  pAllocation);
    16858  *pAllocation = VK_NULL_HANDLE;
    16859  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16860  *pBuffer = VK_NULL_HANDLE;
    16861  return res;
    16862  }
    16863  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16864  *pBuffer = VK_NULL_HANDLE;
    16865  return res;
    16866  }
    16867  return res;
    16868 }
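// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of vk_mem_alloc.h. Typical
// use of vmaCreateBuffer for a persistently mapped staging buffer (helper
// name is hypothetical). Note that size must be nonzero, per the validation
// above.
static VkResult ExampleCreateStagingBuffer(
    VmaAllocator allocator,
    VkDeviceSize size,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = size;
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // staging: CPU writes, GPU copies from it
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

    return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        pBuffer, pAllocation, VMA_NULL);
    // Pair with vmaDestroyBuffer(allocator, *pBuffer, *pAllocation) below.
}
// ---------------------------------------------------------------------------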
    16869 
    16870 void vmaDestroyBuffer(
    16871  VmaAllocator allocator,
    16872  VkBuffer buffer,
    16873  VmaAllocation allocation)
    16874 {
    16875  VMA_ASSERT(allocator);
    16876 
    16877  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    16878  {
    16879  return;
    16880  }
    16881 
    16882  VMA_DEBUG_LOG("vmaDestroyBuffer");
    16883 
    16884  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16885 
    16886 #if VMA_RECORDING_ENABLED
    16887  if(allocator->GetRecorder() != VMA_NULL)
    16888  {
    16889  allocator->GetRecorder()->RecordDestroyBuffer(
    16890  allocator->GetCurrentFrameIndex(),
    16891  allocation);
    16892  }
    16893 #endif
    16894 
    16895  if(buffer != VK_NULL_HANDLE)
    16896  {
    16897  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    16898  }
    16899 
    16900  if(allocation != VK_NULL_HANDLE)
    16901  {
    16902  allocator->FreeMemory(
    16903  1, // allocationCount
    16904  &allocation);
    16905  }
    16906 }
    16907 
    16908 VkResult vmaCreateImage(
    16909  VmaAllocator allocator,
    16910  const VkImageCreateInfo* pImageCreateInfo,
    16911  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16912  VkImage* pImage,
    16913  VmaAllocation* pAllocation,
    16914  VmaAllocationInfo* pAllocationInfo)
    16915 {
    16916  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    16917 
    16918  if(pImageCreateInfo->extent.width == 0 ||
    16919  pImageCreateInfo->extent.height == 0 ||
    16920  pImageCreateInfo->extent.depth == 0 ||
    16921  pImageCreateInfo->mipLevels == 0 ||
    16922  pImageCreateInfo->arrayLayers == 0)
    16923  {
    16924  return VK_ERROR_VALIDATION_FAILED_EXT;
    16925  }
    16926 
    16927  VMA_DEBUG_LOG("vmaCreateImage");
    16928 
    16929  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16930 
    16931  *pImage = VK_NULL_HANDLE;
    16932  *pAllocation = VK_NULL_HANDLE;
    16933 
    16934  // 1. Create VkImage.
    16935  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    16936  allocator->m_hDevice,
    16937  pImageCreateInfo,
    16938  allocator->GetAllocationCallbacks(),
    16939  pImage);
    16940  if(res >= 0)
    16941  {
    16942  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    16943  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    16944  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    16945 
    16946  // 2. Allocate memory using allocator.
    16947  VkMemoryRequirements vkMemReq = {};
    16948  bool requiresDedicatedAllocation = false;
    16949  bool prefersDedicatedAllocation = false;
    16950  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    16951  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16952 
    16953  res = allocator->AllocateMemory(
    16954  vkMemReq,
    16955  requiresDedicatedAllocation,
    16956  prefersDedicatedAllocation,
    16957  VK_NULL_HANDLE, // dedicatedBuffer
    16958  *pImage, // dedicatedImage
    16959  *pAllocationCreateInfo,
    16960  suballocType,
    16961  1, // allocationCount
    16962  pAllocation);
    16963 
    16964 #if VMA_RECORDING_ENABLED
    16965  if(allocator->GetRecorder() != VMA_NULL)
    16966  {
    16967  allocator->GetRecorder()->RecordCreateImage(
    16968  allocator->GetCurrentFrameIndex(),
    16969  *pImageCreateInfo,
    16970  *pAllocationCreateInfo,
    16971  *pAllocation);
    16972  }
    16973 #endif
    16974 
    16975  if(res >= 0)
    16976  {
    16977  // 3. Bind image with memory.
    16978  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
    16979  {
    16980  res = allocator->BindImageMemory(*pAllocation, *pImage);
    16981  }
    16982  if(res >= 0)
    16983  {
    16984  // All steps succeeded.
    16985  #if VMA_STATS_STRING_ENABLED
    16986  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    16987  #endif
    16988  if(pAllocationInfo != VMA_NULL)
    16989  {
    16990  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16991  }
    16992 
    16993  return VK_SUCCESS;
    16994  }
    16995  allocator->FreeMemory(
    16996  1, // allocationCount
    16997  pAllocation);
    16998  *pAllocation = VK_NULL_HANDLE;
    16999  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    17000  *pImage = VK_NULL_HANDLE;
    17001  return res;
    17002  }
    17003  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    17004  *pImage = VK_NULL_HANDLE;
    17005  return res;
    17006  }
    17007  return res;
    17008 }
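// ---------------------------------------------------------------------------
// Editor's note: an illustrative sketch, not part of vk_mem_alloc.h. Typical
// use of vmaCreateImage for a sampled 2D texture in device-local memory
// (hypothetical helper). extent, mipLevels, and arrayLayers must be nonzero,
// per the validation above; OPTIMAL tiling selects the IMAGE_OPTIMAL
// suballocation type.
static VkResult ExampleCreateTexture2D(
    VmaAllocator allocator,
    uint32_t width,
    uint32_t height,
    VkImage* pImage,
    VmaAllocation* pAllocation)
{
    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent = { width, height, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    return vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
        pImage, pAllocation, VMA_NULL);
}
// ---------------------------------------------------------------------------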
    17009 
    17010 void vmaDestroyImage(
    17011  VmaAllocator allocator,
    17012  VkImage image,
    17013  VmaAllocation allocation)
    17014 {
    17015  VMA_ASSERT(allocator);
    17016 
    17017  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    17018  {
    17019  return;
    17020  }
    17021 
    17022  VMA_DEBUG_LOG("vmaDestroyImage");
    17023 
    17024  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    17025 
    17026 #if VMA_RECORDING_ENABLED
    17027  if(allocator->GetRecorder() != VMA_NULL)
    17028  {
    17029  allocator->GetRecorder()->RecordDestroyImage(
    17030  allocator->GetCurrentFrameIndex(),
    17031  allocation);
    17032  }
    17033 #endif
    17034 
    17035  if(image != VK_NULL_HANDLE)
    17036  {
    17037  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    17038  }
    17039  if(allocation != VK_NULL_HANDLE)
    17040  {
    17041  allocator->FreeMemory(
    17042  1, // allocationCount
    17043  &allocation);
    17044  }
    17045 }
    17046 
    17047 #endif // #ifdef VMA_IMPLEMENTATION
    +Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1677 /*
    1678 Define this macro to 0/1 to disable/enable support for recording functionality,
    1679 available through VmaAllocatorCreateInfo::pRecordSettings.
    1680 */
    1681 #ifndef VMA_RECORDING_ENABLED
    1682  #ifdef _WIN32
    1683  #define VMA_RECORDING_ENABLED 1
    1684  #else
    1685  #define VMA_RECORDING_ENABLED 0
    1686  #endif
    1687 #endif
    1688 
    1689 #ifndef NOMINMAX
    1690  #define NOMINMAX // For windows.h
    1691 #endif
    1692 
    1693 #ifndef VULKAN_H_
    1694  #include <vulkan/vulkan.h>
    1695 #endif
    1696 
    1697 #if VMA_RECORDING_ENABLED
    1698  #include <windows.h>
    1699 #endif
    1700 
    1701 #if !defined(VMA_DEDICATED_ALLOCATION)
    1702  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1703  #define VMA_DEDICATED_ALLOCATION 1
    1704  #else
    1705  #define VMA_DEDICATED_ALLOCATION 0
    1706  #endif
    1707 #endif
    1708 
    1718 VK_DEFINE_HANDLE(VmaAllocator)
    1719 
    1720 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1722  VmaAllocator allocator,
    1723  uint32_t memoryType,
    1724  VkDeviceMemory memory,
    1725  VkDeviceSize size);
    1727 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1728  VmaAllocator allocator,
    1729  uint32_t memoryType,
    1730  VkDeviceMemory memory,
    1731  VkDeviceSize size);
    1732 
    1746 
    1776 
    1779 typedef VkFlags VmaAllocatorCreateFlags;
    1780 
    1785 typedef struct VmaVulkanFunctions {
    1786  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1787  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1788  PFN_vkAllocateMemory vkAllocateMemory;
    1789  PFN_vkFreeMemory vkFreeMemory;
    1790  PFN_vkMapMemory vkMapMemory;
    1791  PFN_vkUnmapMemory vkUnmapMemory;
    1792  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1793  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1794  PFN_vkBindBufferMemory vkBindBufferMemory;
    1795  PFN_vkBindImageMemory vkBindImageMemory;
    1796  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1797  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1798  PFN_vkCreateBuffer vkCreateBuffer;
    1799  PFN_vkDestroyBuffer vkDestroyBuffer;
    1800  PFN_vkCreateImage vkCreateImage;
    1801  PFN_vkDestroyImage vkDestroyImage;
    1802  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    1803 #if VMA_DEDICATED_ALLOCATION
    1804  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1805  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1806 #endif
    1807 } VmaVulkanFunctions;
    1808 
    1810 typedef enum VmaRecordFlagBits {
    1817 
    1820 typedef VkFlags VmaRecordFlags;
    1821 
    1823 typedef struct VmaRecordSettings
    1824 {
    1834  const char* pFilePath;
    1835 } VmaRecordSettings;
    1836 
    1838 typedef struct VmaAllocatorCreateInfo
    1839 {
    1843 
    1844  VkPhysicalDevice physicalDevice;
    1846 
    1847  VkDevice device;
    1849 
    1852 
    1853  const VkAllocationCallbacks* pAllocationCallbacks;
    1855 
    1895  const VkDeviceSize* pHeapSizeLimit;
    1915 } VmaAllocatorCreateInfo;
    1916 
    1918 VkResult vmaCreateAllocator(
    1919  const VmaAllocatorCreateInfo* pCreateInfo,
    1920  VmaAllocator* pAllocator);
    1921 
    1923 void vmaDestroyAllocator(
    1924  VmaAllocator allocator);
    1925 
    1930 void vmaGetPhysicalDeviceProperties(
    1931  VmaAllocator allocator,
    1932  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1933 
    1938 void vmaGetMemoryProperties(
    1939  VmaAllocator allocator,
    1940  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1941 
    1948 void vmaGetMemoryTypeProperties(
    1949  VmaAllocator allocator,
    1950  uint32_t memoryTypeIndex,
    1951  VkMemoryPropertyFlags* pFlags);
    1952 
    1961 void vmaSetCurrentFrameIndex(
    1962  VmaAllocator allocator,
    1963  uint32_t frameIndex);
    1964 
    1967 typedef struct VmaStatInfo
    1968 {
    1970  uint32_t blockCount;
    1976  VkDeviceSize usedBytes;
    1978  VkDeviceSize unusedBytes;
    1981 } VmaStatInfo;
    1982 
    1984 typedef struct VmaStats
    1985 {
    1986  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1987  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1989 } VmaStats;
    1990 
    1992 void vmaCalculateStats(
    1993  VmaAllocator allocator,
    1994  VmaStats* pStats);
    1995 
    1996 #ifndef VMA_STATS_STRING_ENABLED
    1997 #define VMA_STATS_STRING_ENABLED 1
    1998 #endif
    1999 
    2000 #if VMA_STATS_STRING_ENABLED
    2001 
    2003 
    2005 void vmaBuildStatsString(
    2006  VmaAllocator allocator,
    2007  char** ppStatsString,
    2008  VkBool32 detailedMap);
    2009 
    2010 void vmaFreeStatsString(
    2011  VmaAllocator allocator,
    2012  char* pStatsString);
    2013 
    2014 #endif // #if VMA_STATS_STRING_ENABLED
    2015 
    2024 VK_DEFINE_HANDLE(VmaPool)
    2025 
    2026 typedef enum VmaMemoryUsage
    2027 {
    2076 } VmaMemoryUsage;
    2077 
    2087 
    2148 
    2164 
    2174 
    2181 
    2185 
    2186 typedef struct VmaAllocationCreateInfo
    2187 {
    2200  VkMemoryPropertyFlags requiredFlags;
    2205  VkMemoryPropertyFlags preferredFlags;
    2213  uint32_t memoryTypeBits;
    2226  void* pUserData;
    2227 } VmaAllocationCreateInfo;
    2228 
    2245 VkResult vmaFindMemoryTypeIndex(
    2246  VmaAllocator allocator,
    2247  uint32_t memoryTypeBits,
    2248  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2249  uint32_t* pMemoryTypeIndex);
    2250 
    2263 VkResult vmaFindMemoryTypeIndexForBufferInfo(
    2264  VmaAllocator allocator,
    2265  const VkBufferCreateInfo* pBufferCreateInfo,
    2266  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2267  uint32_t* pMemoryTypeIndex);
    2268 
    2281 VkResult vmaFindMemoryTypeIndexForImageInfo(
    2282  VmaAllocator allocator,
    2283  const VkImageCreateInfo* pImageCreateInfo,
    2284  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2285  uint32_t* pMemoryTypeIndex);
    2286 
    2307 
    2324 
    2335 
    2341 
    2344 typedef VkFlags VmaPoolCreateFlags;
    2345 
    2348 typedef struct VmaPoolCreateInfo {
    2363  VkDeviceSize blockSize;
    2392 
    2395 typedef struct VmaPoolStats {
    2398  VkDeviceSize size;
    2401  VkDeviceSize unusedSize;
    2414  VkDeviceSize unusedRangeSizeMax;
    2417  size_t blockCount;
    2418 } VmaPoolStats;
    2419 
    2426 VkResult vmaCreatePool(
    2427  VmaAllocator allocator,
    2428  const VmaPoolCreateInfo* pCreateInfo,
    2429  VmaPool* pPool);
    2430 
    2433 void vmaDestroyPool(
    2434  VmaAllocator allocator,
    2435  VmaPool pool);
    2436 
    2443 void vmaGetPoolStats(
    2444  VmaAllocator allocator,
    2445  VmaPool pool,
    2446  VmaPoolStats* pPoolStats);
    2447 
    2454 void vmaMakePoolAllocationsLost(
    2455  VmaAllocator allocator,
    2456  VmaPool pool,
    2457  size_t* pLostAllocationCount);
    2458 
    2473 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2474 
    2499 VK_DEFINE_HANDLE(VmaAllocation)
    2500 
    2501 
    2503 typedef struct VmaAllocationInfo {
    2508  uint32_t memoryType;
    2517  VkDeviceMemory deviceMemory;
    2522  VkDeviceSize offset;
    2527  VkDeviceSize size;
    2541  void* pUserData;
    2542 } VmaAllocationInfo;
    2543 
    2554 VkResult vmaAllocateMemory(
    2555  VmaAllocator allocator,
    2556  const VkMemoryRequirements* pVkMemoryRequirements,
    2557  const VmaAllocationCreateInfo* pCreateInfo,
    2558  VmaAllocation* pAllocation,
    2559  VmaAllocationInfo* pAllocationInfo);
    2560 
    2580 VkResult vmaAllocateMemoryPages(
    2581  VmaAllocator allocator,
    2582  const VkMemoryRequirements* pVkMemoryRequirements,
    2583  const VmaAllocationCreateInfo* pCreateInfo,
    2584  size_t allocationCount,
    2585  VmaAllocation* pAllocations,
    2586  VmaAllocationInfo* pAllocationInfo);
    2587 
    2594 VkResult vmaAllocateMemoryForBuffer(
    2595  VmaAllocator allocator,
    2596  VkBuffer buffer,
    2597  const VmaAllocationCreateInfo* pCreateInfo,
    2598  VmaAllocation* pAllocation,
    2599  VmaAllocationInfo* pAllocationInfo);
    2600 
    2602 VkResult vmaAllocateMemoryForImage(
    2603  VmaAllocator allocator,
    2604  VkImage image,
    2605  const VmaAllocationCreateInfo* pCreateInfo,
    2606  VmaAllocation* pAllocation,
    2607  VmaAllocationInfo* pAllocationInfo);
    2608 
    2613 void vmaFreeMemory(
    2614  VmaAllocator allocator,
    2615  VmaAllocation allocation);
    2616 
    2627 void vmaFreeMemoryPages(
    2628  VmaAllocator allocator,
    2629  size_t allocationCount,
    2630  VmaAllocation* pAllocations);
    2631 
    2652 VkResult vmaResizeAllocation(
    2653  VmaAllocator allocator,
    2654  VmaAllocation allocation,
    2655  VkDeviceSize newSize);
    2656 
    2673 void vmaGetAllocationInfo(
    2674  VmaAllocator allocator,
    2675  VmaAllocation allocation,
    2676  VmaAllocationInfo* pAllocationInfo);
    2677 
    2692 VkBool32 vmaTouchAllocation(
    2693  VmaAllocator allocator,
    2694  VmaAllocation allocation);
    2695 
    2709 void vmaSetAllocationUserData(
    2710  VmaAllocator allocator,
    2711  VmaAllocation allocation,
    2712  void* pUserData);
    2713 
    2724 void vmaCreateLostAllocation(
    2725  VmaAllocator allocator,
    2726  VmaAllocation* pAllocation);
    2727 
    2762 VkResult vmaMapMemory(
    2763  VmaAllocator allocator,
    2764  VmaAllocation allocation,
    2765  void** ppData);
    2766 
    2771 void vmaUnmapMemory(
    2772  VmaAllocator allocator,
    2773  VmaAllocation allocation);
    2774 
    2791 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2792 
    2809 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2810 
    2827 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2828 
    2835 VK_DEFINE_HANDLE(VmaDefragmentationContext)
    2836 
    2837 typedef enum VmaDefragmentationFlagBits {
    2840 } VmaDefragmentationFlagBits;
    2841 typedef VkFlags VmaDefragmentationFlags;
    2842 
    2847 typedef struct VmaDefragmentationInfo2 {
    2871  uint32_t poolCount;
    2892  VkDeviceSize maxCpuBytesToMove;
    2902  VkDeviceSize maxGpuBytesToMove;
    2916  VkCommandBuffer commandBuffer;
    2917 } VmaDefragmentationInfo2;
    2918 
    2923 typedef struct VmaDefragmentationInfo {
    2928  VkDeviceSize maxBytesToMove;
    2933  uint32_t maxAllocationsToMove;
    2934 } VmaDefragmentationInfo;
    2935 
    2937 typedef struct VmaDefragmentationStats {
    2939  VkDeviceSize bytesMoved;
    2941  VkDeviceSize bytesFreed;
    2946 } VmaDefragmentationStats;
    2947 
    2977 VkResult vmaDefragmentationBegin(
    2978  VmaAllocator allocator,
    2979  const VmaDefragmentationInfo2* pInfo,
    2980  VmaDefragmentationStats* pStats,
    2981  VmaDefragmentationContext *pContext);
    2982 
    2988 VkResult vmaDefragmentationEnd(
    2989  VmaAllocator allocator,
    2990  VmaDefragmentationContext context);
    2991 
    3032 VkResult vmaDefragment(
    3033  VmaAllocator allocator,
    3034  VmaAllocation* pAllocations,
    3035  size_t allocationCount,
    3036  VkBool32* pAllocationsChanged,
    3037  const VmaDefragmentationInfo *pDefragmentationInfo,
    3038  VmaDefragmentationStats* pDefragmentationStats);
    3039 
    3052 VkResult vmaBindBufferMemory(
    3053  VmaAllocator allocator,
    3054  VmaAllocation allocation,
    3055  VkBuffer buffer);
    3056 
    3069 VkResult vmaBindImageMemory(
    3070  VmaAllocator allocator,
    3071  VmaAllocation allocation,
    3072  VkImage image);
    3073 
    3100 VkResult vmaCreateBuffer(
    3101  VmaAllocator allocator,
    3102  const VkBufferCreateInfo* pBufferCreateInfo,
    3103  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3104  VkBuffer* pBuffer,
    3105  VmaAllocation* pAllocation,
    3106  VmaAllocationInfo* pAllocationInfo);
    3107 
    3119 void vmaDestroyBuffer(
    3120  VmaAllocator allocator,
    3121  VkBuffer buffer,
    3122  VmaAllocation allocation);
    3123 
    3125 VkResult vmaCreateImage(
    3126  VmaAllocator allocator,
    3127  const VkImageCreateInfo* pImageCreateInfo,
    3128  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3129  VkImage* pImage,
    3130  VmaAllocation* pAllocation,
    3131  VmaAllocationInfo* pAllocationInfo);
    3132 
    3144 void vmaDestroyImage(
    3145  VmaAllocator allocator,
    3146  VkImage image,
    3147  VmaAllocation allocation);
    3148 
    3149 #ifdef __cplusplus
    3150 }
    3151 #endif
    3152 
    3153 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    3154 
    3155 // For Visual Studio IntelliSense.
    3156 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    3157 #define VMA_IMPLEMENTATION
    3158 #endif
    3159 
    3160 #ifdef VMA_IMPLEMENTATION
    3161 #undef VMA_IMPLEMENTATION
    3162 
    3163 #include <cstdint>
    3164 #include <cstdlib>
    3165 #include <cstring>
    3166 
    3167 /*******************************************************************************
    3168 CONFIGURATION SECTION
    3169 
    3170 Define some of these macros before each #include of this header or change them
    3171 here if you need behavior other than the default for your environment.
    3172 */
    3173 
    3174 /*
    3175 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    3176 internally, like:
    3177 
    3178  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    3179 
    3180 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    3181 VmaAllocatorCreateInfo::pVulkanFunctions.
    3182 */
    3183 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    3184 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    3185 #endif
    3186 
    3187 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    3188 //#define VMA_USE_STL_CONTAINERS 1
    3189 
    3190 /* Set this macro to 1 to make the library include and use STL containers:
    3191 std::pair, std::vector, std::list, std::unordered_map.
    3192 
    3193 Set it to 0 or leave it undefined to make the library use its own implementation of
    3194 the containers.
    3195 */
    3196 #if VMA_USE_STL_CONTAINERS
    3197  #define VMA_USE_STL_VECTOR 1
    3198  #define VMA_USE_STL_UNORDERED_MAP 1
    3199  #define VMA_USE_STL_LIST 1
    3200 #endif
    3201 
    3202 #ifndef VMA_USE_STL_SHARED_MUTEX
    3203  // Compiler conforms to C++17.
    3204  #if __cplusplus >= 201703L
    3205  #define VMA_USE_STL_SHARED_MUTEX 1
    3206  // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
    3207  // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
    3208  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    3209  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
    3210  #define VMA_USE_STL_SHARED_MUTEX 1
    3211  #else
    3212  #define VMA_USE_STL_SHARED_MUTEX 0
    3213  #endif
    3214 #endif
    3215 
    3216 /*
    3217 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
    3218 Library has its own container implementation.
    3219 */
    3220 #if VMA_USE_STL_VECTOR
    3221  #include <vector>
    3222 #endif
    3223 
    3224 #if VMA_USE_STL_UNORDERED_MAP
    3225  #include <unordered_map>
    3226 #endif
    3227 
    3228 #if VMA_USE_STL_LIST
    3229  #include <list>
    3230 #endif
    3231 
    3232 /*
    3233 The following headers are used only in this CONFIGURATION section, so feel free
    3234 to remove them if not needed.
    3235 */
    3236 #include <cassert> // for assert
    3237 #include <algorithm> // for min, max
    3238 #include <mutex>
    3239 
    3240 #ifndef VMA_NULL
    3241  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    3242  #define VMA_NULL nullptr
    3243 #endif
    3244 
    3245 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    3246 #include <cstdlib>
    3247 void *aligned_alloc(size_t alignment, size_t size)
    3248 {
    3249  // alignment must be >= sizeof(void*)
    3250  if(alignment < sizeof(void*))
    3251  {
    3252  alignment = sizeof(void*);
    3253  }
    3254 
    3255  return memalign(alignment, size);
    3256 }
    3257 #elif defined(__APPLE__) || defined(__ANDROID__)
    3258 #include <cstdlib>
    3259 void *aligned_alloc(size_t alignment, size_t size)
    3260 {
    3261  // alignment must be >= sizeof(void*)
    3262  if(alignment < sizeof(void*))
    3263  {
    3264  alignment = sizeof(void*);
    3265  }
    3266 
    3267  void *pointer;
    3268  if(posix_memalign(&pointer, alignment, size) == 0)
    3269  return pointer;
    3270  return VMA_NULL;
    3271 }
    3272 #endif
    3273 
    3274 // If your compiler is not compatible with C++11 and the definition of the
    3275 // aligned_alloc() function is missing, uncommenting the following line may help:
    3276 
    3277 //#include <malloc.h>
    3278 
    3279 // Normal assert to check for programmer's errors, especially in Debug configuration.
    3280 #ifndef VMA_ASSERT
    3281  #ifdef _DEBUG
    3282  #define VMA_ASSERT(expr) assert(expr)
    3283  #else
    3284  #define VMA_ASSERT(expr)
    3285  #endif
    3286 #endif
    3287 
    3288 // Assert that is called very often, e.g. inside data structures like operator[].
    3289 // Making it non-empty can make the program slow.
    3290 #ifndef VMA_HEAVY_ASSERT
    3291  #ifdef _DEBUG
    3292  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    3293  #else
    3294  #define VMA_HEAVY_ASSERT(expr)
    3295  #endif
    3296 #endif
    3297 
    3298 #ifndef VMA_ALIGN_OF
    3299  #define VMA_ALIGN_OF(type) (__alignof(type))
    3300 #endif
    3301 
    3302 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    3303  #if defined(_WIN32)
    3304  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    3305  #else
    3306  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    3307  #endif
    3308 #endif
    3309 
    3310 #ifndef VMA_SYSTEM_FREE
    3311  #if defined(_WIN32)
    3312  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    3313  #else
    3314  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    3315  #endif
    3316 #endif
    3317 
    3318 #ifndef VMA_MIN
    3319  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    3320 #endif
    3321 
    3322 #ifndef VMA_MAX
    3323  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    3324 #endif
    3325 
    3326 #ifndef VMA_SWAP
    3327  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    3328 #endif
    3329 
    3330 #ifndef VMA_SORT
    3331  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    3332 #endif
    3333 
    3334 #ifndef VMA_DEBUG_LOG
    3335  #define VMA_DEBUG_LOG(format, ...)
    3336  /*
    3337  #define VMA_DEBUG_LOG(format, ...) do { \
    3338  printf(format, __VA_ARGS__); \
    3339  printf("\n"); \
    3340  } while(false)
    3341  */
    3342 #endif
    3343 
    3344 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    3345 #if VMA_STATS_STRING_ENABLED
    3346  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    3347  {
    3348  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    3349  }
    3350  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    3351  {
    3352  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    3353  }
    3354  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    3355  {
    3356  snprintf(outStr, strLen, "%p", ptr);
    3357  }
    3358 #endif
    3359 
    3360 #ifndef VMA_MUTEX
    3361  class VmaMutex
    3362  {
    3363  public:
    3364  void Lock() { m_Mutex.lock(); }
    3365  void Unlock() { m_Mutex.unlock(); }
    3366  private:
    3367  std::mutex m_Mutex;
    3368  };
    3369  #define VMA_MUTEX VmaMutex
    3370 #endif
    3371 
    3372 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
    3373 #ifndef VMA_RW_MUTEX
    3374  #if VMA_USE_STL_SHARED_MUTEX
    3375  // Use std::shared_mutex from C++17.
    3376  #include <shared_mutex>
    3377  class VmaRWMutex
    3378  {
    3379  public:
    3380  void LockRead() { m_Mutex.lock_shared(); }
    3381  void UnlockRead() { m_Mutex.unlock_shared(); }
    3382  void LockWrite() { m_Mutex.lock(); }
    3383  void UnlockWrite() { m_Mutex.unlock(); }
    3384  private:
    3385  std::shared_mutex m_Mutex;
    3386  };
    3387  #define VMA_RW_MUTEX VmaRWMutex
    3388  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
    3389  // Use SRWLOCK from WinAPI.
    3390  // Minimum supported client = Windows Vista, server = Windows Server 2008.
    3391  class VmaRWMutex
    3392  {
    3393  public:
    3394  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
    3395  void LockRead() { AcquireSRWLockShared(&m_Lock); }
    3396  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
    3397  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
    3398  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
    3399  private:
    3400  SRWLOCK m_Lock;
    3401  };
    3402  #define VMA_RW_MUTEX VmaRWMutex
    3403  #else
    3404  // Less efficient fallback: Use normal mutex.
    3405  class VmaRWMutex
    3406  {
    3407  public:
    3408  void LockRead() { m_Mutex.Lock(); }
    3409  void UnlockRead() { m_Mutex.Unlock(); }
    3410  void LockWrite() { m_Mutex.Lock(); }
    3411  void UnlockWrite() { m_Mutex.Unlock(); }
    3412  private:
    3413  VMA_MUTEX m_Mutex;
    3414  };
    3415  #define VMA_RW_MUTEX VmaRWMutex
    3416  #endif // #if VMA_USE_STL_SHARED_MUTEX
    3417 #endif // #ifndef VMA_RW_MUTEX
    3418 
    3419 /*
    3420 If providing your own implementation, you need to implement a subset of std::atomic:
    3421 
    3422 - Constructor(uint32_t desired)
    3423 - uint32_t load() const
    3424 - void store(uint32_t desired)
    3425 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    3426 */
    3427 #ifndef VMA_ATOMIC_UINT32
    3428  #include <atomic>
    3429  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    3430 #endif
    3431 
    3432 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    3433 
    3437  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    3438 #endif
    3439 
    3440 #ifndef VMA_DEBUG_ALIGNMENT
    3441 
    3445  #define VMA_DEBUG_ALIGNMENT (1)
    3446 #endif
    3447 
    3448 #ifndef VMA_DEBUG_MARGIN
    3449 
    3453  #define VMA_DEBUG_MARGIN (0)
    3454 #endif
    3455 
    3456 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    3457 
    3461  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3462 #endif
    3463 
    3464 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3465 
    3470  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3471 #endif
    3472 
    3473 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3474 
    3478  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3479 #endif
    3480 
    3481 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3482 
    3486  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3487 #endif
    3488 
    3489 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3490  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3492 #endif
    3493 
    3494 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3495  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3497 #endif
    3498 
    3499 #ifndef VMA_CLASS_NO_COPY
    3500  #define VMA_CLASS_NO_COPY(className) \
    3501  private: \
    3502  className(const className&) = delete; \
    3503  className& operator=(const className&) = delete;
    3504 #endif
    3505 
    3506 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3507 
    3508 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3509 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3510 
    3511 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3512 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3513 
    3514 /*******************************************************************************
    3515 END OF CONFIGURATION
    3516 */
    3517 
    3518 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
    3519 
    3520 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3521  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3522 
    3523 // Returns the number of bits set to 1 in (v).
    3524 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3525 {
    3526  uint32_t c = v - ((v >> 1) & 0x55555555);
    3527  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3528  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3529  c = ((c >> 8) + c) & 0x00FF00FF;
    3530  c = ((c >> 16) + c) & 0x0000FFFF;
    3531  return c;
    3532 }
    3533 
    3534 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
    3535 // Use types like uint32_t, uint64_t as T.
    3536 template <typename T>
    3537 static inline T VmaAlignUp(T val, T align)
    3538 {
    3539  return (val + align - 1) / align * align;
    3540 }
    3541 // Aligns given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
    3542 // Use types like uint32_t, uint64_t as T.
    3543 template <typename T>
    3544 static inline T VmaAlignDown(T val, T align)
    3545 {
    3546  return val / align * align;
    3547 }
    3548 
    3549 // Division with mathematical rounding to the nearest integer.
    3550 template <typename T>
    3551 static inline T VmaRoundDiv(T x, T y)
    3552 {
    3553  return (x + (y / (T)2)) / y;
    3554 }
    3555 
    3556 /*
    3557 Returns true if given number is a power of two.
    3558 T must be an unsigned integer type, or a signed integer holding a nonnegative value.
    3559 Returns true for 0.
    3560 */
    3561 template <typename T>
    3562 inline bool VmaIsPow2(T x)
    3563 {
    3564  return (x & (x-1)) == 0;
    3565 }
    3566 
    3567 // Returns the smallest power of 2 greater than or equal to v.
    3568 static inline uint32_t VmaNextPow2(uint32_t v)
    3569 {
    3570  v--;
    3571  v |= v >> 1;
    3572  v |= v >> 2;
    3573  v |= v >> 4;
    3574  v |= v >> 8;
    3575  v |= v >> 16;
    3576  v++;
    3577  return v;
    3578 }
    3579 static inline uint64_t VmaNextPow2(uint64_t v)
    3580 {
    3581  v--;
    3582  v |= v >> 1;
    3583  v |= v >> 2;
    3584  v |= v >> 4;
    3585  v |= v >> 8;
    3586  v |= v >> 16;
    3587  v |= v >> 32;
    3588  v++;
    3589  return v;
    3590 }
    3591 
    3592 // Returns the largest power of 2 less than or equal to v.
    3593 static inline uint32_t VmaPrevPow2(uint32_t v)
    3594 {
    3595  v |= v >> 1;
    3596  v |= v >> 2;
    3597  v |= v >> 4;
    3598  v |= v >> 8;
    3599  v |= v >> 16;
    3600  v = v ^ (v >> 1);
    3601  return v;
    3602 }
    3603 static inline uint64_t VmaPrevPow2(uint64_t v)
    3604 {
    3605  v |= v >> 1;
    3606  v |= v >> 2;
    3607  v |= v >> 4;
    3608  v |= v >> 8;
    3609  v |= v >> 16;
    3610  v |= v >> 32;
    3611  v = v ^ (v >> 1);
    3612  return v;
    3613 }
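// ---------------------------------------------------------------------------
// Editor's note: a tiny sanity-check sketch (not part of the library) showing
// sample values for the power-of-2 helpers above.
static void ExamplePow2Helpers()
{
    VMA_ASSERT(VmaIsPow2(64u) && !VmaIsPow2(48u));
    VMA_ASSERT(VmaNextPow2(17u) == 32u); // smallest power of 2 >= 17
    VMA_ASSERT(VmaPrevPow2(17u) == 16u); // largest power of 2 <= 17
}
// ---------------------------------------------------------------------------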
    3614 
    3615 static inline bool VmaStrIsEmpty(const char* pStr)
    3616 {
    3617  return pStr == VMA_NULL || *pStr == '\0';
    3618 }
    3619 
    3620 #if VMA_STATS_STRING_ENABLED
    3621 
    3622 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3623 {
    3624  switch(algorithm)
    3625  {
    3627  return "Linear";
    3629  return "Buddy";
    3630  case 0:
    3631  return "Default";
    3632  default:
    3633  VMA_ASSERT(0);
    3634  return "";
    3635  }
    3636 }
    3637 
    3638 #endif // #if VMA_STATS_STRING_ENABLED
    3639 
    3640 #ifndef VMA_SORT
    3641 
    3642 template<typename Iterator, typename Compare>
    3643 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3644 {
    3645  Iterator centerValue = end; --centerValue;
    3646  Iterator insertIndex = beg;
    3647  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3648  {
    3649  if(cmp(*memTypeIndex, *centerValue))
    3650  {
    3651  if(insertIndex != memTypeIndex)
    3652  {
    3653  VMA_SWAP(*memTypeIndex, *insertIndex);
    3654  }
    3655  ++insertIndex;
    3656  }
    3657  }
    3658  if(insertIndex != centerValue)
    3659  {
    3660  VMA_SWAP(*insertIndex, *centerValue);
    3661  }
    3662  return insertIndex;
    3663 }
    3664 
    3665 template<typename Iterator, typename Compare>
    3666 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3667 {
    3668  if(beg < end)
    3669  {
    3670  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3671  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3672  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3673  }
    3674 }
    3675 
    3676 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3677 
    3678 #endif // #ifndef VMA_SORT
    3679 
    3680 /*
    3681 Returns true if two memory blocks occupy overlapping pages.
    3682 ResourceA must be at a lower memory offset than ResourceB.
    3683 
    3684 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3685 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3686 */
    3687 static inline bool VmaBlocksOnSamePage(
    3688  VkDeviceSize resourceAOffset,
    3689  VkDeviceSize resourceASize,
    3690  VkDeviceSize resourceBOffset,
    3691  VkDeviceSize pageSize)
    3692 {
    3693  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3694  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3695  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3696  VkDeviceSize resourceBStart = resourceBOffset;
    3697  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3698  return resourceAEndPage == resourceBStartPage;
    3699 }
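// ---------------------------------------------------------------------------
// Editor's note: a worked example (not part of the library) for the check
// above, with pageSize playing the role of bufferImageGranularity = 4096.
// A resource occupying [0, 4000) ends on page 0; a resource starting at
// offset 4096 begins on page 1, so the two cannot conflict, while one
// starting at offset 4064 still shares page 0 with it.
static void ExampleBlocksOnSamePage()
{
    VMA_ASSERT(!VmaBlocksOnSamePage(0, 4000, 4096, 4096)); // different pages
    VMA_ASSERT( VmaBlocksOnSamePage(0, 4000, 4064, 4096)); // same page
}
// ---------------------------------------------------------------------------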
    3700 
    3701 enum VmaSuballocationType
    3702 {
    3703  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3704  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3705  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3706  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3707  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3708  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3709  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3710 };
    3711 
    3712 /*
    3713 Returns true if given suballocation types could conflict and must respect
    3714 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
    3715 or linear image and the other is an optimal image. If a type is unknown, behave
    3716 conservatively.
    3717 */
    3718 static inline bool VmaIsBufferImageGranularityConflict(
    3719  VmaSuballocationType suballocType1,
    3720  VmaSuballocationType suballocType2)
    3721 {
    3722  if(suballocType1 > suballocType2)
    3723  {
    3724  VMA_SWAP(suballocType1, suballocType2);
    3725  }
    3726 
    3727  switch(suballocType1)
    3728  {
    3729  case VMA_SUBALLOCATION_TYPE_FREE:
    3730  return false;
    3731  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3732  return true;
    3733  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3734  return
    3735  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3736  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3737  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3738  return
    3739  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3740  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3741  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3742  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3743  return
    3744  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3745  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3746  return false;
    3747  default:
    3748  VMA_ASSERT(0);
    3749  return true;
    3750  }
    3751 }
    3752 
    3753 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3754 {
    3755 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    3756  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3757  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3758  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3759  {
    3760  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3761  }
    3762 #else
    3763  // no-op
    3764 #endif
    3765 }
    3766 
    3767 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3768 {
    3769 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    3770  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3771  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3772  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3773  {
    3774  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3775  {
    3776  return false;
    3777  }
    3778  }
    3779 #endif
    3780  return true;
    3781 }
    3782 
    3783 /*
    3784 Fills structure with parameters of an example buffer to be used for transfers
    3785 during GPU memory defragmentation.
    3786 */
    3787 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
    3788 {
    3789  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    3790  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    3791  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    3792  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
    3793 }
    3794 
    3795 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3796 struct VmaMutexLock
    3797 {
    3798  VMA_CLASS_NO_COPY(VmaMutexLock)
    3799 public:
    3800  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
    3801  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3802  { if(m_pMutex) { m_pMutex->Lock(); } }
    3803  ~VmaMutexLock()
    3804  { if(m_pMutex) { m_pMutex->Unlock(); } }
    3805 private:
    3806  VMA_MUTEX* m_pMutex;
    3807 };
    3808 
    3809 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
    3810 struct VmaMutexLockRead
    3811 {
    3812  VMA_CLASS_NO_COPY(VmaMutexLockRead)
    3813 public:
    3814  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
    3815  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3816  { if(m_pMutex) { m_pMutex->LockRead(); } }
    3817  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
    3818 private:
    3819  VMA_RW_MUTEX* m_pMutex;
    3820 };
    3821 
    3822 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
    3823 struct VmaMutexLockWrite
    3824 {
    3825  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
    3826 public:
    3827  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
    3828  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3829  { if(m_pMutex) { m_pMutex->LockWrite(); } }
    3830  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
    3831 private:
    3832  VMA_RW_MUTEX* m_pMutex;
    3833 };
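// ---------------------------------------------------------------------------
// Editor's note: a usage sketch (not part of the library) for the RAII lock
// guards above. `gExampleMutex` is a hypothetical piece of shared state.
static VMA_RW_MUTEX gExampleMutex;
static void ExampleReadLockedSection(bool useMutex)
{
    VmaMutexLockRead lock(gExampleMutex, useMutex);
    // ...read shared state here; the mutex is released when `lock` goes out
    // of scope, even on early return.
}
// ---------------------------------------------------------------------------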
    3834 
    3835 #if VMA_DEBUG_GLOBAL_MUTEX
    3836  static VMA_MUTEX gDebugGlobalMutex;
    3837  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3838 #else
    3839  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3840 #endif
    3841 
    3842 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3843 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3844 
    3845 /*
    3846 Performs binary search and returns an iterator to the first element that is
    3847 greater than or equal to (key), according to comparison (cmp).
    3848 
    3849 Cmp should return true if its first argument is less than its second argument.
    3850 
    3851 The returned value is the found element, if present in the collection, or the
    3852 place where a new element with value (key) should be inserted.
    3853 */
    3854 template <typename CmpLess, typename IterT, typename KeyT>
    3855 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
    3856 {
    3857  size_t down = 0, up = (end - beg);
    3858  while(down < up)
    3859  {
    3860  const size_t mid = (down + up) / 2;
    3861  if(cmp(*(beg+mid), key))
    3862  {
    3863  down = mid + 1;
    3864  }
    3865  else
    3866  {
    3867  up = mid;
    3868  }
    3869  }
    3870  return beg + down;
    3871 }
    3872 
    3873 template<typename CmpLess, typename IterT, typename KeyT>
    3874 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
    3875 {
    3876  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3877  beg, end, value, cmp);
    3878  if(it == end ||
    3879  (!cmp(*it, value) && !cmp(value, *it)))
    3880  {
    3881  return it;
    3882  }
    3883  return end;
    3884 }
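// ---------------------------------------------------------------------------
// Editor's note: a usage sketch (not part of the library) for the two binary
// search helpers above, on a small sorted array.
static void ExampleBinaryFind()
{
    const uint32_t sorted[] = { 2, 4, 8, 8, 16 };
    const uint32_t* const beg = sorted;
    const uint32_t* const end = sorted + 5;
    const auto cmpLess = [](uint32_t a, uint32_t b) { return a < b; };

    // First element >= 8 is at index 2: also the lower-bound insertion point.
    VMA_ASSERT(VmaBinaryFindFirstNotLess(beg, end, 8u, cmpLess) == beg + 2);
    // VmaBinaryFindSorted returns `end` when the key is absent.
    VMA_ASSERT(VmaBinaryFindSorted(beg, end, 5u, cmpLess) == end);
}
// ---------------------------------------------------------------------------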
    3885 
    3886 /*
    3887 Returns true if all pointers in the array are non-null and unique.
    3888 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
    3889 T must be a pointer type, e.g. VmaAllocation, VmaPool.
    3890 */
    3891 template<typename T>
    3892 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
    3893 {
    3894  for(uint32_t i = 0; i < count; ++i)
    3895  {
    3896  const T iPtr = arr[i];
    3897  if(iPtr == VMA_NULL)
    3898  {
    3899  return false;
    3900  }
    3901  for(uint32_t j = i + 1; j < count; ++j)
    3902  {
    3903  if(iPtr == arr[j])
    3904  {
    3905  return false;
    3906  }
    3907  }
    3908  }
    3909  return true;
    3910 }
    3911 
    3913 // Memory allocation
    3914 
    3915 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3916 {
    3917  if((pAllocationCallbacks != VMA_NULL) &&
    3918  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3919  {
    3920  return (*pAllocationCallbacks->pfnAllocation)(
    3921  pAllocationCallbacks->pUserData,
    3922  size,
    3923  alignment,
    3924  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3925  }
    3926  else
    3927  {
    3928  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3929  }
    3930 }
    3931 
    3932 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3933 {
    3934  if((pAllocationCallbacks != VMA_NULL) &&
    3935  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3936  {
    3937  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3938  }
    3939  else
    3940  {
    3941  VMA_SYSTEM_FREE(ptr);
    3942  }
    3943 }
    3944 
    3945 template<typename T>
    3946 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3947 {
    3948  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3949 }
    3950 
    3951 template<typename T>
    3952 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3953 {
    3954  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3955 }
    3956 
    3957 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3958 
    3959 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3960 
    3961 template<typename T>
    3962 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3963 {
    3964  ptr->~T();
    3965  VmaFree(pAllocationCallbacks, ptr);
    3966 }
    3967 
    3968 template<typename T>
    3969 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3970 {
    3971  if(ptr != VMA_NULL)
    3972  {
    3973  for(size_t i = count; i--; )
    3974  {
    3975  ptr[i].~T();
    3976  }
    3977  VmaFree(pAllocationCallbacks, ptr);
    3978  }
    3979 }
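// ---------------------------------------------------------------------------
// Editor's note: a usage sketch (not part of the library) for the
// callback-aware new/delete helpers above. `ExamplePayload` is hypothetical.
struct ExamplePayload { uint32_t value; };
static void ExampleVmaNewDelete(const VkAllocationCallbacks* pAllocationCallbacks)
{
    // Memory comes from VmaMalloc (user callbacks if provided), then the
    // object is constructed in place via placement new.
    ExamplePayload* p = vma_new(pAllocationCallbacks, ExamplePayload)();
    p->value = 42;
    vma_delete(pAllocationCallbacks, p); // destructor + VmaFree

    // The array macro placement-constructs only the first element, which is
    // fine for trivially constructible/destructible T like this one.
    ExamplePayload* arr = vma_new_array(pAllocationCallbacks, ExamplePayload, 8);
    vma_delete_array(pAllocationCallbacks, arr, 8);
}
// ---------------------------------------------------------------------------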
    3980 
    3981 // STL-compatible allocator.
    3982 template<typename T>
    3983 class VmaStlAllocator
    3984 {
    3985 public:
    3986  const VkAllocationCallbacks* const m_pCallbacks;
    3987  typedef T value_type;
    3988 
    3989  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    3990  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    3991 
    3992  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    3993  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    3994 
    3995  template<typename U>
    3996  bool operator==(const VmaStlAllocator<U>& rhs) const
    3997  {
    3998  return m_pCallbacks == rhs.m_pCallbacks;
    3999  }
    4000  template<typename U>
    4001  bool operator!=(const VmaStlAllocator<U>& rhs) const
    4002  {
    4003  return m_pCallbacks != rhs.m_pCallbacks;
    4004  }
    4005 
    4006  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    4007 };
    4008 
    4009 #if VMA_USE_STL_VECTOR
    4010 
    4011 #define VmaVector std::vector
    4012 
    4013 template<typename T, typename allocatorT>
    4014 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    4015 {
    4016  vec.insert(vec.begin() + index, item);
    4017 }
    4018 
    4019 template<typename T, typename allocatorT>
    4020 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    4021 {
    4022  vec.erase(vec.begin() + index);
    4023 }
    4024 
    4025 #else // #if VMA_USE_STL_VECTOR
    4026 
    4027 /* Class with an interface compatible with a subset of std::vector.
    4028 T must be POD because constructors and destructors are not called and memcpy is
    4029 used to move these objects. */
    4030 template<typename T, typename AllocatorT>
    4031 class VmaVector
    4032 {
    4033 public:
    4034  typedef T value_type;
    4035 
    4036  VmaVector(const AllocatorT& allocator) :
    4037  m_Allocator(allocator),
    4038  m_pArray(VMA_NULL),
    4039  m_Count(0),
    4040  m_Capacity(0)
    4041  {
    4042  }
    4043 
    4044  VmaVector(size_t count, const AllocatorT& allocator) :
    4045  m_Allocator(allocator),
    4046  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    4047  m_Count(count),
    4048  m_Capacity(count)
    4049  {
    4050  }
    4051 
    4052  VmaVector(const VmaVector<T, AllocatorT>& src) :
    4053  m_Allocator(src.m_Allocator),
    4054  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    4055  m_Count(src.m_Count),
    4056  m_Capacity(src.m_Count)
    4057  {
    4058  if(m_Count != 0)
    4059  {
    4060  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    4061  }
    4062  }
    4063 
    4064  ~VmaVector()
    4065  {
    4066  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4067  }
    4068 
    4069  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    4070  {
    4071  if(&rhs != this)
    4072  {
    4073  resize(rhs.m_Count);
    4074  if(m_Count != 0)
    4075  {
    4076  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    4077  }
    4078  }
    4079  return *this;
    4080  }
    4081 
    4082  bool empty() const { return m_Count == 0; }
    4083  size_t size() const { return m_Count; }
    4084  T* data() { return m_pArray; }
    4085  const T* data() const { return m_pArray; }
    4086 
    4087  T& operator[](size_t index)
    4088  {
    4089  VMA_HEAVY_ASSERT(index < m_Count);
    4090  return m_pArray[index];
    4091  }
    4092  const T& operator[](size_t index) const
    4093  {
    4094  VMA_HEAVY_ASSERT(index < m_Count);
    4095  return m_pArray[index];
    4096  }
    4097 
    4098  T& front()
    4099  {
    4100  VMA_HEAVY_ASSERT(m_Count > 0);
    4101  return m_pArray[0];
    4102  }
    4103  const T& front() const
    4104  {
    4105  VMA_HEAVY_ASSERT(m_Count > 0);
    4106  return m_pArray[0];
    4107  }
    4108  T& back()
    4109  {
    4110  VMA_HEAVY_ASSERT(m_Count > 0);
    4111  return m_pArray[m_Count - 1];
    4112  }
    4113  const T& back() const
    4114  {
    4115  VMA_HEAVY_ASSERT(m_Count > 0);
    4116  return m_pArray[m_Count - 1];
    4117  }
    4118 
    4119  void reserve(size_t newCapacity, bool freeMemory = false)
    4120  {
    4121  newCapacity = VMA_MAX(newCapacity, m_Count);
    4122 
    4123  if((newCapacity < m_Capacity) && !freeMemory)
    4124  {
    4125  newCapacity = m_Capacity;
    4126  }
    4127 
    4128  if(newCapacity != m_Capacity)
    4129  {
    4130  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL; // pass the VkAllocationCallbacks*, as resize() does
    4131  if(m_Count != 0)
    4132  {
    4133  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    4134  }
    4135  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4136  m_Capacity = newCapacity;
    4137  m_pArray = newArray;
    4138  }
    4139  }
    4140 
    4141  void resize(size_t newCount, bool freeMemory = false)
    4142  {
    4143  size_t newCapacity = m_Capacity;
    4144  if(newCount > m_Capacity)
    4145  {
    4146  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    4147  }
    4148  else if(freeMemory)
    4149  {
    4150  newCapacity = newCount;
    4151  }
    4152 
    4153  if(newCapacity != m_Capacity)
    4154  {
    4155  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    4156  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    4157  if(elementsToCopy != 0)
    4158  {
    4159  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    4160  }
    4161  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4162  m_Capacity = newCapacity;
    4163  m_pArray = newArray;
    4164  }
    4165 
    4166  m_Count = newCount;
    4167  }
    4168 
    4169  void clear(bool freeMemory = false)
    4170  {
    4171  resize(0, freeMemory);
    4172  }
    4173 
    4174  void insert(size_t index, const T& src)
    4175  {
    4176  VMA_HEAVY_ASSERT(index <= m_Count);
    4177  const size_t oldCount = size();
    4178  resize(oldCount + 1);
    4179  if(index < oldCount)
    4180  {
    4181  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    4182  }
    4183  m_pArray[index] = src;
    4184  }
    4185 
    4186  void remove(size_t index)
    4187  {
    4188  VMA_HEAVY_ASSERT(index < m_Count);
    4189  const size_t oldCount = size();
    4190  if(index < oldCount - 1)
    4191  {
    4192  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    4193  }
    4194  resize(oldCount - 1);
    4195  }
    4196 
    4197  void push_back(const T& src)
    4198  {
    4199  const size_t newIndex = size();
    4200  resize(newIndex + 1);
    4201  m_pArray[newIndex] = src;
    4202  }
    4203 
    4204  void pop_back()
    4205  {
    4206  VMA_HEAVY_ASSERT(m_Count > 0);
    4207  resize(size() - 1);
    4208  }
    4209 
    4210  void push_front(const T& src)
    4211  {
    4212  insert(0, src);
    4213  }
    4214 
    4215  void pop_front()
    4216  {
    4217  VMA_HEAVY_ASSERT(m_Count > 0);
    4218  remove(0);
    4219  }
    4220 
    4221  typedef T* iterator;
    4222 
    4223  iterator begin() { return m_pArray; }
    4224  iterator end() { return m_pArray + m_Count; }
    4225 
    4226 private:
    4227  AllocatorT m_Allocator;
    4228  T* m_pArray;
    4229  size_t m_Count;
    4230  size_t m_Capacity;
    4231 };
    4232 
    4233 template<typename T, typename allocatorT>
    4234 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    4235 {
    4236  vec.insert(index, item);
    4237 }
    4238 
    4239 template<typename T, typename allocatorT>
    4240 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    4241 {
    4242  vec.remove(index);
    4243 }
    4244 
    4245 #endif // #if VMA_USE_STL_VECTOR
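// [Editor's note] A minimal usage sketch of VmaVector, assuming the VmaStlAllocator
// defined earlier in this file. Illustrative only - not part of the library source.
static void VmaVectorUsageSketch(const VkAllocationCallbacks* pCallbacks)
{
    const VmaStlAllocator<uint32_t> alloc(pCallbacks);
    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > vec(alloc);
    vec.push_back(42u);          // capacity grows geometrically: max(count, capacity * 3 / 2, 8)
    VmaVectorInsert(vec, 0, 7u); // memmove-based shift; valid only because uint32_t is POD
    VmaVectorRemove(vec, 1);     // removes the 42
    VMA_ASSERT(vec.size() == 1 && vec[0] == 7u);
}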
    4246 
    4247 template<typename CmpLess, typename VectorT>
    4248 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    4249 {
    4250  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4251  vector.data(),
    4252  vector.data() + vector.size(),
    4253  value,
    4254  CmpLess()) - vector.data();
    4255  VmaVectorInsert(vector, indexToInsert, value);
    4256  return indexToInsert;
    4257 }
    4258 
    4259 template<typename CmpLess, typename VectorT>
    4260 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    4261 {
    4262  CmpLess comparator;
    4263  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    4264  vector.begin(),
    4265  vector.end(),
    4266  value,
    4267  comparator);
    4268  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    4269  {
    4270  size_t indexToRemove = it - vector.begin();
    4271  VmaVectorRemove(vector, indexToRemove);
    4272  return true;
    4273  }
    4274  return false;
    4275 }
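// [Editor's note] Sketch of the sorted-vector helpers above, with a hypothetical comparator.
// VmaBinaryFindFirstNotLess (defined earlier in this file) performs the binary search;
// VmaVectorRemoveSorted only removes an element that compares equivalent to the value.
struct SketchUint32Less { bool operator()(uint32_t lhs, uint32_t rhs) const { return lhs < rhs; } };
static void VmaSortedVectorSketch(VmaVector< uint32_t, VmaStlAllocator<uint32_t> >& vec)
{
    VmaVectorInsertSorted<SketchUint32Less>(vec, 30u); // inserted at its sorted position
    VmaVectorInsertSorted<SketchUint32Less>(vec, 10u); // lands before the 30
    const bool removed = VmaVectorRemoveSorted<SketchUint32Less>(vec, 30u);
    VMA_ASSERT(removed); // true: an equivalent element was found and removed
    (void)removed;
}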
    4276 
    4278 // class VmaPoolAllocator
    4279 
    4280 /*
    4281 Allocator for objects of type T using a list of arrays (pools) to speed up
    4282 allocation. The number of elements that can be allocated is not bounded because
    4283 the allocator can create multiple blocks.
    4284 */
    4285 template<typename T>
    4286 class VmaPoolAllocator
    4287 {
    4288  VMA_CLASS_NO_COPY(VmaPoolAllocator)
    4289 public:
    4290  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    4291  ~VmaPoolAllocator();
    4292  void Clear();
    4293  T* Alloc();
    4294  void Free(T* ptr);
    4295 
    4296 private:
    4297  union Item
    4298  {
    4299  uint32_t NextFreeIndex;
    4300  T Value;
    4301  };
    4302 
    4303  struct ItemBlock
    4304  {
    4305  Item* pItems;
    4306  uint32_t Capacity;
    4307  uint32_t FirstFreeIndex;
    4308  };
    4309 
    4310  const VkAllocationCallbacks* m_pAllocationCallbacks;
    4311  const uint32_t m_FirstBlockCapacity;
    4312  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    4313 
    4314  ItemBlock& CreateNewBlock();
    4315 };
    4316 
    4317 template<typename T>
    4318 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    4319  m_pAllocationCallbacks(pAllocationCallbacks),
    4320  m_FirstBlockCapacity(firstBlockCapacity),
    4321  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    4322 {
    4323  VMA_ASSERT(m_FirstBlockCapacity > 1);
    4324 }
    4325 
    4326 template<typename T>
    4327 VmaPoolAllocator<T>::~VmaPoolAllocator()
    4328 {
    4329  Clear();
    4330 }
    4331 
    4332 template<typename T>
    4333 void VmaPoolAllocator<T>::Clear()
    4334 {
    4335  for(size_t i = m_ItemBlocks.size(); i--; )
    4336  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    4337  m_ItemBlocks.clear();
    4338 }
    4339 
    4340 template<typename T>
    4341 T* VmaPoolAllocator<T>::Alloc()
    4342 {
    4343  for(size_t i = m_ItemBlocks.size(); i--; )
    4344  {
    4345  ItemBlock& block = m_ItemBlocks[i];
    4346  // This block has some free items: use the first one.
    4347  if(block.FirstFreeIndex != UINT32_MAX)
    4348  {
    4349  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    4350  block.FirstFreeIndex = pItem->NextFreeIndex;
    4351  return &pItem->Value;
    4352  }
    4353  }
    4354 
    4355  // No block has a free item: create a new one and use it.
    4356  ItemBlock& newBlock = CreateNewBlock();
    4357  Item* const pItem = &newBlock.pItems[0];
    4358  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    4359  return &pItem->Value;
    4360 }
    4361 
    4362 template<typename T>
    4363 void VmaPoolAllocator<T>::Free(T* ptr)
    4364 {
    4365  // Search all memory blocks to find ptr.
    4366  for(size_t i = m_ItemBlocks.size(); i--; )
    4367  {
    4368  ItemBlock& block = m_ItemBlocks[i];
    4369 
    4370  // Casting to union.
    4371  Item* pItemPtr;
    4372  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    4373 
    4374  // Check if pItemPtr is in address range of this block.
    4375  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
    4376  {
    4377  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    4378  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    4379  block.FirstFreeIndex = index;
    4380  return;
    4381  }
    4382  }
    4383  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    4384 }
    4385 
    4386 template<typename T>
    4387 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    4388 {
    4389  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
    4390  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
    4391 
    4392  const ItemBlock newBlock = {
    4393  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
    4394  newBlockCapacity,
    4395  0 };
    4396 
    4397  m_ItemBlocks.push_back(newBlock);
    4398 
    4399  // Setup singly-linked list of all free items in this block.
    4400  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
    4401  newBlock.pItems[i].NextFreeIndex = i + 1;
    4402  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    4403  return m_ItemBlocks.back();
    4404 }
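// [Editor's note] Usage sketch for VmaPoolAllocator. Alloc() returns raw, uninitialized
// storage (the first block is created on demand, later blocks grow by 3/2), which is why
// pooled types - such as VmaAllocation_T below - must be POD and are initialized through
// an explicit Ctor-style method instead of a real constructor. SketchPodItem is hypothetical.
struct SketchPodItem { uint32_t id; };
static void VmaPoolAllocatorSketch(const VkAllocationCallbacks* pCallbacks)
{
    VmaPoolAllocator<SketchPodItem> pool(pCallbacks, 32); // first block holds 32 items
    SketchPodItem* const item = pool.Alloc(); // O(1): pops the head of a block's free-list
    item->id = 1;    // manual initialization - no constructor ran
    pool.Free(item); // pushes the item back onto its owning block's free-list
}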
    4405 
    4407 // class VmaRawList, VmaList
    4408 
    4409 #if VMA_USE_STL_LIST
    4410 
    4411 #define VmaList std::list
    4412 
    4413 #else // #if VMA_USE_STL_LIST
    4414 
    4415 template<typename T>
    4416 struct VmaListItem
    4417 {
    4418  VmaListItem* pPrev;
    4419  VmaListItem* pNext;
    4420  T Value;
    4421 };
    4422 
    4423 // Doubly linked list.
    4424 template<typename T>
    4425 class VmaRawList
    4426 {
    4427  VMA_CLASS_NO_COPY(VmaRawList)
    4428 public:
    4429  typedef VmaListItem<T> ItemType;
    4430 
    4431  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    4432  ~VmaRawList();
    4433  void Clear();
    4434 
    4435  size_t GetCount() const { return m_Count; }
    4436  bool IsEmpty() const { return m_Count == 0; }
    4437 
    4438  ItemType* Front() { return m_pFront; }
    4439  const ItemType* Front() const { return m_pFront; }
    4440  ItemType* Back() { return m_pBack; }
    4441  const ItemType* Back() const { return m_pBack; }
    4442 
    4443  ItemType* PushBack();
    4444  ItemType* PushFront();
    4445  ItemType* PushBack(const T& value);
    4446  ItemType* PushFront(const T& value);
    4447  void PopBack();
    4448  void PopFront();
    4449 
    4450  // Item can be null - it means PushBack.
    4451  ItemType* InsertBefore(ItemType* pItem);
    4452  // Item can be null - it means PushFront.
    4453  ItemType* InsertAfter(ItemType* pItem);
    4454 
    4455  ItemType* InsertBefore(ItemType* pItem, const T& value);
    4456  ItemType* InsertAfter(ItemType* pItem, const T& value);
    4457 
    4458  void Remove(ItemType* pItem);
    4459 
    4460 private:
    4461  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    4462  VmaPoolAllocator<ItemType> m_ItemAllocator;
    4463  ItemType* m_pFront;
    4464  ItemType* m_pBack;
    4465  size_t m_Count;
    4466 };
    4467 
    4468 template<typename T>
    4469 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    4470  m_pAllocationCallbacks(pAllocationCallbacks),
    4471  m_ItemAllocator(pAllocationCallbacks, 128),
    4472  m_pFront(VMA_NULL),
    4473  m_pBack(VMA_NULL),
    4474  m_Count(0)
    4475 {
    4476 }
    4477 
    4478 template<typename T>
    4479 VmaRawList<T>::~VmaRawList()
    4480 {
    4481  // Intentionally not calling Clear, because that would waste computation
    4482  // returning all items to m_ItemAllocator as free.
    4483 }
    4484 
    4485 template<typename T>
    4486 void VmaRawList<T>::Clear()
    4487 {
    4488  if(IsEmpty() == false)
    4489  {
    4490  ItemType* pItem = m_pBack;
    4491  while(pItem != VMA_NULL)
    4492  {
    4493  ItemType* const pPrevItem = pItem->pPrev;
    4494  m_ItemAllocator.Free(pItem);
    4495  pItem = pPrevItem;
    4496  }
    4497  m_pFront = VMA_NULL;
    4498  m_pBack = VMA_NULL;
    4499  m_Count = 0;
    4500  }
    4501 }
    4502 
    4503 template<typename T>
    4504 VmaListItem<T>* VmaRawList<T>::PushBack()
    4505 {
    4506  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4507  pNewItem->pNext = VMA_NULL;
    4508  if(IsEmpty())
    4509  {
    4510  pNewItem->pPrev = VMA_NULL;
    4511  m_pFront = pNewItem;
    4512  m_pBack = pNewItem;
    4513  m_Count = 1;
    4514  }
    4515  else
    4516  {
    4517  pNewItem->pPrev = m_pBack;
    4518  m_pBack->pNext = pNewItem;
    4519  m_pBack = pNewItem;
    4520  ++m_Count;
    4521  }
    4522  return pNewItem;
    4523 }
    4524 
    4525 template<typename T>
    4526 VmaListItem<T>* VmaRawList<T>::PushFront()
    4527 {
    4528  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4529  pNewItem->pPrev = VMA_NULL;
    4530  if(IsEmpty())
    4531  {
    4532  pNewItem->pNext = VMA_NULL;
    4533  m_pFront = pNewItem;
    4534  m_pBack = pNewItem;
    4535  m_Count = 1;
    4536  }
    4537  else
    4538  {
    4539  pNewItem->pNext = m_pFront;
    4540  m_pFront->pPrev = pNewItem;
    4541  m_pFront = pNewItem;
    4542  ++m_Count;
    4543  }
    4544  return pNewItem;
    4545 }
    4546 
    4547 template<typename T>
    4548 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    4549 {
    4550  ItemType* const pNewItem = PushBack();
    4551  pNewItem->Value = value;
    4552  return pNewItem;
    4553 }
    4554 
    4555 template<typename T>
    4556 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    4557 {
    4558  ItemType* const pNewItem = PushFront();
    4559  pNewItem->Value = value;
    4560  return pNewItem;
    4561 }
    4562 
    4563 template<typename T>
    4564 void VmaRawList<T>::PopBack()
    4565 {
    4566  VMA_HEAVY_ASSERT(m_Count > 0);
    4567  ItemType* const pBackItem = m_pBack;
    4568  ItemType* const pPrevItem = pBackItem->pPrev;
    4569  if(pPrevItem != VMA_NULL)
    4570  {
    4571  pPrevItem->pNext = VMA_NULL;
    4572  }
    4573  m_pBack = pPrevItem;
    4574  m_ItemAllocator.Free(pBackItem);
    4575  --m_Count;
    4576 }
    4577 
    4578 template<typename T>
    4579 void VmaRawList<T>::PopFront()
    4580 {
    4581  VMA_HEAVY_ASSERT(m_Count > 0);
    4582  ItemType* const pFrontItem = m_pFront;
    4583  ItemType* const pNextItem = pFrontItem->pNext;
    4584  if(pNextItem != VMA_NULL)
    4585  {
    4586  pNextItem->pPrev = VMA_NULL;
    4587  }
    4588  m_pFront = pNextItem;
    4589  m_ItemAllocator.Free(pFrontItem);
    4590  --m_Count;
    4591 }
    4592 
    4593 template<typename T>
    4594 void VmaRawList<T>::Remove(ItemType* pItem)
    4595 {
    4596  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4597  VMA_HEAVY_ASSERT(m_Count > 0);
    4598 
    4599  if(pItem->pPrev != VMA_NULL)
    4600  {
    4601  pItem->pPrev->pNext = pItem->pNext;
    4602  }
    4603  else
    4604  {
    4605  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4606  m_pFront = pItem->pNext;
    4607  }
    4608 
    4609  if(pItem->pNext != VMA_NULL)
    4610  {
    4611  pItem->pNext->pPrev = pItem->pPrev;
    4612  }
    4613  else
    4614  {
    4615  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4616  m_pBack = pItem->pPrev;
    4617  }
    4618 
    4619  m_ItemAllocator.Free(pItem);
    4620  --m_Count;
    4621 }
    4622 
    4623 template<typename T>
    4624 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4625 {
    4626  if(pItem != VMA_NULL)
    4627  {
    4628  ItemType* const prevItem = pItem->pPrev;
    4629  ItemType* const newItem = m_ItemAllocator.Alloc();
    4630  newItem->pPrev = prevItem;
    4631  newItem->pNext = pItem;
    4632  pItem->pPrev = newItem;
    4633  if(prevItem != VMA_NULL)
    4634  {
    4635  prevItem->pNext = newItem;
    4636  }
    4637  else
    4638  {
    4639  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4640  m_pFront = newItem;
    4641  }
    4642  ++m_Count;
    4643  return newItem;
    4644  }
    4645  else
    4646  return PushBack();
    4647 }
    4648 
    4649 template<typename T>
    4650 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4651 {
    4652  if(pItem != VMA_NULL)
    4653  {
    4654  ItemType* const nextItem = pItem->pNext;
    4655  ItemType* const newItem = m_ItemAllocator.Alloc();
    4656  newItem->pNext = nextItem;
    4657  newItem->pPrev = pItem;
    4658  pItem->pNext = newItem;
    4659  if(nextItem != VMA_NULL)
    4660  {
    4661  nextItem->pPrev = newItem;
    4662  }
    4663  else
    4664  {
    4665  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4666  m_pBack = newItem;
    4667  }
    4668  ++m_Count;
    4669  return newItem;
    4670  }
    4671  else
    4672  return PushFront();
    4673 }
    4674 
    4675 template<typename T>
    4676 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4677 {
    4678  ItemType* const newItem = InsertBefore(pItem);
    4679  newItem->Value = value;
    4680  return newItem;
    4681 }
    4682 
    4683 template<typename T>
    4684 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4685 {
    4686  ItemType* const newItem = InsertAfter(pItem);
    4687  newItem->Value = value;
    4688  return newItem;
    4689 }
    4690 
    4691 template<typename T, typename AllocatorT>
    4692 class VmaList
    4693 {
    4694  VMA_CLASS_NO_COPY(VmaList)
    4695 public:
    4696  class iterator
    4697  {
    4698  public:
    4699  iterator() :
    4700  m_pList(VMA_NULL),
    4701  m_pItem(VMA_NULL)
    4702  {
    4703  }
    4704 
    4705  T& operator*() const
    4706  {
    4707  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4708  return m_pItem->Value;
    4709  }
    4710  T* operator->() const
    4711  {
    4712  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4713  return &m_pItem->Value;
    4714  }
    4715 
    4716  iterator& operator++()
    4717  {
    4718  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4719  m_pItem = m_pItem->pNext;
    4720  return *this;
    4721  }
    4722  iterator& operator--()
    4723  {
    4724  if(m_pItem != VMA_NULL)
    4725  {
    4726  m_pItem = m_pItem->pPrev;
    4727  }
    4728  else
    4729  {
    4730  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4731  m_pItem = m_pList->Back();
    4732  }
    4733  return *this;
    4734  }
    4735 
    4736  iterator operator++(int)
    4737  {
    4738  iterator result = *this;
    4739  ++*this;
    4740  return result;
    4741  }
    4742  iterator operator--(int)
    4743  {
    4744  iterator result = *this;
    4745  --*this;
    4746  return result;
    4747  }
    4748 
    4749  bool operator==(const iterator& rhs) const
    4750  {
    4751  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4752  return m_pItem == rhs.m_pItem;
    4753  }
    4754  bool operator!=(const iterator& rhs) const
    4755  {
    4756  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4757  return m_pItem != rhs.m_pItem;
    4758  }
    4759 
    4760  private:
    4761  VmaRawList<T>* m_pList;
    4762  VmaListItem<T>* m_pItem;
    4763 
    4764  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    4765  m_pList(pList),
    4766  m_pItem(pItem)
    4767  {
    4768  }
    4769 
    4770  friend class VmaList<T, AllocatorT>;
    4771  };
    4772 
    4773  class const_iterator
    4774  {
    4775  public:
    4776  const_iterator() :
    4777  m_pList(VMA_NULL),
    4778  m_pItem(VMA_NULL)
    4779  {
    4780  }
    4781 
    4782  const_iterator(const iterator& src) :
    4783  m_pList(src.m_pList),
    4784  m_pItem(src.m_pItem)
    4785  {
    4786  }
    4787 
    4788  const T& operator*() const
    4789  {
    4790  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4791  return m_pItem->Value;
    4792  }
    4793  const T* operator->() const
    4794  {
    4795  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4796  return &m_pItem->Value;
    4797  }
    4798 
    4799  const_iterator& operator++()
    4800  {
    4801  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4802  m_pItem = m_pItem->pNext;
    4803  return *this;
    4804  }
    4805  const_iterator& operator--()
    4806  {
    4807  if(m_pItem != VMA_NULL)
    4808  {
    4809  m_pItem = m_pItem->pPrev;
    4810  }
    4811  else
    4812  {
    4813  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4814  m_pItem = m_pList->Back();
    4815  }
    4816  return *this;
    4817  }
    4818 
    4819  const_iterator operator++(int)
    4820  {
    4821  const_iterator result = *this;
    4822  ++*this;
    4823  return result;
    4824  }
    4825  const_iterator operator--(int)
    4826  {
    4827  const_iterator result = *this;
    4828  --*this;
    4829  return result;
    4830  }
    4831 
    4832  bool operator==(const const_iterator& rhs) const
    4833  {
    4834  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4835  return m_pItem == rhs.m_pItem;
    4836  }
    4837  bool operator!=(const const_iterator& rhs) const
    4838  {
    4839  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4840  return m_pItem != rhs.m_pItem;
    4841  }
    4842 
    4843  private:
    4844  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    4845  m_pList(pList),
    4846  m_pItem(pItem)
    4847  {
    4848  }
    4849 
    4850  const VmaRawList<T>* m_pList;
    4851  const VmaListItem<T>* m_pItem;
    4852 
    4853  friend class VmaList<T, AllocatorT>;
    4854  };
    4855 
    4856  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    4857 
    4858  bool empty() const { return m_RawList.IsEmpty(); }
    4859  size_t size() const { return m_RawList.GetCount(); }
    4860 
    4861  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    4862  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    4863 
    4864  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    4865  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    4866 
    4867  void clear() { m_RawList.Clear(); }
    4868  void push_back(const T& value) { m_RawList.PushBack(value); }
    4869  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    4870  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    4871 
    4872 private:
    4873  VmaRawList<T> m_RawList;
    4874 };
    4875 
    4876 #endif // #if VMA_USE_STL_LIST
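// [Editor's note] A minimal iteration sketch for VmaList, assuming the VmaStlAllocator
// defined earlier in this file. Items live in the VmaPoolAllocator-backed VmaRawList, so
// once pool blocks exist, push_back/erase do not touch the system heap. Illustrative only.
static void VmaListSketch(const VkAllocationCallbacks* pCallbacks)
{
    const VmaStlAllocator<uint32_t> alloc(pCallbacks);
    VmaList< uint32_t, VmaStlAllocator<uint32_t> > list(alloc);
    list.push_back(1u);
    list.push_back(2u);
    typedef VmaList< uint32_t, VmaStlAllocator<uint32_t> >::iterator Iter;
    for(Iter it = list.begin(); it != list.end(); ++it) // end() wraps the null item
    {
        VMA_ASSERT(*it == 1u || *it == 2u);
    }
    list.clear();
}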
    4877 
    4879 // class VmaMap
    4880 
    4881 // Unused in this version.
    4882 #if 0
    4883 
    4884 #if VMA_USE_STL_UNORDERED_MAP
    4885 
    4886 #define VmaPair std::pair
    4887 
    4888 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4889  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4890 
    4891 #else // #if VMA_USE_STL_UNORDERED_MAP
    4892 
    4893 template<typename T1, typename T2>
    4894 struct VmaPair
    4895 {
    4896  T1 first;
    4897  T2 second;
    4898 
    4899  VmaPair() : first(), second() { }
    4900  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    4901 };
    4902 
    4903 /* Class compatible with subset of interface of std::unordered_map.
    4904 KeyT, ValueT must be POD because they will be stored in VmaVector.
    4905 */
    4906 template<typename KeyT, typename ValueT>
    4907 class VmaMap
    4908 {
    4909 public:
    4910  typedef VmaPair<KeyT, ValueT> PairType;
    4911  typedef PairType* iterator;
    4912 
    4913  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    4914 
    4915  iterator begin() { return m_Vector.begin(); }
    4916  iterator end() { return m_Vector.end(); }
    4917 
    4918  void insert(const PairType& pair);
    4919  iterator find(const KeyT& key);
    4920  void erase(iterator it);
    4921 
    4922 private:
    4923  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    4924 };
    4925 
    4926 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4927 
    4928 template<typename FirstT, typename SecondT>
    4929 struct VmaPairFirstLess
    4930 {
    4931  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    4932  {
    4933  return lhs.first < rhs.first;
    4934  }
    4935  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    4936  {
    4937  return lhs.first < rhsFirst;
    4938  }
    4939 };
    4940 
    4941 template<typename KeyT, typename ValueT>
    4942 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4943 {
    4944  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4945  m_Vector.data(),
    4946  m_Vector.data() + m_Vector.size(),
    4947  pair,
    4948  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4949  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4950 }
    4951 
    4952 template<typename KeyT, typename ValueT>
    4953 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4954 {
    4955  PairType* it = VmaBinaryFindFirstNotLess(
    4956  m_Vector.data(),
    4957  m_Vector.data() + m_Vector.size(),
    4958  key,
    4959  VmaPairFirstLess<KeyT, ValueT>());
    4960  if((it != m_Vector.end()) && (it->first == key))
    4961  {
    4962  return it;
    4963  }
    4964  else
    4965  {
    4966  return m_Vector.end();
    4967  }
    4968 }
    4969 
    4970 template<typename KeyT, typename ValueT>
    4971 void VmaMap<KeyT, ValueT>::erase(iterator it)
    4972 {
    4973  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    4974 }
    4975 
    4976 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4977 
    4978 #endif // #if 0
    4979 
    4981 
    4982 class VmaDeviceMemoryBlock;
    4983 
    4984 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4985 
    4986 struct VmaAllocation_T
    4987 {
    4988 private:
    4989  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    4990 
    4991  enum FLAGS
    4992  {
    4993  FLAG_USER_DATA_STRING = 0x01,
    4994  };
    4995 
    4996 public:
    4997  enum ALLOCATION_TYPE
    4998  {
    4999  ALLOCATION_TYPE_NONE,
    5000  ALLOCATION_TYPE_BLOCK,
    5001  ALLOCATION_TYPE_DEDICATED,
    5002  };
    5003 
    5004  /*
    5005  This struct cannot have a constructor or destructor. It must be POD because it is
    5006  allocated using VmaPoolAllocator.
    5007  */
    5008 
    5009  void Ctor(uint32_t currentFrameIndex, bool userDataString)
    5010  {
    5011  m_Alignment = 1;
    5012  m_Size = 0;
    5013  m_pUserData = VMA_NULL;
    5014  m_LastUseFrameIndex = currentFrameIndex;
    5015  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
    5016  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
    5017  m_MapCount = 0;
    5018  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
    5019 
    5020 #if VMA_STATS_STRING_ENABLED
    5021  m_CreationFrameIndex = currentFrameIndex;
    5022  m_BufferImageUsage = 0;
    5023 #endif
    5024  }
    5025 
    5026  void Dtor()
    5027  {
    5028  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
    5029 
    5030  // Check if owned string was freed.
    5031  VMA_ASSERT(m_pUserData == VMA_NULL);
    5032  }
    5033 
    5034  void InitBlockAllocation(
    5035  VmaDeviceMemoryBlock* block,
    5036  VkDeviceSize offset,
    5037  VkDeviceSize alignment,
    5038  VkDeviceSize size,
    5039  VmaSuballocationType suballocationType,
    5040  bool mapped,
    5041  bool canBecomeLost)
    5042  {
    5043  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    5044  VMA_ASSERT(block != VMA_NULL);
    5045  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    5046  m_Alignment = alignment;
    5047  m_Size = size;
    5048  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    5049  m_SuballocationType = (uint8_t)suballocationType;
    5050  m_BlockAllocation.m_Block = block;
    5051  m_BlockAllocation.m_Offset = offset;
    5052  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    5053  }
    5054 
    5055  void InitLost()
    5056  {
    5057  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    5058  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    5059  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    5060  m_BlockAllocation.m_Block = VMA_NULL;
    5061  m_BlockAllocation.m_Offset = 0;
    5062  m_BlockAllocation.m_CanBecomeLost = true;
    5063  }
    5064 
    5065  void ChangeBlockAllocation(
    5066  VmaAllocator hAllocator,
    5067  VmaDeviceMemoryBlock* block,
    5068  VkDeviceSize offset);
    5069 
    5070  void ChangeSize(VkDeviceSize newSize);
    5071  void ChangeOffset(VkDeviceSize newOffset);
    5072 
    5073  // A non-null pMappedData means the allocation was created with the MAPPED flag.
    5074  void InitDedicatedAllocation(
    5075  uint32_t memoryTypeIndex,
    5076  VkDeviceMemory hMemory,
    5077  VmaSuballocationType suballocationType,
    5078  void* pMappedData,
    5079  VkDeviceSize size)
    5080  {
    5081  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    5082  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    5083  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
    5084  m_Alignment = 0;
    5085  m_Size = size;
    5086  m_SuballocationType = (uint8_t)suballocationType;
    5087  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    5088  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    5089  m_DedicatedAllocation.m_hMemory = hMemory;
    5090  m_DedicatedAllocation.m_pMappedData = pMappedData;
    5091  }
    5092 
    5093  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    5094  VkDeviceSize GetAlignment() const { return m_Alignment; }
    5095  VkDeviceSize GetSize() const { return m_Size; }
    5096  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    5097  void* GetUserData() const { return m_pUserData; }
    5098  void SetUserData(VmaAllocator hAllocator, void* pUserData);
    5099  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
    5100 
    5101  VmaDeviceMemoryBlock* GetBlock() const
    5102  {
    5103  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    5104  return m_BlockAllocation.m_Block;
    5105  }
    5106  VkDeviceSize GetOffset() const;
    5107  VkDeviceMemory GetMemory() const;
    5108  uint32_t GetMemoryTypeIndex() const;
    5109  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    5110  void* GetMappedData() const;
    5111  bool CanBecomeLost() const;
    5112 
    5113  uint32_t GetLastUseFrameIndex() const
    5114  {
    5115  return m_LastUseFrameIndex.load();
    5116  }
    5117  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    5118  {
    5119  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    5120  }
    5121  /*
    5122  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    5123  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    5124  - Else, returns false.
    5125 
    5126  If hAllocation is already lost, assert - you should not call it then.
    5127  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    5128  */
    5129  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5130 
    5131  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    5132  {
    5133  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    5134  outInfo.blockCount = 1;
    5135  outInfo.allocationCount = 1;
    5136  outInfo.unusedRangeCount = 0;
    5137  outInfo.usedBytes = m_Size;
    5138  outInfo.unusedBytes = 0;
    5139  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    5140  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5141  outInfo.unusedRangeSizeMax = 0;
    5142  }
    5143 
    5144  void BlockAllocMap();
    5145  void BlockAllocUnmap();
    5146  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    5147  void DedicatedAllocUnmap(VmaAllocator hAllocator);
    5148 
    5149 #if VMA_STATS_STRING_ENABLED
    5150  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    5151  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
    5152 
    5153  void InitBufferImageUsage(uint32_t bufferImageUsage)
    5154  {
    5155  VMA_ASSERT(m_BufferImageUsage == 0);
    5156  m_BufferImageUsage = bufferImageUsage;
    5157  }
    5158 
    5159  void PrintParameters(class VmaJsonWriter& json) const;
    5160 #endif
    5161 
    5162 private:
    5163  VkDeviceSize m_Alignment;
    5164  VkDeviceSize m_Size;
    5165  void* m_pUserData;
    5166  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    5167  uint8_t m_Type; // ALLOCATION_TYPE
    5168  uint8_t m_SuballocationType; // VmaSuballocationType
    5169  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    5170  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    5171  uint8_t m_MapCount;
    5172  uint8_t m_Flags; // enum FLAGS
    5173 
    5174  // Allocation out of VmaDeviceMemoryBlock.
    5175  struct BlockAllocation
    5176  {
    5177  VmaDeviceMemoryBlock* m_Block;
    5178  VkDeviceSize m_Offset;
    5179  bool m_CanBecomeLost;
    5180  };
    5181 
    5182  // Allocation for an object that has its own private VkDeviceMemory.
    5183  struct DedicatedAllocation
    5184  {
    5185  uint32_t m_MemoryTypeIndex;
    5186  VkDeviceMemory m_hMemory;
    5187  void* m_pMappedData; // Not null means memory is mapped.
    5188  };
    5189 
    5190  union
    5191  {
    5192  // Allocation out of VmaDeviceMemoryBlock.
    5193  BlockAllocation m_BlockAllocation;
    5194  // Allocation for an object that has its own private VkDeviceMemory.
    5195  DedicatedAllocation m_DedicatedAllocation;
    5196  };
    5197 
    5198 #if VMA_STATS_STRING_ENABLED
    5199  uint32_t m_CreationFrameIndex;
    5200  uint32_t m_BufferImageUsage; // 0 if unknown.
    5201 #endif
    5202 
    5203  void FreeUserDataString(VmaAllocator hAllocator);
    5204 };
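// [Editor's note] The MakeLost() contract documented above maps naturally onto a
// compare-exchange loop over m_LastUseFrameIndex. A simplified sketch of that loop
// (the library's actual definition appears later in this file; asserts omitted):
//
//     uint32_t localLastUse = GetLastUseFrameIndex();
//     for(;;)
//     {
//         if(localLastUse == VMA_FRAME_INDEX_LOST)
//             return false; // already lost - the caller should not have asked
//         if(localLastUse + frameInUseCount >= currentFrameIndex)
//             return false; // may still be in use by in-flight frames
//         if(CompareExchangeLastUseFrameIndex(localLastUse, VMA_FRAME_INDEX_LOST))
//             return true;  // won the race: the allocation is now lost
//         // CAS failed: localLastUse was refreshed with the current value - retry.
//     }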
    5205 
    5206 /*
    5207 Represents a region of a VmaDeviceMemoryBlock that is either assigned and returned as
    5208 an allocated memory block, or is free.
    5209 */
    5210 struct VmaSuballocation
    5211 {
    5212  VkDeviceSize offset;
    5213  VkDeviceSize size;
    5214  VmaAllocation hAllocation;
    5215  VmaSuballocationType type;
    5216 };
    5217 
    5218 // Comparator for offsets.
    5219 struct VmaSuballocationOffsetLess
    5220 {
    5221  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5222  {
    5223  return lhs.offset < rhs.offset;
    5224  }
    5225 };
    5226 struct VmaSuballocationOffsetGreater
    5227 {
    5228  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5229  {
    5230  return lhs.offset > rhs.offset;
    5231  }
    5232 };
    5233 
    5234 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
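// [Editor's note] The offset comparators above plug into the sorted-vector helpers defined
// earlier, e.g. to keep a vector of suballocations ordered by ascending offset
// (suballocsByOffset and the literal values are hypothetical):
//
//     VmaSuballocation sub = { /*offset*/ 256, /*size*/ 128, VK_NULL_HANDLE, VMA_SUBALLOCATION_TYPE_FREE };
//     VmaVectorInsertSorted<VmaSuballocationOffsetLess>(suballocsByOffset, sub);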
    5235 
    5236 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
    5237 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    5238 
    5239 enum class VmaAllocationRequestType
    5240 {
    5241  Normal,
    5242  // Used by "Linear" algorithm.
    5243  UpperAddress,
    5244  EndOf1st,
    5245  EndOf2nd,
    5246 };
    5247 
    5248 /*
    5249 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    5250 
    5251 If canMakeOtherLost was false:
    5252 - item points to a FREE suballocation.
    5253 - itemsToMakeLostCount is 0.
    5254 
    5255 If canMakeOtherLost was true:
    5256 - item points to the first of a sequence of suballocations, which are either FREE,
    5257  or point to VmaAllocations that can become lost.
    5258 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    5259  the requested allocation to succeed.
    5260 */
    5261 struct VmaAllocationRequest
    5262 {
    5263  VkDeviceSize offset;
    5264  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    5265  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    5266  VmaSuballocationList::iterator item;
    5267  size_t itemsToMakeLostCount;
    5268  void* customData;
    5269  VmaAllocationRequestType type;
    5270 
    5271  VkDeviceSize CalcCost() const
    5272  {
    5273  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    5274  }
    5275 };
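// [Editor's note] A worked example of CalcCost() with hypothetical numbers: a request whose
// range overlaps two lost-able allocations of 262144 bytes total costs
// 262144 + 2 * VMA_LOST_ALLOCATION_COST = 262144 + 2097152 = 2359296 equivalent bytes,
// so the allocator strongly prefers candidate requests that make fewer allocations lost.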
    5276 
    5277 /*
    5278 Data structure used for bookkeeping of allocations and unused ranges of memory
    5279 in a single VkDeviceMemory block.
    5280 */
    5281 class VmaBlockMetadata
    5282 {
    5283 public:
    5284  VmaBlockMetadata(VmaAllocator hAllocator);
    5285  virtual ~VmaBlockMetadata() { }
    5286  virtual void Init(VkDeviceSize size) { m_Size = size; }
    5287 
    5288  // Validates all data structures inside this object. If not valid, returns false.
    5289  virtual bool Validate() const = 0;
    5290  VkDeviceSize GetSize() const { return m_Size; }
    5291  virtual size_t GetAllocationCount() const = 0;
    5292  virtual VkDeviceSize GetSumFreeSize() const = 0;
    5293  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    5294  // Returns true if this block is empty - contains only a single free suballocation.
    5295  virtual bool IsEmpty() const = 0;
    5296 
    5297  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    5298  // Shouldn't modify blockCount.
    5299  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
    5300 
    5301 #if VMA_STATS_STRING_ENABLED
    5302  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
    5303 #endif
    5304 
    5305  // Tries to find a place for suballocation with given parameters inside this block.
    5306  // If succeeded, fills pAllocationRequest and returns true.
    5307  // If failed, returns false.
    5308  virtual bool CreateAllocationRequest(
    5309  uint32_t currentFrameIndex,
    5310  uint32_t frameInUseCount,
    5311  VkDeviceSize bufferImageGranularity,
    5312  VkDeviceSize allocSize,
    5313  VkDeviceSize allocAlignment,
    5314  bool upperAddress,
    5315  VmaSuballocationType allocType,
    5316  bool canMakeOtherLost,
    5317  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
    5318  uint32_t strategy,
    5319  VmaAllocationRequest* pAllocationRequest) = 0;
    5320 
    5321  virtual bool MakeRequestedAllocationsLost(
    5322  uint32_t currentFrameIndex,
    5323  uint32_t frameInUseCount,
    5324  VmaAllocationRequest* pAllocationRequest) = 0;
    5325 
    5326  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
    5327 
    5328  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
    5329 
    5330  // Makes actual allocation based on request. Request must already be checked and valid.
    5331  virtual void Alloc(
    5332  const VmaAllocationRequest& request,
    5333  VmaSuballocationType type,
    5334  VkDeviceSize allocSize,
    5335  VmaAllocation hAllocation) = 0;
    5336 
    5337  // Frees suballocation assigned to given memory region.
    5338  virtual void Free(const VmaAllocation allocation) = 0;
    5339  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
    5340 
    5341  // Tries to resize (grow or shrink) space for given allocation, in place.
    5342  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
    5343 
    5344 protected:
    5345  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
    5346 
    5347 #if VMA_STATS_STRING_ENABLED
    5348  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
    5349  VkDeviceSize unusedBytes,
    5350  size_t allocationCount,
    5351  size_t unusedRangeCount) const;
    5352  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    5353  VkDeviceSize offset,
    5354  VmaAllocation hAllocation) const;
    5355  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    5356  VkDeviceSize offset,
    5357  VkDeviceSize size) const;
    5358  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
    5359 #endif
    5360 
    5361 private:
    5362  VkDeviceSize m_Size;
    5363  const VkAllocationCallbacks* m_pAllocationCallbacks;
    5364 };
    5365 
    5366 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
    5367  VMA_ASSERT(0 && "Validation failed: " #cond); \
    5368  return false; \
    5369  } } while(false)
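// [Editor's note] Sketch of how VMA_VALIDATE is used in the Validate() overrides below
// (SketchMetadata and the condition are hypothetical): each failing condition triggers
// VMA_ASSERT with the stringified condition and returns false from the enclosing function.
//
//     bool SketchMetadata::Validate() const
//     {
//         VMA_VALIDATE(GetSumFreeSize() <= GetSize());
//         return true;
//     }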
    5370 
    5371 class VmaBlockMetadata_Generic : public VmaBlockMetadata
    5372 {
    5373  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
    5374 public:
    5375  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    5376  virtual ~VmaBlockMetadata_Generic();
    5377  virtual void Init(VkDeviceSize size);
    5378 
    5379  virtual bool Validate() const;
    5380  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    5381  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5382  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5383  virtual bool IsEmpty() const;
    5384 
    5385  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5386  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5387 
    5388 #if VMA_STATS_STRING_ENABLED
    5389  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5390 #endif
    5391 
    5392  virtual bool CreateAllocationRequest(
    5393  uint32_t currentFrameIndex,
    5394  uint32_t frameInUseCount,
    5395  VkDeviceSize bufferImageGranularity,
    5396  VkDeviceSize allocSize,
    5397  VkDeviceSize allocAlignment,
    5398  bool upperAddress,
    5399  VmaSuballocationType allocType,
    5400  bool canMakeOtherLost,
    5401  uint32_t strategy,
    5402  VmaAllocationRequest* pAllocationRequest);
    5403 
    5404  virtual bool MakeRequestedAllocationsLost(
    5405  uint32_t currentFrameIndex,
    5406  uint32_t frameInUseCount,
    5407  VmaAllocationRequest* pAllocationRequest);
    5408 
    5409  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5410 
    5411  virtual VkResult CheckCorruption(const void* pBlockData);
    5412 
    5413  virtual void Alloc(
    5414  const VmaAllocationRequest& request,
    5415  VmaSuballocationType type,
    5416  VkDeviceSize allocSize,
    5417  VmaAllocation hAllocation);
    5418 
    5419  virtual void Free(const VmaAllocation allocation);
    5420  virtual void FreeAtOffset(VkDeviceSize offset);
    5421 
    5422  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
    5423 
    5425  // For defragmentation
    5426 
    5427  bool IsBufferImageGranularityConflictPossible(
    5428  VkDeviceSize bufferImageGranularity,
    5429  VmaSuballocationType& inOutPrevSuballocType) const;
    5430 
    5431 private:
    5432  friend class VmaDefragmentationAlgorithm_Generic;
    5433  friend class VmaDefragmentationAlgorithm_Fast;
    5434 
    5435  uint32_t m_FreeCount;
    5436  VkDeviceSize m_SumFreeSize;
    5437  VmaSuballocationList m_Suballocations;
    5438  // Suballocations that are free and have size greater than certain threshold.
    5439  // Sorted by size, ascending.
    5440  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    5441 
    5442  bool ValidateFreeSuballocationList() const;
    5443 
    5444  // Checks if a requested suballocation with the given parameters can be placed at the given suballocItem.
    5445  // If yes, fills pOffset and returns true; if no, returns false.
    5446  bool CheckAllocation(
    5447  uint32_t currentFrameIndex,
    5448  uint32_t frameInUseCount,
    5449  VkDeviceSize bufferImageGranularity,
    5450  VkDeviceSize allocSize,
    5451  VkDeviceSize allocAlignment,
    5452  VmaSuballocationType allocType,
    5453  VmaSuballocationList::const_iterator suballocItem,
    5454  bool canMakeOtherLost,
    5455  VkDeviceSize* pOffset,
    5456  size_t* itemsToMakeLostCount,
    5457  VkDeviceSize* pSumFreeSize,
    5458  VkDeviceSize* pSumItemSize) const;
    5459  // Given a free suballocation, merges it with the following one, which must also be free.
    5460  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    5461  // Releases given suballocation, making it free.
    5462  // Merges it with adjacent free suballocations if applicable.
    5463  // Returns iterator to new free suballocation at this place.
    5464  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    5465  // Given a free suballocation, inserts it into the sorted list of
    5466  // m_FreeSuballocationsBySize if it is suitable.
    5467  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    5468  // Given a free suballocation, removes it from the sorted list of
    5469  // m_FreeSuballocationsBySize if it is suitable.
    5470  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    5471 };
    5472 
    5473 /*
    5474 Allocations and their references in internal data structure look like this:
    5475 
    5476 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    5477 
    5478         0 +-------+
    5479           |       |
    5480           |       |
    5481           |       |
    5482           +-------+
    5483           | Alloc |  1st[m_1stNullItemsBeginCount]
    5484           +-------+
    5485           | Alloc |  1st[m_1stNullItemsBeginCount + 1]
    5486           +-------+
    5487           |  ...  |
    5488           +-------+
    5489           | Alloc |  1st[1st.size() - 1]
    5490           +-------+
    5491           |       |
    5492           |       |
    5493           |       |
    5494 GetSize() +-------+
    5495 
    5496 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    5497 
    5498         0 +-------+
    5499           | Alloc |  2nd[0]
    5500           +-------+
    5501           | Alloc |  2nd[1]
    5502           +-------+
    5503           |  ...  |
    5504           +-------+
    5505           | Alloc |  2nd[2nd.size() - 1]
    5506           +-------+
    5507           |       |
    5508           |       |
    5509           |       |
    5510           +-------+
    5511           | Alloc |  1st[m_1stNullItemsBeginCount]
    5512           +-------+
    5513           | Alloc |  1st[m_1stNullItemsBeginCount + 1]
    5514           +-------+
    5515           |  ...  |
    5516           +-------+
    5517           | Alloc |  1st[1st.size() - 1]
    5518           +-------+
    5519           |       |
    5520 GetSize() +-------+
    5521 
    5522 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    5523 
    5524         0 +-------+
    5525           |       |
    5526           |       |
    5527           |       |
    5528           +-------+
    5529           | Alloc |  1st[m_1stNullItemsBeginCount]
    5530           +-------+
    5531           | Alloc |  1st[m_1stNullItemsBeginCount + 1]
    5532           +-------+
    5533           |  ...  |
    5534           +-------+
    5535           | Alloc |  1st[1st.size() - 1]
    5536           +-------+
    5537           |       |
    5538           |       |
    5539           |       |
    5540           +-------+
    5541           | Alloc |  2nd[2nd.size() - 1]
    5542           +-------+
    5543           |  ...  |
    5544           +-------+
    5545           | Alloc |  2nd[1]
    5546           +-------+
    5547           | Alloc |  2nd[0]
    5548 GetSize() +-------+
    5549 
    5550 */
    5551 class VmaBlockMetadata_Linear : public VmaBlockMetadata
    5552 {
    5553  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
    5554 public:
    5555  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    5556  virtual ~VmaBlockMetadata_Linear();
    5557  virtual void Init(VkDeviceSize size);
    5558 
    5559  virtual bool Validate() const;
    5560  virtual size_t GetAllocationCount() const;
    5561  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5562  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5563  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
    5564 
    5565  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5566  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5567 
    5568 #if VMA_STATS_STRING_ENABLED
    5569  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5570 #endif
    5571 
    5572  virtual bool CreateAllocationRequest(
    5573  uint32_t currentFrameIndex,
    5574  uint32_t frameInUseCount,
    5575  VkDeviceSize bufferImageGranularity,
    5576  VkDeviceSize allocSize,
    5577  VkDeviceSize allocAlignment,
    5578  bool upperAddress,
    5579  VmaSuballocationType allocType,
    5580  bool canMakeOtherLost,
    5581  uint32_t strategy,
    5582  VmaAllocationRequest* pAllocationRequest);
    5583 
    5584  virtual bool MakeRequestedAllocationsLost(
    5585  uint32_t currentFrameIndex,
    5586  uint32_t frameInUseCount,
    5587  VmaAllocationRequest* pAllocationRequest);
    5588 
    5589  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5590 
    5591  virtual VkResult CheckCorruption(const void* pBlockData);
    5592 
    5593  virtual void Alloc(
    5594  const VmaAllocationRequest& request,
    5595  VmaSuballocationType type,
    5596  VkDeviceSize allocSize,
    5597  VmaAllocation hAllocation);
    5598 
    5599  virtual void Free(const VmaAllocation allocation);
    5600  virtual void FreeAtOffset(VkDeviceSize offset);
    5601 
    5602 private:
    5603  /*
    5604  There are two suballocation vectors, used in ping-pong way.
    5605  The one with index m_1stVectorIndex is called 1st.
    5606  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    5607  2nd can be non-empty only when 1st is not empty.
    5608  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    5609  */
    5610  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
    5611 
    5612  enum SECOND_VECTOR_MODE
    5613  {
    5614  SECOND_VECTOR_EMPTY,
    5615  /*
    5616  Suballocations in 2nd vector are created later than the ones in 1st, but they
    5617  all have smaller offsets.
    5618  */
    5619  SECOND_VECTOR_RING_BUFFER,
    5620  /*
    5621  Suballocations in 2nd vector are upper side of double stack.
    5622  They all have offsets higher than those in 1st vector.
    5623  Top of this stack means smaller offsets, but higher indices in this vector.
    5624  */
    5625  SECOND_VECTOR_DOUBLE_STACK,
    5626  };
    5627 
    5628  VkDeviceSize m_SumFreeSize;
    5629  SuballocationVectorType m_Suballocations0, m_Suballocations1;
    5630  uint32_t m_1stVectorIndex;
    5631  SECOND_VECTOR_MODE m_2ndVectorMode;
    5632 
    5633  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5634  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5635  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5636  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5637 
    5638  // Number of items in 1st vector with hAllocation = null at the beginning.
    5639  size_t m_1stNullItemsBeginCount;
    5640  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    5641  size_t m_1stNullItemsMiddleCount;
    5642  // Number of items in 2nd vector with hAllocation = null.
    5643  size_t m_2ndNullItemsCount;
    5644 
    5645  bool ShouldCompact1st() const;
    5646  void CleanupAfterFree();
    5647 
    5648  bool CreateAllocationRequest_LowerAddress(
    5649  uint32_t currentFrameIndex,
    5650  uint32_t frameInUseCount,
    5651  VkDeviceSize bufferImageGranularity,
    5652  VkDeviceSize allocSize,
    5653  VkDeviceSize allocAlignment,
    5654  VmaSuballocationType allocType,
    5655  bool canMakeOtherLost,
    5656  uint32_t strategy,
    5657  VmaAllocationRequest* pAllocationRequest);
    5658  bool CreateAllocationRequest_UpperAddress(
    5659  uint32_t currentFrameIndex,
    5660  uint32_t frameInUseCount,
    5661  VkDeviceSize bufferImageGranularity,
    5662  VkDeviceSize allocSize,
    5663  VkDeviceSize allocAlignment,
    5664  VmaSuballocationType allocType,
    5665  bool canMakeOtherLost,
    5666  uint32_t strategy,
    5667  VmaAllocationRequest* pAllocationRequest);
    5668 };
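// [Editor's note] The linear metadata above backs custom pools created with
// VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT. A hedged usage sketch - memTypeIndex is assumed
// to come from vmaFindMemoryTypeIndex, and the sizes are illustrative:
//
//     VmaPoolCreateInfo poolInfo = {};
//     poolInfo.memoryTypeIndex = memTypeIndex;
//     poolInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
//     poolInfo.blockSize = 64ull * 1024 * 1024; // linear pools use a single block
//     poolInfo.maxBlockCount = 1;
//     VmaPool pool;
//     VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
//
// Freeing strictly from the end gives stack behavior, interleaved alloc/free degrades into
// the ring-buffer mode, and VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT enables the double stack.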
    5669 
    5670 /*
    5671 - GetSize() is the original size of allocated memory block.
    5672 - m_UsableSize is this size aligned down to a power of two.
    5673  All allocations and calculations happen relative to m_UsableSize.
    5674 - GetUnusableSize() is the difference between them.
    5675  It is reported as a separate, unused range, not available for allocations.
    5676 
    5677 The node at level 0 has size = m_UsableSize.
    5678 Each subsequent level contains nodes half the size of the previous level.
    5679 m_LevelCount is the maximum number of levels to use in the current object.
    5680 */
    5681 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
    5682 {
    5683  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
    5684 public:
    5685  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    5686  virtual ~VmaBlockMetadata_Buddy();
    5687  virtual void Init(VkDeviceSize size);
    5688 
    5689  virtual bool Validate() const;
    5690  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    5691  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    5692  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5693  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
    5694 
    5695  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5696  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5697 
    5698 #if VMA_STATS_STRING_ENABLED
    5699  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5700 #endif
    5701 
    5702  virtual bool CreateAllocationRequest(
    5703  uint32_t currentFrameIndex,
    5704  uint32_t frameInUseCount,
    5705  VkDeviceSize bufferImageGranularity,
    5706  VkDeviceSize allocSize,
    5707  VkDeviceSize allocAlignment,
    5708  bool upperAddress,
    5709  VmaSuballocationType allocType,
    5710  bool canMakeOtherLost,
    5711  uint32_t strategy,
    5712  VmaAllocationRequest* pAllocationRequest);
    5713 
    5714  virtual bool MakeRequestedAllocationsLost(
    5715  uint32_t currentFrameIndex,
    5716  uint32_t frameInUseCount,
    5717  VmaAllocationRequest* pAllocationRequest);
    5718 
    5719  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5720 
    5721  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
    5722 
    5723  virtual void Alloc(
    5724  const VmaAllocationRequest& request,
    5725  VmaSuballocationType type,
    5726  VkDeviceSize allocSize,
    5727  VmaAllocation hAllocation);
    5728 
    5729  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    5730  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
    5731 
    5732 private:
    5733  static const VkDeviceSize MIN_NODE_SIZE = 32;
    5734  static const size_t MAX_LEVELS = 30;
    5735 
    5736  struct ValidationContext
    5737  {
    5738  size_t calculatedAllocationCount;
    5739  size_t calculatedFreeCount;
    5740  VkDeviceSize calculatedSumFreeSize;
    5741 
    5742  ValidationContext() :
    5743  calculatedAllocationCount(0),
    5744  calculatedFreeCount(0),
    5745  calculatedSumFreeSize(0) { }
    5746  };
    5747 
    5748  struct Node
    5749  {
    5750  VkDeviceSize offset;
    5751  enum TYPE
    5752  {
    5753  TYPE_FREE,
    5754  TYPE_ALLOCATION,
    5755  TYPE_SPLIT,
    5756  TYPE_COUNT
    5757  } type;
    5758  Node* parent;
    5759  Node* buddy;
    5760 
    5761  union
    5762  {
    5763  struct
    5764  {
    5765  Node* prev;
    5766  Node* next;
    5767  } free;
    5768  struct
    5769  {
    5770  VmaAllocation alloc;
    5771  } allocation;
    5772  struct
    5773  {
    5774  Node* leftChild;
    5775  } split;
    5776  };
    5777  };
    5778 
    5779  // Size of the memory block aligned down to a power of two.
    5780  VkDeviceSize m_UsableSize;
    5781  uint32_t m_LevelCount;
    5782 
    5783  Node* m_Root;
    5784  struct {
    5785  Node* front;
    5786  Node* back;
    5787  } m_FreeList[MAX_LEVELS];
    5788  // Number of nodes in the tree with type == TYPE_ALLOCATION.
    5789  size_t m_AllocationCount;
    5790  // Number of nodes in the tree with type == TYPE_FREE.
    5791  size_t m_FreeCount;
    5792  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    5793  VkDeviceSize m_SumFreeSize;
    5794 
    5795  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    5796  void DeleteNode(Node* node);
    5797  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    5798  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    5799  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    5800  // Alloc passed just for validation. Can be null.
    5801  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    5802  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    5803  // Adds node to the front of FreeList at given level.
    5804  // node->type must be FREE.
    5805  // node->free.prev, next can be undefined.
    5806  void AddToFreeListFront(uint32_t level, Node* node);
    5807  // Removes node from FreeList at given level.
    5808  // node->type must be FREE.
    5809  // node->free.prev, next stay untouched.
    5810  void RemoveFromFreeList(uint32_t level, Node* node);
    5811 
    5812 #if VMA_STATS_STRING_ENABLED
    5813  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
    5814 #endif
    5815 };
    5816 
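// A minimal standalone sketch (not from the library) of the buddy-level
// arithmetic declared above: LevelToNodeSize() halves the node size at each
// level, and an allocation lands on the deepest level whose nodes still fit
// it. A usable size of 256 and a minimum node size of 32 are assumed here.
#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t usableSize = 256; // Block size aligned down to a power of 2.
    const uint64_t minNodeSize = 32; // Stand-in for MIN_NODE_SIZE.

    for(uint32_t level = 0; (usableSize >> level) >= minNodeSize; ++level)
    {
        // Mirrors LevelToNodeSize: m_UsableSize >> level.
        printf("level %u: node size %llu\n",
            level, (unsigned long long)(usableSize >> level));
    }
    // Prints levels 0..3 with node sizes 256, 128, 64, 32.
    return 0;
}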
    5817 /*
    5818 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5819 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5820 
    5821 Thread-safety: This class must be externally synchronized.
    5822 */
    5823 class VmaDeviceMemoryBlock
    5824 {
    5825  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
    5826 public:
    5827  VmaBlockMetadata* m_pMetadata;
    5828 
    5829  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    5830 
    5831  ~VmaDeviceMemoryBlock()
    5832  {
    5833  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    5834  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    5835  }
    5836 
    5837  // Always call after construction.
    5838  void Init(
    5839  VmaAllocator hAllocator,
    5840  VmaPool hParentPool,
    5841  uint32_t newMemoryTypeIndex,
    5842  VkDeviceMemory newMemory,
    5843  VkDeviceSize newSize,
    5844  uint32_t id,
    5845  uint32_t algorithm);
    5846  // Always call before destruction.
    5847  void Destroy(VmaAllocator allocator);
    5848 
    5849  VmaPool GetParentPool() const { return m_hParentPool; }
    5850  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    5851  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5852  uint32_t GetId() const { return m_Id; }
    5853  void* GetMappedData() const { return m_pMappedData; }
    5854 
    5855  // Validates all data structures inside this object. If not valid, returns false.
    5856  bool Validate() const;
    5857 
    5858  VkResult CheckCorruption(VmaAllocator hAllocator);
    5859 
    5860  // ppData can be null.
    5861  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    5862  void Unmap(VmaAllocator hAllocator, uint32_t count);
    5863 
    5864  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5865  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5866 
    5867  VkResult BindBufferMemory(
    5868  const VmaAllocator hAllocator,
    5869  const VmaAllocation hAllocation,
    5870  VkBuffer hBuffer);
    5871  VkResult BindImageMemory(
    5872  const VmaAllocator hAllocator,
    5873  const VmaAllocation hAllocation,
    5874  VkImage hImage);
    5875 
    5876 private:
 5877  VmaPool m_hParentPool; // VK_NULL_HANDLE if it doesn't belong to a custom pool.
    5878  uint32_t m_MemoryTypeIndex;
    5879  uint32_t m_Id;
    5880  VkDeviceMemory m_hMemory;
    5881 
    5882  /*
    5883  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    5884  Also protects m_MapCount, m_pMappedData.
    5885  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
    5886  */
    5887  VMA_MUTEX m_Mutex;
    5888  uint32_t m_MapCount;
    5889  void* m_pMappedData;
    5890 };
    5891 
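// A simplified sketch of the map reference counting that Map()/Unmap() above
// implement (an assumption based on their count parameters; the real
// vkMapMemory/vkUnmapMemory calls are faked here): the mapping is created
// only on the 0 -> 1 transition and released only on the 1 -> 0 transition,
// so nested mappings of the same block are cheap and safe.
#include <cassert>
#include <cstdint>
#include <cstdio>

class MappedBlockSketch
{
public:
    void* Map()
    {
        if(m_MapCount++ == 0)
            m_pMappedData = &m_Fake; // Real code would call vkMapMemory here.
        return m_pMappedData;
    }
    void Unmap()
    {
        assert(m_MapCount > 0 && "Unmapping a block that is not mapped.");
        if(--m_MapCount == 0)
            m_pMappedData = nullptr; // Real code would call vkUnmapMemory here.
    }
    uint32_t GetMapCount() const { return m_MapCount; }

private:
    uint32_t m_MapCount = 0;
    void* m_pMappedData = nullptr;
    int m_Fake = 0;
};

int main()
{
    MappedBlockSketch block;
    void* p1 = block.Map();
    void* p2 = block.Map(); // Nested map: no second (fake) vkMapMemory call.
    assert(p1 == p2);
    block.Unmap();
    block.Unmap();          // Last unmap releases the (fake) mapping.
    printf("map count: %u\n", block.GetMapCount()); // 0
    return 0;
}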
    5892 struct VmaPointerLess
    5893 {
    5894  bool operator()(const void* lhs, const void* rhs) const
    5895  {
    5896  return lhs < rhs;
    5897  }
    5898 };
    5899 
    5900 struct VmaDefragmentationMove
    5901 {
    5902  size_t srcBlockIndex;
    5903  size_t dstBlockIndex;
    5904  VkDeviceSize srcOffset;
    5905  VkDeviceSize dstOffset;
    5906  VkDeviceSize size;
    5907 };
    5908 
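// A hedged sketch of how records like VmaDefragmentationMove can be applied
// on the CPU, assuming every involved block is already mapped and its base
// pointer is known. The real path (ApplyDefragmentationMovesCpu, declared
// further below) adds mapping, flushing and error handling on top of this
// core loop.
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <vector>

struct MoveSketch { size_t srcBlock, dstBlock, srcOffset, dstOffset, size; };

static void ApplyMoves(const std::vector<MoveSketch>& moves,
    const std::vector<char*>& blockBases)
{
    for(const MoveSketch& m : moves)
    {
        // memmove, not memcpy: source and destination ranges may overlap
        // when data is compacted within a single block.
        memmove(blockBases[m.dstBlock] + m.dstOffset,
                blockBases[m.srcBlock] + m.srcOffset, m.size);
    }
}

int main()
{
    char block0[16] = "....ABCD";
    std::vector<char*> bases = { block0 };
    ApplyMoves({ { 0, 0, 4, 0, 4 } }, bases); // Compact "ABCD" to offset 0.
    printf("%.4s\n", block0); // ABCD
    return 0;
}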
    5909 class VmaDefragmentationAlgorithm;
    5910 
    5911 /*
    5912 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5913 Vulkan memory type.
    5914 
    5915 Synchronized internally with a mutex.
    5916 */
    5917 struct VmaBlockVector
    5918 {
    5919  VMA_CLASS_NO_COPY(VmaBlockVector)
    5920 public:
    5921  VmaBlockVector(
    5922  VmaAllocator hAllocator,
    5923  VmaPool hParentPool,
    5924  uint32_t memoryTypeIndex,
    5925  VkDeviceSize preferredBlockSize,
    5926  size_t minBlockCount,
    5927  size_t maxBlockCount,
    5928  VkDeviceSize bufferImageGranularity,
    5929  uint32_t frameInUseCount,
    5930  bool isCustomPool,
    5931  bool explicitBlockSize,
    5932  uint32_t algorithm);
    5933  ~VmaBlockVector();
    5934 
    5935  VkResult CreateMinBlocks();
    5936 
    5937  VmaPool GetParentPool() const { return m_hParentPool; }
    5938  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5939  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    5940  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    5941  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    5942  uint32_t GetAlgorithm() const { return m_Algorithm; }
    5943 
    5944  void GetPoolStats(VmaPoolStats* pStats);
    5945 
    5946  bool IsEmpty() const { return m_Blocks.empty(); }
    5947  bool IsCorruptionDetectionEnabled() const;
    5948 
    5949  VkResult Allocate(
    5950  uint32_t currentFrameIndex,
    5951  VkDeviceSize size,
    5952  VkDeviceSize alignment,
    5953  const VmaAllocationCreateInfo& createInfo,
    5954  VmaSuballocationType suballocType,
    5955  size_t allocationCount,
    5956  VmaAllocation* pAllocations);
    5957 
    5958  void Free(
    5959  VmaAllocation hAllocation);
    5960 
    5961  // Adds statistics of this BlockVector to pStats.
    5962  void AddStats(VmaStats* pStats);
    5963 
    5964 #if VMA_STATS_STRING_ENABLED
    5965  void PrintDetailedMap(class VmaJsonWriter& json);
    5966 #endif
    5967 
    5968  void MakePoolAllocationsLost(
    5969  uint32_t currentFrameIndex,
    5970  size_t* pLostAllocationCount);
    5971  VkResult CheckCorruption();
    5972 
    5973  // Saves results in pCtx->res.
    5974  void Defragment(
    5975  class VmaBlockVectorDefragmentationContext* pCtx,
    5976  VmaDefragmentationStats* pStats,
    5977  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    5978  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    5979  VkCommandBuffer commandBuffer);
    5980  void DefragmentationEnd(
    5981  class VmaBlockVectorDefragmentationContext* pCtx,
    5982  VmaDefragmentationStats* pStats);
    5983 
 5984  ////////////////////////////
 5985  // To be used only while the m_Mutex is locked. Used during defragmentation.
    5986 
    5987  size_t GetBlockCount() const { return m_Blocks.size(); }
    5988  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    5989  size_t CalcAllocationCount() const;
    5990  bool IsBufferImageGranularityConflictPossible() const;
    5991 
    5992 private:
    5993  friend class VmaDefragmentationAlgorithm_Generic;
    5994 
    5995  const VmaAllocator m_hAllocator;
    5996  const VmaPool m_hParentPool;
    5997  const uint32_t m_MemoryTypeIndex;
    5998  const VkDeviceSize m_PreferredBlockSize;
    5999  const size_t m_MinBlockCount;
    6000  const size_t m_MaxBlockCount;
    6001  const VkDeviceSize m_BufferImageGranularity;
    6002  const uint32_t m_FrameInUseCount;
    6003  const bool m_IsCustomPool;
    6004  const bool m_ExplicitBlockSize;
    6005  const uint32_t m_Algorithm;
 6006  /* There can be at most one block that is completely empty - a
 6007  hysteresis to avoid the pessimistic case of alternating creation and
 6008  destruction of a VkDeviceMemory. */
    6009  bool m_HasEmptyBlock;
    6010  VMA_RW_MUTEX m_Mutex;
    6011  // Incrementally sorted by sumFreeSize, ascending.
    6012  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    6013  uint32_t m_NextBlockId;
    6014 
    6015  VkDeviceSize CalcMaxBlockSize() const;
    6016 
    6017  // Finds and removes given block from vector.
    6018  void Remove(VmaDeviceMemoryBlock* pBlock);
    6019 
    6020  // Performs single step in sorting m_Blocks. They may not be fully sorted
    6021  // after this call.
    6022  void IncrementallySortBlocks();
    6023 
    6024  VkResult AllocatePage(
    6025  uint32_t currentFrameIndex,
    6026  VkDeviceSize size,
    6027  VkDeviceSize alignment,
    6028  const VmaAllocationCreateInfo& createInfo,
    6029  VmaSuballocationType suballocType,
    6030  VmaAllocation* pAllocation);
    6031 
    6032  // To be used only without CAN_MAKE_OTHER_LOST flag.
    6033  VkResult AllocateFromBlock(
    6034  VmaDeviceMemoryBlock* pBlock,
    6035  uint32_t currentFrameIndex,
    6036  VkDeviceSize size,
    6037  VkDeviceSize alignment,
    6038  VmaAllocationCreateFlags allocFlags,
    6039  void* pUserData,
    6040  VmaSuballocationType suballocType,
    6041  uint32_t strategy,
    6042  VmaAllocation* pAllocation);
    6043 
    6044  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    6045 
    6046  // Saves result to pCtx->res.
    6047  void ApplyDefragmentationMovesCpu(
    6048  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    6049  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    6050  // Saves result to pCtx->res.
    6051  void ApplyDefragmentationMovesGpu(
    6052  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    6053  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6054  VkCommandBuffer commandBuffer);
    6055 
    6056  /*
    6057  Used during defragmentation. pDefragmentationStats is optional. It's in/out
    6058  - updated with new data.
    6059  */
    6060  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
    6061 };
    6062 
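// A small sketch of the idea behind IncrementallySortBlocks() above
// (assumption: one bubble-sort step per call, stopping at the first swap):
// each call fixes at most one adjacent pair, so m_Blocks converges toward
// ascending order by free size without a full sort on the hot path.
#include <cstdio>
#include <utility>
#include <vector>

static void IncrementallySortStep(std::vector<unsigned>& freeSizes)
{
    for(size_t i = 1; i < freeSizes.size(); ++i)
    {
        if(freeSizes[i - 1] > freeSizes[i]) // Ascending order violated here.
        {
            std::swap(freeSizes[i - 1], freeSizes[i]);
            return; // One step only; later calls continue the work.
        }
    }
}

int main()
{
    std::vector<unsigned> freeSizes = { 64, 16, 128, 32 };
    IncrementallySortStep(freeSizes); // { 16, 64, 128, 32 }
    IncrementallySortStep(freeSizes); // { 16, 64, 32, 128 }
    for(unsigned s : freeSizes)
        printf("%u ", s);
    printf("\n");
    return 0;
}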
    6063 struct VmaPool_T
    6064 {
    6065  VMA_CLASS_NO_COPY(VmaPool_T)
    6066 public:
    6067  VmaBlockVector m_BlockVector;
    6068 
    6069  VmaPool_T(
    6070  VmaAllocator hAllocator,
    6071  const VmaPoolCreateInfo& createInfo,
    6072  VkDeviceSize preferredBlockSize);
    6073  ~VmaPool_T();
    6074 
    6075  uint32_t GetId() const { return m_Id; }
    6076  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
    6077 
    6078 #if VMA_STATS_STRING_ENABLED
    6079  //void PrintDetailedMap(class VmaStringBuilder& sb);
    6080 #endif
    6081 
    6082 private:
    6083  uint32_t m_Id;
    6084 };
    6085 
    6086 /*
    6087 Performs defragmentation:
    6088 
    6089 - Updates `pBlockVector->m_pMetadata`.
    6090 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
    6091 - Does not move actual data, only returns requested moves as `moves`.
    6092 */
    6093 class VmaDefragmentationAlgorithm
    6094 {
    6095  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
    6096 public:
    6097  VmaDefragmentationAlgorithm(
    6098  VmaAllocator hAllocator,
    6099  VmaBlockVector* pBlockVector,
    6100  uint32_t currentFrameIndex) :
    6101  m_hAllocator(hAllocator),
    6102  m_pBlockVector(pBlockVector),
    6103  m_CurrentFrameIndex(currentFrameIndex)
    6104  {
    6105  }
    6106  virtual ~VmaDefragmentationAlgorithm()
    6107  {
    6108  }
    6109 
    6110  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    6111  virtual void AddAll() = 0;
    6112 
    6113  virtual VkResult Defragment(
    6114  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6115  VkDeviceSize maxBytesToMove,
    6116  uint32_t maxAllocationsToMove) = 0;
    6117 
    6118  virtual VkDeviceSize GetBytesMoved() const = 0;
    6119  virtual uint32_t GetAllocationsMoved() const = 0;
    6120 
    6121 protected:
    6122  VmaAllocator const m_hAllocator;
    6123  VmaBlockVector* const m_pBlockVector;
    6124  const uint32_t m_CurrentFrameIndex;
    6125 
    6126  struct AllocationInfo
    6127  {
    6128  VmaAllocation m_hAllocation;
    6129  VkBool32* m_pChanged;
    6130 
    6131  AllocationInfo() :
    6132  m_hAllocation(VK_NULL_HANDLE),
    6133  m_pChanged(VMA_NULL)
    6134  {
    6135  }
    6136  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
    6137  m_hAllocation(hAlloc),
    6138  m_pChanged(pChanged)
    6139  {
    6140  }
    6141  };
    6142 };
    6143 
    6144 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
    6145 {
    6146  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
    6147 public:
    6148  VmaDefragmentationAlgorithm_Generic(
    6149  VmaAllocator hAllocator,
    6150  VmaBlockVector* pBlockVector,
    6151  uint32_t currentFrameIndex,
    6152  bool overlappingMoveSupported);
    6153  virtual ~VmaDefragmentationAlgorithm_Generic();
    6154 
    6155  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    6156  virtual void AddAll() { m_AllAllocations = true; }
    6157 
    6158  virtual VkResult Defragment(
    6159  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6160  VkDeviceSize maxBytesToMove,
    6161  uint32_t maxAllocationsToMove);
    6162 
    6163  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    6164  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    6165 
    6166 private:
    6167  uint32_t m_AllocationCount;
    6168  bool m_AllAllocations;
    6169 
    6170  VkDeviceSize m_BytesMoved;
    6171  uint32_t m_AllocationsMoved;
    6172 
    6173  struct AllocationInfoSizeGreater
    6174  {
    6175  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    6176  {
    6177  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    6178  }
    6179  };
    6180 
    6181  struct AllocationInfoOffsetGreater
    6182  {
    6183  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    6184  {
    6185  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
    6186  }
    6187  };
    6188 
    6189  struct BlockInfo
    6190  {
    6191  size_t m_OriginalBlockIndex;
    6192  VmaDeviceMemoryBlock* m_pBlock;
    6193  bool m_HasNonMovableAllocations;
    6194  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    6195 
    6196  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    6197  m_OriginalBlockIndex(SIZE_MAX),
    6198  m_pBlock(VMA_NULL),
    6199  m_HasNonMovableAllocations(true),
    6200  m_Allocations(pAllocationCallbacks)
    6201  {
    6202  }
    6203 
    6204  void CalcHasNonMovableAllocations()
    6205  {
    6206  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
    6207  const size_t defragmentAllocCount = m_Allocations.size();
    6208  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    6209  }
    6210 
    6211  void SortAllocationsBySizeDescending()
    6212  {
    6213  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    6214  }
    6215 
    6216  void SortAllocationsByOffsetDescending()
    6217  {
    6218  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
    6219  }
    6220  };
    6221 
    6222  struct BlockPointerLess
    6223  {
    6224  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    6225  {
    6226  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    6227  }
    6228  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    6229  {
    6230  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    6231  }
    6232  };
    6233 
    6234  // 1. Blocks with some non-movable allocations go first.
    6235  // 2. Blocks with smaller sumFreeSize go first.
    6236  struct BlockInfoCompareMoveDestination
    6237  {
    6238  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    6239  {
    6240  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    6241  {
    6242  return true;
    6243  }
    6244  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    6245  {
    6246  return false;
    6247  }
    6248  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
    6249  {
    6250  return true;
    6251  }
    6252  return false;
    6253  }
    6254  };
    6255 
    6256  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    6257  BlockInfoVector m_Blocks;
    6258 
    6259  VkResult DefragmentRound(
    6260  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6261  VkDeviceSize maxBytesToMove,
    6262  uint32_t maxAllocationsToMove);
    6263 
    6264  size_t CalcBlocksWithNonMovableCount() const;
    6265 
    6266  static bool MoveMakesSense(
    6267  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6268  size_t srcBlockIndex, VkDeviceSize srcOffset);
    6269 };
    6270 
    6271 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
    6272 {
    6273  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
    6274 public:
    6275  VmaDefragmentationAlgorithm_Fast(
    6276  VmaAllocator hAllocator,
    6277  VmaBlockVector* pBlockVector,
    6278  uint32_t currentFrameIndex,
    6279  bool overlappingMoveSupported);
    6280  virtual ~VmaDefragmentationAlgorithm_Fast();
    6281 
    6282  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
    6283  virtual void AddAll() { m_AllAllocations = true; }
    6284 
    6285  virtual VkResult Defragment(
    6286  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6287  VkDeviceSize maxBytesToMove,
    6288  uint32_t maxAllocationsToMove);
    6289 
    6290  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    6291  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    6292 
    6293 private:
    6294  struct BlockInfo
    6295  {
    6296  size_t origBlockIndex;
    6297  };
    6298 
    6299  class FreeSpaceDatabase
    6300  {
    6301  public:
    6302  FreeSpaceDatabase()
    6303  {
    6304  FreeSpace s = {};
    6305  s.blockInfoIndex = SIZE_MAX;
    6306  for(size_t i = 0; i < MAX_COUNT; ++i)
    6307  {
    6308  m_FreeSpaces[i] = s;
    6309  }
    6310  }
    6311 
    6312  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
    6313  {
    6314  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6315  {
    6316  return;
    6317  }
    6318 
    6319  // Find first invalid or the smallest structure.
    6320  size_t bestIndex = SIZE_MAX;
    6321  for(size_t i = 0; i < MAX_COUNT; ++i)
    6322  {
    6323  // Empty structure.
    6324  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
    6325  {
    6326  bestIndex = i;
    6327  break;
    6328  }
    6329  if(m_FreeSpaces[i].size < size &&
    6330  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
    6331  {
    6332  bestIndex = i;
    6333  }
    6334  }
    6335 
    6336  if(bestIndex != SIZE_MAX)
    6337  {
    6338  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
    6339  m_FreeSpaces[bestIndex].offset = offset;
    6340  m_FreeSpaces[bestIndex].size = size;
    6341  }
    6342  }
    6343 
    6344  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
    6345  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
    6346  {
    6347  size_t bestIndex = SIZE_MAX;
    6348  VkDeviceSize bestFreeSpaceAfter = 0;
    6349  for(size_t i = 0; i < MAX_COUNT; ++i)
    6350  {
    6351  // Structure is valid.
    6352  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
    6353  {
    6354  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
    6355  // Allocation fits into this structure.
    6356  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
    6357  {
    6358  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
    6359  (dstOffset + size);
    6360  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
    6361  {
    6362  bestIndex = i;
    6363  bestFreeSpaceAfter = freeSpaceAfter;
    6364  }
    6365  }
    6366  }
    6367  }
    6368 
    6369  if(bestIndex != SIZE_MAX)
    6370  {
    6371  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
    6372  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
    6373 
    6374  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6375  {
    6376  // Leave this structure for remaining empty space.
    6377  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
    6378  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
    6379  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
    6380  }
    6381  else
    6382  {
    6383  // This structure becomes invalid.
    6384  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
    6385  }
    6386 
    6387  return true;
    6388  }
    6389 
    6390  return false;
    6391  }
    6392 
    6393  private:
    6394  static const size_t MAX_COUNT = 4;
    6395 
    6396  struct FreeSpace
    6397  {
    6398  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
    6399  VkDeviceSize offset;
    6400  VkDeviceSize size;
    6401  } m_FreeSpaces[MAX_COUNT];
    6402  };
    6403 
    6404  const bool m_OverlappingMoveSupported;
    6405 
    6406  uint32_t m_AllocationCount;
    6407  bool m_AllAllocations;
    6408 
    6409  VkDeviceSize m_BytesMoved;
    6410  uint32_t m_AllocationsMoved;
    6411 
    6412  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
    6413 
    6414  void PreprocessMetadata();
    6415  void PostprocessMetadata();
    6416  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
    6417 };
    6418 
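// A standalone sketch of the alignment-aware fit test used by
// FreeSpaceDatabase::Fetch() above: align the candidate offset up, then
// check that the aligned allocation still ends within the free range.
// AlignUp assumes a power-of-two alignment, like VmaAlignUp.
#include <cstdint>
#include <cstdio>

static uint64_t AlignUp(uint64_t value, uint64_t alignment)
{
    return (value + alignment - 1) & ~(alignment - 1);
}

static bool Fits(uint64_t freeOffset, uint64_t freeSize,
    uint64_t allocSize, uint64_t allocAlignment, uint64_t& outDstOffset)
{
    const uint64_t dstOffset = AlignUp(freeOffset, allocAlignment);
    if(dstOffset + allocSize <= freeOffset + freeSize)
    {
        outDstOffset = dstOffset;
        return true;
    }
    return false;
}

int main()
{
    uint64_t dst = 0;
    if(Fits(100, 64, 32, 16, dst))
        printf("fits at offset %llu\n", (unsigned long long)dst); // 112
    if(!Fits(100, 40, 32, 16, dst))
        printf("padding makes it overflow\n"); // 12 bytes lost to alignment.
    return 0;
}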
    6419 struct VmaBlockDefragmentationContext
    6420 {
    6421  enum BLOCK_FLAG
    6422  {
    6423  BLOCK_FLAG_USED = 0x00000001,
    6424  };
    6425  uint32_t flags;
    6426  VkBuffer hBuffer;
    6427 };
    6428 
    6429 class VmaBlockVectorDefragmentationContext
    6430 {
    6431  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
    6432 public:
    6433  VkResult res;
    6434  bool mutexLocked;
    6435  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
    6436 
    6437  VmaBlockVectorDefragmentationContext(
    6438  VmaAllocator hAllocator,
    6439  VmaPool hCustomPool, // Optional.
    6440  VmaBlockVector* pBlockVector,
    6441  uint32_t currFrameIndex);
    6442  ~VmaBlockVectorDefragmentationContext();
    6443 
    6444  VmaPool GetCustomPool() const { return m_hCustomPool; }
    6445  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    6446  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
    6447 
    6448  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    6449  void AddAll() { m_AllAllocations = true; }
    6450 
    6451  void Begin(bool overlappingMoveSupported);
    6452 
    6453 private:
    6454  const VmaAllocator m_hAllocator;
    6455  // Null if not from custom pool.
    6456  const VmaPool m_hCustomPool;
 6457  // Redundant, stored for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    6458  VmaBlockVector* const m_pBlockVector;
    6459  const uint32_t m_CurrFrameIndex;
    6460  // Owner of this object.
    6461  VmaDefragmentationAlgorithm* m_pAlgorithm;
    6462 
    6463  struct AllocInfo
    6464  {
    6465  VmaAllocation hAlloc;
    6466  VkBool32* pChanged;
    6467  };
    6468  // Used between constructor and Begin.
    6469  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    6470  bool m_AllAllocations;
    6471 };
    6472 
    6473 struct VmaDefragmentationContext_T
    6474 {
    6475 private:
    6476  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
    6477 public:
    6478  VmaDefragmentationContext_T(
    6479  VmaAllocator hAllocator,
    6480  uint32_t currFrameIndex,
    6481  uint32_t flags,
    6482  VmaDefragmentationStats* pStats);
    6483  ~VmaDefragmentationContext_T();
    6484 
    6485  void AddPools(uint32_t poolCount, VmaPool* pPools);
    6486  void AddAllocations(
    6487  uint32_t allocationCount,
    6488  VmaAllocation* pAllocations,
    6489  VkBool32* pAllocationsChanged);
    6490 
    6491  /*
    6492  Returns:
 6493  - `VK_SUCCESS` if succeeded and the object can be destroyed immediately.
 6494  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
 6495  - Negative value if an error occurred and the object can be destroyed immediately.
    6496  */
    6497  VkResult Defragment(
    6498  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    6499  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    6500  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
    6501 
    6502 private:
    6503  const VmaAllocator m_hAllocator;
    6504  const uint32_t m_CurrFrameIndex;
    6505  const uint32_t m_Flags;
    6506  VmaDefragmentationStats* const m_pStats;
    6507  // Owner of these objects.
    6508  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    6509  // Owner of these objects.
    6510  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
    6511 };
    6512 
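// A hedged usage sketch of the Defragment() contract above, written against
// the public entry points vmaDefragmentationBegin()/vmaDefragmentationEnd().
// Only CPU-side moves are requested (no command buffer); `allocator`,
// `allocs` and `allocCount` are assumed to come from the calling code, and
// the header is assumed to be available for inclusion.
#include "vk_mem_alloc.h"

static VkResult DefragmentCpuOnly(VmaAllocator allocator,
    VmaAllocation* allocs, uint32_t allocCount)
{
    VmaDefragmentationInfo2 info = {};
    info.allocationCount = allocCount;
    info.pAllocations = allocs;
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;    // No byte budget.
    info.maxCpuAllocationsToMove = UINT32_MAX; // No allocation budget.
    // info.commandBuffer stays null: no GPU-side defragmentation requested.

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, nullptr, &ctx);
    if(res == VK_SUCCESS || res == VK_NOT_READY)
    {
        // VK_NOT_READY would mean GPU work is still pending; with CPU-only
        // limits the context can be ended right away.
        res = vmaDefragmentationEnd(allocator, ctx);
    }
    return res; // Negative values: error, context already destroyable.
}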
    6513 #if VMA_RECORDING_ENABLED
    6514 
    6515 class VmaRecorder
    6516 {
    6517 public:
    6518  VmaRecorder();
    6519  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    6520  void WriteConfiguration(
    6521  const VkPhysicalDeviceProperties& devProps,
    6522  const VkPhysicalDeviceMemoryProperties& memProps,
    6523  bool dedicatedAllocationExtensionEnabled);
    6524  ~VmaRecorder();
    6525 
    6526  void RecordCreateAllocator(uint32_t frameIndex);
    6527  void RecordDestroyAllocator(uint32_t frameIndex);
    6528  void RecordCreatePool(uint32_t frameIndex,
    6529  const VmaPoolCreateInfo& createInfo,
    6530  VmaPool pool);
    6531  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    6532  void RecordAllocateMemory(uint32_t frameIndex,
    6533  const VkMemoryRequirements& vkMemReq,
    6534  const VmaAllocationCreateInfo& createInfo,
    6535  VmaAllocation allocation);
    6536  void RecordAllocateMemoryPages(uint32_t frameIndex,
    6537  const VkMemoryRequirements& vkMemReq,
    6538  const VmaAllocationCreateInfo& createInfo,
    6539  uint64_t allocationCount,
    6540  const VmaAllocation* pAllocations);
    6541  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    6542  const VkMemoryRequirements& vkMemReq,
    6543  bool requiresDedicatedAllocation,
    6544  bool prefersDedicatedAllocation,
    6545  const VmaAllocationCreateInfo& createInfo,
    6546  VmaAllocation allocation);
    6547  void RecordAllocateMemoryForImage(uint32_t frameIndex,
    6548  const VkMemoryRequirements& vkMemReq,
    6549  bool requiresDedicatedAllocation,
    6550  bool prefersDedicatedAllocation,
    6551  const VmaAllocationCreateInfo& createInfo,
    6552  VmaAllocation allocation);
    6553  void RecordFreeMemory(uint32_t frameIndex,
    6554  VmaAllocation allocation);
    6555  void RecordFreeMemoryPages(uint32_t frameIndex,
    6556  uint64_t allocationCount,
    6557  const VmaAllocation* pAllocations);
    6558  void RecordResizeAllocation(
    6559  uint32_t frameIndex,
    6560  VmaAllocation allocation,
    6561  VkDeviceSize newSize);
    6562  void RecordSetAllocationUserData(uint32_t frameIndex,
    6563  VmaAllocation allocation,
    6564  const void* pUserData);
    6565  void RecordCreateLostAllocation(uint32_t frameIndex,
    6566  VmaAllocation allocation);
    6567  void RecordMapMemory(uint32_t frameIndex,
    6568  VmaAllocation allocation);
    6569  void RecordUnmapMemory(uint32_t frameIndex,
    6570  VmaAllocation allocation);
    6571  void RecordFlushAllocation(uint32_t frameIndex,
    6572  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6573  void RecordInvalidateAllocation(uint32_t frameIndex,
    6574  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6575  void RecordCreateBuffer(uint32_t frameIndex,
    6576  const VkBufferCreateInfo& bufCreateInfo,
    6577  const VmaAllocationCreateInfo& allocCreateInfo,
    6578  VmaAllocation allocation);
    6579  void RecordCreateImage(uint32_t frameIndex,
    6580  const VkImageCreateInfo& imageCreateInfo,
    6581  const VmaAllocationCreateInfo& allocCreateInfo,
    6582  VmaAllocation allocation);
    6583  void RecordDestroyBuffer(uint32_t frameIndex,
    6584  VmaAllocation allocation);
    6585  void RecordDestroyImage(uint32_t frameIndex,
    6586  VmaAllocation allocation);
    6587  void RecordTouchAllocation(uint32_t frameIndex,
    6588  VmaAllocation allocation);
    6589  void RecordGetAllocationInfo(uint32_t frameIndex,
    6590  VmaAllocation allocation);
    6591  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
    6592  VmaPool pool);
 6593  void RecordDefragmentationBegin(uint32_t frameIndex,
 6594  const VmaDefragmentationInfo2& info,
 6595  VmaDefragmentationContext ctx);
 6596  void RecordDefragmentationEnd(uint32_t frameIndex,
 6597  VmaDefragmentationContext ctx);
 6598 
    6599 private:
    6600  struct CallParams
    6601  {
    6602  uint32_t threadId;
    6603  double time;
    6604  };
    6605 
    6606  class UserDataString
    6607  {
    6608  public:
    6609  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
    6610  const char* GetString() const { return m_Str; }
    6611 
    6612  private:
    6613  char m_PtrStr[17];
    6614  const char* m_Str;
    6615  };
    6616 
    6617  bool m_UseMutex;
    6618  VmaRecordFlags m_Flags;
    6619  FILE* m_File;
    6620  VMA_MUTEX m_FileMutex;
    6621  int64_t m_Freq;
    6622  int64_t m_StartCounter;
    6623 
    6624  void GetBasicParams(CallParams& outParams);
    6625 
    6626  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
    6627  template<typename T>
    6628  void PrintPointerList(uint64_t count, const T* pItems)
    6629  {
    6630  if(count)
    6631  {
    6632  fprintf(m_File, "%p", pItems[0]);
    6633  for(uint64_t i = 1; i < count; ++i)
    6634  {
    6635  fprintf(m_File, " %p", pItems[i]);
    6636  }
    6637  }
    6638  }
    6639 
    6640  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
    6641  void Flush();
    6642 };
    6643 
    6644 #endif // #if VMA_RECORDING_ENABLED
    6645 
    6646 /*
    6647 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
    6648 */
    6649 class VmaAllocationObjectAllocator
    6650 {
    6651  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
    6652 public:
    6653  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
    6654 
    6655  VmaAllocation Allocate();
    6656  void Free(VmaAllocation hAlloc);
    6657 
    6658 private:
    6659  VMA_MUTEX m_Mutex;
    6660  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
    6661 };
    6662 
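// A hedged sketch of the wrapper pattern above, with a trivial free list
// standing in for VmaPoolAllocator<T> (an assumption, illustrative only):
// the underlying pool is single-threaded, and the wrapper contributes
// exactly one mutex lock per Allocate()/Free().
#include <cstdio>
#include <mutex>
#include <vector>

template<typename T>
class TinyFreeListPool // Stand-in for VmaPoolAllocator<T>; not thread-safe.
{
public:
    T* Allocate()
    {
        if(m_Free.empty())
            return new T();
        T* p = m_Free.back();
        m_Free.pop_back();
        return p;
    }
    void Free(T* p) { m_Free.push_back(p); }
    ~TinyFreeListPool() { for(T* p : m_Free) delete p; }
private:
    std::vector<T*> m_Free;
};

template<typename T>
class LockedPoolSketch
{
public:
    T* Allocate() { std::lock_guard<std::mutex> lock(m_Mutex); return m_Pool.Allocate(); }
    void Free(T* p) { std::lock_guard<std::mutex> lock(m_Mutex); m_Pool.Free(p); }
private:
    std::mutex m_Mutex;
    TinyFreeListPool<T> m_Pool;
};

int main()
{
    LockedPoolSketch<int> pool;
    int* a = pool.Allocate();
    pool.Free(a); // Returned to the free list, reused by the next Allocate().
    int* b = pool.Allocate();
    printf("%p %p\n", (void*)a, (void*)b); // Same address: recycled.
    pool.Free(b);
    return 0;
}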
    6663 // Main allocator object.
    6664 struct VmaAllocator_T
    6665 {
    6666  VMA_CLASS_NO_COPY(VmaAllocator_T)
    6667 public:
    6668  bool m_UseMutex;
    6669  bool m_UseKhrDedicatedAllocation;
    6670  VkDevice m_hDevice;
    6671  bool m_AllocationCallbacksSpecified;
    6672  VkAllocationCallbacks m_AllocationCallbacks;
    6673  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    6674  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
    6675 
 6676  // Number of bytes still free out of the limit, or VK_WHOLE_SIZE if that heap has no limit.
    6677  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    6678  VMA_MUTEX m_HeapSizeLimitMutex;
    6679 
    6680  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    6681  VkPhysicalDeviceMemoryProperties m_MemProps;
    6682 
    6683  // Default pools.
    6684  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    6685 
    6686  // Each vector is sorted by memory (handle value).
    6687  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    6688  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    6689  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    6690 
    6691  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    6692  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    6693  ~VmaAllocator_T();
    6694 
    6695  const VkAllocationCallbacks* GetAllocationCallbacks() const
    6696  {
    6697  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    6698  }
    6699  const VmaVulkanFunctions& GetVulkanFunctions() const
    6700  {
    6701  return m_VulkanFunctions;
    6702  }
    6703 
    6704  VkDeviceSize GetBufferImageGranularity() const
    6705  {
    6706  return VMA_MAX(
    6707  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    6708  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    6709  }
    6710 
    6711  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    6712  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    6713 
    6714  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    6715  {
    6716  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    6717  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    6718  }
 6719  // True when the given memory type is HOST_VISIBLE but not HOST_COHERENT.
    6720  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    6721  {
    6722  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
    6723  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    6724  }
 6725  // Minimum alignment for all allocations in the given memory type.
    6726  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    6727  {
    6728  return IsMemoryTypeNonCoherent(memTypeIndex) ?
    6729  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
    6730  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    6731  }
    6732 
    6733  bool IsIntegratedGpu() const
    6734  {
    6735  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    6736  }
    6737 
    6738 #if VMA_RECORDING_ENABLED
    6739  VmaRecorder* GetRecorder() const { return m_pRecorder; }
    6740 #endif
    6741 
    6742  void GetBufferMemoryRequirements(
    6743  VkBuffer hBuffer,
    6744  VkMemoryRequirements& memReq,
    6745  bool& requiresDedicatedAllocation,
    6746  bool& prefersDedicatedAllocation) const;
    6747  void GetImageMemoryRequirements(
    6748  VkImage hImage,
    6749  VkMemoryRequirements& memReq,
    6750  bool& requiresDedicatedAllocation,
    6751  bool& prefersDedicatedAllocation) const;
    6752 
    6753  // Main allocation function.
    6754  VkResult AllocateMemory(
    6755  const VkMemoryRequirements& vkMemReq,
    6756  bool requiresDedicatedAllocation,
    6757  bool prefersDedicatedAllocation,
    6758  VkBuffer dedicatedBuffer,
    6759  VkImage dedicatedImage,
    6760  const VmaAllocationCreateInfo& createInfo,
    6761  VmaSuballocationType suballocType,
    6762  size_t allocationCount,
    6763  VmaAllocation* pAllocations);
    6764 
    6765  // Main deallocation function.
    6766  void FreeMemory(
    6767  size_t allocationCount,
    6768  const VmaAllocation* pAllocations);
    6769 
    6770  VkResult ResizeAllocation(
    6771  const VmaAllocation alloc,
    6772  VkDeviceSize newSize);
    6773 
    6774  void CalculateStats(VmaStats* pStats);
    6775 
    6776 #if VMA_STATS_STRING_ENABLED
    6777  void PrintDetailedMap(class VmaJsonWriter& json);
    6778 #endif
    6779 
    6780  VkResult DefragmentationBegin(
    6781  const VmaDefragmentationInfo2& info,
    6782  VmaDefragmentationStats* pStats,
    6783  VmaDefragmentationContext* pContext);
    6784  VkResult DefragmentationEnd(
    6785  VmaDefragmentationContext context);
    6786 
    6787  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    6788  bool TouchAllocation(VmaAllocation hAllocation);
    6789 
    6790  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    6791  void DestroyPool(VmaPool pool);
    6792  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    6793 
    6794  void SetCurrentFrameIndex(uint32_t frameIndex);
    6795  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
    6796 
    6797  void MakePoolAllocationsLost(
    6798  VmaPool hPool,
    6799  size_t* pLostAllocationCount);
    6800  VkResult CheckPoolCorruption(VmaPool hPool);
    6801  VkResult CheckCorruption(uint32_t memoryTypeBits);
    6802 
    6803  void CreateLostAllocation(VmaAllocation* pAllocation);
    6804 
    6805  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    6806  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    6807 
    6808  VkResult Map(VmaAllocation hAllocation, void** ppData);
    6809  void Unmap(VmaAllocation hAllocation);
    6810 
    6811  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    6812  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
    6813 
    6814  void FlushOrInvalidateAllocation(
    6815  VmaAllocation hAllocation,
    6816  VkDeviceSize offset, VkDeviceSize size,
    6817  VMA_CACHE_OPERATION op);
    6818 
    6819  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
    6820 
    6821  /*
 6822  Returns a bit mask of memory types that can support defragmentation on the GPU,
 6823  as they allow creation of the buffer required for copy operations.
    6824  */
    6825  uint32_t GetGpuDefragmentationMemoryTypeBits();
    6826 
    6827 private:
    6828  VkDeviceSize m_PreferredLargeHeapBlockSize;
    6829 
    6830  VkPhysicalDevice m_PhysicalDevice;
    6831  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    6832  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
    6833 
    6834  VMA_RW_MUTEX m_PoolsMutex;
    6835  // Protected by m_PoolsMutex. Sorted by pointer value.
    6836  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    6837  uint32_t m_NextPoolId;
    6838 
    6839  VmaVulkanFunctions m_VulkanFunctions;
    6840 
    6841 #if VMA_RECORDING_ENABLED
    6842  VmaRecorder* m_pRecorder;
    6843 #endif
    6844 
    6845  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    6846 
    6847  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    6848 
    6849  VkResult AllocateMemoryOfType(
    6850  VkDeviceSize size,
    6851  VkDeviceSize alignment,
    6852  bool dedicatedAllocation,
    6853  VkBuffer dedicatedBuffer,
    6854  VkImage dedicatedImage,
    6855  const VmaAllocationCreateInfo& createInfo,
    6856  uint32_t memTypeIndex,
    6857  VmaSuballocationType suballocType,
    6858  size_t allocationCount,
    6859  VmaAllocation* pAllocations);
    6860 
    6861  // Helper function only to be used inside AllocateDedicatedMemory.
    6862  VkResult AllocateDedicatedMemoryPage(
    6863  VkDeviceSize size,
    6864  VmaSuballocationType suballocType,
    6865  uint32_t memTypeIndex,
    6866  const VkMemoryAllocateInfo& allocInfo,
    6867  bool map,
    6868  bool isUserDataString,
    6869  void* pUserData,
    6870  VmaAllocation* pAllocation);
    6871 
    6872  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
    6873  VkResult AllocateDedicatedMemory(
    6874  VkDeviceSize size,
    6875  VmaSuballocationType suballocType,
    6876  uint32_t memTypeIndex,
    6877  bool map,
    6878  bool isUserDataString,
    6879  void* pUserData,
    6880  VkBuffer dedicatedBuffer,
    6881  VkImage dedicatedImage,
    6882  size_t allocationCount,
    6883  VmaAllocation* pAllocations);
    6884 
    6885  void FreeDedicatedMemory(VmaAllocation allocation);
    6886 
    6887  /*
 6888  Calculates and returns a bit mask of memory types that can support defragmentation
 6889  on the GPU, as they allow creation of the buffer required for copy operations.
    6890  */
    6891  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
    6892 };
    6893 
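// A standalone sketch of the rule implemented by IsMemoryTypeNonCoherent()
// and GetMemoryTypeMinAlignment() above: HOST_VISIBLE memory without
// HOST_COHERENT must be flushed/invalidated at nonCoherentAtomSize
// granularity, so allocations there get the larger minimum alignment.
// The bit values below match VkMemoryPropertyFlagBits.
#include <cstdint>
#include <cstdio>

static const uint32_t HOST_VISIBLE  = 0x2; // VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
static const uint32_t HOST_COHERENT = 0x4; // VK_MEMORY_PROPERTY_HOST_COHERENT_BIT

static uint64_t MinAlignment(uint32_t propertyFlags,
    uint64_t debugAlignment, uint64_t nonCoherentAtomSize)
{
    const bool nonCoherent =
        (propertyFlags & (HOST_VISIBLE | HOST_COHERENT)) == HOST_VISIBLE;
    return nonCoherent
        ? (debugAlignment > nonCoherentAtomSize ? debugAlignment : nonCoherentAtomSize)
        : debugAlignment;
}

int main()
{
    // Non-coherent type: alignment raised to the atom size.
    printf("%llu\n", (unsigned long long)MinAlignment(HOST_VISIBLE, 1, 64)); // 64
    // Coherent type: only the debug alignment applies.
    printf("%llu\n", (unsigned long long)MinAlignment(HOST_VISIBLE | HOST_COHERENT, 1, 64)); // 1
    return 0;
}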
 6894 ////////////////////////////////////////////////////////////////////////////////
 6895 // Memory allocation #2 after VmaAllocator_T definition
    6896 
    6897 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    6898 {
    6899  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    6900 }
    6901 
    6902 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    6903 {
    6904  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    6905 }
    6906 
    6907 template<typename T>
    6908 static T* VmaAllocate(VmaAllocator hAllocator)
    6909 {
    6910  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    6911 }
    6912 
    6913 template<typename T>
    6914 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    6915 {
    6916  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    6917 }
    6918 
    6919 template<typename T>
    6920 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    6921 {
    6922  if(ptr != VMA_NULL)
    6923  {
    6924  ptr->~T();
    6925  VmaFree(hAllocator, ptr);
    6926  }
    6927 }
    6928 
    6929 template<typename T>
    6930 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    6931 {
    6932  if(ptr != VMA_NULL)
    6933  {
    6934  for(size_t i = count; i--; )
    6935  ptr[i].~T();
    6936  VmaFree(hAllocator, ptr);
    6937  }
    6938 }
    6939 
 6940 ////////////////////////////////////////////////////////////////////////////////
 6941 // VmaStringBuilder
    6942 
    6943 #if VMA_STATS_STRING_ENABLED
    6944 
    6945 class VmaStringBuilder
    6946 {
    6947 public:
    6948  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    6949  size_t GetLength() const { return m_Data.size(); }
    6950  const char* GetData() const { return m_Data.data(); }
    6951 
    6952  void Add(char ch) { m_Data.push_back(ch); }
    6953  void Add(const char* pStr);
    6954  void AddNewLine() { Add('\n'); }
    6955  void AddNumber(uint32_t num);
    6956  void AddNumber(uint64_t num);
    6957  void AddPointer(const void* ptr);
    6958 
    6959 private:
    6960  VmaVector< char, VmaStlAllocator<char> > m_Data;
    6961 };
    6962 
    6963 void VmaStringBuilder::Add(const char* pStr)
    6964 {
    6965  const size_t strLen = strlen(pStr);
    6966  if(strLen > 0)
    6967  {
    6968  const size_t oldCount = m_Data.size();
    6969  m_Data.resize(oldCount + strLen);
    6970  memcpy(m_Data.data() + oldCount, pStr, strLen);
    6971  }
    6972 }
    6973 
    6974 void VmaStringBuilder::AddNumber(uint32_t num)
    6975 {
    6976  char buf[11];
    6977  VmaUint32ToStr(buf, sizeof(buf), num);
    6978  Add(buf);
    6979 }
    6980 
    6981 void VmaStringBuilder::AddNumber(uint64_t num)
    6982 {
    6983  char buf[21];
    6984  VmaUint64ToStr(buf, sizeof(buf), num);
    6985  Add(buf);
    6986 }
    6987 
    6988 void VmaStringBuilder::AddPointer(const void* ptr)
    6989 {
    6990  char buf[21];
    6991  VmaPtrToStr(buf, sizeof(buf), ptr);
    6992  Add(buf);
    6993 }
    6994 
    6995 #endif // #if VMA_STATS_STRING_ENABLED
    6996 
 6997 ////////////////////////////////////////////////////////////////////////////////
 6998 // VmaJsonWriter
    6999 
    7000 #if VMA_STATS_STRING_ENABLED
    7001 
    7002 class VmaJsonWriter
    7003 {
    7004  VMA_CLASS_NO_COPY(VmaJsonWriter)
    7005 public:
    7006  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    7007  ~VmaJsonWriter();
    7008 
    7009  void BeginObject(bool singleLine = false);
    7010  void EndObject();
    7011 
    7012  void BeginArray(bool singleLine = false);
    7013  void EndArray();
    7014 
    7015  void WriteString(const char* pStr);
    7016  void BeginString(const char* pStr = VMA_NULL);
    7017  void ContinueString(const char* pStr);
    7018  void ContinueString(uint32_t n);
    7019  void ContinueString(uint64_t n);
    7020  void ContinueString_Pointer(const void* ptr);
    7021  void EndString(const char* pStr = VMA_NULL);
    7022 
    7023  void WriteNumber(uint32_t n);
    7024  void WriteNumber(uint64_t n);
    7025  void WriteBool(bool b);
    7026  void WriteNull();
    7027 
    7028 private:
    7029  static const char* const INDENT;
    7030 
    7031  enum COLLECTION_TYPE
    7032  {
    7033  COLLECTION_TYPE_OBJECT,
    7034  COLLECTION_TYPE_ARRAY,
    7035  };
    7036  struct StackItem
    7037  {
    7038  COLLECTION_TYPE type;
    7039  uint32_t valueCount;
    7040  bool singleLineMode;
    7041  };
    7042 
    7043  VmaStringBuilder& m_SB;
    7044  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    7045  bool m_InsideString;
    7046 
    7047  void BeginValue(bool isString);
    7048  void WriteIndent(bool oneLess = false);
    7049 };
    7050 
    7051 const char* const VmaJsonWriter::INDENT = " ";
    7052 
    7053 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    7054  m_SB(sb),
    7055  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    7056  m_InsideString(false)
    7057 {
    7058 }
    7059 
    7060 VmaJsonWriter::~VmaJsonWriter()
    7061 {
    7062  VMA_ASSERT(!m_InsideString);
    7063  VMA_ASSERT(m_Stack.empty());
    7064 }
    7065 
    7066 void VmaJsonWriter::BeginObject(bool singleLine)
    7067 {
    7068  VMA_ASSERT(!m_InsideString);
    7069 
    7070  BeginValue(false);
    7071  m_SB.Add('{');
    7072 
    7073  StackItem item;
    7074  item.type = COLLECTION_TYPE_OBJECT;
    7075  item.valueCount = 0;
    7076  item.singleLineMode = singleLine;
    7077  m_Stack.push_back(item);
    7078 }
    7079 
    7080 void VmaJsonWriter::EndObject()
    7081 {
    7082  VMA_ASSERT(!m_InsideString);
    7083 
    7084  WriteIndent(true);
    7085  m_SB.Add('}');
    7086 
    7087  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    7088  m_Stack.pop_back();
    7089 }
    7090 
    7091 void VmaJsonWriter::BeginArray(bool singleLine)
    7092 {
    7093  VMA_ASSERT(!m_InsideString);
    7094 
    7095  BeginValue(false);
    7096  m_SB.Add('[');
    7097 
    7098  StackItem item;
    7099  item.type = COLLECTION_TYPE_ARRAY;
    7100  item.valueCount = 0;
    7101  item.singleLineMode = singleLine;
    7102  m_Stack.push_back(item);
    7103 }
    7104 
    7105 void VmaJsonWriter::EndArray()
    7106 {
    7107  VMA_ASSERT(!m_InsideString);
    7108 
    7109  WriteIndent(true);
    7110  m_SB.Add(']');
    7111 
    7112  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    7113  m_Stack.pop_back();
    7114 }
    7115 
    7116 void VmaJsonWriter::WriteString(const char* pStr)
    7117 {
    7118  BeginString(pStr);
    7119  EndString();
    7120 }
    7121 
    7122 void VmaJsonWriter::BeginString(const char* pStr)
    7123 {
    7124  VMA_ASSERT(!m_InsideString);
    7125 
    7126  BeginValue(true);
    7127  m_SB.Add('"');
    7128  m_InsideString = true;
    7129  if(pStr != VMA_NULL && pStr[0] != '\0')
    7130  {
    7131  ContinueString(pStr);
    7132  }
    7133 }
    7134 
    7135 void VmaJsonWriter::ContinueString(const char* pStr)
    7136 {
    7137  VMA_ASSERT(m_InsideString);
    7138 
    7139  const size_t strLen = strlen(pStr);
    7140  for(size_t i = 0; i < strLen; ++i)
    7141  {
    7142  char ch = pStr[i];
    7143  if(ch == '\\')
    7144  {
    7145  m_SB.Add("\\\\");
    7146  }
    7147  else if(ch == '"')
    7148  {
    7149  m_SB.Add("\\\"");
    7150  }
    7151  else if(ch >= 32)
    7152  {
    7153  m_SB.Add(ch);
    7154  }
    7155  else switch(ch)
    7156  {
    7157  case '\b':
    7158  m_SB.Add("\\b");
    7159  break;
    7160  case '\f':
    7161  m_SB.Add("\\f");
    7162  break;
    7163  case '\n':
    7164  m_SB.Add("\\n");
    7165  break;
    7166  case '\r':
    7167  m_SB.Add("\\r");
    7168  break;
    7169  case '\t':
    7170  m_SB.Add("\\t");
    7171  break;
    7172  default:
    7173  VMA_ASSERT(0 && "Character not currently supported.");
    7174  break;
    7175  }
    7176  }
    7177 }
    7178 
    7179 void VmaJsonWriter::ContinueString(uint32_t n)
    7180 {
    7181  VMA_ASSERT(m_InsideString);
    7182  m_SB.AddNumber(n);
    7183 }
    7184 
    7185 void VmaJsonWriter::ContinueString(uint64_t n)
    7186 {
    7187  VMA_ASSERT(m_InsideString);
    7188  m_SB.AddNumber(n);
    7189 }
    7190 
    7191 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    7192 {
    7193  VMA_ASSERT(m_InsideString);
    7194  m_SB.AddPointer(ptr);
    7195 }
    7196 
    7197 void VmaJsonWriter::EndString(const char* pStr)
    7198 {
    7199  VMA_ASSERT(m_InsideString);
    7200  if(pStr != VMA_NULL && pStr[0] != '\0')
    7201  {
    7202  ContinueString(pStr);
    7203  }
    7204  m_SB.Add('"');
    7205  m_InsideString = false;
    7206 }
    7207 
    7208 void VmaJsonWriter::WriteNumber(uint32_t n)
    7209 {
    7210  VMA_ASSERT(!m_InsideString);
    7211  BeginValue(false);
    7212  m_SB.AddNumber(n);
    7213 }
    7214 
    7215 void VmaJsonWriter::WriteNumber(uint64_t n)
    7216 {
    7217  VMA_ASSERT(!m_InsideString);
    7218  BeginValue(false);
    7219  m_SB.AddNumber(n);
    7220 }
    7221 
    7222 void VmaJsonWriter::WriteBool(bool b)
    7223 {
    7224  VMA_ASSERT(!m_InsideString);
    7225  BeginValue(false);
    7226  m_SB.Add(b ? "true" : "false");
    7227 }
    7228 
    7229 void VmaJsonWriter::WriteNull()
    7230 {
    7231  VMA_ASSERT(!m_InsideString);
    7232  BeginValue(false);
    7233  m_SB.Add("null");
    7234 }
    7235 
    7236 void VmaJsonWriter::BeginValue(bool isString)
    7237 {
    7238  if(!m_Stack.empty())
    7239  {
    7240  StackItem& currItem = m_Stack.back();
    7241  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7242  currItem.valueCount % 2 == 0)
    7243  {
    7244  VMA_ASSERT(isString);
    7245  }
    7246 
    7247  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7248  currItem.valueCount % 2 != 0)
    7249  {
    7250  m_SB.Add(": ");
    7251  }
    7252  else if(currItem.valueCount > 0)
    7253  {
    7254  m_SB.Add(", ");
    7255  WriteIndent();
    7256  }
    7257  else
    7258  {
    7259  WriteIndent();
    7260  }
    7261  ++currItem.valueCount;
    7262  }
    7263 }
    7264 
    7265 void VmaJsonWriter::WriteIndent(bool oneLess)
    7266 {
    7267  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    7268  {
    7269  m_SB.AddNewLine();
    7270 
    7271  size_t count = m_Stack.size();
    7272  if(count > 0 && oneLess)
    7273  {
    7274  --count;
    7275  }
    7276  for(size_t i = 0; i < count; ++i)
    7277  {
    7278  m_SB.Add(INDENT);
    7279  }
    7280  }
    7281 }
    7282 
    7283 #endif // #if VMA_STATS_STRING_ENABLED
    7284 
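// A simplified, self-contained analogue (illustrative only, not the class
// above) of the key/value discipline that BeginValue() enforces: inside an
// object, even-indexed entries must be string keys and odd-indexed entries
// are their values, tracked by a per-collection value counter.
#include <cassert>
#include <cstdio>
#include <string>
#include <vector>

class TinyJsonWriterSketch
{
public:
    void BeginObject() { m_Out += '{'; m_Counts.push_back(0); }
    void EndObject()   { m_Out += '}'; m_Counts.pop_back(); }
    void WriteString(const char* s)
    {
        BeginValue(true);
        m_Out += '"'; m_Out += s; m_Out += '"';
    }
    void WriteNumber(unsigned n) { BeginValue(false); m_Out += std::to_string(n); }
    const std::string& Str() const { return m_Out; }

private:
    std::string m_Out;
    std::vector<unsigned> m_Counts;

    void BeginValue(bool isString)
    {
        if(m_Counts.empty()) return;
        unsigned& count = m_Counts.back();
        if(count % 2 == 0)
            assert(isString && "Object keys must be strings.");
        m_Out += (count % 2 != 0) ? ": " : (count > 0 ? ", " : "");
        ++count;
    }
};

int main()
{
    TinyJsonWriterSketch w;
    w.BeginObject();
    w.WriteString("Size"); w.WriteNumber(256);
    w.WriteString("Type"); w.WriteString("BUFFER");
    w.EndObject();
    printf("%s\n", w.Str().c_str()); // {"Size": 256, "Type": "BUFFER"}
    return 0;
}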
 7285 ////////////////////////////////////////////////////////////////////////////////
 7286 
    7287 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    7288 {
    7289  if(IsUserDataString())
    7290  {
    7291  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    7292 
    7293  FreeUserDataString(hAllocator);
    7294 
    7295  if(pUserData != VMA_NULL)
    7296  {
    7297  const char* const newStrSrc = (char*)pUserData;
    7298  const size_t newStrLen = strlen(newStrSrc);
    7299  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    7300  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    7301  m_pUserData = newStrDst;
    7302  }
    7303  }
    7304  else
    7305  {
    7306  m_pUserData = pUserData;
    7307  }
    7308 }
    7309 
    7310 void VmaAllocation_T::ChangeBlockAllocation(
    7311  VmaAllocator hAllocator,
    7312  VmaDeviceMemoryBlock* block,
    7313  VkDeviceSize offset)
    7314 {
    7315  VMA_ASSERT(block != VMA_NULL);
    7316  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    7317 
    7318  // Move mapping reference counter from old block to new block.
    7319  if(block != m_BlockAllocation.m_Block)
    7320  {
    7321  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    7322  if(IsPersistentMap())
    7323  ++mapRefCount;
    7324  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    7325  block->Map(hAllocator, mapRefCount, VMA_NULL);
    7326  }
    7327 
    7328  m_BlockAllocation.m_Block = block;
    7329  m_BlockAllocation.m_Offset = offset;
    7330 }
    7331 
    7332 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
    7333 {
    7334  VMA_ASSERT(newSize > 0);
    7335  m_Size = newSize;
    7336 }
    7337 
    7338 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
    7339 {
    7340  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    7341  m_BlockAllocation.m_Offset = newOffset;
    7342 }
    7343 
    7344 VkDeviceSize VmaAllocation_T::GetOffset() const
    7345 {
    7346  switch(m_Type)
    7347  {
    7348  case ALLOCATION_TYPE_BLOCK:
    7349  return m_BlockAllocation.m_Offset;
    7350  case ALLOCATION_TYPE_DEDICATED:
    7351  return 0;
    7352  default:
    7353  VMA_ASSERT(0);
    7354  return 0;
    7355  }
    7356 }
    7357 
    7358 VkDeviceMemory VmaAllocation_T::GetMemory() const
    7359 {
    7360  switch(m_Type)
    7361  {
    7362  case ALLOCATION_TYPE_BLOCK:
    7363  return m_BlockAllocation.m_Block->GetDeviceMemory();
    7364  case ALLOCATION_TYPE_DEDICATED:
    7365  return m_DedicatedAllocation.m_hMemory;
    7366  default:
    7367  VMA_ASSERT(0);
    7368  return VK_NULL_HANDLE;
    7369  }
    7370 }
    7371 
    7372 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    7373 {
    7374  switch(m_Type)
    7375  {
    7376  case ALLOCATION_TYPE_BLOCK:
    7377  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    7378  case ALLOCATION_TYPE_DEDICATED:
    7379  return m_DedicatedAllocation.m_MemoryTypeIndex;
    7380  default:
    7381  VMA_ASSERT(0);
    7382  return UINT32_MAX;
    7383  }
    7384 }
    7385 
    7386 void* VmaAllocation_T::GetMappedData() const
    7387 {
    7388  switch(m_Type)
    7389  {
    7390  case ALLOCATION_TYPE_BLOCK:
    7391  if(m_MapCount != 0)
    7392  {
    7393  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    7394  VMA_ASSERT(pBlockData != VMA_NULL);
    7395  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    7396  }
    7397  else
    7398  {
    7399  return VMA_NULL;
    7400  }
    7401  break;
    7402  case ALLOCATION_TYPE_DEDICATED:
    7403  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    7404  return m_DedicatedAllocation.m_pMappedData;
    7405  default:
    7406  VMA_ASSERT(0);
    7407  return VMA_NULL;
    7408  }
    7409 }
    7410 
    7411 bool VmaAllocation_T::CanBecomeLost() const
    7412 {
    7413  switch(m_Type)
    7414  {
    7415  case ALLOCATION_TYPE_BLOCK:
    7416  return m_BlockAllocation.m_CanBecomeLost;
    7417  case ALLOCATION_TYPE_DEDICATED:
    7418  return false;
    7419  default:
    7420  VMA_ASSERT(0);
    7421  return false;
    7422  }
    7423 }
    7424 
    7425 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7426 {
    7427  VMA_ASSERT(CanBecomeLost());
    7428 
    7429  /*
    7430  Warning: This is a carefully designed algorithm.
    7431  Do not modify unless you really know what you're doing :)
    7432  */
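          // Note: on failure, CompareExchangeLastUseFrameIndex re-reads the atomic into
          // localLastUseFrameIndex, so this loop keeps retrying until the allocation is either
          // observed as still in use or successfully marked as lost.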
    7433  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    7434  for(;;)
    7435  {
    7436  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    7437  {
    7438  VMA_ASSERT(0);
    7439  return false;
    7440  }
    7441  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    7442  {
    7443  return false;
    7444  }
    7445  else // Last use time earlier than current time.
    7446  {
    7447  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    7448  {
    7449  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    7450  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    7451  return true;
    7452  }
    7453  }
    7454  }
    7455 }
    7456 
    7457 #if VMA_STATS_STRING_ENABLED
    7458 
    7459 // Correspond to values of enum VmaSuballocationType.
    7460 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    7461  "FREE",
    7462  "UNKNOWN",
    7463  "BUFFER",
    7464  "IMAGE_UNKNOWN",
    7465  "IMAGE_LINEAR",
    7466  "IMAGE_OPTIMAL",
    7467 };
    7468 
    7469 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
    7470 {
    7471  json.WriteString("Type");
    7472  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
    7473 
    7474  json.WriteString("Size");
    7475  json.WriteNumber(m_Size);
    7476 
    7477  if(m_pUserData != VMA_NULL)
    7478  {
    7479  json.WriteString("UserData");
    7480  if(IsUserDataString())
    7481  {
    7482  json.WriteString((const char*)m_pUserData);
    7483  }
    7484  else
    7485  {
    7486  json.BeginString();
    7487  json.ContinueString_Pointer(m_pUserData);
    7488  json.EndString();
    7489  }
    7490  }
    7491 
    7492  json.WriteString("CreationFrameIndex");
    7493  json.WriteNumber(m_CreationFrameIndex);
    7494 
    7495  json.WriteString("LastUseFrameIndex");
    7496  json.WriteNumber(GetLastUseFrameIndex());
    7497 
    7498  if(m_BufferImageUsage != 0)
    7499  {
    7500  json.WriteString("Usage");
    7501  json.WriteNumber(m_BufferImageUsage);
    7502  }
    7503 }
    7504 
    7505 #endif
    7506 
    7507 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    7508 {
    7509  VMA_ASSERT(IsUserDataString());
    7510  if(m_pUserData != VMA_NULL)
    7511  {
    7512  char* const oldStr = (char*)m_pUserData;
    7513  const size_t oldStrLen = strlen(oldStr);
    7514  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    7515  m_pUserData = VMA_NULL;
    7516  }
    7517 }
    7518 
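          // Note: the low 7 bits of m_MapCount hold the mapping reference count, while the high bit
          // (MAP_COUNT_FLAG_PERSISTENT_MAP) marks persistently mapped allocations - hence the 0x7F
          // overflow checks in the Map/Unmap functions below.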
    7519 void VmaAllocation_T::BlockAllocMap()
    7520 {
    7521  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7522 
    7523  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7524  {
    7525  ++m_MapCount;
    7526  }
    7527  else
    7528  {
    7529  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    7530  }
    7531 }
    7532 
    7533 void VmaAllocation_T::BlockAllocUnmap()
    7534 {
    7535  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7536 
    7537  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7538  {
    7539  --m_MapCount;
    7540  }
    7541  else
    7542  {
    7543  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    7544  }
    7545 }
    7546 
    7547 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    7548 {
    7549  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7550 
    7551  if(m_MapCount != 0)
    7552  {
    7553  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7554  {
    7555  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    7556  *ppData = m_DedicatedAllocation.m_pMappedData;
    7557  ++m_MapCount;
    7558  return VK_SUCCESS;
    7559  }
    7560  else
    7561  {
    7562  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    7563  return VK_ERROR_MEMORY_MAP_FAILED;
    7564  }
    7565  }
    7566  else
    7567  {
    7568  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    7569  hAllocator->m_hDevice,
    7570  m_DedicatedAllocation.m_hMemory,
    7571  0, // offset
    7572  VK_WHOLE_SIZE,
    7573  0, // flags
    7574  ppData);
    7575  if(result == VK_SUCCESS)
    7576  {
    7577  m_DedicatedAllocation.m_pMappedData = *ppData;
    7578  m_MapCount = 1;
    7579  }
    7580  return result;
    7581  }
    7582 }
    7583 
    7584 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    7585 {
    7586  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7587 
    7588  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7589  {
    7590  --m_MapCount;
    7591  if(m_MapCount == 0)
    7592  {
    7593  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    7594  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    7595  hAllocator->m_hDevice,
    7596  m_DedicatedAllocation.m_hMemory);
    7597  }
    7598  }
    7599  else
    7600  {
    7601  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    7602  }
    7603 }
    7604 
    7605 #if VMA_STATS_STRING_ENABLED
    7606 
    7607 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    7608 {
    7609  json.BeginObject();
    7610 
    7611  json.WriteString("Blocks");
    7612  json.WriteNumber(stat.blockCount);
    7613 
    7614  json.WriteString("Allocations");
    7615  json.WriteNumber(stat.allocationCount);
    7616 
    7617  json.WriteString("UnusedRanges");
    7618  json.WriteNumber(stat.unusedRangeCount);
    7619 
    7620  json.WriteString("UsedBytes");
    7621  json.WriteNumber(stat.usedBytes);
    7622 
    7623  json.WriteString("UnusedBytes");
    7624  json.WriteNumber(stat.unusedBytes);
    7625 
    7626  if(stat.allocationCount > 1)
    7627  {
    7628  json.WriteString("AllocationSize");
    7629  json.BeginObject(true);
    7630  json.WriteString("Min");
    7631  json.WriteNumber(stat.allocationSizeMin);
    7632  json.WriteString("Avg");
    7633  json.WriteNumber(stat.allocationSizeAvg);
    7634  json.WriteString("Max");
    7635  json.WriteNumber(stat.allocationSizeMax);
    7636  json.EndObject();
    7637  }
    7638 
    7639  if(stat.unusedRangeCount > 1)
    7640  {
    7641  json.WriteString("UnusedRangeSize");
    7642  json.BeginObject(true);
    7643  json.WriteString("Min");
    7644  json.WriteNumber(stat.unusedRangeSizeMin);
    7645  json.WriteString("Avg");
    7646  json.WriteNumber(stat.unusedRangeSizeAvg);
    7647  json.WriteString("Max");
    7648  json.WriteNumber(stat.unusedRangeSizeMax);
    7649  json.EndObject();
    7650  }
    7651 
    7652  json.EndObject();
    7653 }
    7654 
    7655 #endif // #if VMA_STATS_STRING_ENABLED
    7656 
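          // Comparator that keeps m_FreeSuballocationsBySize sorted by size. The second overload,
          // taking a raw VkDeviceSize, allows binary search (VmaBinaryFindFirstNotLess) directly
          // against a requested size without materializing a list iterator.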
    7657 struct VmaSuballocationItemSizeLess
    7658 {
    7659  bool operator()(
    7660  const VmaSuballocationList::iterator lhs,
    7661  const VmaSuballocationList::iterator rhs) const
    7662  {
    7663  return lhs->size < rhs->size;
    7664  }
    7665  bool operator()(
    7666  const VmaSuballocationList::iterator lhs,
    7667  VkDeviceSize rhsSize) const
    7668  {
    7669  return lhs->size < rhsSize;
    7670  }
    7671 };
    7672 
    7673 
    7674 ////////////////////////////////////////////////////////////////////////////////
    7675 // class VmaBlockMetadata
    7676 
    7677 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    7678  m_Size(0),
    7679  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
    7680 {
    7681 }
    7682 
    7683 #if VMA_STATS_STRING_ENABLED
    7684 
    7685 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    7686  VkDeviceSize unusedBytes,
    7687  size_t allocationCount,
    7688  size_t unusedRangeCount) const
    7689 {
    7690  json.BeginObject();
    7691 
    7692  json.WriteString("TotalBytes");
    7693  json.WriteNumber(GetSize());
    7694 
    7695  json.WriteString("UnusedBytes");
    7696  json.WriteNumber(unusedBytes);
    7697 
    7698  json.WriteString("Allocations");
    7699  json.WriteNumber((uint64_t)allocationCount);
    7700 
    7701  json.WriteString("UnusedRanges");
    7702  json.WriteNumber((uint64_t)unusedRangeCount);
    7703 
    7704  json.WriteString("Suballocations");
    7705  json.BeginArray();
    7706 }
    7707 
    7708 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    7709  VkDeviceSize offset,
    7710  VmaAllocation hAllocation) const
    7711 {
    7712  json.BeginObject(true);
    7713 
    7714  json.WriteString("Offset");
    7715  json.WriteNumber(offset);
    7716 
    7717  hAllocation->PrintParameters(json);
    7718 
    7719  json.EndObject();
    7720 }
    7721 
    7722 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    7723  VkDeviceSize offset,
    7724  VkDeviceSize size) const
    7725 {
    7726  json.BeginObject(true);
    7727 
    7728  json.WriteString("Offset");
    7729  json.WriteNumber(offset);
    7730 
    7731  json.WriteString("Type");
    7732  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    7733 
    7734  json.WriteString("Size");
    7735  json.WriteNumber(size);
    7736 
    7737  json.EndObject();
    7738 }
    7739 
    7740 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
    7741 {
    7742  json.EndArray();
    7743  json.EndObject();
    7744 }
    7745 
    7746 #endif // #if VMA_STATS_STRING_ENABLED
    7747 
    7748 ////////////////////////////////////////////////////////////////////////////////
    7749 // class VmaBlockMetadata_Generic
    7750 
    7751 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    7752  VmaBlockMetadata(hAllocator),
    7753  m_FreeCount(0),
    7754  m_SumFreeSize(0),
    7755  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    7756  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    7757 {
    7758 }
    7759 
    7760 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
    7761 {
    7762 }
    7763 
    7764 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    7765 {
    7766  VmaBlockMetadata::Init(size);
    7767 
    7768  m_FreeCount = 1;
    7769  m_SumFreeSize = size;
    7770 
    7771  VmaSuballocation suballoc = {};
    7772  suballoc.offset = 0;
    7773  suballoc.size = size;
    7774  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7775  suballoc.hAllocation = VK_NULL_HANDLE;
    7776 
    7777  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7778  m_Suballocations.push_back(suballoc);
    7779  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    7780  --suballocItem;
    7781  m_FreeSuballocationsBySize.push_back(suballocItem);
    7782 }
    7783 
    7784 bool VmaBlockMetadata_Generic::Validate() const
    7785 {
    7786  VMA_VALIDATE(!m_Suballocations.empty());
    7787 
    7788  // Expected offset of new suballocation as calculated from previous ones.
    7789  VkDeviceSize calculatedOffset = 0;
    7790  // Expected number of free suballocations as calculated from traversing their list.
    7791  uint32_t calculatedFreeCount = 0;
    7792  // Expected sum size of free suballocations as calculated from traversing their list.
    7793  VkDeviceSize calculatedSumFreeSize = 0;
    7794  // Expected number of free suballocations that should be registered in
    7795  // m_FreeSuballocationsBySize calculated from traversing their list.
    7796  size_t freeSuballocationsToRegister = 0;
    7797  // True if previous visited suballocation was free.
    7798  bool prevFree = false;
    7799 
    7800  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7801  suballocItem != m_Suballocations.cend();
    7802  ++suballocItem)
    7803  {
    7804  const VmaSuballocation& subAlloc = *suballocItem;
    7805 
    7806  // Actual offset of this suballocation doesn't match the expected one.
    7807  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
    7808 
    7809  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7810  // Two adjacent free suballocations are invalid. They should be merged.
    7811  VMA_VALIDATE(!prevFree || !currFree);
    7812 
    7813  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
    7814 
    7815  if(currFree)
    7816  {
    7817  calculatedSumFreeSize += subAlloc.size;
    7818  ++calculatedFreeCount;
    7819  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7820  {
    7821  ++freeSuballocationsToRegister;
    7822  }
    7823 
    7824  // Margin required between allocations - every free space must be at least that large.
    7825  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
    7826  }
    7827  else
    7828  {
    7829  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
    7830  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
    7831 
    7832  // Margin required between allocations - previous allocation must be free.
    7833  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
    7834  }
    7835 
    7836  calculatedOffset += subAlloc.size;
    7837  prevFree = currFree;
    7838  }
    7839 
    7840  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    7841  // match the expected one.
    7842  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
    7843 
    7844  VkDeviceSize lastSize = 0;
    7845  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    7846  {
    7847  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    7848 
    7849  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    7850  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7851  // They must be sorted by size ascending.
    7852  VMA_VALIDATE(suballocItem->size >= lastSize);
    7853 
    7854  lastSize = suballocItem->size;
    7855  }
    7856 
    7857  // Check if totals match calculated values.
    7858  VMA_VALIDATE(ValidateFreeSuballocationList());
    7859  VMA_VALIDATE(calculatedOffset == GetSize());
    7860  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    7861  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
    7862 
    7863  return true;
    7864 }
    7865 
    7866 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    7867 {
    7868  if(!m_FreeSuballocationsBySize.empty())
    7869  {
    7870  return m_FreeSuballocationsBySize.back()->size;
    7871  }
    7872  else
    7873  {
    7874  return 0;
    7875  }
    7876 }
    7877 
    7878 bool VmaBlockMetadata_Generic::IsEmpty() const
    7879 {
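          // The block is empty when its only remaining suballocation is the single free range
          // covering the whole block.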
    7880  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    7881 }
    7882 
    7883 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7884 {
    7885  outInfo.blockCount = 1;
    7886 
    7887  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7888  outInfo.allocationCount = rangeCount - m_FreeCount;
    7889  outInfo.unusedRangeCount = m_FreeCount;
    7890 
    7891  outInfo.unusedBytes = m_SumFreeSize;
    7892  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    7893 
    7894  outInfo.allocationSizeMin = UINT64_MAX;
    7895  outInfo.allocationSizeMax = 0;
    7896  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7897  outInfo.unusedRangeSizeMax = 0;
    7898 
    7899  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7900  suballocItem != m_Suballocations.cend();
    7901  ++suballocItem)
    7902  {
    7903  const VmaSuballocation& suballoc = *suballocItem;
    7904  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    7905  {
    7906  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7907  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    7908  }
    7909  else
    7910  {
    7911  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    7912  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    7913  }
    7914  }
    7915 }
    7916 
    7917 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    7918 {
    7919  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7920 
    7921  inoutStats.size += GetSize();
    7922  inoutStats.unusedSize += m_SumFreeSize;
    7923  inoutStats.allocationCount += rangeCount - m_FreeCount;
    7924  inoutStats.unusedRangeCount += m_FreeCount;
    7925  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    7926 }
    7927 
    7928 #if VMA_STATS_STRING_ENABLED
    7929 
    7930 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    7931 {
    7932  PrintDetailedMap_Begin(json,
    7933  m_SumFreeSize, // unusedBytes
    7934  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    7935  m_FreeCount); // unusedRangeCount
    7936 
    7937  size_t i = 0;
    7938  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7939  suballocItem != m_Suballocations.cend();
    7940  ++suballocItem, ++i)
    7941  {
    7942  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7943  {
    7944  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    7945  }
    7946  else
    7947  {
    7948  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    7949  }
    7950  }
    7951 
    7952  PrintDetailedMap_End(json);
    7953 }
    7954 
    7955 #endif // #if VMA_STATS_STRING_ENABLED
    7956 
    7957 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    7958  uint32_t currentFrameIndex,
    7959  uint32_t frameInUseCount,
    7960  VkDeviceSize bufferImageGranularity,
    7961  VkDeviceSize allocSize,
    7962  VkDeviceSize allocAlignment,
    7963  bool upperAddress,
    7964  VmaSuballocationType allocType,
    7965  bool canMakeOtherLost,
    7966  uint32_t strategy,
    7967  VmaAllocationRequest* pAllocationRequest)
    7968 {
    7969  VMA_ASSERT(allocSize > 0);
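          // Allocation at upper address (VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) is supported only
          // by the linear algorithm, so it should never reach this generic metadata.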
    7970  VMA_ASSERT(!upperAddress);
    7971  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    7972  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    7973  VMA_HEAVY_ASSERT(Validate());
    7974 
    7975  pAllocationRequest->type = VmaAllocationRequestType::Normal;
    7976 
    7977  // There is not enough total free space in this block to fulfill the request: Early return.
    7978  if(canMakeOtherLost == false &&
    7979  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    7980  {
    7981  return false;
    7982  }
    7983 
    7984  // New algorithm, efficiently searching freeSuballocationsBySize.
    7985  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    7986  if(freeSuballocCount > 0)
    7987  {
    7988  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    7989  {
    7990  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    7991  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7992  m_FreeSuballocationsBySize.data(),
    7993  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7994  allocSize + 2 * VMA_DEBUG_MARGIN,
    7995  VmaSuballocationItemSizeLess());
    7996  size_t index = it - m_FreeSuballocationsBySize.data();
    7997  for(; index < freeSuballocCount; ++index)
    7998  {
    7999  if(CheckAllocation(
    8000  currentFrameIndex,
    8001  frameInUseCount,
    8002  bufferImageGranularity,
    8003  allocSize,
    8004  allocAlignment,
    8005  allocType,
    8006  m_FreeSuballocationsBySize[index],
    8007  false, // canMakeOtherLost
    8008  &pAllocationRequest->offset,
    8009  &pAllocationRequest->itemsToMakeLostCount,
    8010  &pAllocationRequest->sumFreeSize,
    8011  &pAllocationRequest->sumItemSize))
    8012  {
    8013  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    8014  return true;
    8015  }
    8016  }
    8017  }
    8018  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
    8019  {
    8020  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8021  it != m_Suballocations.end();
    8022  ++it)
    8023  {
    8024  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
    8025  currentFrameIndex,
    8026  frameInUseCount,
    8027  bufferImageGranularity,
    8028  allocSize,
    8029  allocAlignment,
    8030  allocType,
    8031  it,
    8032  false, // canMakeOtherLost
    8033  &pAllocationRequest->offset,
    8034  &pAllocationRequest->itemsToMakeLostCount,
    8035  &pAllocationRequest->sumFreeSize,
    8036  &pAllocationRequest->sumItemSize))
    8037  {
    8038  pAllocationRequest->item = it;
    8039  return true;
    8040  }
    8041  }
    8042  }
    8043  else // WORST_FIT, FIRST_FIT
    8044  {
    8045  // Search starting from the biggest suballocations.
    8046  for(size_t index = freeSuballocCount; index--; )
    8047  {
    8048  if(CheckAllocation(
    8049  currentFrameIndex,
    8050  frameInUseCount,
    8051  bufferImageGranularity,
    8052  allocSize,
    8053  allocAlignment,
    8054  allocType,
    8055  m_FreeSuballocationsBySize[index],
    8056  false, // canMakeOtherLost
    8057  &pAllocationRequest->offset,
    8058  &pAllocationRequest->itemsToMakeLostCount,
    8059  &pAllocationRequest->sumFreeSize,
    8060  &pAllocationRequest->sumItemSize))
    8061  {
    8062  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    8063  return true;
    8064  }
    8065  }
    8066  }
    8067  }
    8068 
    8069  if(canMakeOtherLost)
    8070  {
    8071  // Brute-force algorithm. TODO: Come up with something better.
    8072 
    8073  bool found = false;
    8074  VmaAllocationRequest tmpAllocRequest = {};
    8075  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
    8076  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    8077  suballocIt != m_Suballocations.end();
    8078  ++suballocIt)
    8079  {
    8080  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    8081  suballocIt->hAllocation->CanBecomeLost())
    8082  {
    8083  if(CheckAllocation(
    8084  currentFrameIndex,
    8085  frameInUseCount,
    8086  bufferImageGranularity,
    8087  allocSize,
    8088  allocAlignment,
    8089  allocType,
    8090  suballocIt,
    8091  canMakeOtherLost,
    8092  &tmpAllocRequest.offset,
    8093  &tmpAllocRequest.itemsToMakeLostCount,
    8094  &tmpAllocRequest.sumFreeSize,
    8095  &tmpAllocRequest.sumItemSize))
    8096  {
    8097  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    8098  {
    8099  *pAllocationRequest = tmpAllocRequest;
    8100  pAllocationRequest->item = suballocIt;
    8101  break;
    8102  }
    8103  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    8104  {
    8105  *pAllocationRequest = tmpAllocRequest;
    8106  pAllocationRequest->item = suballocIt;
    8107  found = true;
    8108  }
    8109  }
    8110  }
    8111  }
    8112 
    8113  return found;
    8114  }
    8115 
    8116  return false;
    8117 }
    8118 
    8119 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    8120  uint32_t currentFrameIndex,
    8121  uint32_t frameInUseCount,
    8122  VmaAllocationRequest* pAllocationRequest)
    8123 {
    8124  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
    8125 
    8126  while(pAllocationRequest->itemsToMakeLostCount > 0)
    8127  {
    8128  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    8129  {
    8130  ++pAllocationRequest->item;
    8131  }
    8132  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    8133  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    8134  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    8135  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8136  {
    8137  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    8138  --pAllocationRequest->itemsToMakeLostCount;
    8139  }
    8140  else
    8141  {
    8142  return false;
    8143  }
    8144  }
    8145 
    8146  VMA_HEAVY_ASSERT(Validate());
    8147  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    8148  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8149 
    8150  return true;
    8151 }
    8152 
    8153 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8154 {
    8155  uint32_t lostAllocationCount = 0;
    8156  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8157  it != m_Suballocations.end();
    8158  ++it)
    8159  {
    8160  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    8161  it->hAllocation->CanBecomeLost() &&
    8162  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8163  {
    8164  it = FreeSuballocation(it);
    8165  ++lostAllocationCount;
    8166  }
    8167  }
    8168  return lostAllocationCount;
    8169 }
    8170 
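          // When VMA_DEBUG_MARGIN > 0 and VMA_DEBUG_DETECT_CORRUPTION is enabled, the margin around
          // every used range is filled with a known magic value; validating it right before and
          // right after each allocation detects out-of-bounds writes.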
    8171 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    8172 {
    8173  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8174  it != m_Suballocations.end();
    8175  ++it)
    8176  {
    8177  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    8178  {
    8179  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    8180  {
    8181  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8182  return VK_ERROR_VALIDATION_FAILED_EXT;
    8183  }
    8184  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    8185  {
    8186  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    8187  return VK_ERROR_VALIDATION_FAILED_EXT;
    8188  }
    8189  }
    8190  }
    8191 
    8192  return VK_SUCCESS;
    8193 }
    8194 
    8195 void VmaBlockMetadata_Generic::Alloc(
    8196  const VmaAllocationRequest& request,
    8197  VmaSuballocationType type,
    8198  VkDeviceSize allocSize,
    8199  VmaAllocation hAllocation)
    8200 {
    8201  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    8202  VMA_ASSERT(request.item != m_Suballocations.end());
    8203  VmaSuballocation& suballoc = *request.item;
    8204  // Given suballocation is a free block.
    8205  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8206  // Given offset is inside this suballocation.
    8207  VMA_ASSERT(request.offset >= suballoc.offset);
    8208  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    8209  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    8210  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    8211 
    8212  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    8213  // it to become used.
    8214  UnregisterFreeSuballocation(request.item);
    8215 
    8216  suballoc.offset = request.offset;
    8217  suballoc.size = allocSize;
    8218  suballoc.type = type;
    8219  suballoc.hAllocation = hAllocation;
    8220 
    8221  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    8222  if(paddingEnd)
    8223  {
    8224  VmaSuballocation paddingSuballoc = {};
    8225  paddingSuballoc.offset = request.offset + allocSize;
    8226  paddingSuballoc.size = paddingEnd;
    8227  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8228  VmaSuballocationList::iterator next = request.item;
    8229  ++next;
    8230  const VmaSuballocationList::iterator paddingEndItem =
    8231  m_Suballocations.insert(next, paddingSuballoc);
    8232  RegisterFreeSuballocation(paddingEndItem);
    8233  }
    8234 
    8235  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    8236  if(paddingBegin)
    8237  {
    8238  VmaSuballocation paddingSuballoc = {};
    8239  paddingSuballoc.offset = request.offset - paddingBegin;
    8240  paddingSuballoc.size = paddingBegin;
    8241  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8242  const VmaSuballocationList::iterator paddingBeginItem =
    8243  m_Suballocations.insert(request.item, paddingSuballoc);
    8244  RegisterFreeSuballocation(paddingBeginItem);
    8245  }
    8246 
    8247  // Update totals.
    8248  m_FreeCount = m_FreeCount - 1;
    8249  if(paddingBegin > 0)
    8250  {
    8251  ++m_FreeCount;
    8252  }
    8253  if(paddingEnd > 0)
    8254  {
    8255  ++m_FreeCount;
    8256  }
    8257  m_SumFreeSize -= allocSize;
    8258 }
    8259 
    8260 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    8261 {
    8262  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8263  suballocItem != m_Suballocations.end();
    8264  ++suballocItem)
    8265  {
    8266  VmaSuballocation& suballoc = *suballocItem;
    8267  if(suballoc.hAllocation == allocation)
    8268  {
    8269  FreeSuballocation(suballocItem);
    8270  VMA_HEAVY_ASSERT(Validate());
    8271  return;
    8272  }
    8273  }
    8274  VMA_ASSERT(0 && "Not found!");
    8275 }
    8276 
    8277 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    8278 {
    8279  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8280  suballocItem != m_Suballocations.end();
    8281  ++suballocItem)
    8282  {
    8283  VmaSuballocation& suballoc = *suballocItem;
    8284  if(suballoc.offset == offset)
    8285  {
    8286  FreeSuballocation(suballocItem);
    8287  return;
    8288  }
    8289  }
    8290  VMA_ASSERT(0 && "Not found!");
    8291 }
    8292 
    8293 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
    8294 {
    8295  typedef VmaSuballocationList::iterator iter_type;
    8296  for(iter_type suballocItem = m_Suballocations.begin();
    8297  suballocItem != m_Suballocations.end();
    8298  ++suballocItem)
    8299  {
    8300  VmaSuballocation& suballoc = *suballocItem;
    8301  if(suballoc.hAllocation == alloc)
    8302  {
    8303  iter_type nextItem = suballocItem;
    8304  ++nextItem;
    8305 
    8306  // Should have been ensured on higher level.
    8307  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
    8308 
    8309  // Shrinking.
    8310  if(newSize < alloc->GetSize())
    8311  {
    8312  const VkDeviceSize sizeDiff = suballoc.size - newSize;
    8313 
    8314  // There is next item.
    8315  if(nextItem != m_Suballocations.end())
    8316  {
    8317  // Next item is free.
    8318  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8319  {
    8320  // Grow this next item backward.
    8321  UnregisterFreeSuballocation(nextItem);
    8322  nextItem->offset -= sizeDiff;
    8323  nextItem->size += sizeDiff;
    8324  RegisterFreeSuballocation(nextItem);
    8325  }
    8326  // Next item is not free.
    8327  else
    8328  {
    8329  // Create free item after current one.
    8330  VmaSuballocation newFreeSuballoc;
    8331  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
    8332  newFreeSuballoc.offset = suballoc.offset + newSize;
    8333  newFreeSuballoc.size = sizeDiff;
    8334  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8335  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
    8336  RegisterFreeSuballocation(newFreeSuballocIt);
    8337 
    8338  ++m_FreeCount;
    8339  }
    8340  }
    8341  // This is the last item.
    8342  else
    8343  {
    8344  // Create free item at the end.
    8345  VmaSuballocation newFreeSuballoc;
    8346  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
    8347  newFreeSuballoc.offset = suballoc.offset + newSize;
    8348  newFreeSuballoc.size = sizeDiff;
    8349  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8350  m_Suballocations.push_back(newFreeSuballoc);
    8351 
    8352  iter_type newFreeSuballocIt = m_Suballocations.end();
    8353  RegisterFreeSuballocation(--newFreeSuballocIt);
    8354 
    8355  ++m_FreeCount;
    8356  }
    8357 
    8358  suballoc.size = newSize;
    8359  m_SumFreeSize += sizeDiff;
    8360  }
    8361  // Growing.
    8362  else
    8363  {
    8364  const VkDeviceSize sizeDiff = newSize - suballoc.size;
    8365 
    8366  // There is next item.
    8367  if(nextItem != m_Suballocations.end())
    8368  {
    8369  // Next item is free.
    8370  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8371  {
    8372  // There is not enough free space, including margin.
    8373  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
    8374  {
    8375  return false;
    8376  }
    8377 
    8378  // There is more free space than required.
    8379  if(nextItem->size > sizeDiff)
    8380  {
    8381  // Move and shrink this next item.
    8382  UnregisterFreeSuballocation(nextItem);
    8383  nextItem->offset += sizeDiff;
    8384  nextItem->size -= sizeDiff;
    8385  RegisterFreeSuballocation(nextItem);
    8386  }
    8387  // There is exactly the amount of free space required.
    8388  else
    8389  {
    8390  // Remove this next free item.
    8391  UnregisterFreeSuballocation(nextItem);
    8392  m_Suballocations.erase(nextItem);
    8393  --m_FreeCount;
    8394  }
    8395  }
    8396  // Next item is not free - there is no space to grow.
    8397  else
    8398  {
    8399  return false;
    8400  }
    8401  }
    8402  // This is the last item - there is no space to grow.
    8403  else
    8404  {
    8405  return false;
    8406  }
    8407 
    8408  suballoc.size = newSize;
    8409  m_SumFreeSize -= sizeDiff;
    8410  }
    8411 
    8412  // We cannot call Validate() here because the alloc object is updated to the new size outside of this call.
    8413  return true;
    8414  }
    8415  }
    8416  VMA_ASSERT(0 && "Not found!");
    8417  return false;
    8418 }
    8419 
    8420 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    8421 {
    8422  VkDeviceSize lastSize = 0;
    8423  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    8424  {
    8425  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    8426 
    8427  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    8428  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    8429  VMA_VALIDATE(it->size >= lastSize);
    8430  lastSize = it->size;
    8431  }
    8432  return true;
    8433 }
    8434 
    8435 bool VmaBlockMetadata_Generic::CheckAllocation(
    8436  uint32_t currentFrameIndex,
    8437  uint32_t frameInUseCount,
    8438  VkDeviceSize bufferImageGranularity,
    8439  VkDeviceSize allocSize,
    8440  VkDeviceSize allocAlignment,
    8441  VmaSuballocationType allocType,
    8442  VmaSuballocationList::const_iterator suballocItem,
    8443  bool canMakeOtherLost,
    8444  VkDeviceSize* pOffset,
    8445  size_t* itemsToMakeLostCount,
    8446  VkDeviceSize* pSumFreeSize,
    8447  VkDeviceSize* pSumItemSize) const
    8448 {
    8449  VMA_ASSERT(allocSize > 0);
    8450  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8451  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    8452  VMA_ASSERT(pOffset != VMA_NULL);
    8453 
    8454  *itemsToMakeLostCount = 0;
    8455  *pSumFreeSize = 0;
    8456  *pSumItemSize = 0;
    8457 
    8458  if(canMakeOtherLost)
    8459  {
    8460  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8461  {
    8462  *pSumFreeSize = suballocItem->size;
    8463  }
    8464  else
    8465  {
    8466  if(suballocItem->hAllocation->CanBecomeLost() &&
    8467  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8468  {
    8469  ++*itemsToMakeLostCount;
    8470  *pSumItemSize = suballocItem->size;
    8471  }
    8472  else
    8473  {
    8474  return false;
    8475  }
    8476  }
    8477 
    8478  // Remaining size is too small for this request: Early return.
    8479  if(GetSize() - suballocItem->offset < allocSize)
    8480  {
    8481  return false;
    8482  }
    8483 
    8484  // Start from offset equal to beginning of this suballocation.
    8485  *pOffset = suballocItem->offset;
    8486 
    8487  // Apply VMA_DEBUG_MARGIN at the beginning.
    8488  if(VMA_DEBUG_MARGIN > 0)
    8489  {
    8490  *pOffset += VMA_DEBUG_MARGIN;
    8491  }
    8492 
    8493  // Apply alignment.
    8494  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    8495 
    8496  // Check previous suballocations for BufferImageGranularity conflicts.
    8497  // Make bigger alignment if necessary.
    8498  if(bufferImageGranularity > 1)
    8499  {
    8500  bool bufferImageGranularityConflict = false;
    8501  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    8502  while(prevSuballocItem != m_Suballocations.cbegin())
    8503  {
    8504  --prevSuballocItem;
    8505  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    8506  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    8507  {
    8508  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8509  {
    8510  bufferImageGranularityConflict = true;
    8511  break;
    8512  }
    8513  }
    8514  else
    8515  // Already on previous page.
    8516  break;
    8517  }
    8518  if(bufferImageGranularityConflict)
    8519  {
    8520  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    8521  }
    8522  }
    8523 
    8524  // Now that we have final *pOffset, check if we are past suballocItem.
    8525  // If yes, return false - this function should be called for another suballocItem as starting point.
    8526  if(*pOffset >= suballocItem->offset + suballocItem->size)
    8527  {
    8528  return false;
    8529  }
    8530 
    8531  // Calculate padding at the beginning based on current offset.
    8532  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    8533 
    8534  // Calculate required margin at the end.
    8535  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    8536 
    8537  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    8538  // Another early return check.
    8539  if(suballocItem->offset + totalSize > GetSize())
    8540  {
    8541  return false;
    8542  }
    8543 
    8544  // Advance lastSuballocItem until desired size is reached.
    8545  // Update itemsToMakeLostCount.
    8546  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    8547  if(totalSize > suballocItem->size)
    8548  {
    8549  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    8550  while(remainingSize > 0)
    8551  {
    8552  ++lastSuballocItem;
    8553  if(lastSuballocItem == m_Suballocations.cend())
    8554  {
    8555  return false;
    8556  }
    8557  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8558  {
    8559  *pSumFreeSize += lastSuballocItem->size;
    8560  }
    8561  else
    8562  {
    8563  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    8564  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    8565  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8566  {
    8567  ++*itemsToMakeLostCount;
    8568  *pSumItemSize += lastSuballocItem->size;
    8569  }
    8570  else
    8571  {
    8572  return false;
    8573  }
    8574  }
    8575  remainingSize = (lastSuballocItem->size < remainingSize) ?
    8576  remainingSize - lastSuballocItem->size : 0;
    8577  }
    8578  }
    8579 
    8580  // Check next suballocations for BufferImageGranularity conflicts.
    8581  // If conflict exists, we must mark more allocations lost or fail.
    8582  if(bufferImageGranularity > 1)
    8583  {
    8584  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    8585  ++nextSuballocItem;
    8586  while(nextSuballocItem != m_Suballocations.cend())
    8587  {
    8588  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    8589  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8590  {
    8591  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8592  {
    8593  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    8594  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    8595  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8596  {
    8597  ++*itemsToMakeLostCount;
    8598  }
    8599  else
    8600  {
    8601  return false;
    8602  }
    8603  }
    8604  }
    8605  else
    8606  {
    8607  // Already on next page.
    8608  break;
    8609  }
    8610  ++nextSuballocItem;
    8611  }
    8612  }
    8613  }
    8614  else
    8615  {
    8616  const VmaSuballocation& suballoc = *suballocItem;
    8617  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8618 
    8619  *pSumFreeSize = suballoc.size;
    8620 
    8621  // Size of this suballocation is too small for this request: Early return.
    8622  if(suballoc.size < allocSize)
    8623  {
    8624  return false;
    8625  }
    8626 
    8627  // Start from offset equal to beginning of this suballocation.
    8628  *pOffset = suballoc.offset;
    8629 
    8630  // Apply VMA_DEBUG_MARGIN at the beginning.
    8631  if(VMA_DEBUG_MARGIN > 0)
    8632  {
    8633  *pOffset += VMA_DEBUG_MARGIN;
    8634  }
    8635 
    8636  // Apply alignment.
    8637  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    8638 
    8639  // Check previous suballocations for BufferImageGranularity conflicts.
    8640  // Make bigger alignment if necessary.
    8641  if(bufferImageGranularity > 1)
    8642  {
    8643  bool bufferImageGranularityConflict = false;
    8644  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    8645  while(prevSuballocItem != m_Suballocations.cbegin())
    8646  {
    8647  --prevSuballocItem;
    8648  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    8649  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    8650  {
    8651  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8652  {
    8653  bufferImageGranularityConflict = true;
    8654  break;
    8655  }
    8656  }
    8657  else
    8658  // Already on previous page.
    8659  break;
    8660  }
    8661  if(bufferImageGranularityConflict)
    8662  {
    8663  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    8664  }
    8665  }
    8666 
    8667  // Calculate padding at the beginning based on current offset.
    8668  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    8669 
    8670  // Calculate required margin at the end.
    8671  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    8672 
    8673  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    8674  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    8675  {
    8676  return false;
    8677  }
    8678 
    8679  // Check next suballocations for BufferImageGranularity conflicts.
    8680  // If conflict exists, allocation cannot be made here.
    8681  if(bufferImageGranularity > 1)
    8682  {
    8683  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    8684  ++nextSuballocItem;
    8685  while(nextSuballocItem != m_Suballocations.cend())
    8686  {
    8687  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    8688  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8689  {
    8690  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8691  {
    8692  return false;
    8693  }
    8694  }
    8695  else
    8696  {
    8697  // Already on next page.
    8698  break;
    8699  }
    8700  ++nextSuballocItem;
    8701  }
    8702  }
    8703  }
    8704 
    8705  // All tests passed: Success. pOffset is already filled.
    8706  return true;
    8707 }
    8708 
    8709 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    8710 {
    8711  VMA_ASSERT(item != m_Suballocations.end());
    8712  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8713 
    8714  VmaSuballocationList::iterator nextItem = item;
    8715  ++nextItem;
    8716  VMA_ASSERT(nextItem != m_Suballocations.end());
    8717  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    8718 
    8719  item->size += nextItem->size;
    8720  --m_FreeCount;
    8721  m_Suballocations.erase(nextItem);
    8722 }
    8723 
    8724 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    8725 {
    8726  // Change this suballocation to be marked as free.
    8727  VmaSuballocation& suballoc = *suballocItem;
    8728  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8729  suballoc.hAllocation = VK_NULL_HANDLE;
    8730 
    8731  // Update totals.
    8732  ++m_FreeCount;
    8733  m_SumFreeSize += suballoc.size;
    8734 
    8735  // Merge with previous and/or next suballocation if it's also free.
    8736  bool mergeWithNext = false;
    8737  bool mergeWithPrev = false;
    8738 
    8739  VmaSuballocationList::iterator nextItem = suballocItem;
    8740  ++nextItem;
    8741  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    8742  {
    8743  mergeWithNext = true;
    8744  }
    8745 
    8746  VmaSuballocationList::iterator prevItem = suballocItem;
    8747  if(suballocItem != m_Suballocations.begin())
    8748  {
    8749  --prevItem;
    8750  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8751  {
    8752  mergeWithPrev = true;
    8753  }
    8754  }
    8755 
    8756  if(mergeWithNext)
    8757  {
    8758  UnregisterFreeSuballocation(nextItem);
    8759  MergeFreeWithNext(suballocItem);
    8760  }
    8761 
    8762  if(mergeWithPrev)
    8763  {
    8764  UnregisterFreeSuballocation(prevItem);
    8765  MergeFreeWithNext(prevItem);
    8766  RegisterFreeSuballocation(prevItem);
    8767  return prevItem;
    8768  }
    8769  else
    8770  {
    8771  RegisterFreeSuballocation(suballocItem);
    8772  return suballocItem;
    8773  }
    8774 }
    8775 
    8776 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    8777 {
    8778  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8779  VMA_ASSERT(item->size > 0);
    8780 
    8781  // You may want to enable this validation at the beginning or at the end of
    8782  // this function, depending on what you want to check.
    8783  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8784 
    8785  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8786  {
    8787  if(m_FreeSuballocationsBySize.empty())
    8788  {
    8789  m_FreeSuballocationsBySize.push_back(item);
    8790  }
    8791  else
    8792  {
    8793  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    8794  }
    8795  }
    8796 
    8797  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8798 }
    8799 
    8800 
    8801 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    8802 {
    8803  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8804  VMA_ASSERT(item->size > 0);
    8805 
    8806  // You may want to enable this validation at the beginning or at the end of
    8807  // this function, depending on what you want to check.
    8808  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8809 
    8810  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8811  {
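          // Free suballocations may share the same size, so the binary search only locates the first
          // item of that size; the loop below scans forward through the run of equal sizes to find
          // the exact iterator being removed.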
    8812  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    8813  m_FreeSuballocationsBySize.data(),
    8814  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    8815  item,
    8816  VmaSuballocationItemSizeLess());
    8817  for(size_t index = it - m_FreeSuballocationsBySize.data();
    8818  index < m_FreeSuballocationsBySize.size();
    8819  ++index)
    8820  {
    8821  if(m_FreeSuballocationsBySize[index] == item)
    8822  {
    8823  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    8824  return;
    8825  }
    8826  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    8827  }
    8828  VMA_ASSERT(0 && "Not found.");
    8829  }
    8830 
    8831  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8832 }
    8833 
    8834 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
    8835  VkDeviceSize bufferImageGranularity,
    8836  VmaSuballocationType& inOutPrevSuballocType) const
    8837 {
    8838  if(bufferImageGranularity == 1 || IsEmpty())
    8839  {
    8840  return false;
    8841  }
    8842 
    8843  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
    8844  bool typeConflictFound = false;
    8845  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
    8846  it != m_Suballocations.cend();
    8847  ++it)
    8848  {
    8849  const VmaSuballocationType suballocType = it->type;
    8850  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
    8851  {
    8852  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
    8853  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
    8854  {
    8855  typeConflictFound = true;
    8856  }
    8857  inOutPrevSuballocType = suballocType;
    8858  }
    8859  }
    8860 
    8861  return typeConflictFound || minAlignment >= bufferImageGranularity;
    8862 }
    8863 
    8864 ////////////////////////////////////////////////////////////////////////////////
    8865 // class VmaBlockMetadata_Linear
    8866 
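          // Note: m_Suballocations0/1 are swapped through m_1stVectorIndex. Depending on
          // m_2ndVectorMode, the 2nd vector either holds allocations that wrapped around to the
          // beginning of the block (ring buffer), or allocations placed from the end of the block
          // downward (the upper side of a double stack).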
    8867 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    8868  VmaBlockMetadata(hAllocator),
    8869  m_SumFreeSize(0),
    8870  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    8871  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    8872  m_1stVectorIndex(0),
    8873  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    8874  m_1stNullItemsBeginCount(0),
    8875  m_1stNullItemsMiddleCount(0),
    8876  m_2ndNullItemsCount(0)
    8877 {
    8878 }
    8879 
    8880 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
    8881 {
    8882 }
    8883 
    8884 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
    8885 {
    8886  VmaBlockMetadata::Init(size);
    8887  m_SumFreeSize = size;
    8888 }
    8889 
    8890 bool VmaBlockMetadata_Linear::Validate() const
    8891 {
    8892  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8893  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8894 
    8895  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    8896  VMA_VALIDATE(!suballocations1st.empty() ||
    8897  suballocations2nd.empty() ||
    8898  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
    8899 
    8900  if(!suballocations1st.empty())
    8901  {
    8902  // Null item at the beginning should be accounted for in m_1stNullItemsBeginCount.
    8903  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
    8904  // Null item at the end should have been removed by pop_back().
    8905  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    8906  }
    8907  if(!suballocations2nd.empty())
    8908  {
    8909  // Null item at the end should have been removed by pop_back().
    8910  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    8911  }
    8912 
    8913  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    8914  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
    8915 
    8916  VkDeviceSize sumUsedSize = 0;
    8917  const size_t suballoc1stCount = suballocations1st.size();
    8918  VkDeviceSize offset = VMA_DEBUG_MARGIN;
    8919 
    8920  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8921  {
    8922  const size_t suballoc2ndCount = suballocations2nd.size();
    8923  size_t nullItem2ndCount = 0;
    8924  for(size_t i = 0; i < suballoc2ndCount; ++i)
    8925  {
    8926  const VmaSuballocation& suballoc = suballocations2nd[i];
    8927  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8928 
    8929  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8930  VMA_VALIDATE(suballoc.offset >= offset);
    8931 
    8932  if(!currFree)
    8933  {
    8934  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8935  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8936  sumUsedSize += suballoc.size;
    8937  }
    8938  else
    8939  {
    8940  ++nullItem2ndCount;
    8941  }
    8942 
    8943  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8944  }
    8945 
    8946  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    8947  }
    8948 
    8949  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    8950  {
    8951  const VmaSuballocation& suballoc = suballocations1st[i];
    8952  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
    8953  suballoc.hAllocation == VK_NULL_HANDLE);
    8954  }
    8955 
    8956  size_t nullItem1stCount = m_1stNullItemsBeginCount;
    8957 
    8958  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    8959  {
    8960  const VmaSuballocation& suballoc = suballocations1st[i];
    8961  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8962 
    8963  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8964  VMA_VALIDATE(suballoc.offset >= offset);
    8965  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
    8966 
    8967  if(!currFree)
    8968  {
    8969  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8970  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8971  sumUsedSize += suballoc.size;
    8972  }
    8973  else
    8974  {
    8975  ++nullItem1stCount;
    8976  }
    8977 
    8978  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8979  }
    8980  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
    8981 
    8982  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8983  {
    8984  const size_t suballoc2ndCount = suballocations2nd.size();
    8985  size_t nullItem2ndCount = 0;
    8986  for(size_t i = suballoc2ndCount; i--; )
    8987  {
    8988  const VmaSuballocation& suballoc = suballocations2nd[i];
    8989  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8990 
    8991  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8992  VMA_VALIDATE(suballoc.offset >= offset);
    8993 
    8994  if(!currFree)
    8995  {
    8996  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8997  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8998  sumUsedSize += suballoc.size;
    8999  }
    9000  else
    9001  {
    9002  ++nullItem2ndCount;
    9003  }
    9004 
    9005  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    9006  }
    9007 
    9008  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    9009  }
    9010 
    9011  VMA_VALIDATE(offset <= GetSize());
    9012  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
    9013 
    9014  return true;
    9015 }
    9016 
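          // Live allocation count = all items stored in both vectors minus the
          // null (free) items they contain.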
    9017 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    9018 {
    9019  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    9020  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    9021 }
    9022 
    9023 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    9024 {
    9025  const VkDeviceSize size = GetSize();
    9026 
    9027  /*
    9028  We don't consider gaps inside allocation vectors with freed allocations because
    9029  they are not suitable for reuse in a linear allocator. We consider only space that
    9030  is available for new allocations.
    9031  */
    9032  if(IsEmpty())
    9033  {
    9034  return size;
    9035  }
    9036 
    9037  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9038 
    9039  switch(m_2ndVectorMode)
    9040  {
    9041  case SECOND_VECTOR_EMPTY:
    9042  /*
    9043  Available space is after end of 1st, as well as before beginning of 1st (which
    9044  would make it a ring buffer).
    9045  */
    9046  {
    9047  const size_t suballocations1stCount = suballocations1st.size();
    9048  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    9049  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    9050  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    9051  return VMA_MAX(
    9052  firstSuballoc.offset,
    9053  size - (lastSuballoc.offset + lastSuballoc.size));
    9054  }
    9055  break;
    9056 
    9057  case SECOND_VECTOR_RING_BUFFER:
    9058  /*
    9059  Available space is only between end of 2nd and beginning of 1st.
    9060  */
    9061  {
    9062  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9063  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    9064  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    9065  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    9066  }
    9067  break;
    9068 
    9069  case SECOND_VECTOR_DOUBLE_STACK:
    9070  /*
    9071  Available space is only between end of 1st and top of 2nd.
    9072  */
    9073  {
    9074  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9075  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    9076  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    9077  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    9078  }
    9079  break;
    9080 
    9081  default:
    9082  VMA_ASSERT(0);
    9083  return 0;
    9084  }
    9085 }
    9086 
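          // Statistics are gathered by walking the block in order of increasing
          // offset: the low part of 2nd (ring-buffer mode), then 1st, then the
          // upper stack held in 2nd (double-stack mode), counting every used
          // allocation and every unused range in between.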
    9087 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9088 {
    9089  const VkDeviceSize size = GetSize();
    9090  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9091  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9092  const size_t suballoc1stCount = suballocations1st.size();
    9093  const size_t suballoc2ndCount = suballocations2nd.size();
    9094 
    9095  outInfo.blockCount = 1;
    9096  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    9097  outInfo.unusedRangeCount = 0;
    9098  outInfo.usedBytes = 0;
    9099  outInfo.allocationSizeMin = UINT64_MAX;
    9100  outInfo.allocationSizeMax = 0;
    9101  outInfo.unusedRangeSizeMin = UINT64_MAX;
    9102  outInfo.unusedRangeSizeMax = 0;
    9103 
    9104  VkDeviceSize lastOffset = 0;
    9105 
    9106  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9107  {
    9108  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9109  size_t nextAlloc2ndIndex = 0;
    9110  while(lastOffset < freeSpace2ndTo1stEnd)
    9111  {
    9112  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9113  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9114  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9115  {
    9116  ++nextAlloc2ndIndex;
    9117  }
    9118 
    9119  // Found non-null allocation.
    9120  if(nextAlloc2ndIndex < suballoc2ndCount)
    9121  {
    9122  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9123 
    9124  // 1. Process free space before this allocation.
    9125  if(lastOffset < suballoc.offset)
    9126  {
    9127  // There is free space from lastOffset to suballoc.offset.
    9128  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9129  ++outInfo.unusedRangeCount;
    9130  outInfo.unusedBytes += unusedRangeSize;
    9131  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9132  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9133  }
    9134 
    9135  // 2. Process this allocation.
    9136  // There is allocation with suballoc.offset, suballoc.size.
    9137  outInfo.usedBytes += suballoc.size;
    9138  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9139  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9140 
    9141  // 3. Prepare for next iteration.
    9142  lastOffset = suballoc.offset + suballoc.size;
    9143  ++nextAlloc2ndIndex;
    9144  }
    9145  // We are at the end.
    9146  else
    9147  {
    9148  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9149  if(lastOffset < freeSpace2ndTo1stEnd)
    9150  {
    9151  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9152  ++outInfo.unusedRangeCount;
    9153  outInfo.unusedBytes += unusedRangeSize;
    9154  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9155  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9156  }
    9157 
    9158  // End of loop.
    9159  lastOffset = freeSpace2ndTo1stEnd;
    9160  }
    9161  }
    9162  }
    9163 
    9164  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9165  const VkDeviceSize freeSpace1stTo2ndEnd =
    9166  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9167  while(lastOffset < freeSpace1stTo2ndEnd)
    9168  {
    9169  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9170  while(nextAlloc1stIndex < suballoc1stCount &&
    9171  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9172  {
    9173  ++nextAlloc1stIndex;
    9174  }
    9175 
    9176  // Found non-null allocation.
    9177  if(nextAlloc1stIndex < suballoc1stCount)
    9178  {
    9179  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9180 
    9181  // 1. Process free space before this allocation.
    9182  if(lastOffset < suballoc.offset)
    9183  {
    9184  // There is free space from lastOffset to suballoc.offset.
    9185  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9186  ++outInfo.unusedRangeCount;
    9187  outInfo.unusedBytes += unusedRangeSize;
    9188  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9189  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9190  }
    9191 
    9192  // 2. Process this allocation.
    9193  // There is allocation with suballoc.offset, suballoc.size.
    9194  outInfo.usedBytes += suballoc.size;
    9195  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9196  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9197 
    9198  // 3. Prepare for next iteration.
    9199  lastOffset = suballoc.offset + suballoc.size;
    9200  ++nextAlloc1stIndex;
    9201  }
    9202  // We are at the end.
    9203  else
    9204  {
    9205  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9206  if(lastOffset < freeSpace1stTo2ndEnd)
    9207  {
    9208  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9209  ++outInfo.unusedRangeCount;
    9210  outInfo.unusedBytes += unusedRangeSize;
    9211  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9212  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9213  }
    9214 
    9215  // End of loop.
    9216  lastOffset = freeSpace1stTo2ndEnd;
    9217  }
    9218  }
    9219 
    9220  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9221  {
    9222  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9223  while(lastOffset < size)
    9224  {
    9225  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9226  while(nextAlloc2ndIndex != SIZE_MAX &&
    9227  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9228  {
    9229  --nextAlloc2ndIndex;
    9230  }
    9231 
    9232  // Found non-null allocation.
    9233  if(nextAlloc2ndIndex != SIZE_MAX)
    9234  {
    9235  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9236 
    9237  // 1. Process free space before this allocation.
    9238  if(lastOffset < suballoc.offset)
    9239  {
    9240  // There is free space from lastOffset to suballoc.offset.
    9241  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9242  ++outInfo.unusedRangeCount;
    9243  outInfo.unusedBytes += unusedRangeSize;
    9244  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9245  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9246  }
    9247 
    9248  // 2. Process this allocation.
    9249  // There is allocation with suballoc.offset, suballoc.size.
    9250  outInfo.usedBytes += suballoc.size;
    9251  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9252  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9253 
    9254  // 3. Prepare for next iteration.
    9255  lastOffset = suballoc.offset + suballoc.size;
    9256  --nextAlloc2ndIndex;
    9257  }
    9258  // We are at the end.
    9259  else
    9260  {
    9261  // There is free space from lastOffset to size.
    9262  if(lastOffset < size)
    9263  {
    9264  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9265  ++outInfo.unusedRangeCount;
    9266  outInfo.unusedBytes += unusedRangeSize;
    9267  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9268  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9269  }
    9270 
    9271  // End of loop.
    9272  lastOffset = size;
    9273  }
    9274  }
    9275  }
    9276 
    9277  outInfo.unusedBytes = size - outInfo.usedBytes;
    9278 }
    9279 
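          // Same traversal as CalcAllocationStatInfo(), but accumulates into an
          // existing VmaPoolStats structure instead of filling a fresh VmaStatInfo.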
    9280 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    9281 {
    9282  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9283  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9284  const VkDeviceSize size = GetSize();
    9285  const size_t suballoc1stCount = suballocations1st.size();
    9286  const size_t suballoc2ndCount = suballocations2nd.size();
    9287 
    9288  inoutStats.size += size;
    9289 
    9290  VkDeviceSize lastOffset = 0;
    9291 
    9292  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9293  {
    9294  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9295  size_t nextAlloc2ndIndex = 0;
    9296  while(lastOffset < freeSpace2ndTo1stEnd)
    9297  {
    9298  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9299  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9300  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9301  {
    9302  ++nextAlloc2ndIndex;
    9303  }
    9304 
    9305  // Found non-null allocation.
    9306  if(nextAlloc2ndIndex < suballoc2ndCount)
    9307  {
    9308  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9309 
    9310  // 1. Process free space before this allocation.
    9311  if(lastOffset < suballoc.offset)
    9312  {
    9313  // There is free space from lastOffset to suballoc.offset.
    9314  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9315  inoutStats.unusedSize += unusedRangeSize;
    9316  ++inoutStats.unusedRangeCount;
    9317  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9318  }
    9319 
    9320  // 2. Process this allocation.
    9321  // There is allocation with suballoc.offset, suballoc.size.
    9322  ++inoutStats.allocationCount;
    9323 
    9324  // 3. Prepare for next iteration.
    9325  lastOffset = suballoc.offset + suballoc.size;
    9326  ++nextAlloc2ndIndex;
    9327  }
    9328  // We are at the end.
    9329  else
    9330  {
    9331  if(lastOffset < freeSpace2ndTo1stEnd)
    9332  {
    9333  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9334  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9335  inoutStats.unusedSize += unusedRangeSize;
    9336  ++inoutStats.unusedRangeCount;
    9337  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9338  }
    9339 
    9340  // End of loop.
    9341  lastOffset = freeSpace2ndTo1stEnd;
    9342  }
    9343  }
    9344  }
    9345 
    9346  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9347  const VkDeviceSize freeSpace1stTo2ndEnd =
    9348  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9349  while(lastOffset < freeSpace1stTo2ndEnd)
    9350  {
    9351  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9352  while(nextAlloc1stIndex < suballoc1stCount &&
    9353  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9354  {
    9355  ++nextAlloc1stIndex;
    9356  }
    9357 
    9358  // Found non-null allocation.
    9359  if(nextAlloc1stIndex < suballoc1stCount)
    9360  {
    9361  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9362 
    9363  // 1. Process free space before this allocation.
    9364  if(lastOffset < suballoc.offset)
    9365  {
    9366  // There is free space from lastOffset to suballoc.offset.
    9367  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9368  inoutStats.unusedSize += unusedRangeSize;
    9369  ++inoutStats.unusedRangeCount;
    9370  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9371  }
    9372 
    9373  // 2. Process this allocation.
    9374  // There is allocation with suballoc.offset, suballoc.size.
    9375  ++inoutStats.allocationCount;
    9376 
    9377  // 3. Prepare for next iteration.
    9378  lastOffset = suballoc.offset + suballoc.size;
    9379  ++nextAlloc1stIndex;
    9380  }
    9381  // We are at the end.
    9382  else
    9383  {
    9384  if(lastOffset < freeSpace1stTo2ndEnd)
    9385  {
    9386  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9387  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9388  inoutStats.unusedSize += unusedRangeSize;
    9389  ++inoutStats.unusedRangeCount;
    9390  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9391  }
    9392 
    9393  // End of loop.
    9394  lastOffset = freeSpace1stTo2ndEnd;
    9395  }
    9396  }
    9397 
    9398  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9399  {
    9400  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9401  while(lastOffset < size)
    9402  {
    9403  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9404  while(nextAlloc2ndIndex != SIZE_MAX &&
    9405  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9406  {
    9407  --nextAlloc2ndIndex;
    9408  }
    9409 
    9410  // Found non-null allocation.
    9411  if(nextAlloc2ndIndex != SIZE_MAX)
    9412  {
    9413  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9414 
    9415  // 1. Process free space before this allocation.
    9416  if(lastOffset < suballoc.offset)
    9417  {
    9418  // There is free space from lastOffset to suballoc.offset.
    9419  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9420  inoutStats.unusedSize += unusedRangeSize;
    9421  ++inoutStats.unusedRangeCount;
    9422  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9423  }
    9424 
    9425  // 2. Process this allocation.
    9426  // There is allocation with suballoc.offset, suballoc.size.
    9427  ++inoutStats.allocationCount;
    9428 
    9429  // 3. Prepare for next iteration.
    9430  lastOffset = suballoc.offset + suballoc.size;
    9431  --nextAlloc2ndIndex;
    9432  }
    9433  // We are at the end.
    9434  else
    9435  {
    9436  if(lastOffset < size)
    9437  {
    9438  // There is free space from lastOffset to size.
    9439  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9440  inoutStats.unusedSize += unusedRangeSize;
    9441  ++inoutStats.unusedRangeCount;
    9442  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9443  }
    9444 
    9445  // End of loop.
    9446  lastOffset = size;
    9447  }
    9448  }
    9449  }
    9450 }
    9451 
    9452 #if VMA_STATS_STRING_ENABLED
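          // Two passes over the same traversal: the first only counts allocations
          // and unused ranges so that PrintDetailedMap_Begin() receives correct
          // totals; the second emits the actual JSON entries in offset order.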
    9453 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    9454 {
    9455  const VkDeviceSize size = GetSize();
    9456  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9457  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9458  const size_t suballoc1stCount = suballocations1st.size();
    9459  const size_t suballoc2ndCount = suballocations2nd.size();
    9460 
    9461  // FIRST PASS
    9462 
    9463  size_t unusedRangeCount = 0;
    9464  VkDeviceSize usedBytes = 0;
    9465 
    9466  VkDeviceSize lastOffset = 0;
    9467 
    9468  size_t alloc2ndCount = 0;
    9469  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9470  {
    9471  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9472  size_t nextAlloc2ndIndex = 0;
    9473  while(lastOffset < freeSpace2ndTo1stEnd)
    9474  {
    9475  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9476  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9477  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9478  {
    9479  ++nextAlloc2ndIndex;
    9480  }
    9481 
    9482  // Found non-null allocation.
    9483  if(nextAlloc2ndIndex < suballoc2ndCount)
    9484  {
    9485  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9486 
    9487  // 1. Process free space before this allocation.
    9488  if(lastOffset < suballoc.offset)
    9489  {
    9490  // There is free space from lastOffset to suballoc.offset.
    9491  ++unusedRangeCount;
    9492  }
    9493 
    9494  // 2. Process this allocation.
    9495  // There is allocation with suballoc.offset, suballoc.size.
    9496  ++alloc2ndCount;
    9497  usedBytes += suballoc.size;
    9498 
    9499  // 3. Prepare for next iteration.
    9500  lastOffset = suballoc.offset + suballoc.size;
    9501  ++nextAlloc2ndIndex;
    9502  }
    9503  // We are at the end.
    9504  else
    9505  {
    9506  if(lastOffset < freeSpace2ndTo1stEnd)
    9507  {
    9508  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9509  ++unusedRangeCount;
    9510  }
    9511 
    9512  // End of loop.
    9513  lastOffset = freeSpace2ndTo1stEnd;
    9514  }
    9515  }
    9516  }
    9517 
    9518  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9519  size_t alloc1stCount = 0;
    9520  const VkDeviceSize freeSpace1stTo2ndEnd =
    9521  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9522  while(lastOffset < freeSpace1stTo2ndEnd)
    9523  {
    9524  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9525  while(nextAlloc1stIndex < suballoc1stCount &&
    9526  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9527  {
    9528  ++nextAlloc1stIndex;
    9529  }
    9530 
    9531  // Found non-null allocation.
    9532  if(nextAlloc1stIndex < suballoc1stCount)
    9533  {
    9534  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9535 
    9536  // 1. Process free space before this allocation.
    9537  if(lastOffset < suballoc.offset)
    9538  {
    9539  // There is free space from lastOffset to suballoc.offset.
    9540  ++unusedRangeCount;
    9541  }
    9542 
    9543  // 2. Process this allocation.
    9544  // There is allocation with suballoc.offset, suballoc.size.
    9545  ++alloc1stCount;
    9546  usedBytes += suballoc.size;
    9547 
    9548  // 3. Prepare for next iteration.
    9549  lastOffset = suballoc.offset + suballoc.size;
    9550  ++nextAlloc1stIndex;
    9551  }
    9552  // We are at the end.
    9553  else
    9554  {
    9555  if(lastOffset < freeSpace1stTo2ndEnd)
    9556  {
    9557  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9558  ++unusedRangeCount;
    9559  }
    9560 
    9561  // End of loop.
    9562  lastOffset = freeSpace1stTo2ndEnd;
    9563  }
    9564  }
    9565 
    9566  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9567  {
    9568  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9569  while(lastOffset < size)
    9570  {
    9571  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9572  while(nextAlloc2ndIndex != SIZE_MAX &&
    9573  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9574  {
    9575  --nextAlloc2ndIndex;
    9576  }
    9577 
    9578  // Found non-null allocation.
    9579  if(nextAlloc2ndIndex != SIZE_MAX)
    9580  {
    9581  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9582 
    9583  // 1. Process free space before this allocation.
    9584  if(lastOffset < suballoc.offset)
    9585  {
    9586  // There is free space from lastOffset to suballoc.offset.
    9587  ++unusedRangeCount;
    9588  }
    9589 
    9590  // 2. Process this allocation.
    9591  // There is allocation with suballoc.offset, suballoc.size.
    9592  ++alloc2ndCount;
    9593  usedBytes += suballoc.size;
    9594 
    9595  // 3. Prepare for next iteration.
    9596  lastOffset = suballoc.offset + suballoc.size;
    9597  --nextAlloc2ndIndex;
    9598  }
    9599  // We are at the end.
    9600  else
    9601  {
    9602  if(lastOffset < size)
    9603  {
    9604  // There is free space from lastOffset to size.
    9605  ++unusedRangeCount;
    9606  }
    9607 
    9608  // End of loop.
    9609  lastOffset = size;
    9610  }
    9611  }
    9612  }
    9613 
    9614  const VkDeviceSize unusedBytes = size - usedBytes;
    9615  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    9616 
    9617  // SECOND PASS
    9618  lastOffset = 0;
    9619 
    9620  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9621  {
    9622  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9623  size_t nextAlloc2ndIndex = 0;
    9624  while(lastOffset < freeSpace2ndTo1stEnd)
    9625  {
    9626  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9627  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9628  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9629  {
    9630  ++nextAlloc2ndIndex;
    9631  }
    9632 
    9633  // Found non-null allocation.
    9634  if(nextAlloc2ndIndex < suballoc2ndCount)
    9635  {
    9636  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9637 
    9638  // 1. Process free space before this allocation.
    9639  if(lastOffset < suballoc.offset)
    9640  {
    9641  // There is free space from lastOffset to suballoc.offset.
    9642  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9643  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9644  }
    9645 
    9646  // 2. Process this allocation.
    9647  // There is allocation with suballoc.offset, suballoc.size.
    9648  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9649 
    9650  // 3. Prepare for next iteration.
    9651  lastOffset = suballoc.offset + suballoc.size;
    9652  ++nextAlloc2ndIndex;
    9653  }
    9654  // We are at the end.
    9655  else
    9656  {
    9657  if(lastOffset < freeSpace2ndTo1stEnd)
    9658  {
    9659  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9660  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9661  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9662  }
    9663 
    9664  // End of loop.
    9665  lastOffset = freeSpace2ndTo1stEnd;
    9666  }
    9667  }
    9668  }
    9669 
    9670  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9671  while(lastOffset < freeSpace1stTo2ndEnd)
    9672  {
    9673  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9674  while(nextAlloc1stIndex < suballoc1stCount &&
    9675  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9676  {
    9677  ++nextAlloc1stIndex;
    9678  }
    9679 
    9680  // Found non-null allocation.
    9681  if(nextAlloc1stIndex < suballoc1stCount)
    9682  {
    9683  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9684 
    9685  // 1. Process free space before this allocation.
    9686  if(lastOffset < suballoc.offset)
    9687  {
    9688  // There is free space from lastOffset to suballoc.offset.
    9689  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9690  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9691  }
    9692 
    9693  // 2. Process this allocation.
    9694  // There is allocation with suballoc.offset, suballoc.size.
    9695  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9696 
    9697  // 3. Prepare for next iteration.
    9698  lastOffset = suballoc.offset + suballoc.size;
    9699  ++nextAlloc1stIndex;
    9700  }
    9701  // We are at the end.
    9702  else
    9703  {
    9704  if(lastOffset < freeSpace1stTo2ndEnd)
    9705  {
    9706  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9707  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9708  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9709  }
    9710 
    9711  // End of loop.
    9712  lastOffset = freeSpace1stTo2ndEnd;
    9713  }
    9714  }
    9715 
    9716  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9717  {
    9718  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9719  while(lastOffset < size)
    9720  {
    9721  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9722  while(nextAlloc2ndIndex != SIZE_MAX &&
    9723  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9724  {
    9725  --nextAlloc2ndIndex;
    9726  }
    9727 
    9728  // Found non-null allocation.
    9729  if(nextAlloc2ndIndex != SIZE_MAX)
    9730  {
    9731  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9732 
    9733  // 1. Process free space before this allocation.
    9734  if(lastOffset < suballoc.offset)
    9735  {
    9736  // There is free space from lastOffset to suballoc.offset.
    9737  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9738  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9739  }
    9740 
    9741  // 2. Process this allocation.
    9742  // There is allocation with suballoc.offset, suballoc.size.
    9743  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9744 
    9745  // 3. Prepare for next iteration.
    9746  lastOffset = suballoc.offset + suballoc.size;
    9747  --nextAlloc2ndIndex;
    9748  }
    9749  // We are at the end.
    9750  else
    9751  {
    9752  if(lastOffset < size)
    9753  {
    9754  // There is free space from lastOffset to size.
    9755  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9756  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9757  }
    9758 
    9759  // End of loop.
    9760  lastOffset = size;
    9761  }
    9762  }
    9763  }
    9764 
    9765  PrintDetailedMap_End(json);
    9766 }
    9767 #endif // #if VMA_STATS_STRING_ENABLED
    9768 
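          // Thin dispatcher: upper-address requests go to the top of the double
          // stack, lower-address requests to the end of 1st vector (possibly
          // wrapping around into 2nd when the block is used as a ring buffer).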
    9769 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    9770  uint32_t currentFrameIndex,
    9771  uint32_t frameInUseCount,
    9772  VkDeviceSize bufferImageGranularity,
    9773  VkDeviceSize allocSize,
    9774  VkDeviceSize allocAlignment,
    9775  bool upperAddress,
    9776  VmaSuballocationType allocType,
    9777  bool canMakeOtherLost,
    9778  uint32_t strategy,
    9779  VmaAllocationRequest* pAllocationRequest)
    9780 {
    9781  VMA_ASSERT(allocSize > 0);
    9782  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    9783  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    9784  VMA_HEAVY_ASSERT(Validate());
    9785  return upperAddress ?
    9786  CreateAllocationRequest_UpperAddress(
    9787  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9788  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
    9789  CreateAllocationRequest_LowerAddress(
    9790  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9791  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
    9792 }
    9793 
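          // Upper-address allocations grow downwards from the end of the block
          // (below 2nd.back() if 2nd is not empty), turning 2nd into the upper
          // stack of a double stack.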
    9794 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    9795  uint32_t currentFrameIndex,
    9796  uint32_t frameInUseCount,
    9797  VkDeviceSize bufferImageGranularity,
    9798  VkDeviceSize allocSize,
    9799  VkDeviceSize allocAlignment,
    9800  VmaSuballocationType allocType,
    9801  bool canMakeOtherLost,
    9802  uint32_t strategy,
    9803  VmaAllocationRequest* pAllocationRequest)
    9804 {
    9805  const VkDeviceSize size = GetSize();
    9806  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9807  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9808 
    9809  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9810  {
    9811  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    9812  return false;
    9813  }
    9814 
    9815  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    9816  if(allocSize > size)
    9817  {
    9818  return false;
    9819  }
    9820  VkDeviceSize resultBaseOffset = size - allocSize;
    9821  if(!suballocations2nd.empty())
    9822  {
    9823  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    9824  resultBaseOffset = lastSuballoc.offset - allocSize;
    9825  if(allocSize > lastSuballoc.offset)
    9826  {
    9827  return false;
    9828  }
    9829  }
    9830 
    9831  // Start from offset equal to end of free space.
    9832  VkDeviceSize resultOffset = resultBaseOffset;
    9833 
    9834  // Apply VMA_DEBUG_MARGIN at the end.
    9835  if(VMA_DEBUG_MARGIN > 0)
    9836  {
    9837  if(resultOffset < VMA_DEBUG_MARGIN)
    9838  {
    9839  return false;
    9840  }
    9841  resultOffset -= VMA_DEBUG_MARGIN;
    9842  }
    9843 
    9844  // Apply alignment.
    9845  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    9846 
    9847  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    9848  // Make bigger alignment if necessary.
    9849  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    9850  {
    9851  bool bufferImageGranularityConflict = false;
    9852  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    9853  {
    9854  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    9855  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9856  {
    9857  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    9858  {
    9859  bufferImageGranularityConflict = true;
    9860  break;
    9861  }
    9862  }
    9863  else
    9864  // Already on previous page.
    9865  break;
    9866  }
    9867  if(bufferImageGranularityConflict)
    9868  {
    9869  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    9870  }
    9871  }
    9872 
    9873  // There is enough free space.
    9874  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    9875  suballocations1st.back().offset + suballocations1st.back().size :
    9876  0;
    9877  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    9878  {
    9879  // Check previous suballocations for BufferImageGranularity conflicts.
    9880  // If conflict exists, allocation cannot be made here.
    9881  if(bufferImageGranularity > 1)
    9882  {
    9883  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    9884  {
    9885  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    9886  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9887  {
    9888  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    9889  {
    9890  return false;
    9891  }
    9892  }
    9893  else
    9894  {
    9895  // Already on next page.
    9896  break;
    9897  }
    9898  }
    9899  }
    9900 
    9901  // All tests passed: Success.
    9902  pAllocationRequest->offset = resultOffset;
    9903  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    9904  pAllocationRequest->sumItemSize = 0;
    9905  // pAllocationRequest->item unused.
    9906  pAllocationRequest->itemsToMakeLostCount = 0;
    9907  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
    9908  return true;
    9909  }
    9910 
    9911  return false;
    9912 }
    9913 
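          // Lower-address allocations are appended at the end of 1st vector; if
          // that does not fit and the block works as a ring buffer, the request
          // wraps around to the end of 2nd vector, possibly making old
          // allocations from the front of 1st lost.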
    9914 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
    9915  uint32_t currentFrameIndex,
    9916  uint32_t frameInUseCount,
    9917  VkDeviceSize bufferImageGranularity,
    9918  VkDeviceSize allocSize,
    9919  VkDeviceSize allocAlignment,
    9920  VmaSuballocationType allocType,
    9921  bool canMakeOtherLost,
    9922  uint32_t strategy,
    9923  VmaAllocationRequest* pAllocationRequest)
    9924 {
    9925  const VkDeviceSize size = GetSize();
    9926  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9927  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9928 
    9929  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9930  {
    9931  // Try to allocate at the end of 1st vector.
    9932 
    9933  VkDeviceSize resultBaseOffset = 0;
    9934  if(!suballocations1st.empty())
    9935  {
    9936  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    9937  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    9938  }
    9939 
    9940  // Start from offset equal to beginning of free space.
    9941  VkDeviceSize resultOffset = resultBaseOffset;
    9942 
    9943  // Apply VMA_DEBUG_MARGIN at the beginning.
    9944  if(VMA_DEBUG_MARGIN > 0)
    9945  {
    9946  resultOffset += VMA_DEBUG_MARGIN;
    9947  }
    9948 
    9949  // Apply alignment.
    9950  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    9951 
    9952  // Check previous suballocations for BufferImageGranularity conflicts.
    9953  // Make bigger alignment if necessary.
    9954  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    9955  {
    9956  bool bufferImageGranularityConflict = false;
    9957  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    9958  {
    9959  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    9960  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9961  {
    9962  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    9963  {
    9964  bufferImageGranularityConflict = true;
    9965  break;
    9966  }
    9967  }
    9968  else
    9969  // Already on previous page.
    9970  break;
    9971  }
    9972  if(bufferImageGranularityConflict)
    9973  {
    9974  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    9975  }
    9976  }
    9977 
    9978  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    9979  suballocations2nd.back().offset : size;
    9980 
    9981  // There is enough free space at the end after alignment.
    9982  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    9983  {
    9984  // Check next suballocations for BufferImageGranularity conflicts.
    9985  // If conflict exists, allocation cannot be made here.
    9986  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9987  {
    9988  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    9989  {
    9990  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    9991  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9992  {
    9993  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    9994  {
    9995  return false;
    9996  }
    9997  }
    9998  else
    9999  {
    10000  // Already on previous page.
    10001  break;
    10002  }
    10003  }
    10004  }
    10005 
    10006  // All tests passed: Success.
    10007  pAllocationRequest->offset = resultOffset;
    10008  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    10009  pAllocationRequest->sumItemSize = 0;
    10010  // pAllocationRequest->item, customData unused.
    10011  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
    10012  pAllocationRequest->itemsToMakeLostCount = 0;
    10013  return true;
    10014  }
    10015  }
    10016 
    10017  // Wrap around to the end of 2nd vector. Try to allocate there, treating the
    10018  // beginning of 1st vector as the end of free space.
    10019  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    10020  {
    10021  VMA_ASSERT(!suballocations1st.empty());
    10022 
    10023  VkDeviceSize resultBaseOffset = 0;
    10024  if(!suballocations2nd.empty())
    10025  {
    10026  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    10027  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    10028  }
    10029 
    10030  // Start from offset equal to beginning of free space.
    10031  VkDeviceSize resultOffset = resultBaseOffset;
    10032 
    10033  // Apply VMA_DEBUG_MARGIN at the beginning.
    10034  if(VMA_DEBUG_MARGIN > 0)
    10035  {
    10036  resultOffset += VMA_DEBUG_MARGIN;
    10037  }
    10038 
    10039  // Apply alignment.
    10040  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    10041 
    10042  // Check previous suballocations for BufferImageGranularity conflicts.
    10043  // Make bigger alignment if necessary.
    10044  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    10045  {
    10046  bool bufferImageGranularityConflict = false;
    10047  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    10048  {
    10049  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    10050  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    10051  {
    10052  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    10053  {
    10054  bufferImageGranularityConflict = true;
    10055  break;
    10056  }
    10057  }
    10058  else
    10059  // Already on previous page.
    10060  break;
    10061  }
    10062  if(bufferImageGranularityConflict)
    10063  {
    10064  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    10065  }
    10066  }
    10067 
    10068  pAllocationRequest->itemsToMakeLostCount = 0;
    10069  pAllocationRequest->sumItemSize = 0;
    10070  size_t index1st = m_1stNullItemsBeginCount;
    10071 
    10072  if(canMakeOtherLost)
    10073  {
    10074  while(index1st < suballocations1st.size() &&
    10075  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    10076  {
    10077  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    10078  const VmaSuballocation& suballoc = suballocations1st[index1st];
    10079  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    10080  {
    10081  // No problem.
    10082  }
    10083  else
    10084  {
    10085  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    10086  if(suballoc.hAllocation->CanBecomeLost() &&
    10087  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    10088  {
    10089  ++pAllocationRequest->itemsToMakeLostCount;
    10090  pAllocationRequest->sumItemSize += suballoc.size;
    10091  }
    10092  else
    10093  {
    10094  return false;
    10095  }
    10096  }
    10097  ++index1st;
    10098  }
    10099 
    10100  // Check next suballocations for BufferImageGranularity conflicts.
    10101  // If conflict exists, we must mark more allocations lost or fail.
    10102  if(bufferImageGranularity > 1)
    10103  {
    10104  while(index1st < suballocations1st.size())
    10105  {
    10106  const VmaSuballocation& suballoc = suballocations1st[index1st];
    10107  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    10108  {
    10109  if(suballoc.hAllocation != VK_NULL_HANDLE)
    10110  {
    10111  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    10112  if(suballoc.hAllocation->CanBecomeLost() &&
    10113  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    10114  {
    10115  ++pAllocationRequest->itemsToMakeLostCount;
    10116  pAllocationRequest->sumItemSize += suballoc.size;
    10117  }
    10118  else
    10119  {
    10120  return false;
    10121  }
    10122  }
    10123  }
    10124  else
    10125  {
    10126  // Already on next page.
    10127  break;
    10128  }
    10129  ++index1st;
    10130  }
    10131  }
    10132 
    10133  // Special case: There is not enough room at the end for this allocation, even after making all allocations from the 1st vector lost.
    10134  if(index1st == suballocations1st.size() &&
    10135  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
    10136  {
    10137  // TODO: Known limitation: this case is not implemented yet, so the allocation fails.
    10138  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
    10139  }
    10140  }
    10141 
    10142  // There is enough free space at the end after alignment.
    10143  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
    10144  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    10145  {
    10146  // Check next suballocations for BufferImageGranularity conflicts.
    10147  // If conflict exists, allocation cannot be made here.
    10148  if(bufferImageGranularity > 1)
    10149  {
    10150  for(size_t nextSuballocIndex = index1st;
    10151  nextSuballocIndex < suballocations1st.size();
    10152  nextSuballocIndex++)
    10153  {
    10154  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    10155  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    10156  {
    10157  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    10158  {
    10159  return false;
    10160  }
    10161  }
    10162  else
    10163  {
    10164  // Already on next page.
    10165  break;
    10166  }
    10167  }
    10168  }
    10169 
    10170  // All tests passed: Success.
    10171  pAllocationRequest->offset = resultOffset;
    10172  pAllocationRequest->sumFreeSize =
    10173  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    10174  - resultBaseOffset
    10175  - pAllocationRequest->sumItemSize;
    10176  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
    10177  // pAllocationRequest->item, customData unused.
    10178  return true;
    10179  }
    10180  }
    10181 
    10182  return false;
    10183 }
    10184 
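          // Walks allocations in the order the ring buffer would release them
          // (front of 1st, then wrapping into 2nd) and makes them lost until
          // itemsToMakeLostCount allocations have been freed.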
    10185 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    10186  uint32_t currentFrameIndex,
    10187  uint32_t frameInUseCount,
    10188  VmaAllocationRequest* pAllocationRequest)
    10189 {
    10190  if(pAllocationRequest->itemsToMakeLostCount == 0)
    10191  {
    10192  return true;
    10193  }
    10194 
    10195  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    10196 
    10197  // We always start from 1st.
    10198  SuballocationVectorType* suballocations = &AccessSuballocations1st();
    10199  size_t index = m_1stNullItemsBeginCount;
    10200  size_t madeLostCount = 0;
    10201  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    10202  {
    10203  if(index == suballocations->size())
    10204  {
    10205  index = 0;
    10206  // If we get to the end of 1st, we wrap around to the beginning of 2nd.
    10207  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    10208  {
    10209  suballocations = &AccessSuballocations2nd();
    10210  }
    10211  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
    10212  // suballocations continues pointing at AccessSuballocations1st().
    10213  VMA_ASSERT(!suballocations->empty());
    10214  }
    10215  VmaSuballocation& suballoc = (*suballocations)[index];
    10216  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10217  {
    10218  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    10219  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    10220  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10221  {
    10222  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10223  suballoc.hAllocation = VK_NULL_HANDLE;
    10224  m_SumFreeSize += suballoc.size;
    10225  if(suballocations == &AccessSuballocations1st())
    10226  {
    10227  ++m_1stNullItemsMiddleCount;
    10228  }
    10229  else
    10230  {
    10231  ++m_2ndNullItemsCount;
    10232  }
    10233  ++madeLostCount;
    10234  }
    10235  else
    10236  {
    10237  return false;
    10238  }
    10239  }
    10240  ++index;
    10241  }
    10242 
    10243  CleanupAfterFree();
    10244  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
    10245 
    10246  return true;
    10247 }
    10248 
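          // Makes lost every allocation in both vectors that can become lost and
          // whose last use is old enough; returns the number actually released.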
    10249 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10250 {
    10251  uint32_t lostAllocationCount = 0;
    10252 
    10253  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10254  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10255  {
    10256  VmaSuballocation& suballoc = suballocations1st[i];
    10257  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10258  suballoc.hAllocation->CanBecomeLost() &&
    10259  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10260  {
    10261  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10262  suballoc.hAllocation = VK_NULL_HANDLE;
    10263  ++m_1stNullItemsMiddleCount;
    10264  m_SumFreeSize += suballoc.size;
    10265  ++lostAllocationCount;
    10266  }
    10267  }
    10268 
    10269  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10270  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10271  {
    10272  VmaSuballocation& suballoc = suballocations2nd[i];
    10273  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10274  suballoc.hAllocation->CanBecomeLost() &&
    10275  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10276  {
    10277  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10278  suballoc.hAllocation = VK_NULL_HANDLE;
    10279  ++m_2ndNullItemsCount;
    10280  m_SumFreeSize += suballoc.size;
    10281  ++lostAllocationCount;
    10282  }
    10283  }
    10284 
    10285  if(lostAllocationCount)
    10286  {
    10287  CleanupAfterFree();
    10288  }
    10289 
    10290  return lostAllocationCount;
    10291 }
    10292 
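          // When corruption detection is enabled, a magic value is written into
          // the VMA_DEBUG_MARGIN before and after every allocation; verify both
          // guard values for every live suballocation in 1st and 2nd.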
    10293 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    10294 {
    10295  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10296  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10297  {
    10298  const VmaSuballocation& suballoc = suballocations1st[i];
    10299  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10300  {
    10301  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10302  {
    10303  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10304  return VK_ERROR_VALIDATION_FAILED_EXT;
    10305  }
    10306  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10307  {
    10308  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10309  return VK_ERROR_VALIDATION_FAILED_EXT;
    10310  }
    10311  }
    10312  }
    10313 
    10314  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10315  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10316  {
    10317  const VmaSuballocation& suballoc = suballocations2nd[i];
    10318  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10319  {
    10320  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10321  {
    10322  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10323  return VK_ERROR_VALIDATION_FAILED_EXT;
    10324  }
    10325  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10326  {
    10327  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10328  return VK_ERROR_VALIDATION_FAILED_EXT;
    10329  }
    10330  }
    10331  }
    10332 
    10333  return VK_SUCCESS;
    10334 }
    10335 
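          // Commits a successful allocation request: appends the new suballocation
          // to the vector selected by request.type and updates the free-size
          // bookkeeping and the 2nd vector mode.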
    10336 void VmaBlockMetadata_Linear::Alloc(
    10337  const VmaAllocationRequest& request,
    10338  VmaSuballocationType type,
    10339  VkDeviceSize allocSize,
    10340  VmaAllocation hAllocation)
    10341 {
    10342  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
    10343 
    10344  switch(request.type)
    10345  {
    10346  case VmaAllocationRequestType::UpperAddress:
    10347  {
    10348  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
    10349  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
    10350  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10351  suballocations2nd.push_back(newSuballoc);
    10352  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    10353  }
    10354  break;
    10355  case VmaAllocationRequestType::EndOf1st:
    10356  {
    10357  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10358 
    10359  VMA_ASSERT(suballocations1st.empty() ||
    10360  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
    10361  // Check if it fits before the end of the block.
    10362  VMA_ASSERT(request.offset + allocSize <= GetSize());
    10363 
    10364  suballocations1st.push_back(newSuballoc);
    10365  }
    10366  break;
    10367  case VmaAllocationRequestType::EndOf2nd:
    10368  {
    10369  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10370  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
    10371  VMA_ASSERT(!suballocations1st.empty() &&
    10372  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
    10373  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10374 
    10375  switch(m_2ndVectorMode)
    10376  {
    10377  case SECOND_VECTOR_EMPTY:
    10378  // First allocation from second part ring buffer.
    10379  VMA_ASSERT(suballocations2nd.empty());
    10380  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
    10381  break;
    10382  case SECOND_VECTOR_RING_BUFFER:
    10383  // 2-part ring buffer is already started.
    10384  VMA_ASSERT(!suballocations2nd.empty());
    10385  break;
    10386  case SECOND_VECTOR_DOUBLE_STACK:
    10387  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
    10388  break;
    10389  default:
    10390  VMA_ASSERT(0);
    10391  }
    10392 
    10393  suballocations2nd.push_back(newSuballoc);
    10394  }
    10395  break;
    10396  default:
    10397  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    10398  }
    10399 
    10400  m_SumFreeSize -= newSuballoc.size;
    10401 }
    10402 
    10403 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    10404 {
    10405  FreeAtOffset(allocation->GetOffset());
    10406 }
    10407 
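          // Fast paths free the oldest allocation of 1st vector or the last item
          // of either vector directly; any other allocation is located by binary
          // search on offset and turned into a null item.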
    10408 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
    10409 {
    10410  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10411  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10412 
    10413  if(!suballocations1st.empty())
    10414  {
    10415  // Freeing the oldest allocation of 1st vector: mark it free and extend the null-item prefix at the beginning.
    10416  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    10417  if(firstSuballoc.offset == offset)
    10418  {
    10419  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10420  firstSuballoc.hAllocation = VK_NULL_HANDLE;
    10421  m_SumFreeSize += firstSuballoc.size;
    10422  ++m_1stNullItemsBeginCount;
    10423  CleanupAfterFree();
    10424  return;
    10425  }
    10426  }
    10427 
    10428  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    10429  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
    10430  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    10431  {
    10432  VmaSuballocation& lastSuballoc = suballocations2nd.back();
    10433  if(lastSuballoc.offset == offset)
    10434  {
    10435  m_SumFreeSize += lastSuballoc.size;
    10436  suballocations2nd.pop_back();
    10437  CleanupAfterFree();
    10438  return;
    10439  }
    10440  }
    10441  // Last allocation in 1st vector.
    10442  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    10443  {
    10444  VmaSuballocation& lastSuballoc = suballocations1st.back();
    10445  if(lastSuballoc.offset == offset)
    10446  {
    10447  m_SumFreeSize += lastSuballoc.size;
    10448  suballocations1st.pop_back();
    10449  CleanupAfterFree();
    10450  return;
    10451  }
    10452  }
    10453 
    10454  // Item from the middle of 1st vector.
    10455  {
    10456  VmaSuballocation refSuballoc;
    10457  refSuballoc.offset = offset;
    10458  // The remaining members stay intentionally uninitialized for better performance.
    10459  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
    10460  suballocations1st.begin() + m_1stNullItemsBeginCount,
    10461  suballocations1st.end(),
    10462  refSuballoc,
    10463  VmaSuballocationOffsetLess());
    10464  if(it != suballocations1st.end())
    10465  {
    10466  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    10467  it->hAllocation = VK_NULL_HANDLE;
    10468  ++m_1stNullItemsMiddleCount;
    10469  m_SumFreeSize += it->size;
    10470  CleanupAfterFree();
    10471  return;
    10472  }
    10473  }
    10474 
    10475  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    10476  {
    10477  // Item from the middle of 2nd vector.
    10478  VmaSuballocation refSuballoc;
    10479  refSuballoc.offset = offset;
10480  // The rest of the members intentionally stay uninitialized for better performance.
    10481  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
    10482  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
    10483  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
    10484  if(it != suballocations2nd.end())
    10485  {
    10486  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    10487  it->hAllocation = VK_NULL_HANDLE;
    10488  ++m_2ndNullItemsCount;
    10489  m_SumFreeSize += it->size;
    10490  CleanupAfterFree();
    10491  return;
    10492  }
    10493  }
    10494 
    10495  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
    10496 }
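// Summary of the flow above: FreeAtOffset tries the cheap cases first - the oldest
// allocation at the front of the 1st vector, then the newest at the back of the
// 2nd vector (ring buffer / upper stack) or of the 1st vector - and only then
// binary-searches the middle of either vector, where the item is merely marked as
// null rather than erased.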
    10497 
    10498 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    10499 {
    10500  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10501  const size_t suballocCount = AccessSuballocations1st().size();
    10502  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    10503 }
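// In plain terms, the compaction heuristic above fires only when the 1st vector
// holds more than 32 items and null (freed) items make up at least 60% of them.
// Worked example: with 100 suballocations of which 60 are null,
// 60*2 = 120 >= (100-60)*3 = 120, so ShouldCompact1st() returns true.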
    10504 
    10505 void VmaBlockMetadata_Linear::CleanupAfterFree()
    10506 {
    10507  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10508  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10509 
    10510  if(IsEmpty())
    10511  {
    10512  suballocations1st.clear();
    10513  suballocations2nd.clear();
    10514  m_1stNullItemsBeginCount = 0;
    10515  m_1stNullItemsMiddleCount = 0;
    10516  m_2ndNullItemsCount = 0;
    10517  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10518  }
    10519  else
    10520  {
    10521  const size_t suballoc1stCount = suballocations1st.size();
    10522  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10523  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
    10524 
    10525  // Find more null items at the beginning of 1st vector.
    10526  while(m_1stNullItemsBeginCount < suballoc1stCount &&
    10527  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    10528  {
    10529  ++m_1stNullItemsBeginCount;
    10530  --m_1stNullItemsMiddleCount;
    10531  }
    10532 
    10533  // Find more null items at the end of 1st vector.
    10534  while(m_1stNullItemsMiddleCount > 0 &&
    10535  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
    10536  {
    10537  --m_1stNullItemsMiddleCount;
    10538  suballocations1st.pop_back();
    10539  }
    10540 
    10541  // Find more null items at the end of 2nd vector.
    10542  while(m_2ndNullItemsCount > 0 &&
    10543  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
    10544  {
    10545  --m_2ndNullItemsCount;
    10546  suballocations2nd.pop_back();
    10547  }
    10548 
    10549  // Find more null items at the beginning of 2nd vector.
    10550  while(m_2ndNullItemsCount > 0 &&
    10551  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
    10552  {
    10553  --m_2ndNullItemsCount;
    10554  VmaVectorRemove(suballocations2nd, 0);
    10555  }
    10556 
    10557  if(ShouldCompact1st())
    10558  {
    10559  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
    10560  size_t srcIndex = m_1stNullItemsBeginCount;
    10561  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
    10562  {
    10563  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
    10564  {
    10565  ++srcIndex;
    10566  }
    10567  if(dstIndex != srcIndex)
    10568  {
    10569  suballocations1st[dstIndex] = suballocations1st[srcIndex];
    10570  }
    10571  ++srcIndex;
    10572  }
    10573  suballocations1st.resize(nonNullItemCount);
    10574  m_1stNullItemsBeginCount = 0;
    10575  m_1stNullItemsMiddleCount = 0;
    10576  }
    10577 
    10578  // 2nd vector became empty.
    10579  if(suballocations2nd.empty())
    10580  {
    10581  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10582  }
    10583 
    10584  // 1st vector became empty.
    10585  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
    10586  {
    10587  suballocations1st.clear();
    10588  m_1stNullItemsBeginCount = 0;
    10589 
    10590  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    10591  {
    10592  // Swap 1st with 2nd. Now 2nd is empty.
    10593  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10594  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
    10595  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
    10596  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    10597  {
    10598  ++m_1stNullItemsBeginCount;
    10599  --m_1stNullItemsMiddleCount;
    10600  }
    10601  m_2ndNullItemsCount = 0;
    10602  m_1stVectorIndex ^= 1;
    10603  }
    10604  }
    10605  }
    10606 
    10607  VMA_HEAVY_ASSERT(Validate());
    10608 }
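// Note: CleanupAfterFree never erases items from the middle of a vector; it only
// trims runs of null items from the ends, occasionally compacts the 1st vector
// (see ShouldCompact1st), and swaps the roles of the two vectors when the 1st one
// empties while the ring buffer still has items in the 2nd.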
    10609 
    10610 
10611 ////////////////////////////////////////////////////////////////////////////////
10612 // class VmaBlockMetadata_Buddy
    10613 
    10614 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    10615  VmaBlockMetadata(hAllocator),
    10616  m_Root(VMA_NULL),
    10617  m_AllocationCount(0),
    10618  m_FreeCount(1),
    10619  m_SumFreeSize(0)
    10620 {
    10621  memset(m_FreeList, 0, sizeof(m_FreeList));
    10622 }
    10623 
    10624 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
    10625 {
    10626  DeleteNode(m_Root);
    10627 }
    10628 
    10629 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    10630 {
    10631  VmaBlockMetadata::Init(size);
    10632 
    10633  m_UsableSize = VmaPrevPow2(size);
    10634  m_SumFreeSize = m_UsableSize;
    10635 
    10636  // Calculate m_LevelCount.
    10637  m_LevelCount = 1;
    10638  while(m_LevelCount < MAX_LEVELS &&
    10639  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    10640  {
    10641  ++m_LevelCount;
    10642  }
    10643 
    10644  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    10645  rootNode->offset = 0;
    10646  rootNode->type = Node::TYPE_FREE;
    10647  rootNode->parent = VMA_NULL;
    10648  rootNode->buddy = VMA_NULL;
    10649 
    10650  m_Root = rootNode;
    10651  AddToFreeListFront(0, rootNode);
    10652 }
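// Illustration (block size assumed for the example): for a 10 MiB block,
// m_UsableSize = VmaPrevPow2(10 MiB) = 8 MiB and the remaining 2 MiB is reported
// via GetUnusableSize(). Node sizes then halve per level - level 0 = 8 MiB,
// level 1 = 4 MiB, ... - until MIN_NODE_SIZE or MAX_LEVELS stops the descent.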
    10653 
    10654 bool VmaBlockMetadata_Buddy::Validate() const
    10655 {
    10656  // Validate tree.
    10657  ValidationContext ctx;
    10658  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    10659  {
    10660  VMA_VALIDATE(false && "ValidateNode failed.");
    10661  }
    10662  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    10663  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
    10664 
    10665  // Validate free node lists.
    10666  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10667  {
    10668  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
    10669  m_FreeList[level].front->free.prev == VMA_NULL);
    10670 
    10671  for(Node* node = m_FreeList[level].front;
    10672  node != VMA_NULL;
    10673  node = node->free.next)
    10674  {
    10675  VMA_VALIDATE(node->type == Node::TYPE_FREE);
    10676 
    10677  if(node->free.next == VMA_NULL)
    10678  {
    10679  VMA_VALIDATE(m_FreeList[level].back == node);
    10680  }
    10681  else
    10682  {
    10683  VMA_VALIDATE(node->free.next->free.prev == node);
    10684  }
    10685  }
    10686  }
    10687 
10688  // Validate that free lists at higher levels are empty.
    10689  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    10690  {
    10691  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    10692  }
    10693 
    10694  return true;
    10695 }
    10696 
    10697 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    10698 {
    10699  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10700  {
    10701  if(m_FreeList[level].front != VMA_NULL)
    10702  {
    10703  return LevelToNodeSize(level);
    10704  }
    10705  }
    10706  return 0;
    10707 }
    10708 
    10709 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    10710 {
    10711  const VkDeviceSize unusableSize = GetUnusableSize();
    10712 
    10713  outInfo.blockCount = 1;
    10714 
    10715  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    10716  outInfo.usedBytes = outInfo.unusedBytes = 0;
    10717 
    10718  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    10719  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    10720  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    10721 
    10722  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    10723 
    10724  if(unusableSize > 0)
    10725  {
    10726  ++outInfo.unusedRangeCount;
    10727  outInfo.unusedBytes += unusableSize;
    10728  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    10729  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    10730  }
    10731 }
    10732 
    10733 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    10734 {
    10735  const VkDeviceSize unusableSize = GetUnusableSize();
    10736 
    10737  inoutStats.size += GetSize();
    10738  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    10739  inoutStats.allocationCount += m_AllocationCount;
    10740  inoutStats.unusedRangeCount += m_FreeCount;
    10741  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    10742 
    10743  if(unusableSize > 0)
    10744  {
    10745  ++inoutStats.unusedRangeCount;
    10746  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    10747  }
    10748 }
    10749 
    10750 #if VMA_STATS_STRING_ENABLED
    10751 
    10752 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    10753 {
    10754  // TODO optimize
    10755  VmaStatInfo stat;
    10756  CalcAllocationStatInfo(stat);
    10757 
    10758  PrintDetailedMap_Begin(
    10759  json,
    10760  stat.unusedBytes,
    10761  stat.allocationCount,
    10762  stat.unusedRangeCount);
    10763 
    10764  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    10765 
    10766  const VkDeviceSize unusableSize = GetUnusableSize();
    10767  if(unusableSize > 0)
    10768  {
    10769  PrintDetailedMap_UnusedRange(json,
    10770  m_UsableSize, // offset
    10771  unusableSize); // size
    10772  }
    10773 
    10774  PrintDetailedMap_End(json);
    10775 }
    10776 
    10777 #endif // #if VMA_STATS_STRING_ENABLED
    10778 
    10779 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    10780  uint32_t currentFrameIndex,
    10781  uint32_t frameInUseCount,
    10782  VkDeviceSize bufferImageGranularity,
    10783  VkDeviceSize allocSize,
    10784  VkDeviceSize allocAlignment,
    10785  bool upperAddress,
    10786  VmaSuballocationType allocType,
    10787  bool canMakeOtherLost,
    10788  uint32_t strategy,
    10789  VmaAllocationRequest* pAllocationRequest)
    10790 {
    10791  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    10792 
    10793  // Simple way to respect bufferImageGranularity. May be optimized some day.
    10794  // Whenever it might be an OPTIMAL image...
    10795  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
    10796  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    10797  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    10798  {
    10799  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
    10800  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    10801  }
    10802 
    10803  if(allocSize > m_UsableSize)
    10804  {
    10805  return false;
    10806  }
    10807 
    10808  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    10809  for(uint32_t level = targetLevel + 1; level--; )
    10810  {
    10811  for(Node* freeNode = m_FreeList[level].front;
    10812  freeNode != VMA_NULL;
    10813  freeNode = freeNode->free.next)
    10814  {
    10815  if(freeNode->offset % allocAlignment == 0)
    10816  {
    10817  pAllocationRequest->type = VmaAllocationRequestType::Normal;
    10818  pAllocationRequest->offset = freeNode->offset;
    10819  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
    10820  pAllocationRequest->sumItemSize = 0;
    10821  pAllocationRequest->itemsToMakeLostCount = 0;
    10822  pAllocationRequest->customData = (void*)(uintptr_t)level;
    10823  return true;
    10824  }
    10825  }
    10826  }
    10827 
    10828  return false;
    10829 }
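// The loop above visits level = targetLevel, targetLevel - 1, ..., 0, i.e. it
// prefers a free node of exactly the requested size class and falls back to ever
// larger nodes (which Alloc() will then split) only when no smaller node with a
// suitable alignment is free.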
    10830 
    10831 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    10832  uint32_t currentFrameIndex,
    10833  uint32_t frameInUseCount,
    10834  VmaAllocationRequest* pAllocationRequest)
    10835 {
    10836  /*
    10837  Lost allocations are not supported in buddy allocator at the moment.
    10838  Support might be added in the future.
    10839  */
    10840  return pAllocationRequest->itemsToMakeLostCount == 0;
    10841 }
    10842 
    10843 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10844 {
    10845  /*
    10846  Lost allocations are not supported in buddy allocator at the moment.
    10847  Support might be added in the future.
    10848  */
    10849  return 0;
    10850 }
    10851 
    10852 void VmaBlockMetadata_Buddy::Alloc(
    10853  const VmaAllocationRequest& request,
    10854  VmaSuballocationType type,
    10855  VkDeviceSize allocSize,
    10856  VmaAllocation hAllocation)
    10857 {
    10858  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    10859 
    10860  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    10861  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
    10862 
    10863  Node* currNode = m_FreeList[currLevel].front;
    10864  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    10865  while(currNode->offset != request.offset)
    10866  {
    10867  currNode = currNode->free.next;
    10868  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    10869  }
    10870 
    10871  // Go down, splitting free nodes.
    10872  while(currLevel < targetLevel)
    10873  {
10874  // currNode is already the first free node at currLevel.
10875  // Remove it from the list of free nodes at this level.
    10876  RemoveFromFreeList(currLevel, currNode);
    10877 
    10878  const uint32_t childrenLevel = currLevel + 1;
    10879 
    10880  // Create two free sub-nodes.
    10881  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
    10882  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
    10883 
    10884  leftChild->offset = currNode->offset;
    10885  leftChild->type = Node::TYPE_FREE;
    10886  leftChild->parent = currNode;
    10887  leftChild->buddy = rightChild;
    10888 
    10889  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
    10890  rightChild->type = Node::TYPE_FREE;
    10891  rightChild->parent = currNode;
    10892  rightChild->buddy = leftChild;
    10893 
    10894  // Convert current currNode to split type.
    10895  currNode->type = Node::TYPE_SPLIT;
    10896  currNode->split.leftChild = leftChild;
    10897 
    10898  // Add child nodes to free list. Order is important!
    10899  AddToFreeListFront(childrenLevel, rightChild);
    10900  AddToFreeListFront(childrenLevel, leftChild);
    10901 
    10902  ++m_FreeCount;
    10903  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
    10904  ++currLevel;
    10905  currNode = m_FreeList[currLevel].front;
    10906 
    10907  /*
10908  We can be sure that currNode, as the left child of a node previously split,
10909  also fulfills the alignment requirement.
    10910  */
    10911  }
    10912 
    10913  // Remove from free list.
    10914  VMA_ASSERT(currLevel == targetLevel &&
    10915  currNode != VMA_NULL &&
    10916  currNode->type == Node::TYPE_FREE);
    10917  RemoveFromFreeList(currLevel, currNode);
    10918 
    10919  // Convert to allocation node.
    10920  currNode->type = Node::TYPE_ALLOCATION;
    10921  currNode->allocation.alloc = hAllocation;
    10922 
    10923  ++m_AllocationCount;
    10924  --m_FreeCount;
    10925  m_SumFreeSize -= allocSize;
    10926 }
    10927 
    10928 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    10929 {
    10930  if(node->type == Node::TYPE_SPLIT)
    10931  {
    10932  DeleteNode(node->split.leftChild->buddy);
    10933  DeleteNode(node->split.leftChild);
    10934  }
    10935 
    10936  vma_delete(GetAllocationCallbacks(), node);
    10937 }
    10938 
    10939 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    10940 {
    10941  VMA_VALIDATE(level < m_LevelCount);
    10942  VMA_VALIDATE(curr->parent == parent);
    10943  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    10944  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    10945  switch(curr->type)
    10946  {
    10947  case Node::TYPE_FREE:
    10948  // curr->free.prev, next are validated separately.
    10949  ctx.calculatedSumFreeSize += levelNodeSize;
    10950  ++ctx.calculatedFreeCount;
    10951  break;
    10952  case Node::TYPE_ALLOCATION:
    10953  ++ctx.calculatedAllocationCount;
10954  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10955  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    10956  break;
    10957  case Node::TYPE_SPLIT:
    10958  {
    10959  const uint32_t childrenLevel = level + 1;
    10960  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    10961  const Node* const leftChild = curr->split.leftChild;
    10962  VMA_VALIDATE(leftChild != VMA_NULL);
    10963  VMA_VALIDATE(leftChild->offset == curr->offset);
    10964  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    10965  {
    10966  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    10967  }
    10968  const Node* const rightChild = leftChild->buddy;
    10969  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    10970  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    10971  {
    10972  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    10973  }
    10974  }
    10975  break;
    10976  default:
    10977  return false;
    10978  }
    10979 
    10980  return true;
    10981 }
    10982 
    10983 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    10984 {
    10985  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    10986  uint32_t level = 0;
    10987  VkDeviceSize currLevelNodeSize = m_UsableSize;
    10988  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    10989  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    10990  {
    10991  ++level;
    10992  currLevelNodeSize = nextLevelNodeSize;
    10993  nextLevelNodeSize = currLevelNodeSize >> 1;
    10994  }
    10995  return level;
    10996 }
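// Worked example (sizes assumed): with m_UsableSize = 8 MiB, an allocSize of
// 1.5 MiB descends while allocSize <= nextLevelNodeSize, stopping at level 2
// (node size 2 MiB) - the deepest level whose nodes can still hold the allocation.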
    10997 
    10998 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
    10999 {
    11000  // Find node and level.
    11001  Node* node = m_Root;
    11002  VkDeviceSize nodeOffset = 0;
    11003  uint32_t level = 0;
    11004  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    11005  while(node->type == Node::TYPE_SPLIT)
    11006  {
    11007  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
    11008  if(offset < nodeOffset + nextLevelSize)
    11009  {
    11010  node = node->split.leftChild;
    11011  }
    11012  else
    11013  {
    11014  node = node->split.leftChild->buddy;
    11015  nodeOffset += nextLevelSize;
    11016  }
    11017  ++level;
    11018  levelNodeSize = nextLevelSize;
    11019  }
    11020 
    11021  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    11022  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
    11023 
    11024  ++m_FreeCount;
    11025  --m_AllocationCount;
    11026  m_SumFreeSize += alloc->GetSize();
    11027 
    11028  node->type = Node::TYPE_FREE;
    11029 
    11030  // Join free nodes if possible.
    11031  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    11032  {
    11033  RemoveFromFreeList(level, node->buddy);
    11034  Node* const parent = node->parent;
    11035 
    11036  vma_delete(GetAllocationCallbacks(), node->buddy);
    11037  vma_delete(GetAllocationCallbacks(), node);
    11038  parent->type = Node::TYPE_FREE;
    11039 
    11040  node = parent;
    11041  --level;
    11042  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
    11043  --m_FreeCount;
    11044  }
    11045 
    11046  AddToFreeListFront(level, node);
    11047 }
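// Freeing mirrors the splitting in Alloc(): as long as the freed node's buddy is
// also free, both children are destroyed and their parent becomes a single free
// node one level up, so adjacent free space coalesces automatically.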
    11048 
    11049 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    11050 {
    11051  switch(node->type)
    11052  {
    11053  case Node::TYPE_FREE:
    11054  ++outInfo.unusedRangeCount;
    11055  outInfo.unusedBytes += levelNodeSize;
    11056  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11057  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
    11058  break;
    11059  case Node::TYPE_ALLOCATION:
    11060  {
    11061  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    11062  ++outInfo.allocationCount;
    11063  outInfo.usedBytes += allocSize;
    11064  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11065  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
    11066 
    11067  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    11068  if(unusedRangeSize > 0)
    11069  {
    11070  ++outInfo.unusedRangeCount;
    11071  outInfo.unusedBytes += unusedRangeSize;
    11072  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11073  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    11074  }
    11075  }
    11076  break;
    11077  case Node::TYPE_SPLIT:
    11078  {
    11079  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    11080  const Node* const leftChild = node->split.leftChild;
    11081  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    11082  const Node* const rightChild = leftChild->buddy;
    11083  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    11084  }
    11085  break;
    11086  default:
    11087  VMA_ASSERT(0);
    11088  }
    11089 }
    11090 
    11091 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    11092 {
    11093  VMA_ASSERT(node->type == Node::TYPE_FREE);
    11094 
    11095  // List is empty.
    11096  Node* const frontNode = m_FreeList[level].front;
    11097  if(frontNode == VMA_NULL)
    11098  {
    11099  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    11100  node->free.prev = node->free.next = VMA_NULL;
    11101  m_FreeList[level].front = m_FreeList[level].back = node;
    11102  }
    11103  else
    11104  {
    11105  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    11106  node->free.prev = VMA_NULL;
    11107  node->free.next = frontNode;
    11108  frontNode->free.prev = node;
    11109  m_FreeList[level].front = node;
    11110  }
    11111 }
    11112 
    11113 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    11114 {
    11115  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    11116 
    11117  // It is at the front.
    11118  if(node->free.prev == VMA_NULL)
    11119  {
    11120  VMA_ASSERT(m_FreeList[level].front == node);
    11121  m_FreeList[level].front = node->free.next;
    11122  }
    11123  else
    11124  {
    11125  Node* const prevFreeNode = node->free.prev;
    11126  VMA_ASSERT(prevFreeNode->free.next == node);
    11127  prevFreeNode->free.next = node->free.next;
    11128  }
    11129 
    11130  // It is at the back.
    11131  if(node->free.next == VMA_NULL)
    11132  {
    11133  VMA_ASSERT(m_FreeList[level].back == node);
    11134  m_FreeList[level].back = node->free.prev;
    11135  }
    11136  else
    11137  {
    11138  Node* const nextFreeNode = node->free.next;
    11139  VMA_ASSERT(nextFreeNode->free.prev == node);
    11140  nextFreeNode->free.prev = node->free.prev;
    11141  }
    11142 }
    11143 
    11144 #if VMA_STATS_STRING_ENABLED
    11145 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    11146 {
    11147  switch(node->type)
    11148  {
    11149  case Node::TYPE_FREE:
    11150  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    11151  break;
    11152  case Node::TYPE_ALLOCATION:
    11153  {
    11154  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    11155  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    11156  if(allocSize < levelNodeSize)
    11157  {
    11158  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    11159  }
    11160  }
    11161  break;
    11162  case Node::TYPE_SPLIT:
    11163  {
    11164  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    11165  const Node* const leftChild = node->split.leftChild;
    11166  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    11167  const Node* const rightChild = leftChild->buddy;
    11168  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    11169  }
    11170  break;
    11171  default:
    11172  VMA_ASSERT(0);
    11173  }
    11174 }
    11175 #endif // #if VMA_STATS_STRING_ENABLED
    11176 
    11177 
11178 ////////////////////////////////////////////////////////////////////////////////
11179 // class VmaDeviceMemoryBlock
    11180 
    11181 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    11182  m_pMetadata(VMA_NULL),
    11183  m_MemoryTypeIndex(UINT32_MAX),
    11184  m_Id(0),
    11185  m_hMemory(VK_NULL_HANDLE),
    11186  m_MapCount(0),
    11187  m_pMappedData(VMA_NULL)
    11188 {
    11189 }
    11190 
    11191 void VmaDeviceMemoryBlock::Init(
    11192  VmaAllocator hAllocator,
    11193  VmaPool hParentPool,
    11194  uint32_t newMemoryTypeIndex,
    11195  VkDeviceMemory newMemory,
    11196  VkDeviceSize newSize,
    11197  uint32_t id,
    11198  uint32_t algorithm)
    11199 {
    11200  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    11201 
    11202  m_hParentPool = hParentPool;
    11203  m_MemoryTypeIndex = newMemoryTypeIndex;
    11204  m_Id = id;
    11205  m_hMemory = newMemory;
    11206 
    11207  switch(algorithm)
    11208  {
11209  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11210  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    11211  break;
11212  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11213  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    11214  break;
    11215  default:
    11216  VMA_ASSERT(0);
    11217  // Fall-through.
    11218  case 0:
    11219  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    11220  }
    11221  m_pMetadata->Init(newSize);
    11222 }
    11223 
    11224 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    11225 {
    11226  // This is the most important assert in the entire library.
    11227  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    11228  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    11229 
    11230  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    11231  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    11232  m_hMemory = VK_NULL_HANDLE;
    11233 
    11234  vma_delete(allocator, m_pMetadata);
    11235  m_pMetadata = VMA_NULL;
    11236 }
    11237 
    11238 bool VmaDeviceMemoryBlock::Validate() const
    11239 {
    11240  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    11241  (m_pMetadata->GetSize() != 0));
    11242 
    11243  return m_pMetadata->Validate();
    11244 }
    11245 
    11246 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    11247 {
    11248  void* pData = nullptr;
    11249  VkResult res = Map(hAllocator, 1, &pData);
    11250  if(res != VK_SUCCESS)
    11251  {
    11252  return res;
    11253  }
    11254 
    11255  res = m_pMetadata->CheckCorruption(pData);
    11256 
    11257  Unmap(hAllocator, 1);
    11258 
    11259  return res;
    11260 }
    11261 
    11262 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    11263 {
    11264  if(count == 0)
    11265  {
    11266  return VK_SUCCESS;
    11267  }
    11268 
    11269  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11270  if(m_MapCount != 0)
    11271  {
    11272  m_MapCount += count;
    11273  VMA_ASSERT(m_pMappedData != VMA_NULL);
    11274  if(ppData != VMA_NULL)
    11275  {
    11276  *ppData = m_pMappedData;
    11277  }
    11278  return VK_SUCCESS;
    11279  }
    11280  else
    11281  {
    11282  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    11283  hAllocator->m_hDevice,
    11284  m_hMemory,
    11285  0, // offset
    11286  VK_WHOLE_SIZE,
    11287  0, // flags
    11288  &m_pMappedData);
    11289  if(result == VK_SUCCESS)
    11290  {
    11291  if(ppData != VMA_NULL)
    11292  {
    11293  *ppData = m_pMappedData;
    11294  }
    11295  m_MapCount = count;
    11296  }
    11297  return result;
    11298  }
    11299 }
    11300 
    11301 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    11302 {
    11303  if(count == 0)
    11304  {
    11305  return;
    11306  }
    11307 
    11308  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11309  if(m_MapCount >= count)
    11310  {
    11311  m_MapCount -= count;
    11312  if(m_MapCount == 0)
    11313  {
    11314  m_pMappedData = VMA_NULL;
    11315  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    11316  }
    11317  }
    11318  else
    11319  {
    11320  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    11321  }
    11322 }
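// Map/Unmap are reference-counted per block (m_MapCount): only the first Map()
// calls vkMapMemory and only the last matching Unmap() calls vkUnmapMemory, so
// persistently mapped allocations and temporary maps can coexist on the same
// VkDeviceMemory. The count parameter acquires or releases several references
// under a single mutex lock.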
    11323 
    11324 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11325 {
    11326  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11327  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11328 
    11329  void* pData;
    11330  VkResult res = Map(hAllocator, 1, &pData);
    11331  if(res != VK_SUCCESS)
    11332  {
    11333  return res;
    11334  }
    11335 
    11336  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    11337  VmaWriteMagicValue(pData, allocOffset + allocSize);
    11338 
    11339  Unmap(hAllocator, 1);
    11340 
    11341  return VK_SUCCESS;
    11342 }
    11343 
    11344 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11345 {
    11346  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11347  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11348 
    11349  void* pData;
    11350  VkResult res = Map(hAllocator, 1, &pData);
    11351  if(res != VK_SUCCESS)
    11352  {
    11353  return res;
    11354  }
    11355 
    11356  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    11357  {
    11358  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    11359  }
    11360  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    11361  {
    11362  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    11363  }
    11364 
    11365  Unmap(hAllocator, 1);
    11366 
    11367  return VK_SUCCESS;
    11368 }
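// Together these two functions implement the corruption-detection margins:
// VMA_DEBUG_MARGIN bytes before and after every allocation are filled with
// VMA_CORRUPTION_DETECTION_MAGIC_VALUE when it is created and re-checked when it
// is freed, so an out-of-bounds write by the application trips the asserts above.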
    11369 
    11370 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    11371  const VmaAllocator hAllocator,
    11372  const VmaAllocation hAllocation,
    11373  VkBuffer hBuffer)
    11374 {
    11375  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11376  hAllocation->GetBlock() == this);
    11377  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11378  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11379  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    11380  hAllocator->m_hDevice,
    11381  hBuffer,
    11382  m_hMemory,
    11383  hAllocation->GetOffset());
    11384 }
    11385 
    11386 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    11387  const VmaAllocator hAllocator,
    11388  const VmaAllocation hAllocation,
    11389  VkImage hImage)
    11390 {
    11391  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11392  hAllocation->GetBlock() == this);
    11393  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11394  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11395  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    11396  hAllocator->m_hDevice,
    11397  hImage,
    11398  m_hMemory,
    11399  hAllocation->GetOffset());
    11400 }
    11401 
    11402 static void InitStatInfo(VmaStatInfo& outInfo)
    11403 {
    11404  memset(&outInfo, 0, sizeof(outInfo));
    11405  outInfo.allocationSizeMin = UINT64_MAX;
    11406  outInfo.unusedRangeSizeMin = UINT64_MAX;
    11407 }
    11408 
    11409 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    11410 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    11411 {
    11412  inoutInfo.blockCount += srcInfo.blockCount;
    11413  inoutInfo.allocationCount += srcInfo.allocationCount;
    11414  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    11415  inoutInfo.usedBytes += srcInfo.usedBytes;
    11416  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    11417  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    11418  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    11419  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    11420  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    11421 }
    11422 
    11423 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    11424 {
    11425  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    11426  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    11427  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    11428  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    11429 }
    11430 
    11431 VmaPool_T::VmaPool_T(
    11432  VmaAllocator hAllocator,
    11433  const VmaPoolCreateInfo& createInfo,
    11434  VkDeviceSize preferredBlockSize) :
    11435  m_BlockVector(
    11436  hAllocator,
    11437  this, // hParentPool
    11438  createInfo.memoryTypeIndex,
    11439  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
    11440  createInfo.minBlockCount,
    11441  createInfo.maxBlockCount,
    11442  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    11443  createInfo.frameInUseCount,
    11444  true, // isCustomPool
    11445  createInfo.blockSize != 0, // explicitBlockSize
    11446  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    11447  m_Id(0)
    11448 {
    11449 }
    11450 
    11451 VmaPool_T::~VmaPool_T()
    11452 {
    11453 }
    11454 
    11455 #if VMA_STATS_STRING_ENABLED
    11456 
    11457 #endif // #if VMA_STATS_STRING_ENABLED
    11458 
    11459 VmaBlockVector::VmaBlockVector(
    11460  VmaAllocator hAllocator,
    11461  VmaPool hParentPool,
    11462  uint32_t memoryTypeIndex,
    11463  VkDeviceSize preferredBlockSize,
    11464  size_t minBlockCount,
    11465  size_t maxBlockCount,
    11466  VkDeviceSize bufferImageGranularity,
    11467  uint32_t frameInUseCount,
    11468  bool isCustomPool,
    11469  bool explicitBlockSize,
    11470  uint32_t algorithm) :
    11471  m_hAllocator(hAllocator),
    11472  m_hParentPool(hParentPool),
    11473  m_MemoryTypeIndex(memoryTypeIndex),
    11474  m_PreferredBlockSize(preferredBlockSize),
    11475  m_MinBlockCount(minBlockCount),
    11476  m_MaxBlockCount(maxBlockCount),
    11477  m_BufferImageGranularity(bufferImageGranularity),
    11478  m_FrameInUseCount(frameInUseCount),
    11479  m_IsCustomPool(isCustomPool),
    11480  m_ExplicitBlockSize(explicitBlockSize),
    11481  m_Algorithm(algorithm),
    11482  m_HasEmptyBlock(false),
    11483  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    11484  m_NextBlockId(0)
    11485 {
    11486 }
    11487 
    11488 VmaBlockVector::~VmaBlockVector()
    11489 {
    11490  for(size_t i = m_Blocks.size(); i--; )
    11491  {
    11492  m_Blocks[i]->Destroy(m_hAllocator);
    11493  vma_delete(m_hAllocator, m_Blocks[i]);
    11494  }
    11495 }
    11496 
    11497 VkResult VmaBlockVector::CreateMinBlocks()
    11498 {
    11499  for(size_t i = 0; i < m_MinBlockCount; ++i)
    11500  {
    11501  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    11502  if(res != VK_SUCCESS)
    11503  {
    11504  return res;
    11505  }
    11506  }
    11507  return VK_SUCCESS;
    11508 }
    11509 
    11510 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    11511 {
    11512  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    11513 
    11514  const size_t blockCount = m_Blocks.size();
    11515 
    11516  pStats->size = 0;
    11517  pStats->unusedSize = 0;
    11518  pStats->allocationCount = 0;
    11519  pStats->unusedRangeCount = 0;
    11520  pStats->unusedRangeSizeMax = 0;
    11521  pStats->blockCount = blockCount;
    11522 
    11523  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11524  {
    11525  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11526  VMA_ASSERT(pBlock);
    11527  VMA_HEAVY_ASSERT(pBlock->Validate());
    11528  pBlock->m_pMetadata->AddPoolStats(*pStats);
    11529  }
    11530 }
    11531 
    11532 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    11533 {
    11534  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    11535  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    11536  (VMA_DEBUG_MARGIN > 0) &&
    11537  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
    11538  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    11539 }
    11540 
    11541 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    11542 
    11543 VkResult VmaBlockVector::Allocate(
    11544  uint32_t currentFrameIndex,
    11545  VkDeviceSize size,
    11546  VkDeviceSize alignment,
    11547  const VmaAllocationCreateInfo& createInfo,
    11548  VmaSuballocationType suballocType,
    11549  size_t allocationCount,
    11550  VmaAllocation* pAllocations)
    11551 {
    11552  size_t allocIndex;
    11553  VkResult res = VK_SUCCESS;
    11554 
    11555  if(IsCorruptionDetectionEnabled())
    11556  {
    11557  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    11558  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    11559  }
    11560 
    11561  {
    11562  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11563  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    11564  {
    11565  res = AllocatePage(
    11566  currentFrameIndex,
    11567  size,
    11568  alignment,
    11569  createInfo,
    11570  suballocType,
    11571  pAllocations + allocIndex);
    11572  if(res != VK_SUCCESS)
    11573  {
    11574  break;
    11575  }
    11576  }
    11577  }
    11578 
    11579  if(res != VK_SUCCESS)
    11580  {
    11581  // Free all already created allocations.
    11582  while(allocIndex--)
    11583  {
    11584  Free(pAllocations[allocIndex]);
    11585  }
    11586  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    11587  }
    11588 
    11589  return res;
    11590 }
    11591 
    11592 VkResult VmaBlockVector::AllocatePage(
    11593  uint32_t currentFrameIndex,
    11594  VkDeviceSize size,
    11595  VkDeviceSize alignment,
    11596  const VmaAllocationCreateInfo& createInfo,
    11597  VmaSuballocationType suballocType,
    11598  VmaAllocation* pAllocation)
    11599 {
    11600  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    11601  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    11602  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    11603  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    11604  const bool canCreateNewBlock =
    11605  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    11606  (m_Blocks.size() < m_MaxBlockCount);
    11607  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    11608 
11609  // If the linear algorithm is used, canMakeOtherLost is available only when used as a ring buffer,
11610  // which in turn is available only when maxBlockCount = 1.
    11611  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    11612  {
    11613  canMakeOtherLost = false;
    11614  }
    11615 
    11616  // Upper address can only be used with linear allocator and within single memory block.
    11617  if(isUpperAddress &&
    11618  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    11619  {
    11620  return VK_ERROR_FEATURE_NOT_PRESENT;
    11621  }
    11622 
    11623  // Validate strategy.
    11624  switch(strategy)
    11625  {
11626  case 0:
11627  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11628  break;
11629  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11630  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11631  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11632  break;
    11633  default:
    11634  return VK_ERROR_FEATURE_NOT_PRESENT;
    11635  }
    11636 
11637  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
    11638  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    11639  {
    11640  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11641  }
    11642 
    11643  /*
11644  Under certain conditions, this whole section can be skipped for optimization, so
    11645  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    11646  e.g. for custom pools with linear algorithm.
    11647  */
    11648  if(!canMakeOtherLost || canCreateNewBlock)
    11649  {
    11650  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11651  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11652  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
    11653 
    11654  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11655  {
    11656  // Use only last block.
    11657  if(!m_Blocks.empty())
    11658  {
    11659  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    11660  VMA_ASSERT(pCurrBlock);
    11661  VkResult res = AllocateFromBlock(
    11662  pCurrBlock,
    11663  currentFrameIndex,
    11664  size,
    11665  alignment,
    11666  allocFlagsCopy,
    11667  createInfo.pUserData,
    11668  suballocType,
    11669  strategy,
    11670  pAllocation);
    11671  if(res == VK_SUCCESS)
    11672  {
    11673  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    11674  return VK_SUCCESS;
    11675  }
    11676  }
    11677  }
    11678  else
    11679  {
11680  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11681  {
    11682  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11683  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11684  {
    11685  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11686  VMA_ASSERT(pCurrBlock);
    11687  VkResult res = AllocateFromBlock(
    11688  pCurrBlock,
    11689  currentFrameIndex,
    11690  size,
    11691  alignment,
    11692  allocFlagsCopy,
    11693  createInfo.pUserData,
    11694  suballocType,
    11695  strategy,
    11696  pAllocation);
    11697  if(res == VK_SUCCESS)
    11698  {
    11699  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11700  return VK_SUCCESS;
    11701  }
    11702  }
    11703  }
    11704  else // WORST_FIT, FIRST_FIT
    11705  {
    11706  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11707  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11708  {
    11709  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11710  VMA_ASSERT(pCurrBlock);
    11711  VkResult res = AllocateFromBlock(
    11712  pCurrBlock,
    11713  currentFrameIndex,
    11714  size,
    11715  alignment,
    11716  allocFlagsCopy,
    11717  createInfo.pUserData,
    11718  suballocType,
    11719  strategy,
    11720  pAllocation);
    11721  if(res == VK_SUCCESS)
    11722  {
    11723  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11724  return VK_SUCCESS;
    11725  }
    11726  }
    11727  }
    11728  }
    11729 
    11730  // 2. Try to create new block.
    11731  if(canCreateNewBlock)
    11732  {
    11733  // Calculate optimal size for new block.
    11734  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    11735  uint32_t newBlockSizeShift = 0;
    11736  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    11737 
    11738  if(!m_ExplicitBlockSize)
    11739  {
    11740  // Allocate 1/8, 1/4, 1/2 as first blocks.
    11741  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    11742  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    11743  {
    11744  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11745  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    11746  {
    11747  newBlockSize = smallerNewBlockSize;
    11748  ++newBlockSizeShift;
    11749  }
    11750  else
    11751  {
    11752  break;
    11753  }
    11754  }
    11755  }
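// Example of the heuristic above (numbers assumed): with m_PreferredBlockSize =
// 256 MiB, no existing blocks, and a small allocation, the first block is created
// at 1/8 = 32 MiB; subsequent blocks grow to 64 MiB, 128 MiB, and finally the full
// 256 MiB, so small applications never commit the entire preferred size up front.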
    11756 
    11757  size_t newBlockIndex = 0;
    11758  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    11759  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    11760  if(!m_ExplicitBlockSize)
    11761  {
    11762  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    11763  {
    11764  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11765  if(smallerNewBlockSize >= size)
    11766  {
    11767  newBlockSize = smallerNewBlockSize;
    11768  ++newBlockSizeShift;
    11769  res = CreateBlock(newBlockSize, &newBlockIndex);
    11770  }
    11771  else
    11772  {
    11773  break;
    11774  }
    11775  }
    11776  }
    11777 
    11778  if(res == VK_SUCCESS)
    11779  {
    11780  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    11781  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    11782 
    11783  res = AllocateFromBlock(
    11784  pBlock,
    11785  currentFrameIndex,
    11786  size,
    11787  alignment,
    11788  allocFlagsCopy,
    11789  createInfo.pUserData,
    11790  suballocType,
    11791  strategy,
    11792  pAllocation);
    11793  if(res == VK_SUCCESS)
    11794  {
    11795  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    11796  return VK_SUCCESS;
    11797  }
    11798  else
    11799  {
    11800  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    11801  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11802  }
    11803  }
    11804  }
    11805  }
    11806 
    11807  // 3. Try to allocate from existing blocks with making other allocations lost.
    11808  if(canMakeOtherLost)
    11809  {
    11810  uint32_t tryIndex = 0;
    11811  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    11812  {
    11813  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    11814  VmaAllocationRequest bestRequest = {};
    11815  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    11816 
    11817  // 1. Search existing allocations.
11818  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11819  {
    11820  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11821  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11822  {
    11823  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11824  VMA_ASSERT(pCurrBlock);
    11825  VmaAllocationRequest currRequest = {};
    11826  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11827  currentFrameIndex,
    11828  m_FrameInUseCount,
    11829  m_BufferImageGranularity,
    11830  size,
    11831  alignment,
    11832  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11833  suballocType,
    11834  canMakeOtherLost,
    11835  strategy,
    11836  &currRequest))
    11837  {
    11838  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11839  if(pBestRequestBlock == VMA_NULL ||
    11840  currRequestCost < bestRequestCost)
    11841  {
    11842  pBestRequestBlock = pCurrBlock;
    11843  bestRequest = currRequest;
    11844  bestRequestCost = currRequestCost;
    11845 
    11846  if(bestRequestCost == 0)
    11847  {
    11848  break;
    11849  }
    11850  }
    11851  }
    11852  }
    11853  }
    11854  else // WORST_FIT, FIRST_FIT
    11855  {
    11856  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11857  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11858  {
    11859  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11860  VMA_ASSERT(pCurrBlock);
    11861  VmaAllocationRequest currRequest = {};
    11862  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11863  currentFrameIndex,
    11864  m_FrameInUseCount,
    11865  m_BufferImageGranularity,
    11866  size,
    11867  alignment,
    11868  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11869  suballocType,
    11870  canMakeOtherLost,
    11871  strategy,
    11872  &currRequest))
    11873  {
    11874  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11875  if(pBestRequestBlock == VMA_NULL ||
    11876  currRequestCost < bestRequestCost ||
11877  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11878  {
    11879  pBestRequestBlock = pCurrBlock;
    11880  bestRequest = currRequest;
    11881  bestRequestCost = currRequestCost;
    11882 
    11883  if(bestRequestCost == 0 ||
11884  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11885  {
    11886  break;
    11887  }
    11888  }
    11889  }
    11890  }
    11891  }
    11892 
    11893  if(pBestRequestBlock != VMA_NULL)
    11894  {
    11895  if(mapped)
    11896  {
    11897  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    11898  if(res != VK_SUCCESS)
    11899  {
    11900  return res;
    11901  }
    11902  }
    11903 
    11904  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    11905  currentFrameIndex,
    11906  m_FrameInUseCount,
    11907  &bestRequest))
    11908  {
11909  // We no longer have an empty block.
    11910  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    11911  {
    11912  m_HasEmptyBlock = false;
    11913  }
    11914  // Allocate from this pBlock.
    11915  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
    11916  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
    11917  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
    11918  (*pAllocation)->InitBlockAllocation(
    11919  pBestRequestBlock,
    11920  bestRequest.offset,
    11921  alignment,
    11922  size,
    11923  suballocType,
    11924  mapped,
    11925  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    11926  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    11927  VMA_DEBUG_LOG(" Returned from existing block");
    11928  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    11929  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    11930  {
    11931  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    11932  }
    11933  if(IsCorruptionDetectionEnabled())
    11934  {
    11935  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    11936  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    11937  }
    11938  return VK_SUCCESS;
    11939  }
    11940  // else: Some allocations must have been touched while we are here. Next try.
    11941  }
    11942  else
    11943  {
    11944  // Could not find place in any of the blocks - break outer loop.
    11945  break;
    11946  }
    11947  }
11948  /* Maximum number of tries exceeded - a very unlikely event that may occur when
11949  many other threads are simultaneously touching allocations, making it impossible
11950  to mark them as lost at the same time as we try to allocate. */
    11951  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    11952  {
    11953  return VK_ERROR_TOO_MANY_OBJECTS;
    11954  }
    11955  }
    11956 
    11957  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11958 }
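// Summary of AllocatePage: (1) try to place the allocation in an existing block
// (iteration order depends on the strategy), (2) create a new block if allowed,
// (3) with canMakeOtherLost, retry up to VMA_ALLOCATION_TRY_COUNT times while
// evicting "lost" allocations; exhausting the tries yields VK_ERROR_TOO_MANY_OBJECTS,
// otherwise the function fails with VK_ERROR_OUT_OF_DEVICE_MEMORY.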
    11959 
    11960 void VmaBlockVector::Free(
    11961  VmaAllocation hAllocation)
    11962 {
    11963  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    11964 
    11965  // Scope for lock.
    11966  {
    11967  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11968 
    11969  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    11970 
    11971  if(IsCorruptionDetectionEnabled())
    11972  {
    11973  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    11974  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    11975  }
    11976 
    11977  if(hAllocation->IsPersistentMap())
    11978  {
    11979  pBlock->Unmap(m_hAllocator, 1);
    11980  }
    11981 
    11982  pBlock->m_pMetadata->Free(hAllocation);
    11983  VMA_HEAVY_ASSERT(pBlock->Validate());
    11984 
    11985  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
    11986 
    11987  // pBlock became empty after this deallocation.
    11988  if(pBlock->m_pMetadata->IsEmpty())
    11989  {
11990  // We already have an empty block - we don't want two, so delete this one.
    11991  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    11992  {
    11993  pBlockToDelete = pBlock;
    11994  Remove(pBlock);
    11995  }
11996  // We now have our first empty block.
    11997  else
    11998  {
    11999  m_HasEmptyBlock = true;
    12000  }
    12001  }
    12002  // pBlock didn't become empty, but we have another empty block - find and free that one.
12003  // (This is optional - a heuristic.)
    12004  else if(m_HasEmptyBlock)
    12005  {
    12006  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    12007  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    12008  {
    12009  pBlockToDelete = pLastBlock;
    12010  m_Blocks.pop_back();
    12011  m_HasEmptyBlock = false;
    12012  }
    12013  }
    12014 
    12015  IncrementallySortBlocks();
    12016  }
    12017 
12018  // Destruction of a free block. Deferred until this point, outside of the mutex
12019  // lock, for performance reasons.
    12020  if(pBlockToDelete != VMA_NULL)
    12021  {
12022  VMA_DEBUG_LOG("  Deleted empty block");
    12023  pBlockToDelete->Destroy(m_hAllocator);
    12024  vma_delete(m_hAllocator, pBlockToDelete);
    12025  }
    12026 }
    12027 
    12028 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    12029 {
    12030  VkDeviceSize result = 0;
    12031  for(size_t i = m_Blocks.size(); i--; )
    12032  {
    12033  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    12034  if(result >= m_PreferredBlockSize)
    12035  {
    12036  break;
    12037  }
    12038  }
    12039  return result;
    12040 }
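// Note, as an aside: the backward loop above can stop early because blocks are
// presumably never created larger than m_PreferredBlockSize, so once a block of
// at least that size has been seen, it is already the maximum.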
    12041 
    12042 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    12043 {
    12044  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12045  {
    12046  if(m_Blocks[blockIndex] == pBlock)
    12047  {
    12048  VmaVectorRemove(m_Blocks, blockIndex);
    12049  return;
    12050  }
    12051  }
    12052  VMA_ASSERT(0);
    12053 }
    12054 
    12055 void VmaBlockVector::IncrementallySortBlocks()
    12056 {
    12057  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    12058  {
    12059  // Bubble sort only until first swap.
    12060  for(size_t i = 1; i < m_Blocks.size(); ++i)
    12061  {
    12062  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    12063  {
    12064  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    12065  return;
    12066  }
    12067  }
    12068  }
    12069 }
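// For illustration (values assumed): with per-block free sizes [3, 1, 2], one call
// swaps to [1, 3, 2] and the next call yields [1, 2, 3] - at most one swap per call,
// amortizing a bubble sort over many allocate/free operations.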
    12070 
    12071 VkResult VmaBlockVector::AllocateFromBlock(
    12072  VmaDeviceMemoryBlock* pBlock,
    12073  uint32_t currentFrameIndex,
    12074  VkDeviceSize size,
    12075  VkDeviceSize alignment,
    12076  VmaAllocationCreateFlags allocFlags,
    12077  void* pUserData,
    12078  VmaSuballocationType suballocType,
    12079  uint32_t strategy,
    12080  VmaAllocation* pAllocation)
    12081 {
    12082  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    12083  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    12084  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    12085  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    12086 
    12087  VmaAllocationRequest currRequest = {};
    12088  if(pBlock->m_pMetadata->CreateAllocationRequest(
    12089  currentFrameIndex,
    12090  m_FrameInUseCount,
    12091  m_BufferImageGranularity,
    12092  size,
    12093  alignment,
    12094  isUpperAddress,
    12095  suballocType,
    12096  false, // canMakeOtherLost
    12097  strategy,
    12098  &currRequest))
    12099  {
12100  // Allocate from pBlock.
    12101  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    12102 
    12103  if(mapped)
    12104  {
    12105  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    12106  if(res != VK_SUCCESS)
    12107  {
    12108  return res;
    12109  }
    12110  }
    12111 
12112  // We no longer have an empty block.
    12113  if(pBlock->m_pMetadata->IsEmpty())
    12114  {
    12115  m_HasEmptyBlock = false;
    12116  }
    12117 
    12118  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
    12119  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
    12120  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
    12121  (*pAllocation)->InitBlockAllocation(
    12122  pBlock,
    12123  currRequest.offset,
    12124  alignment,
    12125  size,
    12126  suballocType,
    12127  mapped,
    12128  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    12129  VMA_HEAVY_ASSERT(pBlock->Validate());
    12130  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    12131  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12132  {
    12133  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    12134  }
    12135  if(IsCorruptionDetectionEnabled())
    12136  {
    12137  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
    12138  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    12139  }
    12140  return VK_SUCCESS;
    12141  }
    12142  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12143 }
    12144 
    12145 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    12146 {
    12147  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    12148  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    12149  allocInfo.allocationSize = blockSize;
    12150  VkDeviceMemory mem = VK_NULL_HANDLE;
    12151  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    12152  if(res < 0)
    12153  {
    12154  return res;
    12155  }
    12156 
    12157  // New VkDeviceMemory successfully created.
    12158 
12159  // Create a new block object for it.
    12160  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    12161  pBlock->Init(
    12162  m_hAllocator,
    12163  m_hParentPool,
    12164  m_MemoryTypeIndex,
    12165  mem,
    12166  allocInfo.allocationSize,
    12167  m_NextBlockId++,
    12168  m_Algorithm);
    12169 
    12170  m_Blocks.push_back(pBlock);
    12171  if(pNewBlockIndex != VMA_NULL)
    12172  {
    12173  *pNewBlockIndex = m_Blocks.size() - 1;
    12174  }
    12175 
    12176  return VK_SUCCESS;
    12177 }
    12178 
    12179 void VmaBlockVector::ApplyDefragmentationMovesCpu(
    12180  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    12181  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
    12182 {
    12183  const size_t blockCount = m_Blocks.size();
    12184  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
    12185 
    12186  enum BLOCK_FLAG
    12187  {
    12188  BLOCK_FLAG_USED = 0x00000001,
    12189  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    12190  };
    12191 
    12192  struct BlockInfo
    12193  {
    12194  uint32_t flags;
    12195  void* pMappedData;
    12196  };
    12197  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
    12198  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    12199  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
    12200 
    12201  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    12202  const size_t moveCount = moves.size();
    12203  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12204  {
    12205  const VmaDefragmentationMove& move = moves[moveIndex];
    12206  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
    12207  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    12208  }
    12209 
    12210  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12211 
    12212  // Go over all blocks. Get mapped pointer or map if necessary.
    12213  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12214  {
    12215  BlockInfo& currBlockInfo = blockInfo[blockIndex];
    12216  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12217  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
    12218  {
    12219  currBlockInfo.pMappedData = pBlock->GetMappedData();
12220  // If it was not originally mapped, map it now.
    12221  if(currBlockInfo.pMappedData == VMA_NULL)
    12222  {
    12223  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
    12224  if(pDefragCtx->res == VK_SUCCESS)
    12225  {
    12226  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
    12227  }
    12228  }
    12229  }
    12230  }
    12231 
    12232  // Go over all moves. Do actual data transfer.
    12233  if(pDefragCtx->res == VK_SUCCESS)
    12234  {
    12235  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    12236  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    12237 
    12238  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12239  {
    12240  const VmaDefragmentationMove& move = moves[moveIndex];
    12241 
    12242  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
    12243  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
    12244 
    12245  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
    12246 
    12247  // Invalidate source.
    12248  if(isNonCoherent)
    12249  {
    12250  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
    12251  memRange.memory = pSrcBlock->GetDeviceMemory();
    12252  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
    12253  memRange.size = VMA_MIN(
    12254  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
    12255  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
    12256  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12257  }
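// Worked example of the range math above, with assumed values: for
// nonCoherentAtomSize = 64, srcOffset = 100, move.size = 40, the invalidated range
// becomes offset = VmaAlignDown(100, 64) = 64 and
// size = VmaAlignUp(40 + (100 - 64), 64) = 128, clamped to not reach past the block end.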
    12258 
    12259  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    12260  memmove(
    12261  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
    12262  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
    12263  static_cast<size_t>(move.size));
    12264 
    12265  if(IsCorruptionDetectionEnabled())
    12266  {
    12267  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
    12268  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
    12269  }
    12270 
    12271  // Flush destination.
    12272  if(isNonCoherent)
    12273  {
    12274  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
    12275  memRange.memory = pDstBlock->GetDeviceMemory();
    12276  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
    12277  memRange.size = VMA_MIN(
    12278  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
    12279  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
    12280  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12281  }
    12282  }
    12283  }
    12284 
    12285  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12286  // This is done regardless of whether pDefragCtx->res == VK_SUCCESS.
    12287  for(size_t blockIndex = blockCount; blockIndex--; )
    12288  {
    12289  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
    12290  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
    12291  {
    12292  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12293  pBlock->Unmap(m_hAllocator, 1);
    12294  }
    12295  }
    12296 }
    12297 
    12298 void VmaBlockVector::ApplyDefragmentationMovesGpu(
    12299  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    12300  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12301  VkCommandBuffer commandBuffer)
    12302 {
    12303  const size_t blockCount = m_Blocks.size();
    12304 
    12305  pDefragCtx->blockContexts.resize(blockCount);
    12306  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
    12307 
    12308  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    12309  const size_t moveCount = moves.size();
    12310  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12311  {
    12312  const VmaDefragmentationMove& move = moves[moveIndex];
    12313  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12314  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12315  }
    12316 
    12317  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12318 
12319  // Go over all blocks. Create and bind a buffer for the whole block if necessary.
    12320  {
    12321  VkBufferCreateInfo bufCreateInfo;
    12322  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
    12323 
    12324  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12325  {
    12326  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
    12327  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12328  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
    12329  {
    12330  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
    12331  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
    12332  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
    12333  if(pDefragCtx->res == VK_SUCCESS)
    12334  {
    12335  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
    12336  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
    12337  }
    12338  }
    12339  }
    12340  }
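// Rationale, as understood from the code: VkDeviceMemory contents cannot be copied
// directly, so every block touched by a move is temporarily wrapped in a buffer
// spanning the whole block; vkCmdCopyBuffer between such buffers then performs the moves.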
    12341 
    12342  // Go over all moves. Post data transfer commands to command buffer.
    12343  if(pDefragCtx->res == VK_SUCCESS)
    12344  {
    12345  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12346  {
    12347  const VmaDefragmentationMove& move = moves[moveIndex];
    12348 
    12349  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
    12350  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
    12351 
    12352  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
    12353 
    12354  VkBufferCopy region = {
    12355  move.srcOffset,
    12356  move.dstOffset,
    12357  move.size };
    12358  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
    12359  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
    12360  }
    12361  }
    12362 
    12363  // Save buffers to defrag context for later destruction.
    12364  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    12365  {
    12366  pDefragCtx->res = VK_NOT_READY;
    12367  }
    12368 }
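// VK_NOT_READY here indicates that the copy commands were only recorded, not executed:
// the caller is expected to submit commandBuffer and finish the defragmentation
// (DefragmentationEnd) before the moves take effect.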
    12369 
    12370 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
    12371 {
    12372  m_HasEmptyBlock = false;
    12373  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    12374  {
    12375  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12376  if(pBlock->m_pMetadata->IsEmpty())
    12377  {
    12378  if(m_Blocks.size() > m_MinBlockCount)
    12379  {
    12380  if(pDefragmentationStats != VMA_NULL)
    12381  {
    12382  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    12383  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
    12384  }
    12385 
    12386  VmaVectorRemove(m_Blocks, blockIndex);
    12387  pBlock->Destroy(m_hAllocator);
    12388  vma_delete(m_hAllocator, pBlock);
    12389  }
    12390  else
    12391  {
    12392  m_HasEmptyBlock = true;
    12393  }
    12394  }
    12395  }
    12396 }
    12397 
    12398 #if VMA_STATS_STRING_ENABLED
    12399 
    12400 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    12401 {
    12402  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12403 
    12404  json.BeginObject();
    12405 
    12406  if(m_IsCustomPool)
    12407  {
    12408  json.WriteString("MemoryTypeIndex");
    12409  json.WriteNumber(m_MemoryTypeIndex);
    12410 
    12411  json.WriteString("BlockSize");
    12412  json.WriteNumber(m_PreferredBlockSize);
    12413 
    12414  json.WriteString("BlockCount");
    12415  json.BeginObject(true);
    12416  if(m_MinBlockCount > 0)
    12417  {
    12418  json.WriteString("Min");
    12419  json.WriteNumber((uint64_t)m_MinBlockCount);
    12420  }
    12421  if(m_MaxBlockCount < SIZE_MAX)
    12422  {
    12423  json.WriteString("Max");
    12424  json.WriteNumber((uint64_t)m_MaxBlockCount);
    12425  }
    12426  json.WriteString("Cur");
    12427  json.WriteNumber((uint64_t)m_Blocks.size());
    12428  json.EndObject();
    12429 
    12430  if(m_FrameInUseCount > 0)
    12431  {
    12432  json.WriteString("FrameInUseCount");
    12433  json.WriteNumber(m_FrameInUseCount);
    12434  }
    12435 
    12436  if(m_Algorithm != 0)
    12437  {
    12438  json.WriteString("Algorithm");
    12439  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
    12440  }
    12441  }
    12442  else
    12443  {
    12444  json.WriteString("PreferredBlockSize");
    12445  json.WriteNumber(m_PreferredBlockSize);
    12446  }
    12447 
    12448  json.WriteString("Blocks");
    12449  json.BeginObject();
    12450  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12451  {
    12452  json.BeginString();
    12453  json.ContinueString(m_Blocks[i]->GetId());
    12454  json.EndString();
    12455 
    12456  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    12457  }
    12458  json.EndObject();
    12459 
    12460  json.EndObject();
    12461 }
    12462 
    12463 #endif // #if VMA_STATS_STRING_ENABLED
    12464 
    12465 void VmaBlockVector::Defragment(
    12466  class VmaBlockVectorDefragmentationContext* pCtx,
    12467  VmaDefragmentationStats* pStats,
    12468  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    12469  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    12470  VkCommandBuffer commandBuffer)
    12471 {
    12472  pCtx->res = VK_SUCCESS;
    12473 
    12474  const VkMemoryPropertyFlags memPropFlags =
    12475  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    12476  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    12477 
    12478  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
    12479  isHostVisible;
    12480  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
    12481  !IsCorruptionDetectionEnabled() &&
    12482  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
    12483 
    12484  // There are options to defragment this memory type.
    12485  if(canDefragmentOnCpu || canDefragmentOnGpu)
    12486  {
    12487  bool defragmentOnGpu;
    12488  // There is only one option to defragment this memory type.
    12489  if(canDefragmentOnGpu != canDefragmentOnCpu)
    12490  {
    12491  defragmentOnGpu = canDefragmentOnGpu;
    12492  }
    12493  // Both options are available: Heuristics to choose the best one.
    12494  else
    12495  {
    12496  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
    12497  m_hAllocator->IsIntegratedGpu();
    12498  }
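// For illustration: HOST_VISIBLE-only staging memory on a discrete GPU takes the CPU
// path (memmove through mapped pointers), while DEVICE_LOCAL memory - or any memory
// on an integrated GPU - prefers the GPU path (vkCmdCopyBuffer).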
    12499 
    12500  bool overlappingMoveSupported = !defragmentOnGpu;
    12501 
    12502  if(m_hAllocator->m_UseMutex)
    12503  {
    12504  m_Mutex.LockWrite();
    12505  pCtx->mutexLocked = true;
    12506  }
    12507 
    12508  pCtx->Begin(overlappingMoveSupported);
    12509 
    12510  // Defragment.
    12511 
    12512  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
    12513  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
    12514  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
    12515  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
    12516  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
    12517 
    12518  // Accumulate statistics.
    12519  if(pStats != VMA_NULL)
    12520  {
    12521  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
    12522  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
    12523  pStats->bytesMoved += bytesMoved;
    12524  pStats->allocationsMoved += allocationsMoved;
    12525  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    12526  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    12527  if(defragmentOnGpu)
    12528  {
    12529  maxGpuBytesToMove -= bytesMoved;
    12530  maxGpuAllocationsToMove -= allocationsMoved;
    12531  }
    12532  else
    12533  {
    12534  maxCpuBytesToMove -= bytesMoved;
    12535  maxCpuAllocationsToMove -= allocationsMoved;
    12536  }
    12537  }
    12538 
    12539  if(pCtx->res >= VK_SUCCESS)
    12540  {
    12541  if(defragmentOnGpu)
    12542  {
    12543  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
    12544  }
    12545  else
    12546  {
    12547  ApplyDefragmentationMovesCpu(pCtx, moves);
    12548  }
    12549  }
    12550  }
    12551 }
    12552 
    12553 void VmaBlockVector::DefragmentationEnd(
    12554  class VmaBlockVectorDefragmentationContext* pCtx,
    12555  VmaDefragmentationStats* pStats)
    12556 {
    12557  // Destroy buffers.
    12558  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
    12559  {
    12560  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
    12561  if(blockCtx.hBuffer)
    12562  {
    12563  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
    12564  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
    12565  }
    12566  }
    12567 
    12568  if(pCtx->res >= VK_SUCCESS)
    12569  {
    12570  FreeEmptyBlocks(pStats);
    12571  }
    12572 
    12573  if(pCtx->mutexLocked)
    12574  {
    12575  VMA_ASSERT(m_hAllocator->m_UseMutex);
    12576  m_Mutex.UnlockWrite();
    12577  }
    12578 }
    12579 
    12580 size_t VmaBlockVector::CalcAllocationCount() const
    12581 {
    12582  size_t result = 0;
    12583  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12584  {
    12585  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
    12586  }
    12587  return result;
    12588 }
    12589 
    12590 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
    12591 {
    12592  if(m_BufferImageGranularity == 1)
    12593  {
    12594  return false;
    12595  }
    12596  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
    12597  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    12598  {
    12599  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
    12600  VMA_ASSERT(m_Algorithm == 0);
    12601  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
    12602  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
    12603  {
    12604  return true;
    12605  }
    12606  }
    12607  return false;
    12608 }
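// Background, paraphrasing the Vulkan spec: bufferImageGranularity is the page size
// within which linear (buffer) and non-linear (optimal-tiling image) resources may
// conflict. E.g. with granularity 4096, a buffer and an optimal image suballocated
// in the same 4 KiB page is a conflict; granularity 1 makes a conflict impossible,
// hence the early return above.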
    12609 
    12610 void VmaBlockVector::MakePoolAllocationsLost(
    12611  uint32_t currentFrameIndex,
    12612  size_t* pLostAllocationCount)
    12613 {
    12614  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    12615  size_t lostAllocationCount = 0;
    12616  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12617  {
    12618  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12619  VMA_ASSERT(pBlock);
    12620  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    12621  }
    12622  if(pLostAllocationCount != VMA_NULL)
    12623  {
    12624  *pLostAllocationCount = lostAllocationCount;
    12625  }
    12626 }
    12627 
    12628 VkResult VmaBlockVector::CheckCorruption()
    12629 {
    12630  if(!IsCorruptionDetectionEnabled())
    12631  {
    12632  return VK_ERROR_FEATURE_NOT_PRESENT;
    12633  }
    12634 
    12635  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12636  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12637  {
    12638  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12639  VMA_ASSERT(pBlock);
    12640  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    12641  if(res != VK_SUCCESS)
    12642  {
    12643  return res;
    12644  }
    12645  }
    12646  return VK_SUCCESS;
    12647 }
    12648 
    12649 void VmaBlockVector::AddStats(VmaStats* pStats)
    12650 {
    12651  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    12652  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    12653 
    12654  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12655 
    12656  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12657  {
    12658  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12659  VMA_ASSERT(pBlock);
    12660  VMA_HEAVY_ASSERT(pBlock->Validate());
    12661  VmaStatInfo allocationStatInfo;
    12662  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    12663  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12664  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12665  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12666  }
    12667 }
    12668 
    12670 // VmaDefragmentationAlgorithm_Generic members definition
    12671 
    12672 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    12673  VmaAllocator hAllocator,
    12674  VmaBlockVector* pBlockVector,
    12675  uint32_t currentFrameIndex,
    12676  bool overlappingMoveSupported) :
    12677  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12678  m_AllocationCount(0),
    12679  m_AllAllocations(false),
    12680  m_BytesMoved(0),
    12681  m_AllocationsMoved(0),
    12682  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    12683 {
    12684  // Create block info for each block.
    12685  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    12686  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12687  {
    12688  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    12689  pBlockInfo->m_OriginalBlockIndex = blockIndex;
    12690  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    12691  m_Blocks.push_back(pBlockInfo);
    12692  }
    12693 
    12694  // Sort them by m_pBlock pointer value.
    12695  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    12696 }
    12697 
    12698 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
    12699 {
    12700  for(size_t i = m_Blocks.size(); i--; )
    12701  {
    12702  vma_delete(m_hAllocator, m_Blocks[i]);
    12703  }
    12704 }
    12705 
    12706 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    12707 {
12708  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
    12709  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    12710  {
    12711  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
    12712  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    12713  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    12714  {
    12715  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
    12716  (*it)->m_Allocations.push_back(allocInfo);
    12717  }
    12718  else
    12719  {
    12720  VMA_ASSERT(0);
    12721  }
    12722 
    12723  ++m_AllocationCount;
    12724  }
    12725 }
    12726 
    12727 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    12728  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12729  VkDeviceSize maxBytesToMove,
    12730  uint32_t maxAllocationsToMove)
    12731 {
    12732  if(m_Blocks.empty())
    12733  {
    12734  return VK_SUCCESS;
    12735  }
    12736 
    12737  // This is a choice based on research.
    12738  // Option 1:
    12739  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    12740  // Option 2:
    12741  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    12742  // Option 3:
    12743  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
    12744 
    12745  size_t srcBlockMinIndex = 0;
12746  // With FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
    12747  /*
    12748  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    12749  {
    12750  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
    12751  if(blocksWithNonMovableCount > 0)
    12752  {
    12753  srcBlockMinIndex = blocksWithNonMovableCount - 1;
    12754  }
    12755  }
    12756  */
    12757 
    12758  size_t srcBlockIndex = m_Blocks.size() - 1;
    12759  size_t srcAllocIndex = SIZE_MAX;
    12760  for(;;)
    12761  {
    12762  // 1. Find next allocation to move.
    12763  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    12764  // 1.2. Then start from last to first m_Allocations.
    12765  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    12766  {
    12767  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    12768  {
    12769  // Finished: no more allocations to process.
    12770  if(srcBlockIndex == srcBlockMinIndex)
    12771  {
    12772  return VK_SUCCESS;
    12773  }
    12774  else
    12775  {
    12776  --srcBlockIndex;
    12777  srcAllocIndex = SIZE_MAX;
    12778  }
    12779  }
    12780  else
    12781  {
    12782  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    12783  }
    12784  }
    12785 
    12786  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    12787  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    12788 
    12789  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    12790  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    12791  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    12792  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    12793 
12794  // 2. Try to find a new place for this allocation in a preceding or the current block.
    12795  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    12796  {
    12797  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    12798  VmaAllocationRequest dstAllocRequest;
    12799  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
    12800  m_CurrentFrameIndex,
    12801  m_pBlockVector->GetFrameInUseCount(),
    12802  m_pBlockVector->GetBufferImageGranularity(),
    12803  size,
    12804  alignment,
    12805  false, // upperAddress
    12806  suballocType,
    12807  false, // canMakeOtherLost
    12808  strategy,
    12809  &dstAllocRequest) &&
    12810  MoveMakesSense(
    12811  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    12812  {
    12813  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    12814 
    12815  // Reached limit on number of allocations or bytes to move.
    12816  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    12817  (m_BytesMoved + size > maxBytesToMove))
    12818  {
    12819  return VK_SUCCESS;
    12820  }
    12821 
    12822  VmaDefragmentationMove move;
    12823  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
    12824  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
    12825  move.srcOffset = srcOffset;
    12826  move.dstOffset = dstAllocRequest.offset;
    12827  move.size = size;
    12828  moves.push_back(move);
    12829 
    12830  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
    12831  dstAllocRequest,
    12832  suballocType,
    12833  size,
    12834  allocInfo.m_hAllocation);
    12835  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
    12836 
    12837  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    12838 
    12839  if(allocInfo.m_pChanged != VMA_NULL)
    12840  {
    12841  *allocInfo.m_pChanged = VK_TRUE;
    12842  }
    12843 
    12844  ++m_AllocationsMoved;
    12845  m_BytesMoved += size;
    12846 
    12847  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    12848 
    12849  break;
    12850  }
    12851  }
    12852 
    12853  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
    12854 
    12855  if(srcAllocIndex > 0)
    12856  {
    12857  --srcAllocIndex;
    12858  }
    12859  else
    12860  {
    12861  if(srcBlockIndex > 0)
    12862  {
    12863  --srcBlockIndex;
    12864  srcAllocIndex = SIZE_MAX;
    12865  }
    12866  else
    12867  {
    12868  return VK_SUCCESS;
    12869  }
    12870  }
    12871  }
    12872 }
    12873 
    12874 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
    12875 {
    12876  size_t result = 0;
    12877  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12878  {
    12879  if(m_Blocks[i]->m_HasNonMovableAllocations)
    12880  {
    12881  ++result;
    12882  }
    12883  }
    12884  return result;
    12885 }
    12886 
    12887 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    12888  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12889  VkDeviceSize maxBytesToMove,
    12890  uint32_t maxAllocationsToMove)
    12891 {
    12892  if(!m_AllAllocations && m_AllocationCount == 0)
    12893  {
    12894  return VK_SUCCESS;
    12895  }
    12896 
    12897  const size_t blockCount = m_Blocks.size();
    12898  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12899  {
    12900  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    12901 
    12902  if(m_AllAllocations)
    12903  {
    12904  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
    12905  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
    12906  it != pMetadata->m_Suballocations.end();
    12907  ++it)
    12908  {
    12909  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    12910  {
    12911  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
    12912  pBlockInfo->m_Allocations.push_back(allocInfo);
    12913  }
    12914  }
    12915  }
    12916 
    12917  pBlockInfo->CalcHasNonMovableAllocations();
    12918 
    12919  // This is a choice based on research.
    12920  // Option 1:
    12921  pBlockInfo->SortAllocationsByOffsetDescending();
    12922  // Option 2:
    12923  //pBlockInfo->SortAllocationsBySizeDescending();
    12924  }
    12925 
12926  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    12927  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    12928 
    12929  // This is a choice based on research.
    12930  const uint32_t roundCount = 2;
    12931 
    12932  // Execute defragmentation rounds (the main part).
    12933  VkResult result = VK_SUCCESS;
    12934  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    12935  {
    12936  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    12937  }
    12938 
    12939  return result;
    12940 }
    12941 
    12942 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
    12943  size_t dstBlockIndex, VkDeviceSize dstOffset,
    12944  size_t srcBlockIndex, VkDeviceSize srcOffset)
    12945 {
    12946  if(dstBlockIndex < srcBlockIndex)
    12947  {
    12948  return true;
    12949  }
    12950  if(dstBlockIndex > srcBlockIndex)
    12951  {
    12952  return false;
    12953  }
    12954  if(dstOffset < srcOffset)
    12955  {
    12956  return true;
    12957  }
    12958  return false;
    12959 }
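// In effect this is a lexicographic "<" on (blockIndex, offset): e.g. moving from
// block 2, offset 0 to block 1, offset 4096 makes sense, while any move toward a
// later block, or a higher offset within the same block, does not.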
    12960 
    12962 // VmaDefragmentationAlgorithm_Fast
    12963 
    12964 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    12965  VmaAllocator hAllocator,
    12966  VmaBlockVector* pBlockVector,
    12967  uint32_t currentFrameIndex,
    12968  bool overlappingMoveSupported) :
    12969  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12970  m_OverlappingMoveSupported(overlappingMoveSupported),
    12971  m_AllocationCount(0),
    12972  m_AllAllocations(false),
    12973  m_BytesMoved(0),
    12974  m_AllocationsMoved(0),
    12975  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
    12976 {
    12977  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
    12978 
    12979 }
    12980 
    12981 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
    12982 {
    12983 }
    12984 
    12985 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    12986  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12987  VkDeviceSize maxBytesToMove,
    12988  uint32_t maxAllocationsToMove)
    12989 {
    12990  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
    12991 
    12992  const size_t blockCount = m_pBlockVector->GetBlockCount();
    12993  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    12994  {
    12995  return VK_SUCCESS;
    12996  }
    12997 
    12998  PreprocessMetadata();
    12999 
13000  // Sort blocks in order from most "destination" to most "source".
    13001 
    13002  m_BlockInfos.resize(blockCount);
    13003  for(size_t i = 0; i < blockCount; ++i)
    13004  {
    13005  m_BlockInfos[i].origBlockIndex = i;
    13006  }
    13007 
    13008  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
    13009  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
    13010  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    13011  });
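// Illustrative example: blocks with free sizes [256 KiB, 0, 64 KiB] sort to the order
// [1, 2, 0] - the fullest blocks come first and act as move destinations, while the
// emptiest come last and are drained as sources.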
    13012 
    13013  // THE MAIN ALGORITHM
    13014 
    13015  FreeSpaceDatabase freeSpaceDb;
    13016 
    13017  size_t dstBlockInfoIndex = 0;
    13018  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    13019  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    13020  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    13021  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    13022  VkDeviceSize dstOffset = 0;
    13023 
    13024  bool end = false;
    13025  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    13026  {
    13027  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
    13028  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
    13029  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
    13030  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
    13031  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
    13032  {
    13033  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
    13034  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
    13035  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
    13036  if(m_AllocationsMoved == maxAllocationsToMove ||
    13037  m_BytesMoved + srcAllocSize > maxBytesToMove)
    13038  {
    13039  end = true;
    13040  break;
    13041  }
    13042  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
    13043 
13044  // Try to place it in one of the free spaces from the database.
    13045  size_t freeSpaceInfoIndex;
    13046  VkDeviceSize dstAllocOffset;
    13047  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
    13048  freeSpaceInfoIndex, dstAllocOffset))
    13049  {
    13050  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
    13051  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
    13052  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
    13053 
    13054  // Same block
    13055  if(freeSpaceInfoIndex == srcBlockInfoIndex)
    13056  {
    13057  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    13058 
    13059  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    13060 
    13061  VmaSuballocation suballoc = *srcSuballocIt;
    13062  suballoc.offset = dstAllocOffset;
    13063  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
    13064  m_BytesMoved += srcAllocSize;
    13065  ++m_AllocationsMoved;
    13066 
    13067  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13068  ++nextSuballocIt;
    13069  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13070  srcSuballocIt = nextSuballocIt;
    13071 
    13072  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    13073 
    13074  VmaDefragmentationMove move = {
    13075  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    13076  srcAllocOffset, dstAllocOffset,
    13077  srcAllocSize };
    13078  moves.push_back(move);
    13079  }
    13080  // Different block
    13081  else
    13082  {
    13083  // MOVE OPTION 2: Move the allocation to a different block.
    13084 
    13085  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
    13086 
    13087  VmaSuballocation suballoc = *srcSuballocIt;
    13088  suballoc.offset = dstAllocOffset;
    13089  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
    13090  m_BytesMoved += srcAllocSize;
    13091  ++m_AllocationsMoved;
    13092 
    13093  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13094  ++nextSuballocIt;
    13095  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13096  srcSuballocIt = nextSuballocIt;
    13097 
    13098  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    13099 
    13100  VmaDefragmentationMove move = {
    13101  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    13102  srcAllocOffset, dstAllocOffset,
    13103  srcAllocSize };
    13104  moves.push_back(move);
    13105  }
    13106  }
    13107  else
    13108  {
    13109  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
    13110 
13111  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
    13112  while(dstBlockInfoIndex < srcBlockInfoIndex &&
    13113  dstAllocOffset + srcAllocSize > dstBlockSize)
    13114  {
    13115  // But before that, register remaining free space at the end of dst block.
    13116  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
    13117 
    13118  ++dstBlockInfoIndex;
    13119  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    13120  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    13121  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    13122  dstBlockSize = pDstMetadata->GetSize();
    13123  dstOffset = 0;
    13124  dstAllocOffset = 0;
    13125  }
    13126 
    13127  // Same block
    13128  if(dstBlockInfoIndex == srcBlockInfoIndex)
    13129  {
    13130  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    13131 
    13132  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
    13133 
    13134  bool skipOver = overlap;
    13135  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
    13136  {
13137  // If the destination and source places overlap, skip the move if it would
13138  // shift the allocation by less than 1/64 of its size.
    13139  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
    13140  }
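// E.g., with assumed numbers: a 6400-byte allocation is moved left only if that
// gains at least 6400 / 64 = 100 bytes; a smaller gain is not worth the memmove,
// so the allocation stays and the gap before it is registered as free space.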
    13141 
    13142  if(skipOver)
    13143  {
    13144  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
    13145 
    13146  dstOffset = srcAllocOffset + srcAllocSize;
    13147  ++srcSuballocIt;
    13148  }
    13149  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    13150  else
    13151  {
    13152  srcSuballocIt->offset = dstAllocOffset;
    13153  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
    13154  dstOffset = dstAllocOffset + srcAllocSize;
    13155  m_BytesMoved += srcAllocSize;
    13156  ++m_AllocationsMoved;
    13157  ++srcSuballocIt;
    13158  VmaDefragmentationMove move = {
    13159  srcOrigBlockIndex, dstOrigBlockIndex,
    13160  srcAllocOffset, dstAllocOffset,
    13161  srcAllocSize };
    13162  moves.push_back(move);
    13163  }
    13164  }
    13165  // Different block
    13166  else
    13167  {
    13168  // MOVE OPTION 2: Move the allocation to a different block.
    13169 
    13170  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
    13171  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
    13172 
    13173  VmaSuballocation suballoc = *srcSuballocIt;
    13174  suballoc.offset = dstAllocOffset;
    13175  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
    13176  dstOffset = dstAllocOffset + srcAllocSize;
    13177  m_BytesMoved += srcAllocSize;
    13178  ++m_AllocationsMoved;
    13179 
    13180  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13181  ++nextSuballocIt;
    13182  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13183  srcSuballocIt = nextSuballocIt;
    13184 
    13185  pDstMetadata->m_Suballocations.push_back(suballoc);
    13186 
    13187  VmaDefragmentationMove move = {
    13188  srcOrigBlockIndex, dstOrigBlockIndex,
    13189  srcAllocOffset, dstAllocOffset,
    13190  srcAllocSize };
    13191  moves.push_back(move);
    13192  }
    13193  }
    13194  }
    13195  }
    13196 
    13197  m_BlockInfos.clear();
    13198 
    13199  PostprocessMetadata();
    13200 
    13201  return VK_SUCCESS;
    13202 }
    13203 
    13204 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
    13205 {
    13206  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13207  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13208  {
    13209  VmaBlockMetadata_Generic* const pMetadata =
    13210  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13211  pMetadata->m_FreeCount = 0;
    13212  pMetadata->m_SumFreeSize = pMetadata->GetSize();
    13213  pMetadata->m_FreeSuballocationsBySize.clear();
    13214  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13215  it != pMetadata->m_Suballocations.end(); )
    13216  {
    13217  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
    13218  {
    13219  VmaSuballocationList::iterator nextIt = it;
    13220  ++nextIt;
    13221  pMetadata->m_Suballocations.erase(it);
    13222  it = nextIt;
    13223  }
    13224  else
    13225  {
    13226  ++it;
    13227  }
    13228  }
    13229  }
    13230 }
    13231 
    13232 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
    13233 {
    13234  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13235  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13236  {
    13237  VmaBlockMetadata_Generic* const pMetadata =
    13238  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13239  const VkDeviceSize blockSize = pMetadata->GetSize();
    13240 
    13241  // No allocations in this block - entire area is free.
    13242  if(pMetadata->m_Suballocations.empty())
    13243  {
    13244  pMetadata->m_FreeCount = 1;
    13245  //pMetadata->m_SumFreeSize is already set to blockSize.
    13246  VmaSuballocation suballoc = {
    13247  0, // offset
    13248  blockSize, // size
    13249  VMA_NULL, // hAllocation
    13250  VMA_SUBALLOCATION_TYPE_FREE };
    13251  pMetadata->m_Suballocations.push_back(suballoc);
    13252  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
    13253  }
    13254  // There are some allocations in this block.
    13255  else
    13256  {
    13257  VkDeviceSize offset = 0;
    13258  VmaSuballocationList::iterator it;
    13259  for(it = pMetadata->m_Suballocations.begin();
    13260  it != pMetadata->m_Suballocations.end();
    13261  ++it)
    13262  {
    13263  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
    13264  VMA_ASSERT(it->offset >= offset);
    13265 
    13266  // Need to insert preceding free space.
    13267  if(it->offset > offset)
    13268  {
    13269  ++pMetadata->m_FreeCount;
    13270  const VkDeviceSize freeSize = it->offset - offset;
    13271  VmaSuballocation suballoc = {
    13272  offset, // offset
    13273  freeSize, // size
    13274  VMA_NULL, // hAllocation
    13275  VMA_SUBALLOCATION_TYPE_FREE };
    13276  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13277  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13278  {
    13279  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
    13280  }
    13281  }
    13282 
    13283  pMetadata->m_SumFreeSize -= it->size;
    13284  offset = it->offset + it->size;
    13285  }
    13286 
    13287  // Need to insert trailing free space.
    13288  if(offset < blockSize)
    13289  {
    13290  ++pMetadata->m_FreeCount;
    13291  const VkDeviceSize freeSize = blockSize - offset;
    13292  VmaSuballocation suballoc = {
    13293  offset, // offset
    13294  freeSize, // size
    13295  VMA_NULL, // hAllocation
    13296  VMA_SUBALLOCATION_TYPE_FREE };
    13297  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
    13298  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13299  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13300  {
    13301  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
    13302  }
    13303  }
    13304 
    13305  VMA_SORT(
    13306  pMetadata->m_FreeSuballocationsBySize.begin(),
    13307  pMetadata->m_FreeSuballocationsBySize.end(),
    13308  VmaSuballocationItemSizeLess());
    13309  }
    13310 
    13311  VMA_HEAVY_ASSERT(pMetadata->Validate());
    13312  }
    13313 }
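// Worked example with assumed values: a 1024-byte block holding allocations at
// [0, 128) and [512, 640) gets free suballocations [128, 512) and [640, 1024)
// re-inserted, ending with m_FreeCount = 2 and m_SumFreeSize = 768.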
    13314 
    13315 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
    13316 {
    13317  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
    13318  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13319  // Advance past all suballocations that start before the new one,
13320  // so the new suballocation is inserted in offset order.
13321  while(it != pMetadata->m_Suballocations.end() &&
13322  it->offset < suballoc.offset)
13323  {
13324  ++it;
13325  }
    13326  pMetadata->m_Suballocations.insert(it, suballoc);
    13327 }
    13328 
    13330 // VmaBlockVectorDefragmentationContext
    13331 
    13332 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    13333  VmaAllocator hAllocator,
    13334  VmaPool hCustomPool,
    13335  VmaBlockVector* pBlockVector,
    13336  uint32_t currFrameIndex) :
    13337  res(VK_SUCCESS),
    13338  mutexLocked(false),
    13339  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    13340  m_hAllocator(hAllocator),
    13341  m_hCustomPool(hCustomPool),
    13342  m_pBlockVector(pBlockVector),
    13343  m_CurrFrameIndex(currFrameIndex),
    13344  m_pAlgorithm(VMA_NULL),
    13345  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    13346  m_AllAllocations(false)
    13347 {
    13348 }
    13349 
    13350 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
    13351 {
    13352  vma_delete(m_hAllocator, m_pAlgorithm);
    13353 }
    13354 
    13355 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    13356 {
    13357  AllocInfo info = { hAlloc, pChanged };
    13358  m_Allocations.push_back(info);
    13359 }
    13360 
    13361 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
    13362 {
    13363  const bool allAllocations = m_AllAllocations ||
    13364  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
    13365 
    13366  /********************************
    13367  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
    13368  ********************************/
    13369 
    13370  /*
13371  The fast algorithm is supported only when certain criteria are met:
13372  - VMA_DEBUG_MARGIN is 0.
13373  - All allocations in this block vector are movable.
    13374  - There is no possibility of image/buffer granularity conflict.
    13375  */
    13376  if(VMA_DEBUG_MARGIN == 0 &&
    13377  allAllocations &&
    13378  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
    13379  {
    13380  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
    13381  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13382  }
    13383  else
    13384  {
    13385  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
    13386  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13387  }
    13388 
    13389  if(allAllocations)
    13390  {
    13391  m_pAlgorithm->AddAll();
    13392  }
    13393  else
    13394  {
    13395  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
    13396  {
    13397  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
    13398  }
    13399  }
    13400 }
    13401 
    13403 // VmaDefragmentationContext
    13404 
    13405 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    13406  VmaAllocator hAllocator,
    13407  uint32_t currFrameIndex,
    13408  uint32_t flags,
    13409  VmaDefragmentationStats* pStats) :
    13410  m_hAllocator(hAllocator),
    13411  m_CurrFrameIndex(currFrameIndex),
    13412  m_Flags(flags),
    13413  m_pStats(pStats),
    13414  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
    13415 {
    13416  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
    13417 }
    13418 
    13419 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
    13420 {
    13421  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13422  {
    13423  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
    13424  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13425  vma_delete(m_hAllocator, pBlockVectorCtx);
    13426  }
    13427  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
    13428  {
    13429  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
    13430  if(pBlockVectorCtx)
    13431  {
    13432  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13433  vma_delete(m_hAllocator, pBlockVectorCtx);
    13434  }
    13435  }
    13436 }
    13437 
    13438 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
    13439 {
    13440  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    13441  {
    13442  VmaPool pool = pPools[poolIndex];
    13443  VMA_ASSERT(pool);
13444  // Pools with an algorithm other than the default are not defragmented.
    13445  if(pool->m_BlockVector.GetAlgorithm() == 0)
    13446  {
    13447  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13448 
    13449  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13450  {
    13451  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
    13452  {
    13453  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13454  break;
    13455  }
    13456  }
    13457 
    13458  if(!pBlockVectorDefragCtx)
    13459  {
    13460  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13461  m_hAllocator,
    13462  pool,
    13463  &pool->m_BlockVector,
    13464  m_CurrFrameIndex);
    13465  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13466  }
    13467 
    13468  pBlockVectorDefragCtx->AddAll();
    13469  }
    13470  }
    13471 }
    13472 
    13473 void VmaDefragmentationContext_T::AddAllocations(
    13474  uint32_t allocationCount,
    13475  VmaAllocation* pAllocations,
    13476  VkBool32* pAllocationsChanged)
    13477 {
    13478  // Dispatch pAllocations among defragmentation contexts. Create them when necessary.
    13479  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    13480  {
    13481  const VmaAllocation hAlloc = pAllocations[allocIndex];
    13482  VMA_ASSERT(hAlloc);
    13483  // DedicatedAlloc cannot be defragmented.
    13484  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    13485  // Lost allocation cannot be defragmented.
    13486  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    13487  {
    13488  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13489 
    13490  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
    13491  // This allocation belongs to custom pool.
    13492  if(hAllocPool != VK_NULL_HANDLE)
    13493  {
    13494  // Pools with algorithm other than default are not defragmented.
    13495  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    13496  {
    13497  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13498  {
    13499  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
    13500  {
    13501  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13502  break;
    13503  }
    13504  }
    13505  if(!pBlockVectorDefragCtx)
    13506  {
    13507  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13508  m_hAllocator,
    13509  hAllocPool,
    13510  &hAllocPool->m_BlockVector,
    13511  m_CurrFrameIndex);
    13512  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13513  }
    13514  }
    13515  }
    13516  // This allocation belongs to default pool.
    13517  else
    13518  {
    13519  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    13520  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
    13521  if(!pBlockVectorDefragCtx)
    13522  {
    13523  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13524  m_hAllocator,
    13525  VMA_NULL, // hCustomPool
    13526  m_hAllocator->m_pBlockVectors[memTypeIndex],
    13527  m_CurrFrameIndex);
    13528  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
    13529  }
    13530  }
    13531 
    13532  if(pBlockVectorDefragCtx)
    13533  {
    13534  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    13535  &pAllocationsChanged[allocIndex] : VMA_NULL;
    13536  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
    13537  }
    13538  }
    13539  }
    13540 }
    13541 
    13542 VkResult VmaDefragmentationContext_T::Defragment(
    13543  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    13544  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    13545  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
    13546 {
    13547  if(pStats)
    13548  {
    13549  memset(pStats, 0, sizeof(VmaDefragmentationStats));
    13550  }
    13551 
    13552  if(commandBuffer == VK_NULL_HANDLE)
    13553  {
    13554  maxGpuBytesToMove = 0;
    13555  maxGpuAllocationsToMove = 0;
    13556  }
    13557 
    13558  VkResult res = VK_SUCCESS;
    13559 
    13560  // Process default pools.
    13561  for(uint32_t memTypeIndex = 0;
    13562  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
    13563  ++memTypeIndex)
    13564  {
    13565  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
    13566  if(pBlockVectorCtx)
    13567  {
    13568  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
    13569  pBlockVectorCtx->GetBlockVector()->Defragment(
    13570  pBlockVectorCtx,
    13571  pStats,
    13572  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13573  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13574  commandBuffer);
    13575  if(pBlockVectorCtx->res != VK_SUCCESS)
    13576  {
    13577  res = pBlockVectorCtx->res;
    13578  }
    13579  }
    13580  }
    13581 
    13582  // Process custom pools.
    13583  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
    13584  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
    13585  ++customCtxIndex)
    13586  {
    13587  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
    13588  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
    13589  pBlockVectorCtx->GetBlockVector()->Defragment(
    13590  pBlockVectorCtx,
    13591  pStats,
    13592  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13593  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13594  commandBuffer);
    13595  if(pBlockVectorCtx->res != VK_SUCCESS)
    13596  {
    13597  res = pBlockVectorCtx->res;
    13598  }
    13599  }
    13600 
    13601  return res;
    13602 }
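// [Editor's sketch, not part of the library] The context above is normally
// driven through the public API. A minimal CPU-only pass, assuming the
// standard entry points vmaDefragmentationBegin/vmaDefragmentationEnd and
// caller-owned `allocator`, `allocs`, `count` (hypothetical names):
/*
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = count;
defragInfo.pAllocations = allocs;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;    // no byte limit
defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // no count limit
defragInfo.commandBuffer = VK_NULL_HANDLE;       // null => GPU limits zeroed above

VmaDefragmentationStats stats = {};
VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
// ... destroy and recreate buffers/images bound to moved allocations ...
vmaDefragmentationEnd(allocator, defragCtx);
*/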
    13603 
    13604 ////////////////////////////////////////////////////////////////////////////////
    13605 // VmaRecorder
    13606 
    13607 #if VMA_RECORDING_ENABLED
    13608 
    13609 VmaRecorder::VmaRecorder() :
    13610  m_UseMutex(true),
    13611  m_Flags(0),
    13612  m_File(VMA_NULL),
    13613  m_Freq(INT64_MAX),
    13614  m_StartCounter(INT64_MAX)
    13615 {
    13616 }
    13617 
    13618 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    13619 {
    13620  m_UseMutex = useMutex;
    13621  m_Flags = settings.flags;
    13622 
    13623  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    13624  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    13625 
    13626  // Open file for writing.
    13627  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    13628  if(err != 0)
    13629  {
    13630  return VK_ERROR_INITIALIZATION_FAILED;
    13631  }
    13632 
    13633  // Write header.
    13634  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    13635  fprintf(m_File, "%s\n", "1,5");
    13636 
    13637  return VK_SUCCESS;
    13638 }
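// [Editor's note] The file written above is plain CSV: the two header lines
// (format name, then version "1,5"), followed by one line per recorded call in
// the shape threadId,seconds,frameIndex,functionName,params... A hypothetical
// fragment, with made-up thread id and timestamps, could look like:
//
//   Vulkan Memory Allocator,Calls recording
//   1,5
//   12552,0.002,0,vmaCreateAllocator
//   12552,0.381,3,vmaCreateBuffer,0,65536,130,0,...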
    13639 
    13640 VmaRecorder::~VmaRecorder()
    13641 {
    13642  if(m_File != VMA_NULL)
    13643  {
    13644  fclose(m_File);
    13645  }
    13646 }
    13647 
    13648 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    13649 {
    13650  CallParams callParams;
    13651  GetBasicParams(callParams);
    13652 
    13653  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13654  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13655  Flush();
    13656 }
    13657 
    13658 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    13659 {
    13660  CallParams callParams;
    13661  GetBasicParams(callParams);
    13662 
    13663  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13664  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13665  Flush();
    13666 }
    13667 
    13668 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    13669 {
    13670  CallParams callParams;
    13671  GetBasicParams(callParams);
    13672 
    13673  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13674  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    13675  createInfo.memoryTypeIndex,
    13676  createInfo.flags,
    13677  createInfo.blockSize,
    13678  (uint64_t)createInfo.minBlockCount,
    13679  (uint64_t)createInfo.maxBlockCount,
    13680  createInfo.frameInUseCount,
    13681  pool);
    13682  Flush();
    13683 }
    13684 
    13685 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    13686 {
    13687  CallParams callParams;
    13688  GetBasicParams(callParams);
    13689 
    13690  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13691  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    13692  pool);
    13693  Flush();
    13694 }
    13695 
    13696 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    13697  const VkMemoryRequirements& vkMemReq,
    13698  const VmaAllocationCreateInfo& createInfo,
    13699  VmaAllocation allocation)
    13700 {
    13701  CallParams callParams;
    13702  GetBasicParams(callParams);
    13703 
    13704  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13705  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13706  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13707  vkMemReq.size,
    13708  vkMemReq.alignment,
    13709  vkMemReq.memoryTypeBits,
    13710  createInfo.flags,
    13711  createInfo.usage,
    13712  createInfo.requiredFlags,
    13713  createInfo.preferredFlags,
    13714  createInfo.memoryTypeBits,
    13715  createInfo.pool,
    13716  allocation,
    13717  userDataStr.GetString());
    13718  Flush();
    13719 }
    13720 
    13721 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    13722  const VkMemoryRequirements& vkMemReq,
    13723  const VmaAllocationCreateInfo& createInfo,
    13724  uint64_t allocationCount,
    13725  const VmaAllocation* pAllocations)
    13726 {
    13727  CallParams callParams;
    13728  GetBasicParams(callParams);
    13729 
    13730  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13731  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13732  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
    13733  vkMemReq.size,
    13734  vkMemReq.alignment,
    13735  vkMemReq.memoryTypeBits,
    13736  createInfo.flags,
    13737  createInfo.usage,
    13738  createInfo.requiredFlags,
    13739  createInfo.preferredFlags,
    13740  createInfo.memoryTypeBits,
    13741  createInfo.pool);
    13742  PrintPointerList(allocationCount, pAllocations);
    13743  fprintf(m_File, ",%s\n", userDataStr.GetString());
    13744  Flush();
    13745 }
    13746 
    13747 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    13748  const VkMemoryRequirements& vkMemReq,
    13749  bool requiresDedicatedAllocation,
    13750  bool prefersDedicatedAllocation,
    13751  const VmaAllocationCreateInfo& createInfo,
    13752  VmaAllocation allocation)
    13753 {
    13754  CallParams callParams;
    13755  GetBasicParams(callParams);
    13756 
    13757  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13758  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13759  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13760  vkMemReq.size,
    13761  vkMemReq.alignment,
    13762  vkMemReq.memoryTypeBits,
    13763  requiresDedicatedAllocation ? 1 : 0,
    13764  prefersDedicatedAllocation ? 1 : 0,
    13765  createInfo.flags,
    13766  createInfo.usage,
    13767  createInfo.requiredFlags,
    13768  createInfo.preferredFlags,
    13769  createInfo.memoryTypeBits,
    13770  createInfo.pool,
    13771  allocation,
    13772  userDataStr.GetString());
    13773  Flush();
    13774 }
    13775 
    13776 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    13777  const VkMemoryRequirements& vkMemReq,
    13778  bool requiresDedicatedAllocation,
    13779  bool prefersDedicatedAllocation,
    13780  const VmaAllocationCreateInfo& createInfo,
    13781  VmaAllocation allocation)
    13782 {
    13783  CallParams callParams;
    13784  GetBasicParams(callParams);
    13785 
    13786  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13787  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13788  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13789  vkMemReq.size,
    13790  vkMemReq.alignment,
    13791  vkMemReq.memoryTypeBits,
    13792  requiresDedicatedAllocation ? 1 : 0,
    13793  prefersDedicatedAllocation ? 1 : 0,
    13794  createInfo.flags,
    13795  createInfo.usage,
    13796  createInfo.requiredFlags,
    13797  createInfo.preferredFlags,
    13798  createInfo.memoryTypeBits,
    13799  createInfo.pool,
    13800  allocation,
    13801  userDataStr.GetString());
    13802  Flush();
    13803 }
    13804 
    13805 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    13806  VmaAllocation allocation)
    13807 {
    13808  CallParams callParams;
    13809  GetBasicParams(callParams);
    13810 
    13811  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13812  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13813  allocation);
    13814  Flush();
    13815 }
    13816 
    13817 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
    13818  uint64_t allocationCount,
    13819  const VmaAllocation* pAllocations)
    13820 {
    13821  CallParams callParams;
    13822  GetBasicParams(callParams);
    13823 
    13824  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13825  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
    13826  PrintPointerList(allocationCount, pAllocations);
    13827  fprintf(m_File, "\n");
    13828  Flush();
    13829 }
    13830 
    13831 void VmaRecorder::RecordResizeAllocation(
    13832  uint32_t frameIndex,
    13833  VmaAllocation allocation,
    13834  VkDeviceSize newSize)
    13835 {
    13836  CallParams callParams;
    13837  GetBasicParams(callParams);
    13838 
    13839  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13840  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13841  allocation, newSize);
    13842  Flush();
    13843 }
    13844 
    13845 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    13846  VmaAllocation allocation,
    13847  const void* pUserData)
    13848 {
    13849  CallParams callParams;
    13850  GetBasicParams(callParams);
    13851 
    13852  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13853  UserDataString userDataStr(
    13854  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    13855  pUserData);
    13856  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13857  allocation,
    13858  userDataStr.GetString());
    13859  Flush();
    13860 }
    13861 
    13862 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    13863  VmaAllocation allocation)
    13864 {
    13865  CallParams callParams;
    13866  GetBasicParams(callParams);
    13867 
    13868  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13869  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13870  allocation);
    13871  Flush();
    13872 }
    13873 
    13874 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    13875  VmaAllocation allocation)
    13876 {
    13877  CallParams callParams;
    13878  GetBasicParams(callParams);
    13879 
    13880  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13881  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13882  allocation);
    13883  Flush();
    13884 }
    13885 
    13886 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    13887  VmaAllocation allocation)
    13888 {
    13889  CallParams callParams;
    13890  GetBasicParams(callParams);
    13891 
    13892  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13893  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13894  allocation);
    13895  Flush();
    13896 }
    13897 
    13898 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    13899  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13900 {
    13901  CallParams callParams;
    13902  GetBasicParams(callParams);
    13903 
    13904  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13905  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13906  allocation,
    13907  offset,
    13908  size);
    13909  Flush();
    13910 }
    13911 
    13912 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    13913  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13914 {
    13915  CallParams callParams;
    13916  GetBasicParams(callParams);
    13917 
    13918  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13919  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13920  allocation,
    13921  offset,
    13922  size);
    13923  Flush();
    13924 }
    13925 
    13926 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    13927  const VkBufferCreateInfo& bufCreateInfo,
    13928  const VmaAllocationCreateInfo& allocCreateInfo,
    13929  VmaAllocation allocation)
    13930 {
    13931  CallParams callParams;
    13932  GetBasicParams(callParams);
    13933 
    13934  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13935  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13936  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13937  bufCreateInfo.flags,
    13938  bufCreateInfo.size,
    13939  bufCreateInfo.usage,
    13940  bufCreateInfo.sharingMode,
    13941  allocCreateInfo.flags,
    13942  allocCreateInfo.usage,
    13943  allocCreateInfo.requiredFlags,
    13944  allocCreateInfo.preferredFlags,
    13945  allocCreateInfo.memoryTypeBits,
    13946  allocCreateInfo.pool,
    13947  allocation,
    13948  userDataStr.GetString());
    13949  Flush();
    13950 }
    13951 
    13952 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    13953  const VkImageCreateInfo& imageCreateInfo,
    13954  const VmaAllocationCreateInfo& allocCreateInfo,
    13955  VmaAllocation allocation)
    13956 {
    13957  CallParams callParams;
    13958  GetBasicParams(callParams);
    13959 
    13960  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13961  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13962  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13963  imageCreateInfo.flags,
    13964  imageCreateInfo.imageType,
    13965  imageCreateInfo.format,
    13966  imageCreateInfo.extent.width,
    13967  imageCreateInfo.extent.height,
    13968  imageCreateInfo.extent.depth,
    13969  imageCreateInfo.mipLevels,
    13970  imageCreateInfo.arrayLayers,
    13971  imageCreateInfo.samples,
    13972  imageCreateInfo.tiling,
    13973  imageCreateInfo.usage,
    13974  imageCreateInfo.sharingMode,
    13975  imageCreateInfo.initialLayout,
    13976  allocCreateInfo.flags,
    13977  allocCreateInfo.usage,
    13978  allocCreateInfo.requiredFlags,
    13979  allocCreateInfo.preferredFlags,
    13980  allocCreateInfo.memoryTypeBits,
    13981  allocCreateInfo.pool,
    13982  allocation,
    13983  userDataStr.GetString());
    13984  Flush();
    13985 }
    13986 
    13987 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    13988  VmaAllocation allocation)
    13989 {
    13990  CallParams callParams;
    13991  GetBasicParams(callParams);
    13992 
    13993  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13994  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    13995  allocation);
    13996  Flush();
    13997 }
    13998 
    13999 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    14000  VmaAllocation allocation)
    14001 {
    14002  CallParams callParams;
    14003  GetBasicParams(callParams);
    14004 
    14005  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14006  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    14007  allocation);
    14008  Flush();
    14009 }
    14010 
    14011 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    14012  VmaAllocation allocation)
    14013 {
    14014  CallParams callParams;
    14015  GetBasicParams(callParams);
    14016 
    14017  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14018  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    14019  allocation);
    14020  Flush();
    14021 }
    14022 
    14023 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    14024  VmaAllocation allocation)
    14025 {
    14026  CallParams callParams;
    14027  GetBasicParams(callParams);
    14028 
    14029  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14030  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    14031  allocation);
    14032  Flush();
    14033 }
    14034 
    14035 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    14036  VmaPool pool)
    14037 {
    14038  CallParams callParams;
    14039  GetBasicParams(callParams);
    14040 
    14041  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14042  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    14043  pool);
    14044  Flush();
    14045 }
    14046 
    14047 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
    14048  const VmaDefragmentationInfo2& info,
    14049  VmaDefragmentationContext ctx)
    14050 {
    14051  CallParams callParams;
    14052  GetBasicParams(callParams);
    14053 
    14054  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14055  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
    14056  info.flags);
    14057  PrintPointerList(info.allocationCount, info.pAllocations);
    14058  fprintf(m_File, ",");
    14059  PrintPointerList(info.poolCount, info.pPools);
    14060  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
    14061  info.maxCpuBytesToMove,
    14062  info.maxCpuAllocationsToMove,
    14063  info.maxGpuBytesToMove,
    14064  info.maxGpuAllocationsToMove,
    14065  info.commandBuffer,
    14066  ctx);
    14067  Flush();
    14068 }
    14069 
    14070 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
    14071  VmaDefragmentationContext ctx)
    14072 {
    14073  CallParams callParams;
    14074  GetBasicParams(callParams);
    14075 
    14076  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14077  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
    14078  ctx);
    14079  Flush();
    14080 }
    14081 
    14082 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    14083 {
    14084  if(pUserData != VMA_NULL)
    14085  {
    14086  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    14087  {
    14088  m_Str = (const char*)pUserData;
    14089  }
    14090  else
    14091  {
    14092  sprintf_s(m_PtrStr, "%p", pUserData);
    14093  m_Str = m_PtrStr;
    14094  }
    14095  }
    14096  else
    14097  {
    14098  m_Str = "";
    14099  }
    14100 }
    14101 
    14102 void VmaRecorder::WriteConfiguration(
    14103  const VkPhysicalDeviceProperties& devProps,
    14104  const VkPhysicalDeviceMemoryProperties& memProps,
    14105  bool dedicatedAllocationExtensionEnabled)
    14106 {
    14107  fprintf(m_File, "Config,Begin\n");
    14108 
    14109  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    14110  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    14111  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    14112  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    14113  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    14114  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
    14115 
    14116  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    14117  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    14118  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
    14119 
    14120  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    14121  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    14122  {
    14123  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
    14124  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    14125  }
    14126  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    14127  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    14128  {
    14129  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
    14130  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    14131  }
    14132 
    14133  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
    14134 
    14135  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    14136  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    14137  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    14138  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    14139  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    14140  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    14141  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    14142  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    14143  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    14144 
    14145  fprintf(m_File, "Config,End\n");
    14146 }
    14147 
    14148 void VmaRecorder::GetBasicParams(CallParams& outParams)
    14149 {
    14150  outParams.threadId = GetCurrentThreadId();
    14151 
    14152  LARGE_INTEGER counter;
    14153  QueryPerformanceCounter(&counter);
    14154  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    14155 }
    14156 
    14157 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
    14158 {
    14159  if(count)
    14160  {
    14161  fprintf(m_File, "%p", pItems[0]);
    14162  for(uint64_t i = 1; i < count; ++i)
    14163  {
    14164  fprintf(m_File, " %p", pItems[i]);
    14165  }
    14166  }
    14167 }
    14168 
    14169 void VmaRecorder::Flush()
    14170 {
    14171  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    14172  {
    14173  fflush(m_File);
    14174  }
    14175 }
    14176 
    14177 #endif // #if VMA_RECORDING_ENABLED
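// [Editor's sketch, not part of the library] Recording is enabled per allocator
// via VmaAllocatorCreateInfo::pRecordSettings and requires compiling with
// VMA_RECORDING_ENABLED defined to 1 (the fopen_s/QueryPerformanceCounter usage
// above makes it Windows-only). The file path below is hypothetical:
/*
VmaRecordSettings recordSettings = {};
recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // fflush() after every call
recordSettings.pFilePath = "vma_recording.csv";

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pRecordSettings = &recordSettings;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
*/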
    14178 
    14179 ////////////////////////////////////////////////////////////////////////////////
    14180 // VmaAllocationObjectAllocator
    14181 
    14182 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
    14183  m_Allocator(pAllocationCallbacks, 1024)
    14184 {
    14185 }
    14186 
    14187 VmaAllocation VmaAllocationObjectAllocator::Allocate()
    14188 {
    14189  VmaMutexLock mutexLock(m_Mutex);
    14190  return m_Allocator.Alloc();
    14191 }
    14192 
    14193 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
    14194 {
    14195  VmaMutexLock mutexLock(m_Mutex);
    14196  m_Allocator.Free(hAlloc);
    14197 }
    14198 
    14199 ////////////////////////////////////////////////////////////////////////////////
    14200 // VmaAllocator_T
    14201 
    14202 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    14203  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    14204  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    14205  m_hDevice(pCreateInfo->device),
    14206  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    14207  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    14208  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    14209  m_AllocationObjectAllocator(&m_AllocationCallbacks),
    14210  m_PreferredLargeHeapBlockSize(0),
    14211  m_PhysicalDevice(pCreateInfo->physicalDevice),
    14212  m_CurrentFrameIndex(0),
    14213  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
    14214  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    14215  m_NextPoolId(0)
    14216 #if VMA_RECORDING_ENABLED
    14217  ,m_pRecorder(VMA_NULL)
    14218 #endif
    14219 {
    14220  if(VMA_DEBUG_DETECT_CORRUPTION)
    14221  {
    14222  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    14223  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    14224  }
    14225 
    14226  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    14227 
    14228 #if !(VMA_DEDICATED_ALLOCATION)
    14229  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
    14230  {
    14231  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    14232  }
    14233 #endif
    14234 
    14235  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    14236  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    14237  memset(&m_MemProps, 0, sizeof(m_MemProps));
    14238 
    14239  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    14240  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    14241 
    14242  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14243  {
    14244  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    14245  }
    14246 
    14247  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    14248  {
    14249  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    14250  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    14251  }
    14252 
    14253  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    14254 
    14255  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    14256  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    14257 
    14258  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    14259  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    14260  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    14261  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    14262 
    14263  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    14264  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    14265 
    14266  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    14267  {
    14268  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    14269  {
    14270  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    14271  if(limit != VK_WHOLE_SIZE)
    14272  {
    14273  m_HeapSizeLimit[heapIndex] = limit;
    14274  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    14275  {
    14276  m_MemProps.memoryHeaps[heapIndex].size = limit;
    14277  }
    14278  }
    14279  }
    14280  }
    14281 
    14282  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14283  {
    14284  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    14285 
    14286  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    14287  this,
    14288  VK_NULL_HANDLE, // hParentPool
    14289  memTypeIndex,
    14290  preferredBlockSize,
    14291  0,
    14292  SIZE_MAX,
    14293  GetBufferImageGranularity(),
    14294  pCreateInfo->frameInUseCount,
    14295  false, // isCustomPool
    14296  false, // explicitBlockSize
    14297  false); // linearAlgorithm
    14298  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
    14299  // because minBlockCount is 0.
    14300  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    14301 
    14302  }
    14303 }
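// [Editor's sketch, not part of the library] The pHeapSizeLimit handling above
// makes VMA act as if a heap were smaller than reported - useful e.g. for
// testing out-of-memory paths. One entry per heap; VK_WHOLE_SIZE means no limit:
/*
VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    heapLimits[i] = VK_WHOLE_SIZE;
heapLimits[0] = 256ull * 1024 * 1024; // pretend heap 0 has only 256 MiB

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pHeapSizeLimit = heapLimits;
*/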
    14304 
    14305 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    14306 {
    14307  VkResult res = VK_SUCCESS;
    14308 
    14309  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    14310  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    14311  {
    14312 #if VMA_RECORDING_ENABLED
    14313  m_pRecorder = vma_new(this, VmaRecorder)();
    14314  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    14315  if(res != VK_SUCCESS)
    14316  {
    14317  return res;
    14318  }
    14319  m_pRecorder->WriteConfiguration(
    14320  m_PhysicalDeviceProperties,
    14321  m_MemProps,
    14322  m_UseKhrDedicatedAllocation);
    14323  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    14324 #else
    14325  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    14326  return VK_ERROR_FEATURE_NOT_PRESENT;
    14327 #endif
    14328  }
    14329 
    14330  return res;
    14331 }
    14332 
    14333 VmaAllocator_T::~VmaAllocator_T()
    14334 {
    14335 #if VMA_RECORDING_ENABLED
    14336  if(m_pRecorder != VMA_NULL)
    14337  {
    14338  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    14339  vma_delete(this, m_pRecorder);
    14340  }
    14341 #endif
    14342 
    14343  VMA_ASSERT(m_Pools.empty());
    14344 
    14345  for(size_t i = GetMemoryTypeCount(); i--; )
    14346  {
    14347  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
    14348  {
    14349  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
    14350  }
    14351 
    14352  vma_delete(this, m_pDedicatedAllocations[i]);
    14353  vma_delete(this, m_pBlockVectors[i]);
    14354  }
    14355 }
    14356 
    14357 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    14358 {
    14359 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    14360  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
    14361  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
    14362  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
    14363  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
    14364  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
    14365  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
    14366  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
    14367  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
    14368  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
    14369  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
    14370  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
    14371  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
    14372  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
    14373  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
    14374  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
    14375  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
    14376  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
    14377 #if VMA_DEDICATED_ALLOCATION
    14378  if(m_UseKhrDedicatedAllocation)
    14379  {
    14380  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
    14381  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
    14382  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
    14383  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    14384  }
    14385 #endif // #if VMA_DEDICATED_ALLOCATION
    14386 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    14387 
    14388 #define VMA_COPY_IF_NOT_NULL(funcName) \
    14389  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    14390 
    14391  if(pVulkanFunctions != VMA_NULL)
    14392  {
    14393  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    14394  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    14395  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    14396  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    14397  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    14398  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    14399  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    14400  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    14401  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    14402  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    14403  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    14404  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    14405  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    14406  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    14407  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    14408  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    14409  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
    14410 #if VMA_DEDICATED_ALLOCATION
    14411  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    14412  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    14413 #endif
    14414  }
    14415 
    14416 #undef VMA_COPY_IF_NOT_NULL
    14417 
    14418  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    14419  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    14420  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    14421  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    14422  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    14423  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    14424  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    14425  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    14426  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    14427  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    14428  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    14429  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    14430  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    14431  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    14432  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    14433  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    14434  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    14435  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    14436  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
    14437 #if VMA_DEDICATED_ALLOCATION
    14438  if(m_UseKhrDedicatedAllocation)
    14439  {
    14440  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    14441  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    14442  }
    14443 #endif
    14444 }
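// [Editor's sketch, not part of the library] When VMA_STATIC_VULKAN_FUNCTIONS
// is 0 (e.g. entry points loaded dynamically via vkGetInstanceProcAddr or a
// loader like volk), the pointers checked by the asserts above must be passed
// in explicitly:
/*
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
// ... and so on for every member of VmaVulkanFunctions ...

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/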
    14445 
    14446 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    14447 {
    14448  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14449  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    14450  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    14451  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    14452 }
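// [Editor's note] Worked example for the heuristic above, assuming the default
// macro values (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB, VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
// = 256 MiB): a 512 MiB heap counts as small, so its preferred block size is
// 512 MiB / 8 = 64 MiB, while an 8 GiB heap gets the full 256 MiB block size.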
    14453 
    14454 VkResult VmaAllocator_T::AllocateMemoryOfType(
    14455  VkDeviceSize size,
    14456  VkDeviceSize alignment,
    14457  bool dedicatedAllocation,
    14458  VkBuffer dedicatedBuffer,
    14459  VkImage dedicatedImage,
    14460  const VmaAllocationCreateInfo& createInfo,
    14461  uint32_t memTypeIndex,
    14462  VmaSuballocationType suballocType,
    14463  size_t allocationCount,
    14464  VmaAllocation* pAllocations)
    14465 {
    14466  VMA_ASSERT(pAllocations != VMA_NULL);
    14467  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
    14468 
    14469  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    14470 
    14471  // If memory type is not HOST_VISIBLE, disable MAPPED.
    14472  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14473  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    14474  {
    14475  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    14476  }
    14477 
    14478  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    14479  VMA_ASSERT(blockVector);
    14480 
    14481  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    14482  bool preferDedicatedMemory =
    14483  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    14484  dedicatedAllocation ||
    14485  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
    14486  size > preferredBlockSize / 2;
    14487 
    14488  if(preferDedicatedMemory &&
    14489  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    14490  finalCreateInfo.pool == VK_NULL_HANDLE)
    14491  {
    14492  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    14493  }
    14494 
    14495  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    14496  {
    14497  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14498  {
    14499  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14500  }
    14501  else
    14502  {
    14503  return AllocateDedicatedMemory(
    14504  size,
    14505  suballocType,
    14506  memTypeIndex,
    14507  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14508  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14509  finalCreateInfo.pUserData,
    14510  dedicatedBuffer,
    14511  dedicatedImage,
    14512  allocationCount,
    14513  pAllocations);
    14514  }
    14515  }
    14516  else
    14517  {
    14518  VkResult res = blockVector->Allocate(
    14519  m_CurrentFrameIndex.load(),
    14520  size,
    14521  alignment,
    14522  finalCreateInfo,
    14523  suballocType,
    14524  allocationCount,
    14525  pAllocations);
    14526  if(res == VK_SUCCESS)
    14527  {
    14528  return res;
    14529  }
    14530 
    14531  // Try dedicated memory.
    14532  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14533  {
    14534  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14535  }
    14536  else
    14537  {
    14538  res = AllocateDedicatedMemory(
    14539  size,
    14540  suballocType,
    14541  memTypeIndex,
    14542  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14543  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14544  finalCreateInfo.pUserData,
    14545  dedicatedBuffer,
    14546  dedicatedImage,
    14547  allocationCount,
    14548  pAllocations);
    14549  if(res == VK_SUCCESS)
    14550  {
    14551  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
    14552  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    14553  return VK_SUCCESS;
    14554  }
    14555  else
    14556  {
    14557  // Everything failed: Return error code.
    14558  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14559  return res;
    14560  }
    14561  }
    14562  }
    14563 }
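// [Editor's sketch, not part of the library] The same dedicated-memory path can
// be forced per allocation from user code; here via vmaCreateBuffer (a large
// request like this one would also hit the half-of-block-size heuristic above):
/*
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 512ull * 1024 * 1024;
bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

VkBuffer buf = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buf, &alloc, VMA_NULL); // pAllocationInfo not needed here
*/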
    14564 
    14565 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    14566  VkDeviceSize size,
    14567  VmaSuballocationType suballocType,
    14568  uint32_t memTypeIndex,
    14569  bool map,
    14570  bool isUserDataString,
    14571  void* pUserData,
    14572  VkBuffer dedicatedBuffer,
    14573  VkImage dedicatedImage,
    14574  size_t allocationCount,
    14575  VmaAllocation* pAllocations)
    14576 {
    14577  VMA_ASSERT(allocationCount > 0 && pAllocations);
    14578 
    14579  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    14580  allocInfo.memoryTypeIndex = memTypeIndex;
    14581  allocInfo.allocationSize = size;
    14582 
    14583 #if VMA_DEDICATED_ALLOCATION
    14584  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    14585  if(m_UseKhrDedicatedAllocation)
    14586  {
    14587  if(dedicatedBuffer != VK_NULL_HANDLE)
    14588  {
    14589  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    14590  dedicatedAllocInfo.buffer = dedicatedBuffer;
    14591  allocInfo.pNext = &dedicatedAllocInfo;
    14592  }
    14593  else if(dedicatedImage != VK_NULL_HANDLE)
    14594  {
    14595  dedicatedAllocInfo.image = dedicatedImage;
    14596  allocInfo.pNext = &dedicatedAllocInfo;
    14597  }
    14598  }
    14599 #endif // #if VMA_DEDICATED_ALLOCATION
    14600 
    14601  size_t allocIndex;
    14602  VkResult res = VK_SUCCESS;
    14603  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14604  {
    14605  res = AllocateDedicatedMemoryPage(
    14606  size,
    14607  suballocType,
    14608  memTypeIndex,
    14609  allocInfo,
    14610  map,
    14611  isUserDataString,
    14612  pUserData,
    14613  pAllocations + allocIndex);
    14614  if(res != VK_SUCCESS)
    14615  {
    14616  break;
    14617  }
    14618  }
    14619 
    14620  if(res == VK_SUCCESS)
    14621  {
    14622  // Register them in m_pDedicatedAllocations.
    14623  {
    14624  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    14625  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    14626  VMA_ASSERT(pDedicatedAllocations);
    14627  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14628  {
    14629  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
    14630  }
    14631  }
    14632 
    14633  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    14634  }
    14635  else
    14636  {
    14637  // Free all already created allocations.
    14638  while(allocIndex--)
    14639  {
    14640  VmaAllocation currAlloc = pAllocations[allocIndex];
    14641  VkDeviceMemory hMemory = currAlloc->GetMemory();
    14642 
    14643  /*
    14644  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
    14645  before vkFreeMemory.
    14646 
    14647  if(currAlloc->GetMappedData() != VMA_NULL)
    14648  {
    14649  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    14650  }
    14651  */
    14652 
    14653  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
    14654 
    14655  currAlloc->SetUserData(this, VMA_NULL);
    14656  currAlloc->Dtor();
    14657  m_AllocationObjectAllocator.Free(currAlloc);
    14658  }
    14659 
    14660  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14661  }
    14662 
    14663  return res;
    14664 }
    14665 
    14666 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
    14667  VkDeviceSize size,
    14668  VmaSuballocationType suballocType,
    14669  uint32_t memTypeIndex,
    14670  const VkMemoryAllocateInfo& allocInfo,
    14671  bool map,
    14672  bool isUserDataString,
    14673  void* pUserData,
    14674  VmaAllocation* pAllocation)
    14675 {
    14676  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    14677  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    14678  if(res < 0)
    14679  {
    14680  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14681  return res;
    14682  }
    14683 
    14684  void* pMappedData = VMA_NULL;
    14685  if(map)
    14686  {
    14687  res = (*m_VulkanFunctions.vkMapMemory)(
    14688  m_hDevice,
    14689  hMemory,
    14690  0,
    14691  VK_WHOLE_SIZE,
    14692  0,
    14693  &pMappedData);
    14694  if(res < 0)
    14695  {
    14696  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    14697  FreeVulkanMemory(memTypeIndex, size, hMemory);
    14698  return res;
    14699  }
    14700  }
    14701 
    14702  *pAllocation = m_AllocationObjectAllocator.Allocate();
    14703  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
    14704  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    14705  (*pAllocation)->SetUserData(this, pUserData);
    14706  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14707  {
    14708  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    14709  }
    14710 
    14711  return VK_SUCCESS;
    14712 }
    14713 
    14714 void VmaAllocator_T::GetBufferMemoryRequirements(
    14715  VkBuffer hBuffer,
    14716  VkMemoryRequirements& memReq,
    14717  bool& requiresDedicatedAllocation,
    14718  bool& prefersDedicatedAllocation) const
    14719 {
    14720 #if VMA_DEDICATED_ALLOCATION
    14721  if(m_UseKhrDedicatedAllocation)
    14722  {
    14723  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14724  memReqInfo.buffer = hBuffer;
    14725 
    14726  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14727 
    14728  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14729  memReq2.pNext = &memDedicatedReq;
    14730 
    14731  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14732 
    14733  memReq = memReq2.memoryRequirements;
    14734  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14735  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14736  }
    14737  else
    14738 #endif // #if VMA_DEDICATED_ALLOCATION
    14739  {
    14740  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    14741  requiresDedicatedAllocation = false;
    14742  prefersDedicatedAllocation = false;
    14743  }
    14744 }
    14745 
    14746 void VmaAllocator_T::GetImageMemoryRequirements(
    14747  VkImage hImage,
    14748  VkMemoryRequirements& memReq,
    14749  bool& requiresDedicatedAllocation,
    14750  bool& prefersDedicatedAllocation) const
    14751 {
    14752 #if VMA_DEDICATED_ALLOCATION
    14753  if(m_UseKhrDedicatedAllocation)
    14754  {
    14755  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14756  memReqInfo.image = hImage;
    14757 
    14758  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14759 
    14760  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14761  memReq2.pNext = &memDedicatedReq;
    14762 
    14763  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14764 
    14765  memReq = memReq2.memoryRequirements;
    14766  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14767  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14768  }
    14769  else
    14770 #endif // #if VMA_DEDICATED_ALLOCATION
    14771  {
    14772  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    14773  requiresDedicatedAllocation = false;
    14774  prefersDedicatedAllocation = false;
    14775  }
    14776 }
    14777 
    14778 VkResult VmaAllocator_T::AllocateMemory(
    14779  const VkMemoryRequirements& vkMemReq,
    14780  bool requiresDedicatedAllocation,
    14781  bool prefersDedicatedAllocation,
    14782  VkBuffer dedicatedBuffer,
    14783  VkImage dedicatedImage,
    14784  const VmaAllocationCreateInfo& createInfo,
    14785  VmaSuballocationType suballocType,
    14786  size_t allocationCount,
    14787  VmaAllocation* pAllocations)
    14788 {
    14789  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14790 
    14791  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    14792 
    14793  if(vkMemReq.size == 0)
    14794  {
    14795  return VK_ERROR_VALIDATION_FAILED_EXT;
    14796  }
    14797  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    14798  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14799  {
    14800  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    14801  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14802  }
    14803  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14804  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
    14805  {
    14806  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    14807  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14808  }
    14809  if(requiresDedicatedAllocation)
    14810  {
    14811  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14812  {
    14813  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    14814  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14815  }
    14816  if(createInfo.pool != VK_NULL_HANDLE)
    14817  {
    14818  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    14819  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14820  }
    14821  }
    14822  if((createInfo.pool != VK_NULL_HANDLE) &&
    14823  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    14824  {
    14825  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    14826  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14827  }
    14828 
    14829  if(createInfo.pool != VK_NULL_HANDLE)
    14830  {
    14831  const VkDeviceSize alignmentForPool = VMA_MAX(
    14832  vkMemReq.alignment,
    14833  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    14834 
    14835  VmaAllocationCreateInfo createInfoForPool = createInfo;
    14836  // If memory type is not HOST_VISIBLE, disable MAPPED.
    14837  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14838  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    14839  {
    14840  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    14841  }
    14842 
    14843  return createInfo.pool->m_BlockVector.Allocate(
    14844  m_CurrentFrameIndex.load(),
    14845  vkMemReq.size,
    14846  alignmentForPool,
    14847  createInfoForPool,
    14848  suballocType,
    14849  allocationCount,
    14850  pAllocations);
    14851  }
    14852  else
    14853  {
    14854  // Bit mask of Vulkan memory types acceptable for this allocation.
    14855  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    14856  uint32_t memTypeIndex = UINT32_MAX;
    14857  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14858  if(res == VK_SUCCESS)
    14859  {
    14860  VkDeviceSize alignmentForMemType = VMA_MAX(
    14861  vkMemReq.alignment,
    14862  GetMemoryTypeMinAlignment(memTypeIndex));
    14863 
    14864  res = AllocateMemoryOfType(
    14865  vkMemReq.size,
    14866  alignmentForMemType,
    14867  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14868  dedicatedBuffer,
    14869  dedicatedImage,
    14870  createInfo,
    14871  memTypeIndex,
    14872  suballocType,
    14873  allocationCount,
    14874  pAllocations);
    14875  // Succeeded on first try.
    14876  if(res == VK_SUCCESS)
    14877  {
    14878  return res;
    14879  }
    14880  // Allocation from this memory type failed. Try other compatible memory types.
    14881  else
    14882  {
    14883  for(;;)
    14884  {
    14885  // Remove old memTypeIndex from list of possibilities.
    14886  memoryTypeBits &= ~(1u << memTypeIndex);
    14887  // Find alternative memTypeIndex.
    14888  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14889  if(res == VK_SUCCESS)
    14890  {
    14891  alignmentForMemType = VMA_MAX(
    14892  vkMemReq.alignment,
    14893  GetMemoryTypeMinAlignment(memTypeIndex));
    14894 
    14895  res = AllocateMemoryOfType(
    14896  vkMemReq.size,
    14897  alignmentForMemType,
    14898  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14899  dedicatedBuffer,
    14900  dedicatedImage,
    14901  createInfo,
    14902  memTypeIndex,
    14903  suballocType,
    14904  allocationCount,
    14905  pAllocations);
    14906  // Allocation from this alternative memory type succeeded.
    14907  if(res == VK_SUCCESS)
    14908  {
    14909  return res;
    14910  }
    14911  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    14912  }
    14913  // No other matching memory type index could be found.
    14914  else
    14915  {
    14916  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    14917  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14918  }
    14919  }
    14920  }
    14921  }
    14922  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    14923  else
    14924  return res;
    14925  }
    14926 }
    14927 
    14928 void VmaAllocator_T::FreeMemory(
    14929  size_t allocationCount,
    14930  const VmaAllocation* pAllocations)
    14931 {
    14932  VMA_ASSERT(pAllocations);
    14933 
    14934  for(size_t allocIndex = allocationCount; allocIndex--; )
    14935  {
    14936  VmaAllocation allocation = pAllocations[allocIndex];
    14937 
    14938  if(allocation != VK_NULL_HANDLE)
    14939  {
    14940  if(TouchAllocation(allocation))
    14941  {
    14942  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14943  {
    14944  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    14945  }
    14946 
    14947  switch(allocation->GetType())
    14948  {
    14949  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14950  {
    14951  VmaBlockVector* pBlockVector = VMA_NULL;
    14952  VmaPool hPool = allocation->GetBlock()->GetParentPool();
    14953  if(hPool != VK_NULL_HANDLE)
    14954  {
    14955  pBlockVector = &hPool->m_BlockVector;
    14956  }
    14957  else
    14958  {
    14959  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    14960  pBlockVector = m_pBlockVectors[memTypeIndex];
    14961  }
    14962  pBlockVector->Free(allocation);
    14963  }
    14964  break;
    14965  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14966  FreeDedicatedMemory(allocation);
    14967  break;
    14968  default:
    14969  VMA_ASSERT(0);
    14970  }
    14971  }
    14972 
    14973  allocation->SetUserData(this, VMA_NULL);
    14974  allocation->Dtor();
    14975  m_AllocationObjectAllocator.Free(allocation);
    14976  }
    14977  }
    14978 }
    14979 
    14980 VkResult VmaAllocator_T::ResizeAllocation(
    14981  const VmaAllocation alloc,
    14982  VkDeviceSize newSize)
    14983 {
    14984  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    14985  {
    14986  return VK_ERROR_VALIDATION_FAILED_EXT;
    14987  }
    14988  if(newSize == alloc->GetSize())
    14989  {
    14990  return VK_SUCCESS;
    14991  }
    14992 
    14993  switch(alloc->GetType())
    14994  {
    14995  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14996  return VK_ERROR_FEATURE_NOT_PRESENT;
    14997  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14998  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    14999  {
    15000  alloc->ChangeSize(newSize);
    15001  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    15002  return VK_SUCCESS;
    15003  }
    15004  else
    15005  {
    15006  return VK_ERROR_OUT_OF_POOL_MEMORY;
    15007  }
    15008  default:
    15009  VMA_ASSERT(0);
    15010  return VK_ERROR_VALIDATION_FAILED_EXT;
    15011  }
    15012 }
    15013 
    15014 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    15015 {
    15016  // Initialize.
    15017  InitStatInfo(pStats->total);
    15018  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    15019  InitStatInfo(pStats->memoryType[i]);
    15020  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    15021  InitStatInfo(pStats->memoryHeap[i]);
    15022 
    15023  // Process default pools.
    15024  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15025  {
    15026  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    15027  VMA_ASSERT(pBlockVector);
    15028  pBlockVector->AddStats(pStats);
    15029  }
    15030 
    15031  // Process custom pools.
    15032  {
    15033  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15034  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    15035  {
    15036  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    15037  }
    15038  }
    15039 
    15040  // Process dedicated allocations.
    15041  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15042  {
    15043  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    15044  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15045  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    15046  VMA_ASSERT(pDedicatedAllocVector);
    15047  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    15048  {
    15049  VmaStatInfo allocationStatInfo;
    15050  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    15051  VmaAddStatInfo(pStats->total, allocationStatInfo);
    15052  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    15053  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    15054  }
    15055  }
    15056 
    15057  // Postprocess.
    15058  VmaPostprocessCalcStatInfo(pStats->total);
    15059  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    15060  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    15061  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    15062  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    15063 }
    15064 
    15065 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    15066 
    15067 VkResult VmaAllocator_T::DefragmentationBegin(
    15068  const VmaDefragmentationInfo2& info,
    15069  VmaDefragmentationStats* pStats,
    15070  VmaDefragmentationContext* pContext)
    15071 {
    15072  if(info.pAllocationsChanged != VMA_NULL)
    15073  {
    15074  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
    15075  }
    15076 
    15077  *pContext = vma_new(this, VmaDefragmentationContext_T)(
    15078  this, m_CurrentFrameIndex.load(), info.flags, pStats);
    15079 
    15080  (*pContext)->AddPools(info.poolCount, info.pPools);
    15081  (*pContext)->AddAllocations(
    15082  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
    15083 
    15084  VkResult res = (*pContext)->Defragment(
    15085  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
    15086  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
    15087  info.commandBuffer, pStats);
    15088 
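 // VK_NOT_READY means commands were recorded into info.commandBuffer and the
 // context must stay alive until DefragmentationEnd(); any other result
 // finishes defragmentation immediately, so the context is destroyed here.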
    15089  if(res != VK_NOT_READY)
    15090  {
    15091  vma_delete(this, *pContext);
    15092  *pContext = VMA_NULL;
    15093  }
    15094 
    15095  return res;
    15096 }
    15097 
    15098 VkResult VmaAllocator_T::DefragmentationEnd(
    15099  VmaDefragmentationContext context)
    15100 {
    15101  vma_delete(this, context);
    15102  return VK_SUCCESS;
    15103 }
    15104 
    15105 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    15106 {
    15107  if(hAllocation->CanBecomeLost())
    15108  {
    15109  /*
    15110  Warning: This is a carefully designed algorithm.
    15111  Do not modify unless you really know what you're doing :)
    15112  */
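 // In short: keep trying to advance lastUseFrameIndex to the current frame via
 // compare-exchange, and report the allocation as lost only if another thread
 // has already stored VMA_FRAME_INDEX_LOST.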
    15113  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15114  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15115  for(;;)
    15116  {
    15117  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    15118  {
    15119  pAllocationInfo->memoryType = UINT32_MAX;
    15120  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    15121  pAllocationInfo->offset = 0;
    15122  pAllocationInfo->size = hAllocation->GetSize();
    15123  pAllocationInfo->pMappedData = VMA_NULL;
    15124  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15125  return;
    15126  }
    15127  else if(localLastUseFrameIndex == localCurrFrameIndex)
    15128  {
    15129  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    15130  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    15131  pAllocationInfo->offset = hAllocation->GetOffset();
    15132  pAllocationInfo->size = hAllocation->GetSize();
    15133  pAllocationInfo->pMappedData = VMA_NULL;
    15134  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15135  return;
    15136  }
    15137  else // Last use time earlier than current time.
    15138  {
    15139  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15140  {
    15141  localLastUseFrameIndex = localCurrFrameIndex;
    15142  }
    15143  }
    15144  }
    15145  }
    15146  else
    15147  {
    15148 #if VMA_STATS_STRING_ENABLED
    15149  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15150  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15151  for(;;)
    15152  {
    15153  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    15154  if(localLastUseFrameIndex == localCurrFrameIndex)
    15155  {
    15156  break;
    15157  }
    15158  else // Last use time earlier than current time.
    15159  {
    15160  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15161  {
    15162  localLastUseFrameIndex = localCurrFrameIndex;
    15163  }
    15164  }
    15165  }
    15166 #endif
    15167 
    15168  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    15169  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    15170  pAllocationInfo->offset = hAllocation->GetOffset();
    15171  pAllocationInfo->size = hAllocation->GetSize();
    15172  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    15173  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15174  }
    15175 }
    15176 
    15177 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
    15178 {
    15179  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    15180  if(hAllocation->CanBecomeLost())
    15181  {
    15182  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15183  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15184  for(;;)
    15185  {
    15186  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    15187  {
    15188  return false;
    15189  }
    15190  else if(localLastUseFrameIndex == localCurrFrameIndex)
    15191  {
    15192  return true;
    15193  }
    15194  else // Last use time earlier than current time.
    15195  {
    15196  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15197  {
    15198  localLastUseFrameIndex = localCurrFrameIndex;
    15199  }
    15200  }
    15201  }
    15202  }
    15203  else
    15204  {
    15205 #if VMA_STATS_STRING_ENABLED
    15206  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15207  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15208  for(;;)
    15209  {
    15210  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    15211  if(localLastUseFrameIndex == localCurrFrameIndex)
    15212  {
    15213  break;
    15214  }
    15215  else // Last use time earlier than current time.
    15216  {
    15217  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15218  {
    15219  localLastUseFrameIndex = localCurrFrameIndex;
    15220  }
    15221  }
    15222  }
    15223 #endif
    15224 
    15225  return true;
    15226  }
    15227 }
    15228 
    15229 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    15230 {
    15231  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    15232 
    15233  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    15234 
    15235  if(newCreateInfo.maxBlockCount == 0)
    15236  {
    15237  newCreateInfo.maxBlockCount = SIZE_MAX;
    15238  }
    15239  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    15240  {
    15241  return VK_ERROR_INITIALIZATION_FAILED;
    15242  }
    15243 
    15244  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    15245 
    15246  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    15247 
    15248  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    15249  if(res != VK_SUCCESS)
    15250  {
    15251  vma_delete(this, *pPool);
    15252  *pPool = VMA_NULL;
    15253  return res;
    15254  }
    15255 
    15256  // Add to m_Pools.
    15257  {
    15258  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15259  (*pPool)->SetId(m_NextPoolId++);
    15260  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    15261  }
    15262 
    15263  return VK_SUCCESS;
    15264 }
    15265 
    15266 void VmaAllocator_T::DestroyPool(VmaPool pool)
    15267 {
    15268  // Remove from m_Pools.
    15269  {
    15270  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15271  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    15272  VMA_ASSERT(success && "Pool not found in Allocator.");
    15273  }
    15274 
    15275  vma_delete(this, pool);
    15276 }
    15277 
    15278 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    15279 {
    15280  pool->m_BlockVector.GetPoolStats(pPoolStats);
    15281 }
    15282 
    15283 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    15284 {
    15285  m_CurrentFrameIndex.store(frameIndex);
    15286 }
    15287 
    15288 void VmaAllocator_T::MakePoolAllocationsLost(
    15289  VmaPool hPool,
    15290  size_t* pLostAllocationCount)
    15291 {
    15292  hPool->m_BlockVector.MakePoolAllocationsLost(
    15293  m_CurrentFrameIndex.load(),
    15294  pLostAllocationCount);
    15295 }
    15296 
    15297 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
    15298 {
    15299  return hPool->m_BlockVector.CheckCorruption();
    15300 }
    15301 
    15302 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    15303 {
    15304  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    15305 
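 // finalRes stays VK_ERROR_FEATURE_NOT_PRESENT while no checked block vector
 // supports corruption detection, flips to VK_SUCCESS once at least one check
 // passes, and any other result signals real corruption and is returned
 // immediately.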
    15306  // Process default pools.
    15307  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15308  {
    15309  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    15310  {
    15311  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    15312  VMA_ASSERT(pBlockVector);
    15313  VkResult localRes = pBlockVector->CheckCorruption();
    15314  switch(localRes)
    15315  {
    15316  case VK_ERROR_FEATURE_NOT_PRESENT:
    15317  break;
    15318  case VK_SUCCESS:
    15319  finalRes = VK_SUCCESS;
    15320  break;
    15321  default:
    15322  return localRes;
    15323  }
    15324  }
    15325  }
    15326 
    15327  // Process custom pools.
    15328  {
    15329  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15330  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    15331  {
    15332  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    15333  {
    15334  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    15335  switch(localRes)
    15336  {
    15337  case VK_ERROR_FEATURE_NOT_PRESENT:
    15338  break;
    15339  case VK_SUCCESS:
    15340  finalRes = VK_SUCCESS;
    15341  break;
    15342  default:
    15343  return localRes;
    15344  }
    15345  }
    15346  }
    15347  }
    15348 
    15349  return finalRes;
    15350 }
    15351 
    15352 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    15353 {
    15354  *pAllocation = m_AllocationObjectAllocator.Allocate();
    15355  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
    15356  (*pAllocation)->InitLost();
    15357 }
    15358 
    15359 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    15360 {
    15361  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    15362 
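 // If a user-defined size limit is set for this heap (anything other than
 // VK_WHOLE_SIZE), the remaining budget is checked and decremented under a
 // mutex, so concurrent allocations cannot oversubscribe the heap.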
    15363  VkResult res;
    15364  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15365  {
    15366  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15367  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    15368  {
    15369  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15370  if(res == VK_SUCCESS)
    15371  {
    15372  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    15373  }
    15374  }
    15375  else
    15376  {
    15377  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    15378  }
    15379  }
    15380  else
    15381  {
    15382  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15383  }
    15384 
    15385  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    15386  {
    15387  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    15388  }
    15389 
    15390  return res;
    15391 }
    15392 
    15393 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    15394 {
    15395  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    15396  {
    15397  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    15398  }
    15399 
    15400  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    15401 
    15402  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    15403  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15404  {
    15405  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15406  m_HeapSizeLimit[heapIndex] += size;
    15407  }
    15408 }
    15409 
    15410 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    15411 {
    15412  if(hAllocation->CanBecomeLost())
    15413  {
    15414  return VK_ERROR_MEMORY_MAP_FAILED;
    15415  }
    15416 
    15417  switch(hAllocation->GetType())
    15418  {
    15419  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15420  {
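 // The whole VkDeviceMemory block is mapped (with reference counting), and the
 // returned pointer is the block base plus this allocation's offset.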
    15421  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15422  char *pBytes = VMA_NULL;
    15423  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    15424  if(res == VK_SUCCESS)
    15425  {
    15426  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    15427  hAllocation->BlockAllocMap();
    15428  }
    15429  return res;
    15430  }
    15431  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15432  return hAllocation->DedicatedAllocMap(this, ppData);
    15433  default:
    15434  VMA_ASSERT(0);
    15435  return VK_ERROR_MEMORY_MAP_FAILED;
    15436  }
    15437 }
    15438 
    15439 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    15440 {
    15441  switch(hAllocation->GetType())
    15442  {
    15443  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15444  {
    15445  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15446  hAllocation->BlockAllocUnmap();
    15447  pBlock->Unmap(this, 1);
    15448  }
    15449  break;
    15450  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15451  hAllocation->DedicatedAllocUnmap(this);
    15452  break;
    15453  default:
    15454  VMA_ASSERT(0);
    15455  }
    15456 }
    15457 
    15458 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    15459 {
    15460  VkResult res = VK_SUCCESS;
    15461  switch(hAllocation->GetType())
    15462  {
    15463  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15464  res = GetVulkanFunctions().vkBindBufferMemory(
    15465  m_hDevice,
    15466  hBuffer,
    15467  hAllocation->GetMemory(),
    15468  0); //memoryOffset
    15469  break;
    15470  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15471  {
    15472  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15473  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    15474  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    15475  break;
    15476  }
    15477  default:
    15478  VMA_ASSERT(0);
    15479  }
    15480  return res;
    15481 }
    15482 
    15483 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    15484 {
    15485  VkResult res = VK_SUCCESS;
    15486  switch(hAllocation->GetType())
    15487  {
    15488  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15489  res = GetVulkanFunctions().vkBindImageMemory(
    15490  m_hDevice,
    15491  hImage,
    15492  hAllocation->GetMemory(),
    15493  0); //memoryOffset
    15494  break;
    15495  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15496  {
    15497  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15498  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    15499  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    15500  break;
    15501  }
    15502  default:
    15503  VMA_ASSERT(0);
    15504  }
    15505  return res;
    15506 }
    15507 
    15508 void VmaAllocator_T::FlushOrInvalidateAllocation(
    15509  VmaAllocation hAllocation,
    15510  VkDeviceSize offset, VkDeviceSize size,
    15511  VMA_CACHE_OPERATION op)
    15512 {
    15513  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    15514  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    15515  {
    15516  const VkDeviceSize allocationSize = hAllocation->GetSize();
    15517  VMA_ASSERT(offset <= allocationSize);
    15518 
    15519  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    15520 
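 // Worked example with hypothetical numbers: offset = 70, size = 100 and
 // nonCoherentAtomSize = 64 yield memRange.offset = AlignDown(70, 64) = 64 and
 // memRange.size = AlignUp(100 + (70 - 64), 64) = 128, clamped below to the
 // end of the allocation or block.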
    15521  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    15522  memRange.memory = hAllocation->GetMemory();
    15523 
    15524  switch(hAllocation->GetType())
    15525  {
    15526  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15527  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15528  if(size == VK_WHOLE_SIZE)
    15529  {
    15530  memRange.size = allocationSize - memRange.offset;
    15531  }
    15532  else
    15533  {
    15534  VMA_ASSERT(offset + size <= allocationSize);
    15535  memRange.size = VMA_MIN(
    15536  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
    15537  allocationSize - memRange.offset);
    15538  }
    15539  break;
    15540 
    15541  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15542  {
    15543  // 1. Compute the range, still within this allocation.
    15544  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15545  if(size == VK_WHOLE_SIZE)
    15546  {
    15547  size = allocationSize - offset;
    15548  }
    15549  else
    15550  {
    15551  VMA_ASSERT(offset + size <= allocationSize);
    15552  }
    15553  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
    15554 
    15555  // 2. Adjust to whole block.
    15556  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
    15557  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
    15558  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
    15559  memRange.offset += allocationOffset;
    15560  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
    15561 
    15562  break;
    15563  }
    15564 
    15565  default:
    15566  VMA_ASSERT(0);
    15567  }
    15568 
    15569  switch(op)
    15570  {
    15571  case VMA_CACHE_FLUSH:
    15572  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15573  break;
    15574  case VMA_CACHE_INVALIDATE:
    15575  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15576  break;
    15577  default:
    15578  VMA_ASSERT(0);
    15579  }
    15580  }
    15581  // else: Just ignore this call.
    15582 }
    15583 
    15584 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    15585 {
    15586  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    15587 
    15588  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    15589  {
    15590  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15591  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    15592  VMA_ASSERT(pDedicatedAllocations);
    15593  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    15594  VMA_ASSERT(success);
    15595  }
    15596 
    15597  VkDeviceMemory hMemory = allocation->GetMemory();
    15598 
    15599  /*
    15600  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
    15601  before vkFreeMemory.
    15602 
    15603  if(allocation->GetMappedData() != VMA_NULL)
    15604  {
    15605  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    15606  }
    15607  */
    15608 
    15609  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    15610 
    15611  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    15612 }
    15613 
    15614 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
    15615 {
    15616  VkBufferCreateInfo dummyBufCreateInfo;
    15617  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
    15618 
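 // A throwaway buffer is created purely to query which memory types could back
 // the temporary buffers used during GPU defragmentation; no memory is ever
 // bound to it.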
    15619  uint32_t memoryTypeBits = 0;
    15620 
    15621  // Create buffer.
    15622  VkBuffer buf = VK_NULL_HANDLE;
    15623  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
    15624  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
    15625  if(res == VK_SUCCESS)
    15626  {
    15627  // Query for supported memory types.
    15628  VkMemoryRequirements memReq;
    15629  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
    15630  memoryTypeBits = memReq.memoryTypeBits;
    15631 
    15632  // Destroy buffer.
    15633  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
    15634  }
    15635 
    15636  return memoryTypeBits;
    15637 }
    15638 
    15639 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    15640 {
    15641  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    15642  !hAllocation->CanBecomeLost() &&
    15643  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15644  {
    15645  void* pData = VMA_NULL;
    15646  VkResult res = Map(hAllocation, &pData);
    15647  if(res == VK_SUCCESS)
    15648  {
    15649  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    15650  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    15651  Unmap(hAllocation);
    15652  }
    15653  else
    15654  {
    15655  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    15656  }
    15657  }
    15658 }
    15659 
    15660 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
    15661 {
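 // Lazily computed and cached; UINT32_MAX serves as the "not calculated yet" sentinel.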
    15662  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
    15663  if(memoryTypeBits == UINT32_MAX)
    15664  {
    15665  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
    15666  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
    15667  }
    15668  return memoryTypeBits;
    15669 }
    15670 
    15671 #if VMA_STATS_STRING_ENABLED
    15672 
    15673 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    15674 {
    15675  bool dedicatedAllocationsStarted = false;
    15676  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15677  {
    15678  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15679  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    15680  VMA_ASSERT(pDedicatedAllocVector);
    15681  if(pDedicatedAllocVector->empty() == false)
    15682  {
    15683  if(dedicatedAllocationsStarted == false)
    15684  {
    15685  dedicatedAllocationsStarted = true;
    15686  json.WriteString("DedicatedAllocations");
    15687  json.BeginObject();
    15688  }
    15689 
    15690  json.BeginString("Type ");
    15691  json.ContinueString(memTypeIndex);
    15692  json.EndString();
    15693 
    15694  json.BeginArray();
    15695 
    15696  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    15697  {
    15698  json.BeginObject(true);
    15699  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    15700  hAlloc->PrintParameters(json);
    15701  json.EndObject();
    15702  }
    15703 
    15704  json.EndArray();
    15705  }
    15706  }
    15707  if(dedicatedAllocationsStarted)
    15708  {
    15709  json.EndObject();
    15710  }
    15711 
    15712  {
    15713  bool allocationsStarted = false;
    15714  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15715  {
    15716  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    15717  {
    15718  if(allocationsStarted == false)
    15719  {
    15720  allocationsStarted = true;
    15721  json.WriteString("DefaultPools");
    15722  json.BeginObject();
    15723  }
    15724 
    15725  json.BeginString("Type ");
    15726  json.ContinueString(memTypeIndex);
    15727  json.EndString();
    15728 
    15729  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    15730  }
    15731  }
    15732  if(allocationsStarted)
    15733  {
    15734  json.EndObject();
    15735  }
    15736  }
    15737 
    15738  // Custom pools
    15739  {
    15740  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15741  const size_t poolCount = m_Pools.size();
    15742  if(poolCount > 0)
    15743  {
    15744  json.WriteString("Pools");
    15745  json.BeginObject();
    15746  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    15747  {
    15748  json.BeginString();
    15749  json.ContinueString(m_Pools[poolIndex]->GetId());
    15750  json.EndString();
    15751 
    15752  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    15753  }
    15754  json.EndObject();
    15755  }
    15756  }
    15757 }
    15758 
    15759 #endif // #if VMA_STATS_STRING_ENABLED
    15760 
    15761 ////////////////////////////////////////////////////////////////////////////////
    15762 // Public interface
    15763 
    15764 VkResult vmaCreateAllocator(
    15765  const VmaAllocatorCreateInfo* pCreateInfo,
    15766  VmaAllocator* pAllocator)
    15767 {
    15768  VMA_ASSERT(pCreateInfo && pAllocator);
    15769  VMA_DEBUG_LOG("vmaCreateAllocator");
    15770  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    15771  return (*pAllocator)->Init(pCreateInfo);
    15772 }
    15773 
    15774 void vmaDestroyAllocator(
    15775  VmaAllocator allocator)
    15776 {
    15777  if(allocator != VK_NULL_HANDLE)
    15778  {
    15779  VMA_DEBUG_LOG("vmaDestroyAllocator");
    15780  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    15781  vma_delete(&allocationCallbacks, allocator);
    15782  }
    15783 }
    15784 
    15785 void vmaGetPhysicalDeviceProperties(
    15786  VmaAllocator allocator,
    15787  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    15788 {
    15789  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    15790  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    15791 }
    15792 
    15793 void vmaGetMemoryProperties(
    15794  VmaAllocator allocator,
    15795  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    15796 {
    15797  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    15798  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    15799 }
    15800 
    15801 void vmaGetMemoryTypeProperties(
    15802  VmaAllocator allocator,
    15803  uint32_t memoryTypeIndex,
    15804  VkMemoryPropertyFlags* pFlags)
    15805 {
    15806  VMA_ASSERT(allocator && pFlags);
    15807  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    15808  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    15809 }
    15810 
    15811 void vmaSetCurrentFrameIndex(
    15812  VmaAllocator allocator,
    15813  uint32_t frameIndex)
    15814 {
    15815  VMA_ASSERT(allocator);
    15816  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    15817 
    15818  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15819 
    15820  allocator->SetCurrentFrameIndex(frameIndex);
    15821 }
    15822 
    15823 void vmaCalculateStats(
    15824  VmaAllocator allocator,
    15825  VmaStats* pStats)
    15826 {
    15827  VMA_ASSERT(allocator && pStats);
    15828  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15829  allocator->CalculateStats(pStats);
    15830 }
    15831 
    15832 #if VMA_STATS_STRING_ENABLED
    15833 
    15834 void vmaBuildStatsString(
    15835  VmaAllocator allocator,
    15836  char** ppStatsString,
    15837  VkBool32 detailedMap)
    15838 {
    15839  VMA_ASSERT(allocator && ppStatsString);
    15840  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15841 
    15842  VmaStringBuilder sb(allocator);
    15843  {
    15844  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    15845  json.BeginObject();
    15846 
    15847  VmaStats stats;
    15848  allocator->CalculateStats(&stats);
    15849 
    15850  json.WriteString("Total");
    15851  VmaPrintStatInfo(json, stats.total);
    15852 
    15853  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    15854  {
    15855  json.BeginString("Heap ");
    15856  json.ContinueString(heapIndex);
    15857  json.EndString();
    15858  json.BeginObject();
    15859 
    15860  json.WriteString("Size");
    15861  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    15862 
    15863  json.WriteString("Flags");
    15864  json.BeginArray(true);
    15865  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    15866  {
    15867  json.WriteString("DEVICE_LOCAL");
    15868  }
    15869  json.EndArray();
    15870 
    15871  if(stats.memoryHeap[heapIndex].blockCount > 0)
    15872  {
    15873  json.WriteString("Stats");
    15874  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    15875  }
    15876 
    15877  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    15878  {
    15879  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    15880  {
    15881  json.BeginString("Type ");
    15882  json.ContinueString(typeIndex);
    15883  json.EndString();
    15884 
    15885  json.BeginObject();
    15886 
    15887  json.WriteString("Flags");
    15888  json.BeginArray(true);
    15889  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    15890  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    15891  {
    15892  json.WriteString("DEVICE_LOCAL");
    15893  }
    15894  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15895  {
    15896  json.WriteString("HOST_VISIBLE");
    15897  }
    15898  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    15899  {
    15900  json.WriteString("HOST_COHERENT");
    15901  }
    15902  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    15903  {
    15904  json.WriteString("HOST_CACHED");
    15905  }
    15906  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    15907  {
    15908  json.WriteString("LAZILY_ALLOCATED");
    15909  }
    15910  json.EndArray();
    15911 
    15912  if(stats.memoryType[typeIndex].blockCount > 0)
    15913  {
    15914  json.WriteString("Stats");
    15915  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    15916  }
    15917 
    15918  json.EndObject();
    15919  }
    15920  }
    15921 
    15922  json.EndObject();
    15923  }
    15924  if(detailedMap == VK_TRUE)
    15925  {
    15926  allocator->PrintDetailedMap(json);
    15927  }
    15928 
    15929  json.EndObject();
    15930  }
    15931 
    15932  const size_t len = sb.GetLength();
    15933  char* const pChars = vma_new_array(allocator, char, len + 1);
    15934  if(len > 0)
    15935  {
    15936  memcpy(pChars, sb.GetData(), len);
    15937  }
    15938  pChars[len] = '\0';
    15939  *ppStatsString = pChars;
    15940 }
    15941 
    15942 void vmaFreeStatsString(
    15943  VmaAllocator allocator,
    15944  char* pStatsString)
    15945 {
    15946  if(pStatsString != VMA_NULL)
    15947  {
    15948  VMA_ASSERT(allocator);
    15949  size_t len = strlen(pStatsString);
    15950  vma_delete_array(allocator, pStatsString, len + 1);
    15951  }
    15952 }
    15953 
    15954 #endif // #if VMA_STATS_STRING_ENABLED
    15955 
    15956 /*
    15957 This function is not protected by any mutex because it just reads immutable data.
    15958 */
    15959 VkResult vmaFindMemoryTypeIndex(
    15960  VmaAllocator allocator,
    15961  uint32_t memoryTypeBits,
    15962  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15963  uint32_t* pMemoryTypeIndex)
    15964 {
    15965  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15966  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15967  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15968 
    15969  if(pAllocationCreateInfo->memoryTypeBits != 0)
    15970  {
    15971  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    15972  }
    15973 
    15974  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    15975  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    15976 
    15977  // Convert usage to requiredFlags and preferredFlags.
    15978  switch(pAllocationCreateInfo->usage)
    15979  {
    15980  case VMA_MEMORY_USAGE_UNKNOWN:
    15981  break;
    15982  case VMA_MEMORY_USAGE_GPU_ONLY:
    15983  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15984  {
    15985  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15986  }
    15987  break;
    15988  case VMA_MEMORY_USAGE_CPU_ONLY:
    15989  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    15990  break;
    15991  case VMA_MEMORY_USAGE_CPU_TO_GPU:
    15992  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15993  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15994  {
    15995  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15996  }
    15997  break;
    15998  case VMA_MEMORY_USAGE_GPU_TO_CPU:
    15999  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    16000  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    16001  break;
    16002  default:
    16003  break;
    16004  }
    16005 
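 // Worked example with hypothetical flags: with preferredFlags = DEVICE_LOCAL |
 // HOST_COHERENT, a memory type offering DEVICE_LOCAL | HOST_VISIBLE misses one
 // preferred bit (cost 1) and beats a type missing both (cost 2); a type with
 // cost 0 is returned immediately.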
    16006  *pMemoryTypeIndex = UINT32_MAX;
    16007  uint32_t minCost = UINT32_MAX;
    16008  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    16009  memTypeIndex < allocator->GetMemoryTypeCount();
    16010  ++memTypeIndex, memTypeBit <<= 1)
    16011  {
    16012  // This memory type is acceptable according to memoryTypeBits bitmask.
    16013  if((memTypeBit & memoryTypeBits) != 0)
    16014  {
    16015  const VkMemoryPropertyFlags currFlags =
    16016  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    16017  // This memory type contains requiredFlags.
    16018  if((requiredFlags & ~currFlags) == 0)
    16019  {
    16020  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    16021  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    16022  // Remember memory type with lowest cost.
    16023  if(currCost < minCost)
    16024  {
    16025  *pMemoryTypeIndex = memTypeIndex;
    16026  if(currCost == 0)
    16027  {
    16028  return VK_SUCCESS;
    16029  }
    16030  minCost = currCost;
    16031  }
    16032  }
    16033  }
    16034  }
    16035  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    16036 }
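 /*
 Minimal usage sketch (assuming a valid VmaAllocator `allocator`; names are
 illustrative only):

     VmaAllocationCreateInfo allocInfo = {};
     allocInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
     uint32_t memTypeIndex = UINT32_MAX;
     VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocInfo, &memTypeIndex);
     // On VK_SUCCESS, memTypeIndex can be used e.g. as VmaPoolCreateInfo::memoryTypeIndex.
 */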
    16037 
    16038 VkResult vmaFindMemoryTypeIndexForBufferInfo(
    16039  VmaAllocator allocator,
    16040  const VkBufferCreateInfo* pBufferCreateInfo,
    16041  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16042  uint32_t* pMemoryTypeIndex)
    16043 {
    16044  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    16045  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    16046  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    16047  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    16048 
    16049  const VkDevice hDev = allocator->m_hDevice;
    16050  VkBuffer hBuffer = VK_NULL_HANDLE;
    16051  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    16052  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    16053  if(res == VK_SUCCESS)
    16054  {
    16055  VkMemoryRequirements memReq = {};
    16056  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    16057  hDev, hBuffer, &memReq);
    16058 
    16059  res = vmaFindMemoryTypeIndex(
    16060  allocator,
    16061  memReq.memoryTypeBits,
    16062  pAllocationCreateInfo,
    16063  pMemoryTypeIndex);
    16064 
    16065  allocator->GetVulkanFunctions().vkDestroyBuffer(
    16066  hDev, hBuffer, allocator->GetAllocationCallbacks());
    16067  }
    16068  return res;
    16069 }
    16070 
    16071 VkResult vmaFindMemoryTypeIndexForImageInfo(
    16072  VmaAllocator allocator,
    16073  const VkImageCreateInfo* pImageCreateInfo,
    16074  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16075  uint32_t* pMemoryTypeIndex)
    16076 {
    16077  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    16078  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    16079  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    16080  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    16081 
    16082  const VkDevice hDev = allocator->m_hDevice;
    16083  VkImage hImage = VK_NULL_HANDLE;
    16084  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    16085  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    16086  if(res == VK_SUCCESS)
    16087  {
    16088  VkMemoryRequirements memReq = {};
    16089  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    16090  hDev, hImage, &memReq);
    16091 
    16092  res = vmaFindMemoryTypeIndex(
    16093  allocator,
    16094  memReq.memoryTypeBits,
    16095  pAllocationCreateInfo,
    16096  pMemoryTypeIndex);
    16097 
    16098  allocator->GetVulkanFunctions().vkDestroyImage(
    16099  hDev, hImage, allocator->GetAllocationCallbacks());
    16100  }
    16101  return res;
    16102 }
    16103 
    16104 VkResult vmaCreatePool(
    16105  VmaAllocator allocator,
    16106  const VmaPoolCreateInfo* pCreateInfo,
    16107  VmaPool* pPool)
    16108 {
    16109  VMA_ASSERT(allocator && pCreateInfo && pPool);
    16110 
    16111  VMA_DEBUG_LOG("vmaCreatePool");
    16112 
    16113  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16114 
    16115  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    16116 
    16117 #if VMA_RECORDING_ENABLED
    16118  if(allocator->GetRecorder() != VMA_NULL)
    16119  {
    16120  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    16121  }
    16122 #endif
    16123 
    16124  return res;
    16125 }
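 /*
 Minimal usage sketch (hypothetical values; memTypeIndex e.g. obtained from
 vmaFindMemoryTypeIndex above):

     VmaPoolCreateInfo poolInfo = {};
     poolInfo.memoryTypeIndex = memTypeIndex;
     poolInfo.blockSize = 0; // 0 = let the allocator choose the preferred block size.
     VmaPool pool = VK_NULL_HANDLE;
     VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
     // ... allocate via VmaAllocationCreateInfo::pool, then vmaDestroyPool(allocator, pool);
 */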
    16126 
    16127 void vmaDestroyPool(
    16128  VmaAllocator allocator,
    16129  VmaPool pool)
    16130 {
    16131  VMA_ASSERT(allocator);
    16132 
    16133  if(pool == VK_NULL_HANDLE)
    16134  {
    16135  return;
    16136  }
    16137 
    16138  VMA_DEBUG_LOG("vmaDestroyPool");
    16139 
    16140  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16141 
    16142 #if VMA_RECORDING_ENABLED
    16143  if(allocator->GetRecorder() != VMA_NULL)
    16144  {
    16145  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    16146  }
    16147 #endif
    16148 
    16149  allocator->DestroyPool(pool);
    16150 }
    16151 
    16152 void vmaGetPoolStats(
    16153  VmaAllocator allocator,
    16154  VmaPool pool,
    16155  VmaPoolStats* pPoolStats)
    16156 {
    16157  VMA_ASSERT(allocator && pool && pPoolStats);
    16158 
    16159  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16160 
    16161  allocator->GetPoolStats(pool, pPoolStats);
    16162 }
    16163 
    16164 void vmaMakePoolAllocationsLost(
    16165  VmaAllocator allocator,
    16166  VmaPool pool,
    16167  size_t* pLostAllocationCount)
    16168 {
    16169  VMA_ASSERT(allocator && pool);
    16170 
    16171  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16172 
    16173 #if VMA_RECORDING_ENABLED
    16174  if(allocator->GetRecorder() != VMA_NULL)
    16175  {
    16176  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    16177  }
    16178 #endif
    16179 
    16180  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    16181 }
    16182 
    16183 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    16184 {
    16185  VMA_ASSERT(allocator && pool);
    16186 
    16187  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16188 
    16189  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    16190 
    16191  return allocator->CheckPoolCorruption(pool);
    16192 }
    16193 
    16194 VkResult vmaAllocateMemory(
    16195  VmaAllocator allocator,
    16196  const VkMemoryRequirements* pVkMemoryRequirements,
    16197  const VmaAllocationCreateInfo* pCreateInfo,
    16198  VmaAllocation* pAllocation,
    16199  VmaAllocationInfo* pAllocationInfo)
    16200 {
    16201  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    16202 
    16203  VMA_DEBUG_LOG("vmaAllocateMemory");
    16204 
    16205  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16206 
    16207  VkResult result = allocator->AllocateMemory(
    16208  *pVkMemoryRequirements,
    16209  false, // requiresDedicatedAllocation
    16210  false, // prefersDedicatedAllocation
    16211  VK_NULL_HANDLE, // dedicatedBuffer
    16212  VK_NULL_HANDLE, // dedicatedImage
    16213  *pCreateInfo,
    16214  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16215  1, // allocationCount
    16216  pAllocation);
    16217 
    16218 #if VMA_RECORDING_ENABLED
    16219  if(allocator->GetRecorder() != VMA_NULL)
    16220  {
    16221  allocator->GetRecorder()->RecordAllocateMemory(
    16222  allocator->GetCurrentFrameIndex(),
    16223  *pVkMemoryRequirements,
    16224  *pCreateInfo,
    16225  *pAllocation);
    16226  }
    16227 #endif
    16228 
    16229  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16230  {
    16231  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16232  }
    16233 
    16234  return result;
    16235 }
    16236 
    16237 VkResult vmaAllocateMemoryPages(
    16238  VmaAllocator allocator,
    16239  const VkMemoryRequirements* pVkMemoryRequirements,
    16240  const VmaAllocationCreateInfo* pCreateInfo,
    16241  size_t allocationCount,
    16242  VmaAllocation* pAllocations,
    16243  VmaAllocationInfo* pAllocationInfo)
    16244 {
    16245  if(allocationCount == 0)
    16246  {
    16247  return VK_SUCCESS;
    16248  }
    16249 
    16250  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
    16251 
    16252  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
    16253 
    16254  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16255 
    16256  VkResult result = allocator->AllocateMemory(
    16257  *pVkMemoryRequirements,
    16258  false, // requiresDedicatedAllocation
    16259  false, // prefersDedicatedAllocation
    16260  VK_NULL_HANDLE, // dedicatedBuffer
    16261  VK_NULL_HANDLE, // dedicatedImage
    16262  *pCreateInfo,
    16263  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16264  allocationCount,
    16265  pAllocations);
    16266 
    16267 #if VMA_RECORDING_ENABLED
    16268  if(allocator->GetRecorder() != VMA_NULL)
    16269  {
    16270  allocator->GetRecorder()->RecordAllocateMemoryPages(
    16271  allocator->GetCurrentFrameIndex(),
    16272  *pVkMemoryRequirements,
    16273  *pCreateInfo,
    16274  (uint64_t)allocationCount,
    16275  pAllocations);
    16276  }
    16277 #endif
    16278 
    16279  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16280  {
    16281  for(size_t i = 0; i < allocationCount; ++i)
    16282  {
    16283  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
    16284  }
    16285  }
    16286 
    16287  return result;
    16288 }
    16289 
    16290 VkResult vmaAllocateMemoryForBuffer(
    16291  VmaAllocator allocator,
    16292  VkBuffer buffer,
    16293  const VmaAllocationCreateInfo* pCreateInfo,
    16294  VmaAllocation* pAllocation,
    16295  VmaAllocationInfo* pAllocationInfo)
    16296 {
    16297  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16298 
    16299  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    16300 
    16301  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16302 
    16303  VkMemoryRequirements vkMemReq = {};
    16304  bool requiresDedicatedAllocation = false;
    16305  bool prefersDedicatedAllocation = false;
    16306  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    16307  requiresDedicatedAllocation,
    16308  prefersDedicatedAllocation);
    16309 
    16310  VkResult result = allocator->AllocateMemory(
    16311  vkMemReq,
    16312  requiresDedicatedAllocation,
    16313  prefersDedicatedAllocation,
    16314  buffer, // dedicatedBuffer
    16315  VK_NULL_HANDLE, // dedicatedImage
    16316  *pCreateInfo,
    16317  VMA_SUBALLOCATION_TYPE_BUFFER,
    16318  1, // allocationCount
    16319  pAllocation);
    16320 
    16321 #if VMA_RECORDING_ENABLED
    16322  if(allocator->GetRecorder() != VMA_NULL)
    16323  {
    16324  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    16325  allocator->GetCurrentFrameIndex(),
    16326  vkMemReq,
    16327  requiresDedicatedAllocation,
    16328  prefersDedicatedAllocation,
    16329  *pCreateInfo,
    16330  *pAllocation);
    16331  }
    16332 #endif
    16333 
    16334  if(pAllocationInfo && result == VK_SUCCESS)
    16335  {
    16336  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16337  }
    16338 
    16339  return result;
    16340 }
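 /*
 Minimal usage sketch (assuming `buffer` was already created with vkCreateBuffer):

     VmaAllocationCreateInfo allocInfo = {};
     allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
     VmaAllocation allocation = VK_NULL_HANDLE;
     VkResult res = vmaAllocateMemoryForBuffer(allocator, buffer, &allocInfo, &allocation, nullptr);
     // On VK_SUCCESS, bind with vmaBindBufferMemory(allocator, allocation, buffer).
 */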
    16341 
    16342 VkResult vmaAllocateMemoryForImage(
    16343  VmaAllocator allocator,
    16344  VkImage image,
    16345  const VmaAllocationCreateInfo* pCreateInfo,
    16346  VmaAllocation* pAllocation,
    16347  VmaAllocationInfo* pAllocationInfo)
    16348 {
    16349  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16350 
    16351  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    16352 
    16353  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16354 
    16355  VkMemoryRequirements vkMemReq = {};
    16356  bool requiresDedicatedAllocation = false;
    16357  bool prefersDedicatedAllocation = false;
    16358  allocator->GetImageMemoryRequirements(image, vkMemReq,
    16359  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16360 
    16361  VkResult result = allocator->AllocateMemory(
    16362  vkMemReq,
    16363  requiresDedicatedAllocation,
    16364  prefersDedicatedAllocation,
    16365  VK_NULL_HANDLE, // dedicatedBuffer
    16366  image, // dedicatedImage
    16367  *pCreateInfo,
    16368  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    16369  1, // allocationCount
    16370  pAllocation);
    16371 
    16372 #if VMA_RECORDING_ENABLED
    16373  if(allocator->GetRecorder() != VMA_NULL)
    16374  {
    16375  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    16376  allocator->GetCurrentFrameIndex(),
    16377  vkMemReq,
    16378  requiresDedicatedAllocation,
    16379  prefersDedicatedAllocation,
    16380  *pCreateInfo,
    16381  *pAllocation);
    16382  }
    16383 #endif
    16384 
    16385  if(pAllocationInfo && result == VK_SUCCESS)
    16386  {
    16387  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16388  }
    16389 
    16390  return result;
    16391 }
    16392 
    16393 void vmaFreeMemory(
    16394  VmaAllocator allocator,
    16395  VmaAllocation allocation)
    16396 {
    16397  VMA_ASSERT(allocator);
    16398 
    16399  if(allocation == VK_NULL_HANDLE)
    16400  {
    16401  return;
    16402  }
    16403 
    16404  VMA_DEBUG_LOG("vmaFreeMemory");
    16405 
    16406  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16407 
    16408 #if VMA_RECORDING_ENABLED
    16409  if(allocator->GetRecorder() != VMA_NULL)
    16410  {
    16411  allocator->GetRecorder()->RecordFreeMemory(
    16412  allocator->GetCurrentFrameIndex(),
    16413  allocation);
    16414  }
    16415 #endif
    16416 
    16417  allocator->FreeMemory(
    16418  1, // allocationCount
    16419  &allocation);
    16420 }
    16421 
    16422 void vmaFreeMemoryPages(
    16423  VmaAllocator allocator,
    16424  size_t allocationCount,
    16425  VmaAllocation* pAllocations)
    16426 {
    16427  if(allocationCount == 0)
    16428  {
    16429  return;
    16430  }
    16431 
    16432  VMA_ASSERT(allocator);
    16433 
    16434  VMA_DEBUG_LOG("vmaFreeMemoryPages");
    16435 
    16436  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16437 
    16438 #if VMA_RECORDING_ENABLED
    16439  if(allocator->GetRecorder() != VMA_NULL)
    16440  {
    16441  allocator->GetRecorder()->RecordFreeMemoryPages(
    16442  allocator->GetCurrentFrameIndex(),
    16443  (uint64_t)allocationCount,
    16444  pAllocations);
    16445  }
    16446 #endif
    16447 
    16448  allocator->FreeMemory(allocationCount, pAllocations);
    16449 }
    16450 
    16451 VkResult vmaResizeAllocation(
    16452  VmaAllocator allocator,
    16453  VmaAllocation allocation,
    16454  VkDeviceSize newSize)
    16455 {
    16456  VMA_ASSERT(allocator && allocation);
    16457 
    16458  VMA_DEBUG_LOG("vmaResizeAllocation");
    16459 
    16460  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16461 
    16462 #if VMA_RECORDING_ENABLED
    16463  if(allocator->GetRecorder() != VMA_NULL)
    16464  {
    16465  allocator->GetRecorder()->RecordResizeAllocation(
    16466  allocator->GetCurrentFrameIndex(),
    16467  allocation,
    16468  newSize);
    16469  }
    16470 #endif
    16471 
    16472  return allocator->ResizeAllocation(allocation, newSize);
    16473 }
    16474 
    16475 void vmaGetAllocationInfo(
    16476  VmaAllocator allocator,
    16477  VmaAllocation allocation,
    16478  VmaAllocationInfo* pAllocationInfo)
    16479 {
    16480  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    16481 
    16482  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16483 
    16484 #if VMA_RECORDING_ENABLED
    16485  if(allocator->GetRecorder() != VMA_NULL)
    16486  {
    16487  allocator->GetRecorder()->RecordGetAllocationInfo(
    16488  allocator->GetCurrentFrameIndex(),
    16489  allocation);
    16490  }
    16491 #endif
    16492 
    16493  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    16494 }
    16495 
    16496 VkBool32 vmaTouchAllocation(
    16497  VmaAllocator allocator,
    16498  VmaAllocation allocation)
    16499 {
    16500  VMA_ASSERT(allocator && allocation);
    16501 
    16502  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16503 
    16504 #if VMA_RECORDING_ENABLED
    16505  if(allocator->GetRecorder() != VMA_NULL)
    16506  {
    16507  allocator->GetRecorder()->RecordTouchAllocation(
    16508  allocator->GetCurrentFrameIndex(),
    16509  allocation);
    16510  }
    16511 #endif
    16512 
    16513  return allocator->TouchAllocation(allocation);
    16514 }
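// Note: vmaTouchAllocation() returns VK_TRUE if the allocation is not lost and
// atomically marks it as used in the current frame, which is what protects it
// from being reclaimed by the "Lost allocations" mechanism.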
    16515 
    16516 void vmaSetAllocationUserData(
    16517  VmaAllocator allocator,
    16518  VmaAllocation allocation,
    16519  void* pUserData)
    16520 {
    16521  VMA_ASSERT(allocator && allocation);
    16522 
    16523  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16524 
    16525  allocation->SetUserData(allocator, pUserData);
    16526 
    16527 #if VMA_RECORDING_ENABLED
    16528  if(allocator->GetRecorder() != VMA_NULL)
    16529  {
    16530  allocator->GetRecorder()->RecordSetAllocationUserData(
    16531  allocator->GetCurrentFrameIndex(),
    16532  allocation,
    16533  pUserData);
    16534  }
    16535 #endif
    16536 }
    16537 
    16538 void vmaCreateLostAllocation(
    16539  VmaAllocator allocator,
    16540  VmaAllocation* pAllocation)
    16541 {
    16542  VMA_ASSERT(allocator && pAllocation);
    16543 
    16544  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16545 
    16546  allocator->CreateLostAllocation(pAllocation);
    16547 
    16548 #if VMA_RECORDING_ENABLED
    16549  if(allocator->GetRecorder() != VMA_NULL)
    16550  {
    16551  allocator->GetRecorder()->RecordCreateLostAllocation(
    16552  allocator->GetCurrentFrameIndex(),
    16553  *pAllocation);
    16554  }
    16555 #endif
    16556 }
    16557 
    16558 VkResult vmaMapMemory(
    16559  VmaAllocator allocator,
    16560  VmaAllocation allocation,
    16561  void** ppData)
    16562 {
    16563  VMA_ASSERT(allocator && allocation && ppData);
    16564 
    16565  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16566 
    16567  VkResult res = allocator->Map(allocation, ppData);
    16568 
    16569 #if VMA_RECORDING_ENABLED
    16570  if(allocator->GetRecorder() != VMA_NULL)
    16571  {
    16572  allocator->GetRecorder()->RecordMapMemory(
    16573  allocator->GetCurrentFrameIndex(),
    16574  allocation);
    16575  }
    16576 #endif
    16577 
    16578  return res;
    16579 }
    16580 
    16581 void vmaUnmapMemory(
    16582  VmaAllocator allocator,
    16583  VmaAllocation allocation)
    16584 {
    16585  VMA_ASSERT(allocator && allocation);
    16586 
    16587  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16588 
    16589 #if VMA_RECORDING_ENABLED
    16590  if(allocator->GetRecorder() != VMA_NULL)
    16591  {
    16592  allocator->GetRecorder()->RecordUnmapMemory(
    16593  allocator->GetCurrentFrameIndex(),
    16594  allocation);
    16595  }
    16596 #endif
    16597 
    16598  allocator->Unmap(allocation);
    16599 }
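// Illustrative sketch, not part of the library: the typical pattern around
// vmaMapMemory()/vmaUnmapMemory(). `allocator`, `allocation`, `srcData` and
// `dataSize` are placeholders assumed to exist; the flush is required only
// for memory types without VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, but is
// harmless otherwise.
//
//     void* pMapped = VMA_NULL;
//     if(vmaMapMemory(allocator, allocation, &pMapped) == VK_SUCCESS)
//     {
//         memcpy(pMapped, srcData, (size_t)dataSize);
//         vmaFlushAllocation(allocator, allocation, 0, dataSize);
//         vmaUnmapMemory(allocator, allocation);
//     }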
    16600 
    16601 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16602 {
    16603  VMA_ASSERT(allocator && allocation);
    16604 
    16605  VMA_DEBUG_LOG("vmaFlushAllocation");
    16606 
    16607  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16608 
    16609  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    16610 
    16611 #if VMA_RECORDING_ENABLED
    16612  if(allocator->GetRecorder() != VMA_NULL)
    16613  {
    16614  allocator->GetRecorder()->RecordFlushAllocation(
    16615  allocator->GetCurrentFrameIndex(),
    16616  allocation, offset, size);
    16617  }
    16618 #endif
    16619 }
    16620 
    16621 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16622 {
    16623  VMA_ASSERT(allocator && allocation);
    16624 
    16625  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    16626 
    16627  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16628 
    16629  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    16630 
    16631 #if VMA_RECORDING_ENABLED
    16632  if(allocator->GetRecorder() != VMA_NULL)
    16633  {
    16634  allocator->GetRecorder()->RecordInvalidateAllocation(
    16635  allocator->GetCurrentFrameIndex(),
    16636  allocation, offset, size);
    16637  }
    16638 #endif
    16639 }
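// Note: vmaFlushAllocation() and vmaInvalidateAllocation() share one
// implementation and differ only in the VMA_CACHE_FLUSH / VMA_CACHE_INVALIDATE
// direction: flush after CPU writes, invalidate before CPU reads. Both matter
// only for memory types without VK_MEMORY_PROPERTY_HOST_COHERENT_BIT.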
    16640 
    16641 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    16642 {
    16643  VMA_ASSERT(allocator);
    16644 
    16645  VMA_DEBUG_LOG("vmaCheckCorruption");
    16646 
    16647  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16648 
    16649  return allocator->CheckCorruption(memoryTypeBits);
    16650 }
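// Note: corruption checking validates magic numbers written into margins
// around allocations, so it is effective only when the library is compiled
// with VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION enabled.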
    16651 
    16652 VkResult vmaDefragment(
    16653  VmaAllocator allocator,
    16654  VmaAllocation* pAllocations,
    16655  size_t allocationCount,
    16656  VkBool32* pAllocationsChanged,
    16657  const VmaDefragmentationInfo *pDefragmentationInfo,
    16658  VmaDefragmentationStats* pDefragmentationStats)
    16659 {
    16660  // Deprecated interface, reimplemented using the new one.
    16661 
    16662  VmaDefragmentationInfo2 info2 = {};
    16663  info2.allocationCount = (uint32_t)allocationCount;
    16664  info2.pAllocations = pAllocations;
    16665  info2.pAllocationsChanged = pAllocationsChanged;
    16666  if(pDefragmentationInfo != VMA_NULL)
    16667  {
    16668  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    16669  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
    16670  }
    16671  else
    16672  {
    16673  info2.maxCpuAllocationsToMove = UINT32_MAX;
    16674  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
    16675  }
    16676  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
    16677 
    16678  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    16679  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
    16680  if(res == VK_NOT_READY)
    16681  {
    16682  res = vmaDefragmentationEnd(allocator, ctx);
    16683  }
    16684  return res;
    16685 }
    16686 
    16687 VkResult vmaDefragmentationBegin(
    16688  VmaAllocator allocator,
    16689  const VmaDefragmentationInfo2* pInfo,
    16690  VmaDefragmentationStats* pStats,
    16691  VmaDefragmentationContext *pContext)
    16692 {
    16693  VMA_ASSERT(allocator && pInfo && pContext);
    16694 
    16695  // Degenerate case: Nothing to defragment.
    16696  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
    16697  {
    16698  return VK_SUCCESS;
    16699  }
    16700 
    16701  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
    16702  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
    16703  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
    16704  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
    16705 
    16706  VMA_DEBUG_LOG("vmaDefragmentationBegin");
    16707 
    16708  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16709 
    16710  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
    16711 
    16712 #if VMA_RECORDING_ENABLED
    16713  if(allocator->GetRecorder() != VMA_NULL)
    16714  {
    16715  allocator->GetRecorder()->RecordDefragmentationBegin(
    16716  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
    16717  }
    16718 #endif
    16719 
    16720  return res;
    16721 }
    16722 
    16723 VkResult vmaDefragmentationEnd(
    16724  VmaAllocator allocator,
    16725  VmaDefragmentationContext context)
    16726 {
    16727  VMA_ASSERT(allocator);
    16728 
    16729  VMA_DEBUG_LOG("vmaDefragmentationEnd");
    16730 
    16731  if(context != VK_NULL_HANDLE)
    16732  {
    16733  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16734 
    16735 #if VMA_RECORDING_ENABLED
    16736  if(allocator->GetRecorder() != VMA_NULL)
    16737  {
    16738  allocator->GetRecorder()->RecordDefragmentationEnd(
    16739  allocator->GetCurrentFrameIndex(), context);
    16740  }
    16741 #endif
    16742 
    16743  return allocator->DefragmentationEnd(context);
    16744  }
    16745  else
    16746  {
    16747  return VK_SUCCESS;
    16748  }
    16749 }
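// Illustrative sketch, not part of the library: CPU-only defragmentation,
// mirroring the deprecated vmaDefragment() wrapper above. `allocator`,
// `allocs` and `allocCount` are placeholders assumed to exist; buffers or
// images bound to allocations that were moved must afterwards be recreated
// and rebound by the caller.
//
//     VmaDefragmentationInfo2 defragInfo = {};
//     defragInfo.allocationCount = allocCount;
//     defragInfo.pAllocations = allocs;
//     defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//     defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//
//     VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
//     VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
//     if(res == VK_NOT_READY)
//     {
//         res = vmaDefragmentationEnd(allocator, defragCtx);
//     }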
    16750 
    16751 VkResult vmaBindBufferMemory(
    16752  VmaAllocator allocator,
    16753  VmaAllocation allocation,
    16754  VkBuffer buffer)
    16755 {
    16756  VMA_ASSERT(allocator && allocation && buffer);
    16757 
    16758  VMA_DEBUG_LOG("vmaBindBufferMemory");
    16759 
    16760  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16761 
    16762  return allocator->BindBufferMemory(allocation, buffer);
    16763 }
    16764 
    16765 VkResult vmaBindImageMemory(
    16766  VmaAllocator allocator,
    16767  VmaAllocation allocation,
    16768  VkImage image)
    16769 {
    16770  VMA_ASSERT(allocator && allocation && image);
    16771 
    16772  VMA_DEBUG_LOG("vmaBindImageMemory");
    16773 
    16774  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16775 
    16776  return allocator->BindImageMemory(allocation, image);
    16777 }
    16778 
    16779 VkResult vmaCreateBuffer(
    16780  VmaAllocator allocator,
    16781  const VkBufferCreateInfo* pBufferCreateInfo,
    16782  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16783  VkBuffer* pBuffer,
    16784  VmaAllocation* pAllocation,
    16785  VmaAllocationInfo* pAllocationInfo)
    16786 {
    16787  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    16788 
    16789  if(pBufferCreateInfo->size == 0)
    16790  {
    16791  return VK_ERROR_VALIDATION_FAILED_EXT;
    16792  }
    16793 
    16794  VMA_DEBUG_LOG("vmaCreateBuffer");
    16795 
    16796  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16797 
    16798  *pBuffer = VK_NULL_HANDLE;
    16799  *pAllocation = VK_NULL_HANDLE;
    16800 
    16801  // 1. Create VkBuffer.
    16802  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    16803  allocator->m_hDevice,
    16804  pBufferCreateInfo,
    16805  allocator->GetAllocationCallbacks(),
    16806  pBuffer);
    16807  if(res >= 0)
    16808  {
    16809  // 2. vkGetBufferMemoryRequirements.
    16810  VkMemoryRequirements vkMemReq = {};
    16811  bool requiresDedicatedAllocation = false;
    16812  bool prefersDedicatedAllocation = false;
    16813  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    16814  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16815 
    16816  // Make sure alignment requirements for specific buffer usages reported
    16817  // in Physical Device Properties are included in alignment reported by memory requirements.
    16818  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    16819  {
    16820  VMA_ASSERT(vkMemReq.alignment %
    16821  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    16822  }
    16823  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    16824  {
    16825  VMA_ASSERT(vkMemReq.alignment %
    16826  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    16827  }
    16828  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    16829  {
    16830  VMA_ASSERT(vkMemReq.alignment %
    16831  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    16832  }
    16833 
    16834  // 3. Allocate memory using allocator.
    16835  res = allocator->AllocateMemory(
    16836  vkMemReq,
    16837  requiresDedicatedAllocation,
    16838  prefersDedicatedAllocation,
    16839  *pBuffer, // dedicatedBuffer
    16840  VK_NULL_HANDLE, // dedicatedImage
    16841  *pAllocationCreateInfo,
    16842  VMA_SUBALLOCATION_TYPE_BUFFER,
    16843  1, // allocationCount
    16844  pAllocation);
    16845 
    16846 #if VMA_RECORDING_ENABLED
    16847  if(allocator->GetRecorder() != VMA_NULL)
    16848  {
    16849  allocator->GetRecorder()->RecordCreateBuffer(
    16850  allocator->GetCurrentFrameIndex(),
    16851  *pBufferCreateInfo,
    16852  *pAllocationCreateInfo,
    16853  *pAllocation);
    16854  }
    16855 #endif
    16856 
    16857  if(res >= 0)
    16858  {
    16859  // 4. Bind buffer with memory.
    16860  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
    16861  {
    16862  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    16863  }
    16864  if(res >= 0)
    16865  {
    16866  // All steps succeeded.
    16867  #if VMA_STATS_STRING_ENABLED
    16868  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    16869  #endif
    16870  if(pAllocationInfo != VMA_NULL)
    16871  {
    16872  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16873  }
    16874 
    16875  return VK_SUCCESS;
    16876  }
    16877  allocator->FreeMemory(
    16878  1, // allocationCount
    16879  pAllocation);
    16880  *pAllocation = VK_NULL_HANDLE;
    16881  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16882  *pBuffer = VK_NULL_HANDLE;
    16883  return res;
    16884  }
    16885  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16886  *pBuffer = VK_NULL_HANDLE;
    16887  return res;
    16888  }
    16889  return res;
    16890 }
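// Illustrative sketch, not part of the library: creating a staging buffer
// with vmaCreateBuffer(). Per the "Common mistakes" chapter, a staging copy
// that is only written by the CPU and used as a transfer source should use
// VMA_MEMORY_USAGE_CPU_ONLY rather than VMA_MEMORY_USAGE_CPU_TO_GPU.
// `allocator` and `dataSize` are placeholders assumed to exist.
//
//     VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//     bufInfo.size = dataSize;
//     bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
//
//     VmaAllocationCreateInfo allocInfo = {};
//     allocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//
//     VkBuffer stagingBuf = VK_NULL_HANDLE;
//     VmaAllocation stagingAlloc = VK_NULL_HANDLE;
//     VkResult res = vmaCreateBuffer(allocator, &bufInfo, &allocInfo,
//         &stagingBuf, &stagingAlloc, VMA_NULL);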
    16891 
    16892 void vmaDestroyBuffer(
    16893  VmaAllocator allocator,
    16894  VkBuffer buffer,
    16895  VmaAllocation allocation)
    16896 {
    16897  VMA_ASSERT(allocator);
    16898 
    16899  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    16900  {
    16901  return;
    16902  }
    16903 
    16904  VMA_DEBUG_LOG("vmaDestroyBuffer");
    16905 
    16906  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16907 
    16908 #if VMA_RECORDING_ENABLED
    16909  if(allocator->GetRecorder() != VMA_NULL)
    16910  {
    16911  allocator->GetRecorder()->RecordDestroyBuffer(
    16912  allocator->GetCurrentFrameIndex(),
    16913  allocation);
    16914  }
    16915 #endif
    16916 
    16917  if(buffer != VK_NULL_HANDLE)
    16918  {
    16919  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    16920  }
    16921 
    16922  if(allocation != VK_NULL_HANDLE)
    16923  {
    16924  allocator->FreeMemory(
    16925  1, // allocationCount
    16926  &allocation);
    16927  }
    16928 }
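// Note: vmaDestroyBuffer() is null-safe - it returns immediately when both
// handles are VK_NULL_HANDLE, and each of the buffer and the allocation may
// also be null individually, so it can be called unconditionally in cleanup
// paths.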
    16929 
    16930 VkResult vmaCreateImage(
    16931  VmaAllocator allocator,
    16932  const VkImageCreateInfo* pImageCreateInfo,
    16933  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16934  VkImage* pImage,
    16935  VmaAllocation* pAllocation,
    16936  VmaAllocationInfo* pAllocationInfo)
    16937 {
    16938  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    16939 
    16940  if(pImageCreateInfo->extent.width == 0 ||
    16941  pImageCreateInfo->extent.height == 0 ||
    16942  pImageCreateInfo->extent.depth == 0 ||
    16943  pImageCreateInfo->mipLevels == 0 ||
    16944  pImageCreateInfo->arrayLayers == 0)
    16945  {
    16946  return VK_ERROR_VALIDATION_FAILED_EXT;
    16947  }
    16948 
    16949  VMA_DEBUG_LOG("vmaCreateImage");
    16950 
    16951  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16952 
    16953  *pImage = VK_NULL_HANDLE;
    16954  *pAllocation = VK_NULL_HANDLE;
    16955 
    16956  // 1. Create VkImage.
    16957  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    16958  allocator->m_hDevice,
    16959  pImageCreateInfo,
    16960  allocator->GetAllocationCallbacks(),
    16961  pImage);
    16962  if(res >= 0)
    16963  {
    16964  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    16965  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    16966  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    16967 
    16968  // 2. Allocate memory using allocator.
    16969  VkMemoryRequirements vkMemReq = {};
    16970  bool requiresDedicatedAllocation = false;
    16971  bool prefersDedicatedAllocation = false;
    16972  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    16973  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16974 
    16975  res = allocator->AllocateMemory(
    16976  vkMemReq,
    16977  requiresDedicatedAllocation,
    16978  prefersDedicatedAllocation,
    16979  VK_NULL_HANDLE, // dedicatedBuffer
    16980  *pImage, // dedicatedImage
    16981  *pAllocationCreateInfo,
    16982  suballocType,
    16983  1, // allocationCount
    16984  pAllocation);
    16985 
    16986 #if VMA_RECORDING_ENABLED
    16987  if(allocator->GetRecorder() != VMA_NULL)
    16988  {
    16989  allocator->GetRecorder()->RecordCreateImage(
    16990  allocator->GetCurrentFrameIndex(),
    16991  *pImageCreateInfo,
    16992  *pAllocationCreateInfo,
    16993  *pAllocation);
    16994  }
    16995 #endif
    16996 
    16997  if(res >= 0)
    16998  {
    16999  // 3. Bind image with memory.
    17000  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
    17001  {
    17002  res = allocator->BindImageMemory(*pAllocation, *pImage);
    17003  }
    17004  if(res >= 0)
    17005  {
    17006  // All steps succeeded.
    17007  #if VMA_STATS_STRING_ENABLED
    17008  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    17009  #endif
    17010  if(pAllocationInfo != VMA_NULL)
    17011  {
    17012  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    17013  }
    17014 
    17015  return VK_SUCCESS;
    17016  }
    17017  allocator->FreeMemory(
    17018  1, // allocationCount
    17019  pAllocation);
    17020  *pAllocation = VK_NULL_HANDLE;
    17021  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    17022  *pImage = VK_NULL_HANDLE;
    17023  return res;
    17024  }
    17025  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    17026  *pImage = VK_NULL_HANDLE;
    17027  return res;
    17028  }
    17029  return res;
    17030 }
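// Note: the suballocation type chosen above depends on VkImageCreateInfo::tiling
// because linear and optimal resources placed in the same memory block may have
// to be separated by bufferImageGranularity; tracking the type lets the
// allocator keep them apart.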
    17031 
    17032 void vmaDestroyImage(
    17033  VmaAllocator allocator,
    17034  VkImage image,
    17035  VmaAllocation allocation)
    17036 {
    17037  VMA_ASSERT(allocator);
    17038 
    17039  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    17040  {
    17041  return;
    17042  }
    17043 
    17044  VMA_DEBUG_LOG("vmaDestroyImage");
    17045 
    17046  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    17047 
    17048 #if VMA_RECORDING_ENABLED
    17049  if(allocator->GetRecorder() != VMA_NULL)
    17050  {
    17051  allocator->GetRecorder()->RecordDestroyImage(
    17052  allocator->GetCurrentFrameIndex(),
    17053  allocation);
    17054  }
    17055 #endif
    17056 
    17057  if(image != VK_NULL_HANDLE)
    17058  {
    17059  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    17060  }
    17061  if(allocation != VK_NULL_HANDLE)
    17062  {
    17063  allocator->FreeMemory(
    17064  1, // allocationCount
    17065  &allocation);
    17066  }
    17067 }
    17068 
    17069 #endif // #ifdef VMA_IMPLEMENTATION