diff --git a/docs/html/general_considerations.html b/docs/html/general_considerations.html index 3dc7017..b298d5c 100644 --- a/docs/html/general_considerations.html +++ b/docs/html/general_considerations.html @@ -116,7 +116,7 @@ Features not supported
  • Recreation of buffers and images. Although the library has functions for buffer and image creation (vmaCreateBuffer(), vmaCreateImage()), you need to recreate these objects yourself after defragmentation. That's because the big structures VkBufferCreateInfo, VkImageCreateInfo are not stored in VmaAllocation object.
  • Handling CPU memory allocation failures. When dynamically creating small C++ objects in CPU memory (not Vulkan memory), allocation failures are not checked and handled gracefully, because that would complicate code significantly and is usually not needed in desktop PC applications anyway.
  • Code free of any compiler warnings. Maintaining the library to compile and work correctly on so many different platforms is hard enough. Being free of any warnings, on any version of any compiler, is simply not feasible.
  • -
  • Support for any programming languages other than C/C++. Bindings to other languages are welcomed as external projects.
  • +
  • This is a C++ library with C interface. Bindings or ports to any other programming languages are welcomed as external projects and are not going to be included into this repository.
  • diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index bd37171..6baea14 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -65,189 +65,189 @@ $(function() {
    vk_mem_alloc.h
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1479 /*
    1480 Define this macro to 0/1 to disable/enable support for recording functionality,
    1481 available through VmaAllocatorCreateInfo::pRecordSettings.
    1482 */
    1483 #ifndef VMA_RECORDING_ENABLED
    1484  #ifdef _WIN32
    1485  #define VMA_RECORDING_ENABLED 1
    1486  #else
    1487  #define VMA_RECORDING_ENABLED 0
    1488  #endif
    1489 #endif
    1490 
    1491 #ifndef NOMINMAX
    1492  #define NOMINMAX // For windows.h
    1493 #endif
    1494 
    1495 #include <vulkan/vulkan.h>
    1496 
    1497 #if VMA_RECORDING_ENABLED
    1498  #include <windows.h>
    1499 #endif
    1500 
    1501 #if !defined(VMA_DEDICATED_ALLOCATION)
    1502  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1503  #define VMA_DEDICATED_ALLOCATION 1
    1504  #else
    1505  #define VMA_DEDICATED_ALLOCATION 0
    1506  #endif
    1507 #endif
    1508 
    1518 VK_DEFINE_HANDLE(VmaAllocator)
    1519 
    1520 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1522  VmaAllocator allocator,
    1523  uint32_t memoryType,
    1524  VkDeviceMemory memory,
    1525  VkDeviceSize size);
    1527 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1528  VmaAllocator allocator,
    1529  uint32_t memoryType,
    1530  VkDeviceMemory memory,
    1531  VkDeviceSize size);
    1532 
    1546 
    1576 
    1579 typedef VkFlags VmaAllocatorCreateFlags;
    1580 
    1585 typedef struct VmaVulkanFunctions {
    1586  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1587  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1588  PFN_vkAllocateMemory vkAllocateMemory;
    1589  PFN_vkFreeMemory vkFreeMemory;
    1590  PFN_vkMapMemory vkMapMemory;
    1591  PFN_vkUnmapMemory vkUnmapMemory;
    1592  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1593  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1594  PFN_vkBindBufferMemory vkBindBufferMemory;
    1595  PFN_vkBindImageMemory vkBindImageMemory;
    1596  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1597  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1598  PFN_vkCreateBuffer vkCreateBuffer;
    1599  PFN_vkDestroyBuffer vkDestroyBuffer;
    1600  PFN_vkCreateImage vkCreateImage;
    1601  PFN_vkDestroyImage vkDestroyImage;
    1602 #if VMA_DEDICATED_ALLOCATION
    1603  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1604  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1605 #endif
    1607 
    1609 typedef enum VmaRecordFlagBits {
    1616 
    1619 typedef VkFlags VmaRecordFlags;
    1620 
    1622 typedef struct VmaRecordSettings
    1623 {
    1633  const char* pFilePath;
    1635 
    1638 {
    1642 
    1643  VkPhysicalDevice physicalDevice;
    1645 
    1646  VkDevice device;
    1648 
    1651 
    1652  const VkAllocationCallbacks* pAllocationCallbacks;
    1654 
    1694  const VkDeviceSize* pHeapSizeLimit;
    1715 
    1717 VkResult vmaCreateAllocator(
    1718  const VmaAllocatorCreateInfo* pCreateInfo,
    1719  VmaAllocator* pAllocator);
    1720 
    1722 void vmaDestroyAllocator(
    1723  VmaAllocator allocator);
    1724 
    1730  VmaAllocator allocator,
    1731  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1732 
    1738  VmaAllocator allocator,
    1739  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1740 
    1748  VmaAllocator allocator,
    1749  uint32_t memoryTypeIndex,
    1750  VkMemoryPropertyFlags* pFlags);
    1751 
    1761  VmaAllocator allocator,
    1762  uint32_t frameIndex);
    1763 
    1766 typedef struct VmaStatInfo
    1767 {
    1769  uint32_t blockCount;
    1775  VkDeviceSize usedBytes;
    1777  VkDeviceSize unusedBytes;
    1780 } VmaStatInfo;
    1781 
    1783 typedef struct VmaStats
    1784 {
    1785  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1786  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1788 } VmaStats;
    1789 
    1791 void vmaCalculateStats(
    1792  VmaAllocator allocator,
    1793  VmaStats* pStats);
    1794 
    1795 #define VMA_STATS_STRING_ENABLED 1
    1796 
    1797 #if VMA_STATS_STRING_ENABLED
    1798 
    1800 
    1802 void vmaBuildStatsString(
    1803  VmaAllocator allocator,
    1804  char** ppStatsString,
    1805  VkBool32 detailedMap);
    1806 
    1807 void vmaFreeStatsString(
    1808  VmaAllocator allocator,
    1809  char* pStatsString);
    1810 
    1811 #endif // #if VMA_STATS_STRING_ENABLED
    1812 
    1821 VK_DEFINE_HANDLE(VmaPool)
    1822 
    1823 typedef enum VmaMemoryUsage
    1824 {
    1873 } VmaMemoryUsage;
    1874 
    1889 
    1944 
    1960 
    1970 
    1977 
    1981 
    1983 {
    1996  VkMemoryPropertyFlags requiredFlags;
    2001  VkMemoryPropertyFlags preferredFlags;
    2009  uint32_t memoryTypeBits;
    2022  void* pUserData;
    2024 
    2041 VkResult vmaFindMemoryTypeIndex(
    2042  VmaAllocator allocator,
    2043  uint32_t memoryTypeBits,
    2044  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2045  uint32_t* pMemoryTypeIndex);
    2046 
    2060  VmaAllocator allocator,
    2061  const VkBufferCreateInfo* pBufferCreateInfo,
    2062  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2063  uint32_t* pMemoryTypeIndex);
    2064 
    2078  VmaAllocator allocator,
    2079  const VkImageCreateInfo* pImageCreateInfo,
    2080  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2081  uint32_t* pMemoryTypeIndex);
    2082 
    2103 
    2120 
    2131 
    2137 
    2140 typedef VkFlags VmaPoolCreateFlags;
    2141 
    2144 typedef struct VmaPoolCreateInfo {
    2159  VkDeviceSize blockSize;
    2188 
    2191 typedef struct VmaPoolStats {
    2194  VkDeviceSize size;
    2197  VkDeviceSize unusedSize;
    2210  VkDeviceSize unusedRangeSizeMax;
    2213  size_t blockCount;
    2214 } VmaPoolStats;
    2215 
    2222 VkResult vmaCreatePool(
    2223  VmaAllocator allocator,
    2224  const VmaPoolCreateInfo* pCreateInfo,
    2225  VmaPool* pPool);
    2226 
    2229 void vmaDestroyPool(
    2230  VmaAllocator allocator,
    2231  VmaPool pool);
    2232 
    2239 void vmaGetPoolStats(
    2240  VmaAllocator allocator,
    2241  VmaPool pool,
    2242  VmaPoolStats* pPoolStats);
    2243 
    2251  VmaAllocator allocator,
    2252  VmaPool pool,
    2253  size_t* pLostAllocationCount);
    2254 
    2269 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2270 
    2295 VK_DEFINE_HANDLE(VmaAllocation)
    2296 
    2297 
    2299 typedef struct VmaAllocationInfo {
    2304  uint32_t memoryType;
    2313  VkDeviceMemory deviceMemory;
    2318  VkDeviceSize offset;
    2323  VkDeviceSize size;
    2337  void* pUserData;
    2339 
    2350 VkResult vmaAllocateMemory(
    2351  VmaAllocator allocator,
    2352  const VkMemoryRequirements* pVkMemoryRequirements,
    2353  const VmaAllocationCreateInfo* pCreateInfo,
    2354  VmaAllocation* pAllocation,
    2355  VmaAllocationInfo* pAllocationInfo);
    2356 
    2364  VmaAllocator allocator,
    2365  VkBuffer buffer,
    2366  const VmaAllocationCreateInfo* pCreateInfo,
    2367  VmaAllocation* pAllocation,
    2368  VmaAllocationInfo* pAllocationInfo);
    2369 
    2371 VkResult vmaAllocateMemoryForImage(
    2372  VmaAllocator allocator,
    2373  VkImage image,
    2374  const VmaAllocationCreateInfo* pCreateInfo,
    2375  VmaAllocation* pAllocation,
    2376  VmaAllocationInfo* pAllocationInfo);
    2377 
    2379 void vmaFreeMemory(
    2380  VmaAllocator allocator,
    2381  VmaAllocation allocation);
    2382 
    2403 VkResult vmaResizeAllocation(
    2404  VmaAllocator allocator,
    2405  VmaAllocation allocation,
    2406  VkDeviceSize newSize);
    2407 
    2425  VmaAllocator allocator,
    2426  VmaAllocation allocation,
    2427  VmaAllocationInfo* pAllocationInfo);
    2428 
    2443 VkBool32 vmaTouchAllocation(
    2444  VmaAllocator allocator,
    2445  VmaAllocation allocation);
    2446 
    2461  VmaAllocator allocator,
    2462  VmaAllocation allocation,
    2463  void* pUserData);
    2464 
    2476  VmaAllocator allocator,
    2477  VmaAllocation* pAllocation);
    2478 
    2513 VkResult vmaMapMemory(
    2514  VmaAllocator allocator,
    2515  VmaAllocation allocation,
    2516  void** ppData);
    2517 
    2522 void vmaUnmapMemory(
    2523  VmaAllocator allocator,
    2524  VmaAllocation allocation);
    2525 
    2538 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2539 
    2552 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2553 
    2570 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2571 
    2573 typedef struct VmaDefragmentationInfo {
    2578  VkDeviceSize maxBytesToMove;
    2585 
    2587 typedef struct VmaDefragmentationStats {
    2589  VkDeviceSize bytesMoved;
    2591  VkDeviceSize bytesFreed;
    2597 
    2636 VkResult vmaDefragment(
    2637  VmaAllocator allocator,
    2638  VmaAllocation* pAllocations,
    2639  size_t allocationCount,
    2640  VkBool32* pAllocationsChanged,
    2641  const VmaDefragmentationInfo *pDefragmentationInfo,
    2642  VmaDefragmentationStats* pDefragmentationStats);
    2643 
    2656 VkResult vmaBindBufferMemory(
    2657  VmaAllocator allocator,
    2658  VmaAllocation allocation,
    2659  VkBuffer buffer);
    2660 
    2673 VkResult vmaBindImageMemory(
    2674  VmaAllocator allocator,
    2675  VmaAllocation allocation,
    2676  VkImage image);
    2677 
    2704 VkResult vmaCreateBuffer(
    2705  VmaAllocator allocator,
    2706  const VkBufferCreateInfo* pBufferCreateInfo,
    2707  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2708  VkBuffer* pBuffer,
    2709  VmaAllocation* pAllocation,
    2710  VmaAllocationInfo* pAllocationInfo);
    2711 
    2723 void vmaDestroyBuffer(
    2724  VmaAllocator allocator,
    2725  VkBuffer buffer,
    2726  VmaAllocation allocation);
    2727 
    2729 VkResult vmaCreateImage(
    2730  VmaAllocator allocator,
    2731  const VkImageCreateInfo* pImageCreateInfo,
    2732  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2733  VkImage* pImage,
    2734  VmaAllocation* pAllocation,
    2735  VmaAllocationInfo* pAllocationInfo);
    2736 
    2748 void vmaDestroyImage(
    2749  VmaAllocator allocator,
    2750  VkImage image,
    2751  VmaAllocation allocation);
    2752 
    2753 #ifdef __cplusplus
    2754 }
    2755 #endif
    2756 
    2757 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2758 
    2759 // For Visual Studio IntelliSense.
    2760 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2761 #define VMA_IMPLEMENTATION
    2762 #endif
    2763 
    2764 #ifdef VMA_IMPLEMENTATION
    2765 #undef VMA_IMPLEMENTATION
    2766 
    2767 #include <cstdint>
    2768 #include <cstdlib>
    2769 #include <cstring>
    2770 
    2771 /*******************************************************************************
    2772 CONFIGURATION SECTION
    2773 
    2774 Define some of these macros before each #include of this header or change them
    2775 here if you need other than the default behavior depending on your environment.
    2776 */
    2777 
    2778 /*
    2779 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2780 internally, like:
    2781 
    2782  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2783 
    2784 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    2785 VmaAllocatorCreateInfo::pVulkanFunctions.
    2786 */
    2787 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2788 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2789 #endif
    2790 
    2791 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2792 //#define VMA_USE_STL_CONTAINERS 1
    2793 
    2794 /* Set this macro to 1 to make the library include and use STL containers:
    2795 std::pair, std::vector, std::list, std::unordered_map.
    2796 
    2797 Set it to 0 or undefined to make the library use its own implementation of
    2798 the containers.
    2799 */
    2800 #if VMA_USE_STL_CONTAINERS
    2801  #define VMA_USE_STL_VECTOR 1
    2802  #define VMA_USE_STL_UNORDERED_MAP 1
    2803  #define VMA_USE_STL_LIST 1
    2804 #endif
    2805 
    2806 #if VMA_USE_STL_VECTOR
    2807  #include <vector>
    2808 #endif
    2809 
    2810 #if VMA_USE_STL_UNORDERED_MAP
    2811  #include <unordered_map>
    2812 #endif
    2813 
    2814 #if VMA_USE_STL_LIST
    2815  #include <list>
    2816 #endif
    2817 
    2818 /*
    2819 Following headers are used in this CONFIGURATION section only, so feel free to
    2820 remove them if not needed.
    2821 */
    2822 #include <cassert> // for assert
    2823 #include <algorithm> // for min, max
    2824 #include <mutex> // for std::mutex
    2825 #include <atomic> // for std::atomic
    2826 
    2827 #ifndef VMA_NULL
    2828  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2829  #define VMA_NULL nullptr
    2830 #endif
    2831 
    2832 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    2833 #include <cstdlib>
    2834 void *aligned_alloc(size_t alignment, size_t size)
    2835 {
    2836  // alignment must be >= sizeof(void*)
    2837  if(alignment < sizeof(void*))
    2838  {
    2839  alignment = sizeof(void*);
    2840  }
    2841 
    2842  return memalign(alignment, size);
    2843 }
    2844 #elif defined(__APPLE__) || defined(__ANDROID__)
    2845 #include <cstdlib>
    2846 void *aligned_alloc(size_t alignment, size_t size)
    2847 {
    2848  // alignment must be >= sizeof(void*)
    2849  if(alignment < sizeof(void*))
    2850  {
    2851  alignment = sizeof(void*);
    2852  }
    2853 
    2854  void *pointer;
    2855  if(posix_memalign(&pointer, alignment, size) == 0)
    2856  return pointer;
    2857  return VMA_NULL;
    2858 }
    2859 #endif
    2860 
    2861 // If your compiler is not compatible with C++11 and definition of
    2862 // aligned_alloc() function is missing, uncommenting the following line may help:
    2863 
    2864 //#include <malloc.h>
    2865 
    2866 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2867 #ifndef VMA_ASSERT
    2868  #ifdef _DEBUG
    2869  #define VMA_ASSERT(expr) assert(expr)
    2870  #else
    2871  #define VMA_ASSERT(expr)
    2872  #endif
    2873 #endif
    2874 
    2875 // Assert that will be called very often, like inside data structures e.g. operator[].
    2876 // Making it non-empty can make program slow.
    2877 #ifndef VMA_HEAVY_ASSERT
    2878  #ifdef _DEBUG
    2879  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2880  #else
    2881  #define VMA_HEAVY_ASSERT(expr)
    2882  #endif
    2883 #endif
    2884 
    2885 #ifndef VMA_ALIGN_OF
    2886  #define VMA_ALIGN_OF(type) (__alignof(type))
    2887 #endif
    2888 
    2889 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2890  #if defined(_WIN32)
    2891  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2892  #else
    2893  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2894  #endif
    2895 #endif
    2896 
    2897 #ifndef VMA_SYSTEM_FREE
    2898  #if defined(_WIN32)
    2899  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2900  #else
    2901  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2902  #endif
    2903 #endif
    2904 
    2905 #ifndef VMA_MIN
    2906  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2907 #endif
    2908 
    2909 #ifndef VMA_MAX
    2910  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2911 #endif
    2912 
    2913 #ifndef VMA_SWAP
    2914  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2915 #endif
    2916 
    2917 #ifndef VMA_SORT
    2918  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2919 #endif
    2920 
    2921 #ifndef VMA_DEBUG_LOG
    2922  #define VMA_DEBUG_LOG(format, ...)
    2923  /*
    2924  #define VMA_DEBUG_LOG(format, ...) do { \
    2925  printf(format, __VA_ARGS__); \
    2926  printf("\n"); \
    2927  } while(false)
    2928  */
    2929 #endif
    2930 
    2931 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2932 #if VMA_STATS_STRING_ENABLED
    2933  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2934  {
    2935  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2936  }
    2937  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2938  {
    2939  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2940  }
    2941  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2942  {
    2943  snprintf(outStr, strLen, "%p", ptr);
    2944  }
    2945 #endif
    2946 
    2947 #ifndef VMA_MUTEX
    2948  class VmaMutex
    2949  {
    2950  public:
    2951  VmaMutex() { }
    2952  ~VmaMutex() { }
    2953  void Lock() { m_Mutex.lock(); }
    2954  void Unlock() { m_Mutex.unlock(); }
    2955  private:
    2956  std::mutex m_Mutex;
    2957  };
    2958  #define VMA_MUTEX VmaMutex
    2959 #endif
    2960 
    2961 /*
    2962 If providing your own implementation, you need to implement a subset of std::atomic:
    2963 
    2964 - Constructor(uint32_t desired)
    2965 - uint32_t load() const
    2966 - void store(uint32_t desired)
    2967 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2968 */
    2969 #ifndef VMA_ATOMIC_UINT32
    2970  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2971 #endif
    2972 
    2973 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2974 
    2978  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2979 #endif
    2980 
    2981 #ifndef VMA_DEBUG_ALIGNMENT
    2982 
    2986  #define VMA_DEBUG_ALIGNMENT (1)
    2987 #endif
    2988 
    2989 #ifndef VMA_DEBUG_MARGIN
    2990 
    2994  #define VMA_DEBUG_MARGIN (0)
    2995 #endif
    2996 
    2997 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2998 
    3002  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3003 #endif
    3004 
    3005 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3006 
    3011  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3012 #endif
    3013 
    3014 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3015 
    3019  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3020 #endif
    3021 
    3022 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3023 
    3027  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3028 #endif
    3029 
    3030 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3031  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3033 #endif
    3034 
    3035 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3036  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3038 #endif
    3039 
    3040 #ifndef VMA_CLASS_NO_COPY
    3041  #define VMA_CLASS_NO_COPY(className) \
    3042  private: \
    3043  className(const className&) = delete; \
    3044  className& operator=(const className&) = delete;
    3045 #endif
    3046 
    3047 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3048 
    3049 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3050 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3051 
    3052 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3053 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3054 
    3055 /*******************************************************************************
    3056 END OF CONFIGURATION
    3057 */
    3058 
    3059 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3060  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3061 
    3062 // Returns number of bits set to 1 in (v).
    3063 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3064 {
    3065  uint32_t c = v - ((v >> 1) & 0x55555555);
    3066  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3067  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3068  c = ((c >> 8) + c) & 0x00FF00FF;
    3069  c = ((c >> 16) + c) & 0x0000FFFF;
    3070  return c;
    3071 }
    3072 
    3073 // Aligns given value up to the nearest multiple of the align value. For example: VmaAlignUp(11, 8) = 16.
    3074 // Use types like uint32_t, uint64_t as T.
    3075 template <typename T>
    3076 static inline T VmaAlignUp(T val, T align)
    3077 {
    3078  return (val + align - 1) / align * align;
    3079 }
    3080 // Aligns given value down to the nearest multiple of the align value. For example: VmaAlignDown(11, 8) = 8.
    3081 // Use types like uint32_t, uint64_t as T.
    3082 template <typename T>
    3083 static inline T VmaAlignDown(T val, T align)
    3084 {
    3085  return val / align * align;
    3086 }
    3087 
    3088 // Division with mathematical rounding to nearest number.
    3089 template <typename T>
    3090 static inline T VmaRoundDiv(T x, T y)
    3091 {
    3092  return (x + (y / (T)2)) / y;
    3093 }
    3094 
    3095 /*
    3096 Returns true if given number is a power of two.
    3097 T must be unsigned integer number or signed integer but always nonnegative.
    3098 For 0 returns true.
    3099 */
    3100 template <typename T>
    3101 inline bool VmaIsPow2(T x)
    3102 {
    3103  return (x & (x-1)) == 0;
    3104 }
    3105 
    3106 // Returns smallest power of 2 greater or equal to v.
    3107 static inline uint32_t VmaNextPow2(uint32_t v)
    3108 {
    3109  v--;
    3110  v |= v >> 1;
    3111  v |= v >> 2;
    3112  v |= v >> 4;
    3113  v |= v >> 8;
    3114  v |= v >> 16;
    3115  v++;
    3116  return v;
    3117 }
    3118 static inline uint64_t VmaNextPow2(uint64_t v)
    3119 {
    3120  v--;
    3121  v |= v >> 1;
    3122  v |= v >> 2;
    3123  v |= v >> 4;
    3124  v |= v >> 8;
    3125  v |= v >> 16;
    3126  v |= v >> 32;
    3127  v++;
    3128  return v;
    3129 }
    3130 
    3131 // Returns largest power of 2 less or equal to v.
    3132 static inline uint32_t VmaPrevPow2(uint32_t v)
    3133 {
    3134  v |= v >> 1;
    3135  v |= v >> 2;
    3136  v |= v >> 4;
    3137  v |= v >> 8;
    3138  v |= v >> 16;
    3139  v = v ^ (v >> 1);
    3140  return v;
    3141 }
    3142 static inline uint64_t VmaPrevPow2(uint64_t v)
    3143 {
    3144  v |= v >> 1;
    3145  v |= v >> 2;
    3146  v |= v >> 4;
    3147  v |= v >> 8;
    3148  v |= v >> 16;
    3149  v |= v >> 32;
    3150  v = v ^ (v >> 1);
    3151  return v;
    3152 }
    3153 
    3154 static inline bool VmaStrIsEmpty(const char* pStr)
    3155 {
    3156  return pStr == VMA_NULL || *pStr == '\0';
    3157 }
    3158 
    3159 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3160 {
    3161  switch(algorithm)
    3162  {
    3164  return "Linear";
    3166  return "Buddy";
    3167  case 0:
    3168  return "Default";
    3169  default:
    3170  VMA_ASSERT(0);
    3171  return "";
    3172  }
    3173 }
    3174 
    3175 #ifndef VMA_SORT
    3176 
    3177 template<typename Iterator, typename Compare>
    3178 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3179 {
    3180  Iterator centerValue = end; --centerValue;
    3181  Iterator insertIndex = beg;
    3182  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3183  {
    3184  if(cmp(*memTypeIndex, *centerValue))
    3185  {
    3186  if(insertIndex != memTypeIndex)
    3187  {
    3188  VMA_SWAP(*memTypeIndex, *insertIndex);
    3189  }
    3190  ++insertIndex;
    3191  }
    3192  }
    3193  if(insertIndex != centerValue)
    3194  {
    3195  VMA_SWAP(*insertIndex, *centerValue);
    3196  }
    3197  return insertIndex;
    3198 }
    3199 
    3200 template<typename Iterator, typename Compare>
    3201 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3202 {
    3203  if(beg < end)
    3204  {
    3205  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3206  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3207  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3208  }
    3209 }
    3210 
    3211 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3212 
    3213 #endif // #ifndef VMA_SORT
    3214 
    3215 /*
    3216 Returns true if two memory blocks occupy overlapping pages.
    3217 ResourceA must be at a lower memory offset than ResourceB.
    3218 
    3219 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3220 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3221 */
    3222 static inline bool VmaBlocksOnSamePage(
    3223  VkDeviceSize resourceAOffset,
    3224  VkDeviceSize resourceASize,
    3225  VkDeviceSize resourceBOffset,
    3226  VkDeviceSize pageSize)
    3227 {
    3228  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3229  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3230  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3231  VkDeviceSize resourceBStart = resourceBOffset;
    3232  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3233  return resourceAEndPage == resourceBStartPage;
    3234 }
    3235 
    3236 enum VmaSuballocationType
    3237 {
    3238  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3239  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3240  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3241  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3242  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3243  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3244  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3245 };
    3246 
    3247 /*
    3248 Returns true if given suballocation types could conflict and must respect
    3249 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3250 or linear image and another one is optimal image. If type is unknown, behave
    3251 conservatively.
    3252 */
    3253 static inline bool VmaIsBufferImageGranularityConflict(
    3254  VmaSuballocationType suballocType1,
    3255  VmaSuballocationType suballocType2)
    3256 {
    3257  if(suballocType1 > suballocType2)
    3258  {
    3259  VMA_SWAP(suballocType1, suballocType2);
    3260  }
    3261 
    3262  switch(suballocType1)
    3263  {
    3264  case VMA_SUBALLOCATION_TYPE_FREE:
    3265  return false;
    3266  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3267  return true;
    3268  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3269  return
    3270  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3271  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3272  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3273  return
    3274  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3275  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3276  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3277  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3278  return
    3279  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3280  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3281  return false;
    3282  default:
    3283  VMA_ASSERT(0);
    3284  return true;
    3285  }
    3286 }
    3287 
    3288 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3289 {
    3290  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3291  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3292  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3293  {
    3294  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3295  }
    3296 }
    3297 
    3298 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3299 {
    3300  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3301  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3302  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3303  {
    3304  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3305  {
    3306  return false;
    3307  }
    3308  }
    3309  return true;
    3310 }
    3311 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    // useMutex == false makes the guard a no-op (m_pMutex stays null) -
    // this is how single-threaded configurations skip synchronization cost.
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex; // Null when synchronization is disabled.
};
    3337 
    3338 #if VMA_DEBUG_GLOBAL_MUTEX
    3339  static VMA_MUTEX gDebugGlobalMutex;
    3340  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3341 #else
    3342  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3343 #endif
    3344 
// Minimum size of a free suballocation to register it in the free suballocation collection.
// Free regions smaller than this are not worth tracking individually.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3347 
    3348 /*
    3349 Performs binary search and returns iterator to first element that is greater or
    3350 equal to (key), according to comparison (cmp).
    3351 
    3352 Cmp should return true if first argument is less than second argument.
    3353 
    3354 Returned value is the found element, if present in the collection or place where
    3355 new element with value (key) should be inserted.
    3356 */
    3357 template <typename CmpLess, typename IterT, typename KeyT>
    3358 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3359 {
    3360  size_t down = 0, up = (end - beg);
    3361  while(down < up)
    3362  {
    3363  const size_t mid = (down + up) / 2;
    3364  if(cmp(*(beg+mid), key))
    3365  {
    3366  down = mid + 1;
    3367  }
    3368  else
    3369  {
    3370  up = mid;
    3371  }
    3372  }
    3373  return beg + down;
    3374 }
    3375 
    3377 // Memory allocation
    3378 
    3379 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3380 {
    3381  if((pAllocationCallbacks != VMA_NULL) &&
    3382  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3383  {
    3384  return (*pAllocationCallbacks->pfnAllocation)(
    3385  pAllocationCallbacks->pUserData,
    3386  size,
    3387  alignment,
    3388  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3389  }
    3390  else
    3391  {
    3392  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3393  }
    3394 }
    3395 
    3396 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3397 {
    3398  if((pAllocationCallbacks != VMA_NULL) &&
    3399  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3400  {
    3401  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3402  }
    3403  else
    3404  {
    3405  VMA_SYSTEM_FREE(ptr);
    3406  }
    3407 }
    3408 
// Allocates raw, correctly aligned storage for a single object of type T.
// Does NOT run T's constructor - use vma_new below for that.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

// Allocates raw storage for (count) objects of type T.
// NOTE(review): sizeof(T) * count is not checked for overflow - presumably
// callers always pass small, trusted counts; confirm before using with
// externally-derived sizes.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

// Placement-new wrappers: allocate through the callbacks, then value-construct.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3424 
    3425 template<typename T>
    3426 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3427 {
    3428  ptr->~T();
    3429  VmaFree(pAllocationCallbacks, ptr);
    3430 }
    3431 
    3432 template<typename T>
    3433 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3434 {
    3435  if(ptr != VMA_NULL)
    3436  {
    3437  for(size_t i = count; i--; )
    3438  {
    3439  ptr[i].~T();
    3440  }
    3441  VmaFree(pAllocationCallbacks, ptr);
    3442  }
    3443 }
    3444 
// STL-compatible allocator.
// Adapts VkAllocationCallbacks to the std::allocator interface so standard
// containers (and VmaVector/VmaList below) allocate through the user's callbacks.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks; // May be null - VmaMalloc then uses the system allocator.
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Allocators compare equal iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3472 
    3473 #if VMA_USE_STL_VECTOR
    3474 
    3475 #define VmaVector std::vector
    3476 
// Free-function helpers so the rest of the code can insert/remove by index the
// same way whether std::vector (this branch) or VmaVector is in use.
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
    3488 
    3489 #else // #if VMA_USE_STL_VECTOR
    3490 
    3491 /* Class with interface compatible with subset of std::vector.
    3492 T must be POD because constructors and destructors are not called and memcpy is
    3493 used for these objects. */
    3494 template<typename T, typename AllocatorT>
    3495 class VmaVector
    3496 {
    3497 public:
    3498  typedef T value_type;
    3499 
    3500  VmaVector(const AllocatorT& allocator) :
    3501  m_Allocator(allocator),
    3502  m_pArray(VMA_NULL),
    3503  m_Count(0),
    3504  m_Capacity(0)
    3505  {
    3506  }
    3507 
    3508  VmaVector(size_t count, const AllocatorT& allocator) :
    3509  m_Allocator(allocator),
    3510  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3511  m_Count(count),
    3512  m_Capacity(count)
    3513  {
    3514  }
    3515 
    3516  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3517  m_Allocator(src.m_Allocator),
    3518  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3519  m_Count(src.m_Count),
    3520  m_Capacity(src.m_Count)
    3521  {
    3522  if(m_Count != 0)
    3523  {
    3524  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3525  }
    3526  }
    3527 
    3528  ~VmaVector()
    3529  {
    3530  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3531  }
    3532 
    3533  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3534  {
    3535  if(&rhs != this)
    3536  {
    3537  resize(rhs.m_Count);
    3538  if(m_Count != 0)
    3539  {
    3540  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3541  }
    3542  }
    3543  return *this;
    3544  }
    3545 
    3546  bool empty() const { return m_Count == 0; }
    3547  size_t size() const { return m_Count; }
    3548  T* data() { return m_pArray; }
    3549  const T* data() const { return m_pArray; }
    3550 
    3551  T& operator[](size_t index)
    3552  {
    3553  VMA_HEAVY_ASSERT(index < m_Count);
    3554  return m_pArray[index];
    3555  }
    3556  const T& operator[](size_t index) const
    3557  {
    3558  VMA_HEAVY_ASSERT(index < m_Count);
    3559  return m_pArray[index];
    3560  }
    3561 
    3562  T& front()
    3563  {
    3564  VMA_HEAVY_ASSERT(m_Count > 0);
    3565  return m_pArray[0];
    3566  }
    3567  const T& front() const
    3568  {
    3569  VMA_HEAVY_ASSERT(m_Count > 0);
    3570  return m_pArray[0];
    3571  }
    3572  T& back()
    3573  {
    3574  VMA_HEAVY_ASSERT(m_Count > 0);
    3575  return m_pArray[m_Count - 1];
    3576  }
    3577  const T& back() const
    3578  {
    3579  VMA_HEAVY_ASSERT(m_Count > 0);
    3580  return m_pArray[m_Count - 1];
    3581  }
    3582 
    3583  void reserve(size_t newCapacity, bool freeMemory = false)
    3584  {
    3585  newCapacity = VMA_MAX(newCapacity, m_Count);
    3586 
    3587  if((newCapacity < m_Capacity) && !freeMemory)
    3588  {
    3589  newCapacity = m_Capacity;
    3590  }
    3591 
    3592  if(newCapacity != m_Capacity)
    3593  {
    3594  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3595  if(m_Count != 0)
    3596  {
    3597  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3598  }
    3599  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3600  m_Capacity = newCapacity;
    3601  m_pArray = newArray;
    3602  }
    3603  }
    3604 
    3605  void resize(size_t newCount, bool freeMemory = false)
    3606  {
    3607  size_t newCapacity = m_Capacity;
    3608  if(newCount > m_Capacity)
    3609  {
    3610  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3611  }
    3612  else if(freeMemory)
    3613  {
    3614  newCapacity = newCount;
    3615  }
    3616 
    3617  if(newCapacity != m_Capacity)
    3618  {
    3619  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3620  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3621  if(elementsToCopy != 0)
    3622  {
    3623  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3624  }
    3625  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3626  m_Capacity = newCapacity;
    3627  m_pArray = newArray;
    3628  }
    3629 
    3630  m_Count = newCount;
    3631  }
    3632 
    3633  void clear(bool freeMemory = false)
    3634  {
    3635  resize(0, freeMemory);
    3636  }
    3637 
    3638  void insert(size_t index, const T& src)
    3639  {
    3640  VMA_HEAVY_ASSERT(index <= m_Count);
    3641  const size_t oldCount = size();
    3642  resize(oldCount + 1);
    3643  if(index < oldCount)
    3644  {
    3645  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3646  }
    3647  m_pArray[index] = src;
    3648  }
    3649 
    3650  void remove(size_t index)
    3651  {
    3652  VMA_HEAVY_ASSERT(index < m_Count);
    3653  const size_t oldCount = size();
    3654  if(index < oldCount - 1)
    3655  {
    3656  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3657  }
    3658  resize(oldCount - 1);
    3659  }
    3660 
    3661  void push_back(const T& src)
    3662  {
    3663  const size_t newIndex = size();
    3664  resize(newIndex + 1);
    3665  m_pArray[newIndex] = src;
    3666  }
    3667 
    3668  void pop_back()
    3669  {
    3670  VMA_HEAVY_ASSERT(m_Count > 0);
    3671  resize(size() - 1);
    3672  }
    3673 
    3674  void push_front(const T& src)
    3675  {
    3676  insert(0, src);
    3677  }
    3678 
    3679  void pop_front()
    3680  {
    3681  VMA_HEAVY_ASSERT(m_Count > 0);
    3682  remove(0);
    3683  }
    3684 
    3685  typedef T* iterator;
    3686 
    3687  iterator begin() { return m_pArray; }
    3688  iterator end() { return m_pArray + m_Count; }
    3689 
    3690 private:
    3691  AllocatorT m_Allocator;
    3692  T* m_pArray;
    3693  size_t m_Count;
    3694  size_t m_Capacity;
    3695 };
    3696 
// Free-function helpers mirroring the std::vector overloads in the
// VMA_USE_STL_VECTOR branch, so callers can insert/remove by index uniformly.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3708 
    3709 #endif // #if VMA_USE_STL_VECTOR
    3710 
    3711 template<typename CmpLess, typename VectorT>
    3712 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3713 {
    3714  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3715  vector.data(),
    3716  vector.data() + vector.size(),
    3717  value,
    3718  CmpLess()) - vector.data();
    3719  VmaVectorInsert(vector, indexToInsert, value);
    3720  return indexToInsert;
    3721 }
    3722 
    3723 template<typename CmpLess, typename VectorT>
    3724 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3725 {
    3726  CmpLess comparator;
    3727  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3728  vector.begin(),
    3729  vector.end(),
    3730  value,
    3731  comparator);
    3732  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3733  {
    3734  size_t indexToRemove = it - vector.begin();
    3735  VmaVectorRemove(vector, indexToRemove);
    3736  return true;
    3737  }
    3738  return false;
    3739 }
    3740 
    3741 template<typename CmpLess, typename IterT, typename KeyT>
    3742 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3743 {
    3744  CmpLess comparator;
    3745  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3746  beg, end, value, comparator);
    3747  if(it == end ||
    3748  (!comparator(*it, value) && !comparator(value, *it)))
    3749  {
    3750  return it;
    3751  }
    3752  return end;
    3753 }
    3754 
    3756 // class VmaPoolAllocator
    3757 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks; invalidates every pointer previously returned by Alloc().
    void Clear();
    // Returns uninitialized storage for one T (constructor not called).
    T* Alloc();
    // Returns an item previously obtained from Alloc() to its block's free list.
    void Free(T* ptr);

private:
    // Each slot is either a live T or a link in its block's free list.
    union Item
    {
        uint32_t NextFreeIndex; // Index of the next free slot; UINT32_MAX terminates the list.
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX when the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3793 
// itemsPerBlock must be > 0; it fixes the size of every block this allocator creates.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    3808 
// Frees every item block. All pointers previously returned by Alloc() become invalid.
template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}
    3816 
// Returns storage for one T: reuses the first free slot found in an existing
// block (newest blocks searched first), or creates a new block when all are full.
template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex; // Unlink slot from the free list.
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0]; // New block's free list starts at index 0.
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}
    3838 
// Returns (ptr), previously obtained from Alloc(), to its owning block's free list.
// Asserts if the pointer does not belong to any block of this allocator.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        // memcpy converts the T* to an Item* without a pointer cast; both point
        // at the same storage because Item is a union containing T.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex; // Push slot onto the free list.
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
    3862 
// Allocates a new block of m_ItemsPerBlock items and links all of them into the
// block's free list (FirstFreeIndex == 0; last item terminated with UINT32_MAX).
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    // (Writing through newBlock.pItems also affects the copy stored in
    // m_ItemBlocks - both share the same pItems pointer.)
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
    3877 
    3879 // class VmaRawList, VmaList
    3880 
    3881 #if VMA_USE_STL_LIST
    3882 
    3883 #define VmaList std::list
    3884 
    3885 #else // #if VMA_USE_STL_LIST
    3886 
// Node of VmaRawList: doubly-linked list element holding one T by value.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Null for the front item.
    VmaListItem* pNext; // Null for the back item.
    T Value;
};
    3894 
// Doubly linked list.
// Low-level list of VmaListItem<T> nodes drawn from an internal pool allocator.
// Serves as the backend of the STL-style VmaList wrapper below.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    // Destructor intentionally does not walk the list - see definition below.
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Parameterless Push/Insert return a node whose Value is uninitialized;
    // the overloads taking (value) assign it.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Pool the nodes are drawn from.
    ItemType* m_pFront; // Null when the list is empty.
    ItemType* m_pBack;  // Null when the list is empty.
    size_t m_Count;
};
    3939 
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128), // 128 items per pool block.
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}
    3956 
    3957 template<typename T>
    3958 void VmaRawList<T>::Clear()
    3959 {
    3960  if(IsEmpty() == false)
    3961  {
    3962  ItemType* pItem = m_pBack;
    3963  while(pItem != VMA_NULL)
    3964  {
    3965  ItemType* const pPrevItem = pItem->pPrev;
    3966  m_ItemAllocator.Free(pItem);
    3967  pItem = pPrevItem;
    3968  }
    3969  m_pFront = VMA_NULL;
    3970  m_pBack = VMA_NULL;
    3971  m_Count = 0;
    3972  }
    3973 }
    3974 
    3975 template<typename T>
    3976 VmaListItem<T>* VmaRawList<T>::PushBack()
    3977 {
    3978  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3979  pNewItem->pNext = VMA_NULL;
    3980  if(IsEmpty())
    3981  {
    3982  pNewItem->pPrev = VMA_NULL;
    3983  m_pFront = pNewItem;
    3984  m_pBack = pNewItem;
    3985  m_Count = 1;
    3986  }
    3987  else
    3988  {
    3989  pNewItem->pPrev = m_pBack;
    3990  m_pBack->pNext = pNewItem;
    3991  m_pBack = pNewItem;
    3992  ++m_Count;
    3993  }
    3994  return pNewItem;
    3995 }
    3996 
    3997 template<typename T>
    3998 VmaListItem<T>* VmaRawList<T>::PushFront()
    3999 {
    4000  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4001  pNewItem->pPrev = VMA_NULL;
    4002  if(IsEmpty())
    4003  {
    4004  pNewItem->pNext = VMA_NULL;
    4005  m_pFront = pNewItem;
    4006  m_pBack = pNewItem;
    4007  m_Count = 1;
    4008  }
    4009  else
    4010  {
    4011  pNewItem->pNext = m_pFront;
    4012  m_pFront->pPrev = pNewItem;
    4013  m_pFront = pNewItem;
    4014  ++m_Count;
    4015  }
    4016  return pNewItem;
    4017 }
    4018 
// Appends a copy of (value) at the back of the list.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

// Prepends a copy of (value) at the front of the list.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
    4034 
    4035 template<typename T>
    4036 void VmaRawList<T>::PopBack()
    4037 {
    4038  VMA_HEAVY_ASSERT(m_Count > 0);
    4039  ItemType* const pBackItem = m_pBack;
    4040  ItemType* const pPrevItem = pBackItem->pPrev;
    4041  if(pPrevItem != VMA_NULL)
    4042  {
    4043  pPrevItem->pNext = VMA_NULL;
    4044  }
    4045  m_pBack = pPrevItem;
    4046  m_ItemAllocator.Free(pBackItem);
    4047  --m_Count;
    4048 }
    4049 
    4050 template<typename T>
    4051 void VmaRawList<T>::PopFront()
    4052 {
    4053  VMA_HEAVY_ASSERT(m_Count > 0);
    4054  ItemType* const pFrontItem = m_pFront;
    4055  ItemType* const pNextItem = pFrontItem->pNext;
    4056  if(pNextItem != VMA_NULL)
    4057  {
    4058  pNextItem->pPrev = VMA_NULL;
    4059  }
    4060  m_pFront = pNextItem;
    4061  m_ItemAllocator.Free(pFrontItem);
    4062  --m_Count;
    4063 }
    4064 
// Unlinks (pItem) from the list and returns its node to the pool.
// pItem must be a live member of this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Fix up the previous neighbor, or the front pointer when removing the head.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Fix up the next neighbor, or the back pointer when removing the tail.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
    4094 
// Inserts a new node (Value uninitialized) before (pItem) and returns it.
// A null pItem means "before end", i.e. PushBack.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // Inserting before the head - new node becomes the front.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

// Inserts a new node (Value uninitialized) after (pItem) and returns it.
// A null pItem means "after begin-1", i.e. PushFront.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // Inserting after the tail - new node becomes the back.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
    4146 
// Inserts a copy of (value) before (pItem); null pItem appends at the back.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

// Inserts a copy of (value) after (pItem); null pItem prepends at the front.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
    4162 
// std::list-like wrapper over VmaRawList exposing STL-style iterators.
// AllocatorT must expose an m_pCallbacks member (see VmaStlAllocator).
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Mutable bidirectional iterator. A null m_pItem represents end().
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            // Decrementing end() (null item) must yield the last element,
            // hence the fallback to the list's back pointer.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem; // Null means end().

        // Only VmaList may construct non-default iterators.
        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only counterpart of iterator; implicitly convertible from it.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        // Implicit iterator -> const_iterator conversion.
        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            // Same end()-decrement handling as the mutable iterator.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem; // Null means cend().

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4347 
    4348 #endif // #if VMA_USE_STL_LIST
    4349 
    4351 // class VmaMap
    4352 
    4353 // Unused in this version.
    4354 #if 0
    4355 
    4356 #if VMA_USE_STL_UNORDERED_MAP
    4357 
    4358 #define VmaPair std::pair
    4359 
    4360 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4361  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4362 
    4363 #else // #if VMA_USE_STL_UNORDERED_MAP
    4364 
    4365 template<typename T1, typename T2>
    4366 struct VmaPair
    4367 {
    4368  T1 first;
    4369  T2 second;
    4370 
    4371  VmaPair() : first(), second() { }
    4372  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    4373 };
    4374 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    // Iterators are raw pointers into the underlying flat vector.
    typedef PairType* iterator;

    // The allocator is forwarded to the underlying VmaVector storage.
    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    // Inserts a key-value pair, keeping the vector sorted by key.
    void insert(const PairType& pair);
    // Returns iterator to the element with the given key, or end() if not found.
    iterator find(const KeyT& key);
    // Removes the element pointed to by `it`; `it` must be a valid iterator.
    void erase(iterator it);

private:
    // Flat storage kept sorted by PairType::first so lookups can use binary search.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4397 
    4398 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4399 
    4400 template<typename FirstT, typename SecondT>
    4401 struct VmaPairFirstLess
    4402 {
    4403  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    4404  {
    4405  return lhs.first < rhs.first;
    4406  }
    4407  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    4408  {
    4409  return lhs.first < rhsFirst;
    4410  }
    4411 };
    4412 
// Inserts `pair` at the position that keeps m_Vector sorted by key.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    // Binary-search for the first element not less than `pair` (compared by
    // key); the pointer difference from data() yields the insertion index.
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    // Shift-insert at that index so the vector stays sorted.
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    4423 
    4424 template<typename KeyT, typename ValueT>
    4425 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4426 {
    4427  PairType* it = VmaBinaryFindFirstNotLess(
    4428  m_Vector.data(),
    4429  m_Vector.data() + m_Vector.size(),
    4430  key,
    4431  VmaPairFirstLess<KeyT, ValueT>());
    4432  if((it != m_Vector.end()) && (it->first == key))
    4433  {
    4434  return it;
    4435  }
    4436  else
    4437  {
    4438  return m_Vector.end();
    4439  }
    4440 }
    4441 
// Removes the element pointed to by `it` from the map.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    // Iterators are raw pointers into the vector, so the element index is a
    // simple pointer difference.
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4447 
    4448 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4449 
    4450 #endif // #if 0
    4451 
    4453 
class VmaDeviceMemoryBlock;

// Distinguishes a cache flush from a cache invalidate on a mapped memory range.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4457 
/*
Internal representation of a single allocation. It is either a suballocation
out of a VmaDeviceMemoryBlock (ALLOCATION_TYPE_BLOCK) or an object owning its
own VkDeviceMemory (ALLOCATION_TYPE_DEDICATED); the union at the bottom holds
the data for whichever kind is active, selected by m_Type.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Bit 0x80 of m_MapCount: allocation was created persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // When set, m_pUserData is a string owned by this allocation
        // (see IsUserDataString() / FreeUserDataString()).
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // Constructs an empty (ALLOCATION_TYPE_NONE) object; one of the Init*
    // methods must be called to make it represent a real allocation.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        // Only the persistent-map bit may remain; any other bits mean an
        // outstanding vmaMapMemory() without a matching unmap.
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Turns this object into a block suballocation. Must currently be in the
    // NONE state; `block` must be valid.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this object directly in the "lost" state: a block-type
    // allocation with null pool/block. m_LastUseFrameIndex must already be
    // VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Moves this block suballocation to a different block/offset
    // (presumably used by defragmentation - defined elsewhere).
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeSize(VkDeviceSize newSize);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; `expected` is updated on failure
    // (compare_exchange_weak semantics, may fail spuriously - call in a loop).
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated allocation.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        // No unused ranges exist, so min is "infinity" and max is 0.
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    // Map/unmap entry points, one pair per allocation kind.
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // May be called at most once: usage must still be unset (0).
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member is determined by m_Type (BLOCK vs DEDICATED).
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4676 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;       // Byte offset of this region within the block.
    VkDeviceSize size;         // Size of this region in bytes.
    VmaAllocation hAllocation; // Allocation occupying this region; null items represent free/unused ranges.
    VmaSuballocationType type; // Kind of suballocation (VmaSuballocationType).
};
    4688 
// Comparator for offsets.
struct VmaSuballocationOffsetLess
{
    // Strict weak ordering: ascending by offset.
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
struct VmaSuballocationOffsetGreater
{
    // Strict weak ordering: descending by offset.
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
    4704 
// Doubly-linked list of suballocations, using the library's own allocator.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
// Used by VmaAllocationRequest::CalcCost().
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4709 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData;

    // Heuristic cost of satisfying this request: bytes of existing allocations
    // that would be sacrificed, plus a fixed per-allocation penalty
    // (VMA_LOST_ALLOCATION_COST) for each allocation made lost.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4737 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class; concrete algorithms are implemented by
VmaBlockMetadata_Generic, VmaBlockMetadata_Linear and VmaBlockMetadata_Buddy below.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations listed in the request so it can be satisfied.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Returns the number of allocations that were made lost.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

    // Tries to resize (grow or shrink) space for given allocation, in place.
    // Default implementation does not support resizing.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for derived classes' PrintDetailedMap() implementations.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4826 
// Asserts and returns false from the enclosing bool-returning function when
// `cond` is false. Helper for the Validate() implementations below.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4831 
/*
General-purpose block metadata: keeps all suballocations (free and taken) in a
list, plus an index of large-enough free suballocations sorted by size for
fast best-fit searches.
*/
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Taken suballocations = all suballocations minus the free ones.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

private:
    // Number of free suballocations in m_Suballocations.
    uint32_t m_FreeCount;
    // Total size in bytes of all free suballocations.
    VkDeviceSize m_SumFreeSize;
    // All suballocations of this block, free and taken.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4924 
    4925 /*
    4926 Allocations and their references in internal data structure look like this:
    4927 
    4928 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4929 
    4930  0 +-------+
    4931  | |
    4932  | |
    4933  | |
    4934  +-------+
    4935  | Alloc | 1st[m_1stNullItemsBeginCount]
    4936  +-------+
    4937  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4938  +-------+
    4939  | ... |
    4940  +-------+
    4941  | Alloc | 1st[1st.size() - 1]
    4942  +-------+
    4943  | |
    4944  | |
    4945  | |
    4946 GetSize() +-------+
    4947 
    4948 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4949 
    4950  0 +-------+
    4951  | Alloc | 2nd[0]
    4952  +-------+
    4953  | Alloc | 2nd[1]
    4954  +-------+
    4955  | ... |
    4956  +-------+
    4957  | Alloc | 2nd[2nd.size() - 1]
    4958  +-------+
    4959  | |
    4960  | |
    4961  | |
    4962  +-------+
    4963  | Alloc | 1st[m_1stNullItemsBeginCount]
    4964  +-------+
    4965  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4966  +-------+
    4967  | ... |
    4968  +-------+
    4969  | Alloc | 1st[1st.size() - 1]
    4970  +-------+
    4971  | |
    4972 GetSize() +-------+
    4973 
    4974 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4975 
    4976  0 +-------+
    4977  | |
    4978  | |
    4979  | |
    4980  +-------+
    4981  | Alloc | 1st[m_1stNullItemsBeginCount]
    4982  +-------+
    4983  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4984  +-------+
    4985  | ... |
    4986  +-------+
    4987  | Alloc | 1st[1st.size() - 1]
    4988  +-------+
    4989  | |
    4990  | |
    4991  | |
    4992  +-------+
    4993  | Alloc | 2nd[2nd.size() - 1]
    4994  +-------+
    4995  | ... |
    4996  +-------+
    4997  | Alloc | 2nd[1]
    4998  +-------+
    4999  | Alloc | 2nd[0]
    5000 GetSize() +-------+
    5001 
    5002 */
/*
Block metadata for linear allocation: supports ring-buffer and double-stack
modes using two suballocation vectors. See the diagram in the comment above
for the memory layout in each m_2ndVectorMode.
*/
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Accessors resolving the ping-pong indirection between the two vectors.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
    5101 
    5102 /*
    5103 - GetSize() is the original size of allocated memory block.
    5104 - m_UsableSize is this size aligned down to a power of two.
    5105  All allocations and calculations happen relative to m_UsableSize.
    5106 - GetUnusableSize() is the difference between them.
    5107  It is reported as separate, unused range, not available for allocations.
    5108 
    5109 Node at level 0 has size = m_UsableSize.
    5110 Each next level contains nodes with size 2 times smaller than current level.
    5111 m_LevelCount is the maximum number of levels to use in the current object.
    5112 */
/*
Block metadata implementing a buddy allocator over a binary tree of nodes.
See the comment above for the relation between GetSize(), m_UsableSize and
GetUnusableSize().
*/
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Reports the unusable tail as free space, since m_SumFreeSize excludes it.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty when the root node is a single free region (nothing ever split/allocated).
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    // Both public Free variants funnel into the private FreeAtOffset(alloc, offset).
    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulators filled by ValidateNode() and compared against the cached
    // counters in Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // One node of the buddy tree; the union's active member depends on `type`.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        union
        {
            struct
            {
                // Links in the per-level free list (see m_FreeList).
                Node* prev;
                Node* next;
            } free;
            struct
            {
                VmaAllocation alloc;
            } allocation;
            struct
            {
                // Right child is implied: leftChild->buddy.
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Intrusive doubly-linked list of free nodes, one list per level.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Node size halves at each deeper level.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5249 
    5250 /*
    5251 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5252 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5253 
    5254 Thread-safety: This class must be externally synchronized.
    5255 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Metadata object managing the suballocations inside this block.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    // Block must be unmapped and its VkDeviceMemory already released
    // (via Destroy()) before destruction.
    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null. NOTE(review): `count` presumably adjusts the internal
    // map reference count (m_MapCount) — confirm in the implementation.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5318 
    5319 struct VmaPointerLess
    5320 {
    5321  bool operator()(const void* lhs, const void* rhs) const
    5322  {
    5323  return lhs < rhs;
    5324  }
    5325 };
    5326 
    5327 class VmaDefragmentator;
    5328 
    5329 /*
    5330 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5331 Vulkan memory type.
    5332 
    5333 Synchronized internally with a mutex.
    5334 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // NOTE(review): presumably pre-creates blocks up to m_MinBlockCount — confirm.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Lazily creates (or returns the existing) defragmentator for this vector.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one block that is completely empty - a
    hysteresis to avoid the pessimistic case of alternating creation and
    destruction of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5447 
// Implementation of the VmaPool handle — a custom memory pool wrapping
// a single VmaBlockVector.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // The id may be assigned only while still 0, i.e. at most once.
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5470 
// Moves allocations between blocks of one VmaBlockVector to reduce
// fragmentation, within the byte/count budgets passed to Defragment().
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Running totals exposed via GetBytesMoved()/GetAllocationsMoved().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One allocation registered via AddAllocation().
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        // NOTE(review): presumably an out-flag written when the allocation is
        // moved — confirm in DefragmentRound().
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders AllocationInfo by allocation size, largest first.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block bookkeeping built during defragmentation.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            // Conservatively true until CalcHasNonMovableAllocations() runs.
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations when it contains more
        // allocations than were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE(review): name is misspelled ("Descecnding") in the original
        // source — kept unchanged for compatibility with existing callers.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Address ordering; the heterogeneous overload allows searching a
    // BlockInfo* vector by a raw VmaDeviceMemoryBlock*.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation to be considered for moving. Call before Defragment().
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5600 
    5601 #if VMA_RECORDING_ENABLED
    5602 
// Writes a log of VMA API calls to a file (m_File). Compiled in only when
// VMA_RECORDING_ENABLED.
class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a header describing the device so the recording can be
    // interpreted later.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    // One Record* method per public VMA entry point; each takes the caller's
    // current frame index.
    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordResizeAllocation(
        uint32_t frameIndex,
        VmaAllocation allocation,
        VkDeviceSize newSize);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Per-call context captured for every recorded entry.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Renders an allocation's pUserData as a printable string
    // (either the user string or the pointer value in m_PtrStr).
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    // Serializes writes to m_File when m_UseMutex is set.
    VMA_MUTEX m_FileMutex;
    // NOTE(review): presumably performance-counter frequency and start value
    // used to compute CallParams::time — confirm in GetBasicParams().
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5702 
    5703 #endif // #if VMA_RECORDING_ENABLED
    5704 
    5705 // Main allocator object.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no
    // limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools - one block vector per memory type.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device bufferImageGranularity, clamped up by the debug minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must additionally respect nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Low-level wrappers over device memory allocation/freeing.
    // NOTE(review): presumably enforce m_HeapSizeLimit bookkeeping — confirm.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Frees an allocation that was created as dedicated memory.
    // (Previous comment mentioned a bool return and a pMemory parameter;
    // the function takes the allocation and returns void.)
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5906 
    5908 // Memory allocation #2 after VmaAllocator_T definition
    5909 
    5910 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5911 {
    5912  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5913 }
    5914 
    5915 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5916 {
    5917  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5918 }
    5919 
    5920 template<typename T>
    5921 static T* VmaAllocate(VmaAllocator hAllocator)
    5922 {
    5923  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5924 }
    5925 
    5926 template<typename T>
    5927 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5928 {
    5929  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5930 }
    5931 
    5932 template<typename T>
    5933 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5934 {
    5935  if(ptr != VMA_NULL)
    5936  {
    5937  ptr->~T();
    5938  VmaFree(hAllocator, ptr);
    5939  }
    5940 }
    5941 
    5942 template<typename T>
    5943 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5944 {
    5945  if(ptr != VMA_NULL)
    5946  {
    5947  for(size_t i = count; i--; )
    5948  ptr[i].~T();
    5949  VmaFree(hAllocator, ptr);
    5950  }
    5951 }
    5952 
    5954 // VmaStringBuilder
    5955 
    5956 #if VMA_STATS_STRING_ENABLED
    5957 
// Incrementally builds a character buffer using the allocator's CPU
// allocation callbacks for its storage.
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    // Raw character data; no trailing '\0' is appended by the Add overloads
    // visible here - use GetLength() for the size.
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5975 
    5976 void VmaStringBuilder::Add(const char* pStr)
    5977 {
    5978  const size_t strLen = strlen(pStr);
    5979  if(strLen > 0)
    5980  {
    5981  const size_t oldCount = m_Data.size();
    5982  m_Data.resize(oldCount + strLen);
    5983  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5984  }
    5985 }
    5986 
    5987 void VmaStringBuilder::AddNumber(uint32_t num)
    5988 {
    5989  char buf[11];
    5990  VmaUint32ToStr(buf, sizeof(buf), num);
    5991  Add(buf);
    5992 }
    5993 
    5994 void VmaStringBuilder::AddNumber(uint64_t num)
    5995 {
    5996  char buf[21];
    5997  VmaUint64ToStr(buf, sizeof(buf), num);
    5998  Add(buf);
    5999 }
    6000 
    6001 void VmaStringBuilder::AddPointer(const void* ptr)
    6002 {
    6003  char buf[21];
    6004  VmaPtrToStr(buf, sizeof(buf), ptr);
    6005  Add(buf);
    6006 }
    6007 
    6008 #endif // #if VMA_STATS_STRING_ENABLED
    6009 
    6011 // VmaJsonWriter
    6012 
    6013 #if VMA_STATS_STRING_ENABLED
    6014 
// Emits well-formed JSON into a VmaStringBuilder. Begin/End calls for
// objects, arrays and strings must be balanced (checked with asserts).
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    // Output goes to `sb`; pAllocationCallbacks is used only for the
    // internal nesting stack.
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine formats the collection without line breaks.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value in one call.
    void WriteString(const char* pStr);
    // Alternatively begin a string, append pieces with ContinueString*,
    // then close it with EndString.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        // NOTE(review): presumably the number of values written at this level,
        // used for comma placement in BeginValue() - confirm.
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    6063 
    6064 const char* const VmaJsonWriter::INDENT = " ";
    6065 
// Binds the writer to an external string builder; the allocation callbacks
// back only the internal nesting stack.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    6072 
// On destruction every BeginString must have been matched by EndString and
// every BeginObject/BeginArray by EndObject/EndArray.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6078 
    6079 void VmaJsonWriter::BeginObject(bool singleLine)
    6080 {
    6081  VMA_ASSERT(!m_InsideString);
    6082 
    6083  BeginValue(false);
    6084  m_SB.Add('{');
    6085 
    6086  StackItem item;
    6087  item.type = COLLECTION_TYPE_OBJECT;
    6088  item.valueCount = 0;
    6089  item.singleLineMode = singleLine;
    6090  m_Stack.push_back(item);
    6091 }
    6092 
    6093 void VmaJsonWriter::EndObject()
    6094 {
    6095  VMA_ASSERT(!m_InsideString);
    6096 
    6097  WriteIndent(true);
    6098  m_SB.Add('}');
    6099 
    6100  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6101  m_Stack.pop_back();
    6102 }
    6103 
    6104 void VmaJsonWriter::BeginArray(bool singleLine)
    6105 {
    6106  VMA_ASSERT(!m_InsideString);
    6107 
    6108  BeginValue(false);
    6109  m_SB.Add('[');
    6110 
    6111  StackItem item;
    6112  item.type = COLLECTION_TYPE_ARRAY;
    6113  item.valueCount = 0;
    6114  item.singleLineMode = singleLine;
    6115  m_Stack.push_back(item);
    6116 }
    6117 
    6118 void VmaJsonWriter::EndArray()
    6119 {
    6120  VMA_ASSERT(!m_InsideString);
    6121 
    6122  WriteIndent(true);
    6123  m_SB.Add(']');
    6124 
    6125  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6126  m_Stack.pop_back();
    6127 }
    6128 
// Writes a complete string value: opening quote, escaped content, closing quote.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
    6134 
    6135 void VmaJsonWriter::BeginString(const char* pStr)
    6136 {
    6137  VMA_ASSERT(!m_InsideString);
    6138 
    6139  BeginValue(true);
    6140  m_SB.Add('"');
    6141  m_InsideString = true;
    6142  if(pStr != VMA_NULL && pStr[0] != '\0')
    6143  {
    6144  ContinueString(pStr);
    6145  }
    6146 }
    6147 
    6148 void VmaJsonWriter::ContinueString(const char* pStr)
    6149 {
    6150  VMA_ASSERT(m_InsideString);
    6151 
    6152  const size_t strLen = strlen(pStr);
    6153  for(size_t i = 0; i < strLen; ++i)
    6154  {
    6155  char ch = pStr[i];
    6156  if(ch == '\\')
    6157  {
    6158  m_SB.Add("\\\\");
    6159  }
    6160  else if(ch == '"')
    6161  {
    6162  m_SB.Add("\\\"");
    6163  }
    6164  else if(ch >= 32)
    6165  {
    6166  m_SB.Add(ch);
    6167  }
    6168  else switch(ch)
    6169  {
    6170  case '\b':
    6171  m_SB.Add("\\b");
    6172  break;
    6173  case '\f':
    6174  m_SB.Add("\\f");
    6175  break;
    6176  case '\n':
    6177  m_SB.Add("\\n");
    6178  break;
    6179  case '\r':
    6180  m_SB.Add("\\r");
    6181  break;
    6182  case '\t':
    6183  m_SB.Add("\\t");
    6184  break;
    6185  default:
    6186  VMA_ASSERT(0 && "Character not currently supported.");
    6187  break;
    6188  }
    6189  }
    6190 }
    6191 
// Appends decimal representation of n to the string being built.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6197 
// Appends decimal representation of n to the string being built.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6203 
// Appends textual representation of a pointer to the string being built.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6209 
    6210 void VmaJsonWriter::EndString(const char* pStr)
    6211 {
    6212  VMA_ASSERT(m_InsideString);
    6213  if(pStr != VMA_NULL && pStr[0] != '\0')
    6214  {
    6215  ContinueString(pStr);
    6216  }
    6217  m_SB.Add('"');
    6218  m_InsideString = false;
    6219 }
    6220 
// Writes an unquoted numeric value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6227 
// Writes an unquoted numeric value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6234 
    6235 void VmaJsonWriter::WriteBool(bool b)
    6236 {
    6237  VMA_ASSERT(!m_InsideString);
    6238  BeginValue(false);
    6239  m_SB.Add(b ? "true" : "false");
    6240 }
    6241 
// Writes the JSON literal "null".
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    6248 
// Called before each new value is emitted. Writes whatever separator the
// innermost collection requires (": " between an object key and its value,
// ", " between consecutive values), plus indentation, then counts the value.
void VmaJsonWriter::BeginValue(bool isString)
{
    if(!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        // In an object, values at even positions (0, 2, ...) are keys and must be strings.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            VMA_ASSERT(isString);
        }

        // Odd position in an object: this is a value that follows its key.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            m_SB.Add(": ");
        }
        else if(currItem.valueCount > 0)
        {
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            // First value in the collection: indentation only.
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}
    6277 
    6278 void VmaJsonWriter::WriteIndent(bool oneLess)
    6279 {
    6280  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6281  {
    6282  m_SB.AddNewLine();
    6283 
    6284  size_t count = m_Stack.size();
    6285  if(count > 0 && oneLess)
    6286  {
    6287  --count;
    6288  }
    6289  for(size_t i = 0; i < count; ++i)
    6290  {
    6291  m_SB.Add(INDENT);
    6292  }
    6293  }
    6294 }
    6295 
    6296 #endif // #if VMA_STATS_STRING_ENABLED
    6297 
    6299 
    6300 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6301 {
    6302  if(IsUserDataString())
    6303  {
    6304  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6305 
    6306  FreeUserDataString(hAllocator);
    6307 
    6308  if(pUserData != VMA_NULL)
    6309  {
    6310  const char* const newStrSrc = (char*)pUserData;
    6311  const size_t newStrLen = strlen(newStrSrc);
    6312  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6313  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6314  m_pUserData = newStrDst;
    6315  }
    6316  }
    6317  else
    6318  {
    6319  m_pUserData = pUserData;
    6320  }
    6321 }
    6322 
// Rebinds this block-type allocation to a different block/offset (used during
// defragmentation). Any outstanding mapping references are transferred so the
// new block ends up mapped the same number of times as the old one was.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        // A persistently mapped allocation holds one extra reference on its block.
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    6344 
// Updates the recorded size of this allocation. Size must be positive.
void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}
    6350 
    6351 VkDeviceSize VmaAllocation_T::GetOffset() const
    6352 {
    6353  switch(m_Type)
    6354  {
    6355  case ALLOCATION_TYPE_BLOCK:
    6356  return m_BlockAllocation.m_Offset;
    6357  case ALLOCATION_TYPE_DEDICATED:
    6358  return 0;
    6359  default:
    6360  VMA_ASSERT(0);
    6361  return 0;
    6362  }
    6363 }
    6364 
    6365 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6366 {
    6367  switch(m_Type)
    6368  {
    6369  case ALLOCATION_TYPE_BLOCK:
    6370  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6371  case ALLOCATION_TYPE_DEDICATED:
    6372  return m_DedicatedAllocation.m_hMemory;
    6373  default:
    6374  VMA_ASSERT(0);
    6375  return VK_NULL_HANDLE;
    6376  }
    6377 }
    6378 
    6379 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6380 {
    6381  switch(m_Type)
    6382  {
    6383  case ALLOCATION_TYPE_BLOCK:
    6384  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6385  case ALLOCATION_TYPE_DEDICATED:
    6386  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6387  default:
    6388  VMA_ASSERT(0);
    6389  return UINT32_MAX;
    6390  }
    6391 }
    6392 
    6393 void* VmaAllocation_T::GetMappedData() const
    6394 {
    6395  switch(m_Type)
    6396  {
    6397  case ALLOCATION_TYPE_BLOCK:
    6398  if(m_MapCount != 0)
    6399  {
    6400  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6401  VMA_ASSERT(pBlockData != VMA_NULL);
    6402  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6403  }
    6404  else
    6405  {
    6406  return VMA_NULL;
    6407  }
    6408  break;
    6409  case ALLOCATION_TYPE_DEDICATED:
    6410  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6411  return m_DedicatedAllocation.m_pMappedData;
    6412  default:
    6413  VMA_ASSERT(0);
    6414  return VMA_NULL;
    6415  }
    6416 }
    6417 
    6418 bool VmaAllocation_T::CanBecomeLost() const
    6419 {
    6420  switch(m_Type)
    6421  {
    6422  case ALLOCATION_TYPE_BLOCK:
    6423  return m_BlockAllocation.m_CanBecomeLost;
    6424  case ALLOCATION_TYPE_DEDICATED:
    6425  return false;
    6426  default:
    6427  VMA_ASSERT(0);
    6428  return false;
    6429  }
    6430 }
    6431 
// Returns the custom pool this block allocation belongs to.
// Only valid for block allocations.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6437 
// Atomically tries to mark this allocation as lost. Returns true on success,
// false when the allocation was used too recently (within frameInUseCount
// frames of currentFrameIndex) or was already lost.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Lock-free compare-exchange loop on the last-use frame index.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Caller should not ask to lose an already-lost allocation.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use by the GPU: cannot be lost yet.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // CAS failed: localLastUseFrameIndex was refreshed; retry.
        }
    }
}
    6469 
    6470 #if VMA_STATS_STRING_ENABLED
    6471 
// Correspond to values of enum VmaSuballocationType.
// Used by the JSON stats printer; keep in sync with that enum's order.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6481 
// Writes this allocation's parameters as key/value pairs into an already-open
// JSON object (the caller owns BeginObject/EndObject).
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // User data is an owned string copy: print its contents.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer: print its address.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6517 
    6518 #endif
    6519 
    6520 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6521 {
    6522  VMA_ASSERT(IsUserDataString());
    6523  if(m_pUserData != VMA_NULL)
    6524  {
    6525  char* const oldStr = (char*)m_pUserData;
    6526  const size_t oldStrLen = strlen(oldStr);
    6527  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6528  m_pUserData = VMA_NULL;
    6529  }
    6530 }
    6531 
    6532 void VmaAllocation_T::BlockAllocMap()
    6533 {
    6534  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6535 
    6536  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6537  {
    6538  ++m_MapCount;
    6539  }
    6540  else
    6541  {
    6542  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6543  }
    6544 }
    6545 
    6546 void VmaAllocation_T::BlockAllocUnmap()
    6547 {
    6548  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6549 
    6550  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6551  {
    6552  --m_MapCount;
    6553  }
    6554  else
    6555  {
    6556  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6557  }
    6558 }
    6559 
// Maps a dedicated allocation. vkMapMemory is called only on the first map;
// later calls just bump the reference counter and return the cached pointer.
// Returns VK_ERROR_MEMORY_MAP_FAILED when the counter would overflow.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        // Already mapped: reuse the cached pointer if the counter has room.
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First map: map the whole memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
    6596 
// Unmaps a dedicated allocation. vkUnmapMemory is called only when the
// reference counter drops to zero. Asserts on unbalanced unmap.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
    6617 
    6618 #if VMA_STATS_STRING_ENABLED
    6619 
// Writes one VmaStatInfo as a JSON object. Min/Avg/Max sub-objects are emitted
// only when there is more than one allocation / unused range, since with a
// single element they would all equal the total.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6667 
    6668 #endif // #if VMA_STATS_STRING_ENABLED
    6669 
// Comparator ordering suballocation-list iterators by the size of the
// suballocation they refer to. The heterogeneous overload allows binary
// search against a plain VkDeviceSize key (see VmaBinaryFindFirstNotLess).
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6685 
    6686 
    6688 // class VmaBlockMetadata
    6689 
// Base metadata object starts with size 0; actual size is set later via Init().
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6695 
    6696 #if VMA_STATS_STRING_ENABLED
    6697 
// Opens the per-block JSON object, writes summary fields, and begins the
// "Suballocations" array. Must be paired with PrintDetailedMap_End().
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
    6720 
// Writes one used suballocation as a single-line JSON object inside the
// "Suballocations" array opened by PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    // Type, Size, UserData etc. come from the allocation itself.
    hAllocation->PrintParameters(json);

    json.EndObject();
}
    6734 
// Writes one free range as a single-line JSON object inside the
// "Suballocations" array opened by PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    6752 
// Closes the "Suballocations" array and the per-block object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6758 
    6759 #endif // #if VMA_STATS_STRING_ENABLED
    6760 
    6762 // class VmaBlockMetadata_Generic
    6763 
// Generic (free-list based) metadata; containers use the allocator's callbacks.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6772 
// Containers clean up via their own destructors; nothing else to release.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6776 
// Initializes metadata for an empty block: one free suballocation spanning
// the whole size, registered in the size-sorted free list.
void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_FreeCount = 1;
    m_SumFreeSize = size;

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // A whole block is always large enough to be worth registering by size.
    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    m_Suballocations.push_back(suballoc);
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem;
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
    6796 
// Checks internal consistency of all metadata structures and returns true when
// valid (VMA_VALIDATE returns false on the first violated invariant). Invoked
// through VMA_HEAVY_ASSERT in debug paths.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Free entries carry no allocation handle; used entries must have one.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // Allocation object must agree with list entry.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculacted values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6878 
    6879 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6880 {
    6881  if(!m_FreeSuballocationsBySize.empty())
    6882  {
    6883  return m_FreeSuballocationsBySize.back()->size;
    6884  }
    6885  else
    6886  {
    6887  return 0;
    6888  }
    6889 }
    6890 
// A block is empty when it holds exactly one suballocation and it is free.
bool VmaBlockMetadata_Generic::IsEmpty() const
{
    return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
}
    6895 
// Fills outInfo with statistics of this single block: counts, byte totals and
// min/max sizes of allocations and free ranges. Min fields stay UINT64_MAX
// (and Max stay 0) when the corresponding category is empty.
void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    outInfo.blockCount = 1;

    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    outInfo.allocationCount = rangeCount - m_FreeCount;
    outInfo.unusedRangeCount = m_FreeCount;

    outInfo.unusedBytes = m_SumFreeSize;
    outInfo.usedBytes = GetSize() - outInfo.unusedBytes;

    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.allocationSizeMax = 0;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMax = 0;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
        }
        else
        {
            outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
            outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
        }
    }
}
    6929 
// Accumulates this block's statistics into pool-wide totals.
void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();

    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize;
    inoutStats.allocationCount += rangeCount - m_FreeCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
}
    6940 
    6941 #if VMA_STATS_STRING_ENABLED
    6942 
    6943 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6944 {
    6945  PrintDetailedMap_Begin(json,
    6946  m_SumFreeSize, // unusedBytes
    6947  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6948  m_FreeCount); // unusedRangeCount
    6949 
    6950  size_t i = 0;
    6951  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6952  suballocItem != m_Suballocations.cend();
    6953  ++suballocItem, ++i)
    6954  {
    6955  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6956  {
    6957  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6958  }
    6959  else
    6960  {
    6961  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6962  }
    6963  }
    6964 
    6965  PrintDetailedMap_End(json);
    6966 }
    6967 
    6968 #endif // #if VMA_STATS_STRING_ENABLED
    6969 
    6970 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6971  uint32_t currentFrameIndex,
    6972  uint32_t frameInUseCount,
    6973  VkDeviceSize bufferImageGranularity,
    6974  VkDeviceSize allocSize,
    6975  VkDeviceSize allocAlignment,
    6976  bool upperAddress,
    6977  VmaSuballocationType allocType,
    6978  bool canMakeOtherLost,
    6979  uint32_t strategy,
    6980  VmaAllocationRequest* pAllocationRequest)
    6981 {
    6982  VMA_ASSERT(allocSize > 0);
    6983  VMA_ASSERT(!upperAddress);
    6984  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6985  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6986  VMA_HEAVY_ASSERT(Validate());
    6987 
    6988  // There is not enough total free space in this block to fullfill the request: Early return.
    6989  if(canMakeOtherLost == false &&
    6990  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6991  {
    6992  return false;
    6993  }
    6994 
    6995  // New algorithm, efficiently searching freeSuballocationsBySize.
    6996  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6997  if(freeSuballocCount > 0)
    6998  {
    7000  {
    7001  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    7002  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7003  m_FreeSuballocationsBySize.data(),
    7004  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7005  allocSize + 2 * VMA_DEBUG_MARGIN,
    7006  VmaSuballocationItemSizeLess());
    7007  size_t index = it - m_FreeSuballocationsBySize.data();
    7008  for(; index < freeSuballocCount; ++index)
    7009  {
    7010  if(CheckAllocation(
    7011  currentFrameIndex,
    7012  frameInUseCount,
    7013  bufferImageGranularity,
    7014  allocSize,
    7015  allocAlignment,
    7016  allocType,
    7017  m_FreeSuballocationsBySize[index],
    7018  false, // canMakeOtherLost
    7019  &pAllocationRequest->offset,
    7020  &pAllocationRequest->itemsToMakeLostCount,
    7021  &pAllocationRequest->sumFreeSize,
    7022  &pAllocationRequest->sumItemSize))
    7023  {
    7024  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7025  return true;
    7026  }
    7027  }
    7028  }
    7029  else // WORST_FIT, FIRST_FIT
    7030  {
    7031  // Search staring from biggest suballocations.
    7032  for(size_t index = freeSuballocCount; index--; )
    7033  {
    7034  if(CheckAllocation(
    7035  currentFrameIndex,
    7036  frameInUseCount,
    7037  bufferImageGranularity,
    7038  allocSize,
    7039  allocAlignment,
    7040  allocType,
    7041  m_FreeSuballocationsBySize[index],
    7042  false, // canMakeOtherLost
    7043  &pAllocationRequest->offset,
    7044  &pAllocationRequest->itemsToMakeLostCount,
    7045  &pAllocationRequest->sumFreeSize,
    7046  &pAllocationRequest->sumItemSize))
    7047  {
    7048  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7049  return true;
    7050  }
    7051  }
    7052  }
    7053  }
    7054 
    7055  if(canMakeOtherLost)
    7056  {
    7057  // Brute-force algorithm. TODO: Come up with something better.
    7058 
    7059  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    7060  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    7061 
    7062  VmaAllocationRequest tmpAllocRequest = {};
    7063  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7064  suballocIt != m_Suballocations.end();
    7065  ++suballocIt)
    7066  {
    7067  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7068  suballocIt->hAllocation->CanBecomeLost())
    7069  {
    7070  if(CheckAllocation(
    7071  currentFrameIndex,
    7072  frameInUseCount,
    7073  bufferImageGranularity,
    7074  allocSize,
    7075  allocAlignment,
    7076  allocType,
    7077  suballocIt,
    7078  canMakeOtherLost,
    7079  &tmpAllocRequest.offset,
    7080  &tmpAllocRequest.itemsToMakeLostCount,
    7081  &tmpAllocRequest.sumFreeSize,
    7082  &tmpAllocRequest.sumItemSize))
    7083  {
    7084  tmpAllocRequest.item = suballocIt;
    7085 
    7086  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7088  {
    7089  *pAllocationRequest = tmpAllocRequest;
    7090  }
    7091  }
    7092  }
    7093  }
    7094 
    7095  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7096  {
    7097  return true;
    7098  }
    7099  }
    7100 
    7101  return false;
    7102 }
    7103 
// Marks as "lost" the allocations that CreateAllocationRequest() counted in
// pAllocationRequest->itemsToMakeLostCount, freeing their suballocations.
// On success, pAllocationRequest->item points at a FREE suballocation that can
// be used for the new allocation. Returns false if any of the allocations
// could not be made lost (MakeLost() failed) - the request is then unusable.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free suballocation - only used ones can be made lost.
        // NOTE(review): a single `if` (not a loop) suffices here, presumably
        // because FreeSuballocation() below merges adjacent free items so two
        // free suballocations are never neighbors - confirm against FreeSuballocation().
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with neighbors; it returns the
            // iterator of the resulting free item, which we keep following.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            // Allocation is still in use within frameInUseCount frames - fail.
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7135 
    7136 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7137 {
    7138  uint32_t lostAllocationCount = 0;
    7139  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7140  it != m_Suballocations.end();
    7141  ++it)
    7142  {
    7143  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7144  it->hAllocation->CanBecomeLost() &&
    7145  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7146  {
    7147  it = FreeSuballocation(it);
    7148  ++lostAllocationCount;
    7149  }
    7150  }
    7151  return lostAllocationCount;
    7152 }
    7153 
    7154 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7155 {
    7156  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7157  it != m_Suballocations.end();
    7158  ++it)
    7159  {
    7160  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7161  {
    7162  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7163  {
    7164  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7165  return VK_ERROR_VALIDATION_FAILED_EXT;
    7166  }
    7167  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7168  {
    7169  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7170  return VK_ERROR_VALIDATION_FAILED_EXT;
    7171  }
    7172  }
    7173  }
    7174 
    7175  return VK_SUCCESS;
    7176 }
    7177 
// Commits an allocation into the free suballocation pointed to by request.item
// (which must come from a successful CreateAllocationRequest). Any unused
// space before/after the allocated range is split off as new free
// suballocations, and the free-count / free-size totals are updated.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // This (generic) metadata only allocates from the lower-address side;
    // upper-address allocation is for the linear algorithm.
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals.
    // One free suballocation was consumed; each non-empty padding adds one back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    // Paddings remain free, so only the allocated bytes leave the free total.
    m_SumFreeSize -= allocSize;
}
    7243 
    7244 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7245 {
    7246  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7247  suballocItem != m_Suballocations.end();
    7248  ++suballocItem)
    7249  {
    7250  VmaSuballocation& suballoc = *suballocItem;
    7251  if(suballoc.hAllocation == allocation)
    7252  {
    7253  FreeSuballocation(suballocItem);
    7254  VMA_HEAVY_ASSERT(Validate());
    7255  return;
    7256  }
    7257  }
    7258  VMA_ASSERT(0 && "Not found!");
    7259 }
    7260 
    7261 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7262 {
    7263  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7264  suballocItem != m_Suballocations.end();
    7265  ++suballocItem)
    7266  {
    7267  VmaSuballocation& suballoc = *suballocItem;
    7268  if(suballoc.offset == offset)
    7269  {
    7270  FreeSuballocation(suballocItem);
    7271  return;
    7272  }
    7273  }
    7274  VMA_ASSERT(0 && "Not found!");
    7275 }
    7276 
// Tries to change the size of an existing allocation in place.
// Shrinking always succeeds: the freed tail is merged into the next free
// suballocation or split off as a new one. Growing succeeds only if the next
// suballocation is free and large enough (including VMA_DEBUG_MARGIN).
// Returns true on success; false if growth is impossible. Asserts if `alloc`
// is not found in this block.
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        // Must unregister/reregister because its size (the sort
                        // key in m_FreeSuballocationsBySize) changes.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
    7403 
    7404 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7405 {
    7406  VkDeviceSize lastSize = 0;
    7407  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7408  {
    7409  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7410 
    7411  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7412  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7413  VMA_VALIDATE(it->size >= lastSize);
    7414  lastSize = it->size;
    7415  }
    7416  return true;
    7417 }
    7418 
// Checks whether an allocation of allocSize/allocAlignment/allocType can be
// placed starting at suballocItem. Outputs the resulting offset (*pOffset),
// how many existing allocations would have to be made lost
// (*itemsToMakeLostCount), and the free/lost byte sums used for cost ranking.
//
// Two modes:
// - canMakeOtherLost == false: suballocItem must be a single FREE
//   suballocation large enough by itself.
// - canMakeOtherLost == true: the allocation may span a run of suballocations
//   starting at suballocItem, counting lost-able allocations toward the total.
//
// Both modes apply VMA_DEBUG_MARGIN, the requested alignment, and
// bufferImageGranularity conflict checks against neighboring suballocations.
// Returns false as soon as placement is impossible here.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // The starting suballocation is used: it is only usable if its
            // allocation can be made lost and is outside its in-use window.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple mode: the allocation must fit entirely inside this one free
        // suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7692 
    7693 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7694 {
    7695  VMA_ASSERT(item != m_Suballocations.end());
    7696  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7697 
    7698  VmaSuballocationList::iterator nextItem = item;
    7699  ++nextItem;
    7700  VMA_ASSERT(nextItem != m_Suballocations.end());
    7701  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7702 
    7703  item->size += nextItem->size;
    7704  --m_FreeCount;
    7705  m_Suballocations.erase(nextItem);
    7706 }
    7707 
// Converts the given suballocation to FREE, merges it with any adjacent free
// neighbors, and keeps m_FreeSuballocationsBySize in sync. Returns the
// iterator of the resulting (possibly merged) free suballocation.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Order matters: neighbors must be unregistered from the size-sorted
    // vector BEFORE the merge changes their size / erases them.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        // Re-register the grown previous item under its new size.
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7759 
    7760 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7761 {
    7762  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7763  VMA_ASSERT(item->size > 0);
    7764 
    7765  // You may want to enable this validation at the beginning or at the end of
    7766  // this function, depending on what do you want to check.
    7767  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7768 
    7769  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7770  {
    7771  if(m_FreeSuballocationsBySize.empty())
    7772  {
    7773  m_FreeSuballocationsBySize.push_back(item);
    7774  }
    7775  else
    7776  {
    7777  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7778  }
    7779  }
    7780 
    7781  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7782 }
    7783 
    7784 
// Removes a free suballocation from m_FreeSuballocationsBySize. Items below
// the registration threshold were never added, so they are skipped. Uses
// binary search to find the first entry of equal size, then scans linearly
// among equal-sized entries for the exact iterator.
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Binary search for the first entry not smaller than item's size.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        // Linear scan over the run of equal-sized entries.
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Once the size differs, the item cannot appear further - it was
            // never registered, which is a logic error.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
    7817 
////////////////////////////////////////////////////////////////////////////////
// class VmaBlockMetadata_Linear
    7820 
// Constructs linear block metadata: two suballocation vectors (the active
// "1st" one selected by m_1stVectorIndex and the auxiliary "2nd" used for
// ring-buffer / double-stack modes), both using the allocator's CPU
// allocation callbacks. Starts empty with the 2nd vector unused.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7833 
// Nothing to release explicitly - members clean up via their own destructors.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7837 
// Initializes metadata for a block of the given size; with no allocations yet,
// the whole block is free.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7843 
// Debug validation of all linear-metadata invariants: vector emptiness vs
// m_2ndVectorMode, placement of null (freed) items, monotonically increasing
// offsets with VMA_DEBUG_MARGIN between items, per-allocation offset/size
// consistency, null-item counters, and the m_SumFreeSize total.
// Traversal order matches address order: 2nd vector first in RING_BUFFER mode,
// then 1st vector, then 2nd vector (backward) in DOUBLE_STACK mode.
// Returns true when everything checks out (VMA_VALIDATE fails out otherwise).
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // `offset` tracks the minimum legal offset of the next item in address order.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // In ring-buffer mode the 2nd vector occupies the lower addresses.
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading null items in the 1st vector must really be free placeholders.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): this check is vacuously true since the loop starts at
        // i == m_1stNullItemsBeginCount - presumably kept for safety.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // In double-stack mode the 2nd vector grows downward from the end of
        // the block, so iterate it backward to stay in ascending address order.
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7970 
    7971 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7972 {
    7973  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7974  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7975 }
    7976 
    7977 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    7978 {
    7979  const VkDeviceSize size = GetSize();
    7980 
    7981  /*
    7982  We don't consider gaps inside allocation vectors with freed allocations because
    7983  they are not suitable for reuse in linear allocator. We consider only space that
    7984  is available for new allocations.
    7985  */
    7986  if(IsEmpty())
    7987  {
    7988  return size;
    7989  }
    7990 
    7991  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7992 
    7993  switch(m_2ndVectorMode)
    7994  {
    7995  case SECOND_VECTOR_EMPTY:
    7996  /*
    7997  Available space is after end of 1st, as well as before beginning of 1st (which
    7998  whould make it a ring buffer).
    7999  */
    8000  {
    8001  const size_t suballocations1stCount = suballocations1st.size();
    8002  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    8003  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    8004  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    8005  return VMA_MAX(
    8006  firstSuballoc.offset,
    8007  size - (lastSuballoc.offset + lastSuballoc.size));
    8008  }
    8009  break;
    8010 
    8011  case SECOND_VECTOR_RING_BUFFER:
    8012  /*
    8013  Available space is only between end of 2nd and beginning of 1st.
    8014  */
    8015  {
    8016  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8017  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    8018  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    8019  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    8020  }
    8021  break;
    8022 
    8023  case SECOND_VECTOR_DOUBLE_STACK:
    8024  /*
    8025  Available space is only between end of 1st and top of 2nd.
    8026  */
    8027  {
    8028  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8029  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    8030  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    8031  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    8032  }
    8033  break;
    8034 
    8035  default:
    8036  VMA_ASSERT(0);
    8037  return 0;
    8038  }
    8039 }
    8040 
    8041 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    8042 {
    8043  const VkDeviceSize size = GetSize();
    8044  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8045  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8046  const size_t suballoc1stCount = suballocations1st.size();
    8047  const size_t suballoc2ndCount = suballocations2nd.size();
    8048 
    8049  outInfo.blockCount = 1;
    8050  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    8051  outInfo.unusedRangeCount = 0;
    8052  outInfo.usedBytes = 0;
    8053  outInfo.allocationSizeMin = UINT64_MAX;
    8054  outInfo.allocationSizeMax = 0;
    8055  outInfo.unusedRangeSizeMin = UINT64_MAX;
    8056  outInfo.unusedRangeSizeMax = 0;
    8057 
    8058  VkDeviceSize lastOffset = 0;
    8059 
    8060  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8061  {
    8062  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8063  size_t nextAlloc2ndIndex = 0;
    8064  while(lastOffset < freeSpace2ndTo1stEnd)
    8065  {
    8066  // Find next non-null allocation or move nextAllocIndex to the end.
    8067  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8068  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8069  {
    8070  ++nextAlloc2ndIndex;
    8071  }
    8072 
    8073  // Found non-null allocation.
    8074  if(nextAlloc2ndIndex < suballoc2ndCount)
    8075  {
    8076  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8077 
    8078  // 1. Process free space before this allocation.
    8079  if(lastOffset < suballoc.offset)
    8080  {
    8081  // There is free space from lastOffset to suballoc.offset.
    8082  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8083  ++outInfo.unusedRangeCount;
    8084  outInfo.unusedBytes += unusedRangeSize;
    8085  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8086  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8087  }
    8088 
    8089  // 2. Process this allocation.
    8090  // There is allocation with suballoc.offset, suballoc.size.
    8091  outInfo.usedBytes += suballoc.size;
    8092  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8093  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8094 
    8095  // 3. Prepare for next iteration.
    8096  lastOffset = suballoc.offset + suballoc.size;
    8097  ++nextAlloc2ndIndex;
    8098  }
    8099  // We are at the end.
    8100  else
    8101  {
    8102  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8103  if(lastOffset < freeSpace2ndTo1stEnd)
    8104  {
    8105  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8106  ++outInfo.unusedRangeCount;
    8107  outInfo.unusedBytes += unusedRangeSize;
    8108  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8109  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8110  }
    8111 
    8112  // End of loop.
    8113  lastOffset = freeSpace2ndTo1stEnd;
    8114  }
    8115  }
    8116  }
    8117 
    8118  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8119  const VkDeviceSize freeSpace1stTo2ndEnd =
    8120  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8121  while(lastOffset < freeSpace1stTo2ndEnd)
    8122  {
    8123  // Find next non-null allocation or move nextAllocIndex to the end.
    8124  while(nextAlloc1stIndex < suballoc1stCount &&
    8125  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8126  {
    8127  ++nextAlloc1stIndex;
    8128  }
    8129 
    8130  // Found non-null allocation.
    8131  if(nextAlloc1stIndex < suballoc1stCount)
    8132  {
    8133  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8134 
    8135  // 1. Process free space before this allocation.
    8136  if(lastOffset < suballoc.offset)
    8137  {
    8138  // There is free space from lastOffset to suballoc.offset.
    8139  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8140  ++outInfo.unusedRangeCount;
    8141  outInfo.unusedBytes += unusedRangeSize;
    8142  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8143  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8144  }
    8145 
    8146  // 2. Process this allocation.
    8147  // There is allocation with suballoc.offset, suballoc.size.
    8148  outInfo.usedBytes += suballoc.size;
    8149  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8150  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8151 
    8152  // 3. Prepare for next iteration.
    8153  lastOffset = suballoc.offset + suballoc.size;
    8154  ++nextAlloc1stIndex;
    8155  }
    8156  // We are at the end.
    8157  else
    8158  {
    8159  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8160  if(lastOffset < freeSpace1stTo2ndEnd)
    8161  {
    8162  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8163  ++outInfo.unusedRangeCount;
    8164  outInfo.unusedBytes += unusedRangeSize;
    8165  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8166  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8167  }
    8168 
    8169  // End of loop.
    8170  lastOffset = freeSpace1stTo2ndEnd;
    8171  }
    8172  }
    8173 
    8174  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8175  {
    8176  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8177  while(lastOffset < size)
    8178  {
    8179  // Find next non-null allocation or move nextAllocIndex to the end.
    8180  while(nextAlloc2ndIndex != SIZE_MAX &&
    8181  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8182  {
    8183  --nextAlloc2ndIndex;
    8184  }
    8185 
    8186  // Found non-null allocation.
    8187  if(nextAlloc2ndIndex != SIZE_MAX)
    8188  {
    8189  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8190 
    8191  // 1. Process free space before this allocation.
    8192  if(lastOffset < suballoc.offset)
    8193  {
    8194  // There is free space from lastOffset to suballoc.offset.
    8195  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8196  ++outInfo.unusedRangeCount;
    8197  outInfo.unusedBytes += unusedRangeSize;
    8198  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8199  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8200  }
    8201 
    8202  // 2. Process this allocation.
    8203  // There is allocation with suballoc.offset, suballoc.size.
    8204  outInfo.usedBytes += suballoc.size;
    8205  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8206  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8207 
    8208  // 3. Prepare for next iteration.
    8209  lastOffset = suballoc.offset + suballoc.size;
    8210  --nextAlloc2ndIndex;
    8211  }
    8212  // We are at the end.
    8213  else
    8214  {
    8215  // There is free space from lastOffset to size.
    8216  if(lastOffset < size)
    8217  {
    8218  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8219  ++outInfo.unusedRangeCount;
    8220  outInfo.unusedBytes += unusedRangeSize;
    8221  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8222  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8223  }
    8224 
    8225  // End of loop.
    8226  lastOffset = size;
    8227  }
    8228  }
    8229  }
    8230 
    8231  outInfo.unusedBytes = size - outInfo.usedBytes;
    8232 }
    8233 
    8234 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8235 {
    8236  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8237  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8238  const VkDeviceSize size = GetSize();
    8239  const size_t suballoc1stCount = suballocations1st.size();
    8240  const size_t suballoc2ndCount = suballocations2nd.size();
    8241 
    8242  inoutStats.size += size;
    8243 
    8244  VkDeviceSize lastOffset = 0;
    8245 
    8246  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8247  {
    8248  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8249  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8250  while(lastOffset < freeSpace2ndTo1stEnd)
    8251  {
    8252  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8253  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8254  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8255  {
    8256  ++nextAlloc2ndIndex;
    8257  }
    8258 
    8259  // Found non-null allocation.
    8260  if(nextAlloc2ndIndex < suballoc2ndCount)
    8261  {
    8262  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8263 
    8264  // 1. Process free space before this allocation.
    8265  if(lastOffset < suballoc.offset)
    8266  {
    8267  // There is free space from lastOffset to suballoc.offset.
    8268  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8269  inoutStats.unusedSize += unusedRangeSize;
    8270  ++inoutStats.unusedRangeCount;
    8271  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8272  }
    8273 
    8274  // 2. Process this allocation.
    8275  // There is allocation with suballoc.offset, suballoc.size.
    8276  ++inoutStats.allocationCount;
    8277 
    8278  // 3. Prepare for next iteration.
    8279  lastOffset = suballoc.offset + suballoc.size;
    8280  ++nextAlloc2ndIndex;
    8281  }
    8282  // We are at the end.
    8283  else
    8284  {
    8285  if(lastOffset < freeSpace2ndTo1stEnd)
    8286  {
    8287  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8288  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8289  inoutStats.unusedSize += unusedRangeSize;
    8290  ++inoutStats.unusedRangeCount;
    8291  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8292  }
    8293 
    8294  // End of loop.
    8295  lastOffset = freeSpace2ndTo1stEnd;
    8296  }
    8297  }
    8298  }
    8299 
    8300  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8301  const VkDeviceSize freeSpace1stTo2ndEnd =
    8302  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8303  while(lastOffset < freeSpace1stTo2ndEnd)
    8304  {
    8305  // Find next non-null allocation or move nextAllocIndex to the end.
    8306  while(nextAlloc1stIndex < suballoc1stCount &&
    8307  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8308  {
    8309  ++nextAlloc1stIndex;
    8310  }
    8311 
    8312  // Found non-null allocation.
    8313  if(nextAlloc1stIndex < suballoc1stCount)
    8314  {
    8315  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8316 
    8317  // 1. Process free space before this allocation.
    8318  if(lastOffset < suballoc.offset)
    8319  {
    8320  // There is free space from lastOffset to suballoc.offset.
    8321  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8322  inoutStats.unusedSize += unusedRangeSize;
    8323  ++inoutStats.unusedRangeCount;
    8324  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8325  }
    8326 
    8327  // 2. Process this allocation.
    8328  // There is allocation with suballoc.offset, suballoc.size.
    8329  ++inoutStats.allocationCount;
    8330 
    8331  // 3. Prepare for next iteration.
    8332  lastOffset = suballoc.offset + suballoc.size;
    8333  ++nextAlloc1stIndex;
    8334  }
    8335  // We are at the end.
    8336  else
    8337  {
    8338  if(lastOffset < freeSpace1stTo2ndEnd)
    8339  {
    8340  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8341  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8342  inoutStats.unusedSize += unusedRangeSize;
    8343  ++inoutStats.unusedRangeCount;
    8344  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8345  }
    8346 
    8347  // End of loop.
    8348  lastOffset = freeSpace1stTo2ndEnd;
    8349  }
    8350  }
    8351 
    8352  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8353  {
    8354  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8355  while(lastOffset < size)
    8356  {
    8357  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8358  while(nextAlloc2ndIndex != SIZE_MAX &&
    8359  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8360  {
    8361  --nextAlloc2ndIndex;
    8362  }
    8363 
    8364  // Found non-null allocation.
    8365  if(nextAlloc2ndIndex != SIZE_MAX)
    8366  {
    8367  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8368 
    8369  // 1. Process free space before this allocation.
    8370  if(lastOffset < suballoc.offset)
    8371  {
    8372  // There is free space from lastOffset to suballoc.offset.
    8373  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8374  inoutStats.unusedSize += unusedRangeSize;
    8375  ++inoutStats.unusedRangeCount;
    8376  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8377  }
    8378 
    8379  // 2. Process this allocation.
    8380  // There is allocation with suballoc.offset, suballoc.size.
    8381  ++inoutStats.allocationCount;
    8382 
    8383  // 3. Prepare for next iteration.
    8384  lastOffset = suballoc.offset + suballoc.size;
    8385  --nextAlloc2ndIndex;
    8386  }
    8387  // We are at the end.
    8388  else
    8389  {
    8390  if(lastOffset < size)
    8391  {
    8392  // There is free space from lastOffset to size.
    8393  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8394  inoutStats.unusedSize += unusedRangeSize;
    8395  ++inoutStats.unusedRangeCount;
    8396  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8397  }
    8398 
    8399  // End of loop.
    8400  lastOffset = size;
    8401  }
    8402  }
    8403  }
    8404 }
    8405 
    8406 #if VMA_STATS_STRING_ENABLED
    8407 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    8408 {
    8409  const VkDeviceSize size = GetSize();
    8410  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8411  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8412  const size_t suballoc1stCount = suballocations1st.size();
    8413  const size_t suballoc2ndCount = suballocations2nd.size();
    8414 
    8415  // FIRST PASS
    8416 
    8417  size_t unusedRangeCount = 0;
    8418  VkDeviceSize usedBytes = 0;
    8419 
    8420  VkDeviceSize lastOffset = 0;
    8421 
    8422  size_t alloc2ndCount = 0;
    8423  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8424  {
    8425  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8426  size_t nextAlloc2ndIndex = 0;
    8427  while(lastOffset < freeSpace2ndTo1stEnd)
    8428  {
    8429  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8430  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8431  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8432  {
    8433  ++nextAlloc2ndIndex;
    8434  }
    8435 
    8436  // Found non-null allocation.
    8437  if(nextAlloc2ndIndex < suballoc2ndCount)
    8438  {
    8439  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8440 
    8441  // 1. Process free space before this allocation.
    8442  if(lastOffset < suballoc.offset)
    8443  {
    8444  // There is free space from lastOffset to suballoc.offset.
    8445  ++unusedRangeCount;
    8446  }
    8447 
    8448  // 2. Process this allocation.
    8449  // There is allocation with suballoc.offset, suballoc.size.
    8450  ++alloc2ndCount;
    8451  usedBytes += suballoc.size;
    8452 
    8453  // 3. Prepare for next iteration.
    8454  lastOffset = suballoc.offset + suballoc.size;
    8455  ++nextAlloc2ndIndex;
    8456  }
    8457  // We are at the end.
    8458  else
    8459  {
    8460  if(lastOffset < freeSpace2ndTo1stEnd)
    8461  {
    8462  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8463  ++unusedRangeCount;
    8464  }
    8465 
    8466  // End of loop.
    8467  lastOffset = freeSpace2ndTo1stEnd;
    8468  }
    8469  }
    8470  }
    8471 
    8472  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8473  size_t alloc1stCount = 0;
    8474  const VkDeviceSize freeSpace1stTo2ndEnd =
    8475  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8476  while(lastOffset < freeSpace1stTo2ndEnd)
    8477  {
    8478  // Find next non-null allocation or move nextAllocIndex to the end.
    8479  while(nextAlloc1stIndex < suballoc1stCount &&
    8480  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8481  {
    8482  ++nextAlloc1stIndex;
    8483  }
    8484 
    8485  // Found non-null allocation.
    8486  if(nextAlloc1stIndex < suballoc1stCount)
    8487  {
    8488  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8489 
    8490  // 1. Process free space before this allocation.
    8491  if(lastOffset < suballoc.offset)
    8492  {
    8493  // There is free space from lastOffset to suballoc.offset.
    8494  ++unusedRangeCount;
    8495  }
    8496 
    8497  // 2. Process this allocation.
    8498  // There is allocation with suballoc.offset, suballoc.size.
    8499  ++alloc1stCount;
    8500  usedBytes += suballoc.size;
    8501 
    8502  // 3. Prepare for next iteration.
    8503  lastOffset = suballoc.offset + suballoc.size;
    8504  ++nextAlloc1stIndex;
    8505  }
    8506  // We are at the end.
    8507  else
    8508  {
    8509  if(lastOffset < size)
    8510  {
    8511  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8512  ++unusedRangeCount;
    8513  }
    8514 
    8515  // End of loop.
    8516  lastOffset = freeSpace1stTo2ndEnd;
    8517  }
    8518  }
    8519 
    8520  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8521  {
    8522  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8523  while(lastOffset < size)
    8524  {
    8525  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8526  while(nextAlloc2ndIndex != SIZE_MAX &&
    8527  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8528  {
    8529  --nextAlloc2ndIndex;
    8530  }
    8531 
    8532  // Found non-null allocation.
    8533  if(nextAlloc2ndIndex != SIZE_MAX)
    8534  {
    8535  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8536 
    8537  // 1. Process free space before this allocation.
    8538  if(lastOffset < suballoc.offset)
    8539  {
    8540  // There is free space from lastOffset to suballoc.offset.
    8541  ++unusedRangeCount;
    8542  }
    8543 
    8544  // 2. Process this allocation.
    8545  // There is allocation with suballoc.offset, suballoc.size.
    8546  ++alloc2ndCount;
    8547  usedBytes += suballoc.size;
    8548 
    8549  // 3. Prepare for next iteration.
    8550  lastOffset = suballoc.offset + suballoc.size;
    8551  --nextAlloc2ndIndex;
    8552  }
    8553  // We are at the end.
    8554  else
    8555  {
    8556  if(lastOffset < size)
    8557  {
    8558  // There is free space from lastOffset to size.
    8559  ++unusedRangeCount;
    8560  }
    8561 
    8562  // End of loop.
    8563  lastOffset = size;
    8564  }
    8565  }
    8566  }
    8567 
    8568  const VkDeviceSize unusedBytes = size - usedBytes;
    8569  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    8570 
    8571  // SECOND PASS
    8572  lastOffset = 0;
    8573 
    8574  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8575  {
    8576  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8577  size_t nextAlloc2ndIndex = 0;
    8578  while(lastOffset < freeSpace2ndTo1stEnd)
    8579  {
    8580  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8581  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8582  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8583  {
    8584  ++nextAlloc2ndIndex;
    8585  }
    8586 
    8587  // Found non-null allocation.
    8588  if(nextAlloc2ndIndex < suballoc2ndCount)
    8589  {
    8590  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8591 
    8592  // 1. Process free space before this allocation.
    8593  if(lastOffset < suballoc.offset)
    8594  {
    8595  // There is free space from lastOffset to suballoc.offset.
    8596  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8597  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8598  }
    8599 
    8600  // 2. Process this allocation.
    8601  // There is allocation with suballoc.offset, suballoc.size.
    8602  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8603 
    8604  // 3. Prepare for next iteration.
    8605  lastOffset = suballoc.offset + suballoc.size;
    8606  ++nextAlloc2ndIndex;
    8607  }
    8608  // We are at the end.
    8609  else
    8610  {
    8611  if(lastOffset < freeSpace2ndTo1stEnd)
    8612  {
    8613  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8614  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8615  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8616  }
    8617 
    8618  // End of loop.
    8619  lastOffset = freeSpace2ndTo1stEnd;
    8620  }
    8621  }
    8622  }
    8623 
    8624  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8625  while(lastOffset < freeSpace1stTo2ndEnd)
    8626  {
    8627  // Find next non-null allocation or move nextAllocIndex to the end.
    8628  while(nextAlloc1stIndex < suballoc1stCount &&
    8629  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8630  {
    8631  ++nextAlloc1stIndex;
    8632  }
    8633 
    8634  // Found non-null allocation.
    8635  if(nextAlloc1stIndex < suballoc1stCount)
    8636  {
    8637  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8638 
    8639  // 1. Process free space before this allocation.
    8640  if(lastOffset < suballoc.offset)
    8641  {
    8642  // There is free space from lastOffset to suballoc.offset.
    8643  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8644  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8645  }
    8646 
    8647  // 2. Process this allocation.
    8648  // There is allocation with suballoc.offset, suballoc.size.
    8649  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8650 
    8651  // 3. Prepare for next iteration.
    8652  lastOffset = suballoc.offset + suballoc.size;
    8653  ++nextAlloc1stIndex;
    8654  }
    8655  // We are at the end.
    8656  else
    8657  {
    8658  if(lastOffset < freeSpace1stTo2ndEnd)
    8659  {
    8660  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8661  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8662  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8663  }
    8664 
    8665  // End of loop.
    8666  lastOffset = freeSpace1stTo2ndEnd;
    8667  }
    8668  }
    8669 
    8670  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8671  {
    8672  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8673  while(lastOffset < size)
    8674  {
    8675  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8676  while(nextAlloc2ndIndex != SIZE_MAX &&
    8677  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8678  {
    8679  --nextAlloc2ndIndex;
    8680  }
    8681 
    8682  // Found non-null allocation.
    8683  if(nextAlloc2ndIndex != SIZE_MAX)
    8684  {
    8685  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8686 
    8687  // 1. Process free space before this allocation.
    8688  if(lastOffset < suballoc.offset)
    8689  {
    8690  // There is free space from lastOffset to suballoc.offset.
    8691  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8692  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8693  }
    8694 
    8695  // 2. Process this allocation.
    8696  // There is allocation with suballoc.offset, suballoc.size.
    8697  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8698 
    8699  // 3. Prepare for next iteration.
    8700  lastOffset = suballoc.offset + suballoc.size;
    8701  --nextAlloc2ndIndex;
    8702  }
    8703  // We are at the end.
    8704  else
    8705  {
    8706  if(lastOffset < size)
    8707  {
    8708  // There is free space from lastOffset to size.
    8709  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8710  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8711  }
    8712 
    8713  // End of loop.
    8714  lastOffset = size;
    8715  }
    8716  }
    8717  }
    8718 
    8719  PrintDetailedMap_End(json);
    8720 }
    8721 #endif // #if VMA_STATS_STRING_ENABLED
    8722 
    8723 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    8724  uint32_t currentFrameIndex,
    8725  uint32_t frameInUseCount,
    8726  VkDeviceSize bufferImageGranularity,
    8727  VkDeviceSize allocSize,
    8728  VkDeviceSize allocAlignment,
    8729  bool upperAddress,
    8730  VmaSuballocationType allocType,
    8731  bool canMakeOtherLost,
    8732  uint32_t strategy,
    8733  VmaAllocationRequest* pAllocationRequest)
    8734 {
    8735  VMA_ASSERT(allocSize > 0);
    8736  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8737  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    8738  VMA_HEAVY_ASSERT(Validate());
    8739 
    8740  const VkDeviceSize size = GetSize();
    8741  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8742  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8743 
    8744  if(upperAddress)
    8745  {
    8746  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8747  {
    8748  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    8749  return false;
    8750  }
    8751 
    8752  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    8753  if(allocSize > size)
    8754  {
    8755  return false;
    8756  }
    8757  VkDeviceSize resultBaseOffset = size - allocSize;
    8758  if(!suballocations2nd.empty())
    8759  {
    8760  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8761  resultBaseOffset = lastSuballoc.offset - allocSize;
    8762  if(allocSize > lastSuballoc.offset)
    8763  {
    8764  return false;
    8765  }
    8766  }
    8767 
    8768  // Start from offset equal to end of free space.
    8769  VkDeviceSize resultOffset = resultBaseOffset;
    8770 
    8771  // Apply VMA_DEBUG_MARGIN at the end.
    8772  if(VMA_DEBUG_MARGIN > 0)
    8773  {
    8774  if(resultOffset < VMA_DEBUG_MARGIN)
    8775  {
    8776  return false;
    8777  }
    8778  resultOffset -= VMA_DEBUG_MARGIN;
    8779  }
    8780 
    8781  // Apply alignment.
    8782  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    8783 
    8784  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    8785  // Make bigger alignment if necessary.
    8786  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8787  {
    8788  bool bufferImageGranularityConflict = false;
    8789  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8790  {
    8791  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8792  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8793  {
    8794  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    8795  {
    8796  bufferImageGranularityConflict = true;
    8797  break;
    8798  }
    8799  }
    8800  else
    8801  // Already on previous page.
    8802  break;
    8803  }
    8804  if(bufferImageGranularityConflict)
    8805  {
    8806  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    8807  }
    8808  }
    8809 
    8810  // There is enough free space.
    8811  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    8812  suballocations1st.back().offset + suballocations1st.back().size :
    8813  0;
    8814  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    8815  {
    8816  // Check previous suballocations for BufferImageGranularity conflicts.
    8817  // If conflict exists, allocation cannot be made here.
    8818  if(bufferImageGranularity > 1)
    8819  {
    8820  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8821  {
    8822  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8823  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8824  {
    8825  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    8826  {
    8827  return false;
    8828  }
    8829  }
    8830  else
    8831  {
    8832  // Already on next page.
    8833  break;
    8834  }
    8835  }
    8836  }
    8837 
    8838  // All tests passed: Success.
    8839  pAllocationRequest->offset = resultOffset;
    8840  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    8841  pAllocationRequest->sumItemSize = 0;
    8842  // pAllocationRequest->item unused.
    8843  pAllocationRequest->itemsToMakeLostCount = 0;
    8844  return true;
    8845  }
    8846  }
    8847  else // !upperAddress
    8848  {
    8849  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8850  {
    8851  // Try to allocate at the end of 1st vector.
    8852 
    8853  VkDeviceSize resultBaseOffset = 0;
    8854  if(!suballocations1st.empty())
    8855  {
    8856  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    8857  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8858  }
    8859 
    8860  // Start from offset equal to beginning of free space.
    8861  VkDeviceSize resultOffset = resultBaseOffset;
    8862 
    8863  // Apply VMA_DEBUG_MARGIN at the beginning.
    8864  if(VMA_DEBUG_MARGIN > 0)
    8865  {
    8866  resultOffset += VMA_DEBUG_MARGIN;
    8867  }
    8868 
    8869  // Apply alignment.
    8870  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8871 
    8872  // Check previous suballocations for BufferImageGranularity conflicts.
    8873  // Make bigger alignment if necessary.
    8874  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    8875  {
    8876  bool bufferImageGranularityConflict = false;
    8877  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8878  {
    8879  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8880  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8881  {
    8882  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8883  {
    8884  bufferImageGranularityConflict = true;
    8885  break;
    8886  }
    8887  }
    8888  else
    8889  // Already on previous page.
    8890  break;
    8891  }
    8892  if(bufferImageGranularityConflict)
    8893  {
    8894  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8895  }
    8896  }
    8897 
    8898  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    8899  suballocations2nd.back().offset : size;
    8900 
    8901  // There is enough free space at the end after alignment.
    8902  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    8903  {
    8904  // Check next suballocations for BufferImageGranularity conflicts.
    8905  // If conflict exists, allocation cannot be made here.
    8906  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8907  {
    8908  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8909  {
    8910  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8911  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8912  {
    8913  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8914  {
    8915  return false;
    8916  }
    8917  }
    8918  else
    8919  {
    8920  // Already on previous page.
    8921  break;
    8922  }
    8923  }
    8924  }
    8925 
    8926  // All tests passed: Success.
    8927  pAllocationRequest->offset = resultOffset;
    8928  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    8929  pAllocationRequest->sumItemSize = 0;
    8930  // pAllocationRequest->item unused.
    8931  pAllocationRequest->itemsToMakeLostCount = 0;
    8932  return true;
    8933  }
    8934  }
    8935 
    8936  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    8937  // beginning of 1st vector as the end of free space.
    8938  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8939  {
    8940  VMA_ASSERT(!suballocations1st.empty());
    8941 
    8942  VkDeviceSize resultBaseOffset = 0;
    8943  if(!suballocations2nd.empty())
    8944  {
    8945  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8946  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8947  }
    8948 
    8949  // Start from offset equal to beginning of free space.
    8950  VkDeviceSize resultOffset = resultBaseOffset;
    8951 
    8952  // Apply VMA_DEBUG_MARGIN at the beginning.
    8953  if(VMA_DEBUG_MARGIN > 0)
    8954  {
    8955  resultOffset += VMA_DEBUG_MARGIN;
    8956  }
    8957 
    8958  // Apply alignment.
    8959  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8960 
    8961  // Check previous suballocations for BufferImageGranularity conflicts.
    8962  // Make bigger alignment if necessary.
    8963  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8964  {
    8965  bool bufferImageGranularityConflict = false;
    8966  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    8967  {
    8968  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    8969  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8970  {
    8971  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8972  {
    8973  bufferImageGranularityConflict = true;
    8974  break;
    8975  }
    8976  }
    8977  else
    8978  // Already on previous page.
    8979  break;
    8980  }
    8981  if(bufferImageGranularityConflict)
    8982  {
    8983  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8984  }
    8985  }
    8986 
    8987  pAllocationRequest->itemsToMakeLostCount = 0;
    8988  pAllocationRequest->sumItemSize = 0;
    8989  size_t index1st = m_1stNullItemsBeginCount;
    8990 
    8991  if(canMakeOtherLost)
    8992  {
    8993  while(index1st < suballocations1st.size() &&
    8994  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    8995  {
    8996  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    8997  const VmaSuballocation& suballoc = suballocations1st[index1st];
    8998  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    8999  {
    9000  // No problem.
    9001  }
    9002  else
    9003  {
    9004  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    9005  if(suballoc.hAllocation->CanBecomeLost() &&
    9006  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    9007  {
    9008  ++pAllocationRequest->itemsToMakeLostCount;
    9009  pAllocationRequest->sumItemSize += suballoc.size;
    9010  }
    9011  else
    9012  {
    9013  return false;
    9014  }
    9015  }
    9016  ++index1st;
    9017  }
    9018 
    9019  // Check next suballocations for BufferImageGranularity conflicts.
    9020  // If conflict exists, we must mark more allocations lost or fail.
    9021  if(bufferImageGranularity > 1)
    9022  {
    9023  while(index1st < suballocations1st.size())
    9024  {
    9025  const VmaSuballocation& suballoc = suballocations1st[index1st];
    9026  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    9027  {
    9028  if(suballoc.hAllocation != VK_NULL_HANDLE)
    9029  {
    9030  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    9031  if(suballoc.hAllocation->CanBecomeLost() &&
    9032  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    9033  {
    9034  ++pAllocationRequest->itemsToMakeLostCount;
    9035  pAllocationRequest->sumItemSize += suballoc.size;
    9036  }
    9037  else
    9038  {
    9039  return false;
    9040  }
    9041  }
    9042  }
    9043  else
    9044  {
    9045  // Already on next page.
    9046  break;
    9047  }
    9048  ++index1st;
    9049  }
    9050  }
    9051  }
    9052 
    9053  // There is enough free space at the end after alignment.
    9054  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
    9055  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    9056  {
    9057  // Check next suballocations for BufferImageGranularity conflicts.
    9058  // If conflict exists, allocation cannot be made here.
    9059  if(bufferImageGranularity > 1)
    9060  {
    9061  for(size_t nextSuballocIndex = index1st;
    9062  nextSuballocIndex < suballocations1st.size();
    9063  nextSuballocIndex++)
    9064  {
    9065  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    9066  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9067  {
    9068  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    9069  {
    9070  return false;
    9071  }
    9072  }
    9073  else
    9074  {
    9075  // Already on next page.
    9076  break;
    9077  }
    9078  }
    9079  }
    9080 
    9081  // All tests passed: Success.
    9082  pAllocationRequest->offset = resultOffset;
    9083  pAllocationRequest->sumFreeSize =
    9084  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    9085  - resultBaseOffset
    9086  - pAllocationRequest->sumItemSize;
    9087  // pAllocationRequest->item unused.
    9088  return true;
    9089  }
    9090  }
    9091  }
    9092 
    9093  return false;
    9094 }
    9095 
    // Marks as lost the allocations that were counted into
    // pAllocationRequest->itemsToMakeLostCount by CreateAllocationRequest():
    // the colliding items at the beginning of the 1st vector. Frees their
    // space so the pending allocation can proceed.
    // Returns false if any of them cannot be made lost (MakeLost fails).
    9096 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    9097  uint32_t currentFrameIndex,
    9098  uint32_t frameInUseCount,
    9099  VmaAllocationRequest* pAllocationRequest)
    9100 {
    // Fast path: the request required no allocations to be made lost.
    9101  if(pAllocationRequest->itemsToMakeLostCount == 0)
    9102  {
    9103  return true;
    9104  }
    9105 
    // Making allocations lost only happens in ring-buffer mode (or before the
    // 2nd vector is started); never in double-stack mode.
    9106  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    9107 
    9108  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    // Walk the 1st vector starting at its first non-null item.
    9109  size_t index1st = m_1stNullItemsBeginCount;
    9110  size_t madeLostCount = 0;
    9111  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    9112  {
    9113  VMA_ASSERT(index1st < suballocations1st.size());
    9114  VmaSuballocation& suballoc = suballocations1st[index1st];
    // Items already free are skipped; only used items count toward the quota.
    9115  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9116  {
    9117  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    9118  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    9119  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9120  {
    // Turn the suballocation into a null (free) item and update bookkeeping.
    9121  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9122  suballoc.hAllocation = VK_NULL_HANDLE;
    9123  m_SumFreeSize += suballoc.size;
    9124  ++m_1stNullItemsMiddleCount;
    9125  ++madeLostCount;
    9126  }
    9127  else
    9128  {
    9129  return false;
    9130  }
    9131  }
    9132  ++index1st;
    9133  }
    9134 
    // Compact/trim the null items created above and restore invariants.
    9135  CleanupAfterFree();
    9136  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
    9137 
    9138  return true;
    9139 }
    9140 
    9141 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9142 {
    9143  uint32_t lostAllocationCount = 0;
    9144 
    9145  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9146  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9147  {
    9148  VmaSuballocation& suballoc = suballocations1st[i];
    9149  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9150  suballoc.hAllocation->CanBecomeLost() &&
    9151  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9152  {
    9153  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9154  suballoc.hAllocation = VK_NULL_HANDLE;
    9155  ++m_1stNullItemsMiddleCount;
    9156  m_SumFreeSize += suballoc.size;
    9157  ++lostAllocationCount;
    9158  }
    9159  }
    9160 
    9161  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9162  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9163  {
    9164  VmaSuballocation& suballoc = suballocations2nd[i];
    9165  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9166  suballoc.hAllocation->CanBecomeLost() &&
    9167  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9168  {
    9169  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9170  suballoc.hAllocation = VK_NULL_HANDLE;
    9171  ++m_2ndNullItemsCount;
    9172  ++lostAllocationCount;
    9173  }
    9174  }
    9175 
    9176  if(lostAllocationCount)
    9177  {
    9178  CleanupAfterFree();
    9179  }
    9180 
    9181  return lostAllocationCount;
    9182 }
    9183 
    9184 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    9185 {
    9186  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9187  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9188  {
    9189  const VmaSuballocation& suballoc = suballocations1st[i];
    9190  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9191  {
    9192  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9193  {
    9194  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9195  return VK_ERROR_VALIDATION_FAILED_EXT;
    9196  }
    9197  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9198  {
    9199  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9200  return VK_ERROR_VALIDATION_FAILED_EXT;
    9201  }
    9202  }
    9203  }
    9204 
    9205  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9206  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9207  {
    9208  const VmaSuballocation& suballoc = suballocations2nd[i];
    9209  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9210  {
    9211  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9212  {
    9213  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9214  return VK_ERROR_VALIDATION_FAILED_EXT;
    9215  }
    9216  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9217  {
    9218  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9219  return VK_ERROR_VALIDATION_FAILED_EXT;
    9220  }
    9221  }
    9222  }
    9223 
    9224  return VK_SUCCESS;
    9225 }
    9226 
    // Commits a previously created allocation request: inserts the new
    // suballocation into the appropriate vector, possibly switching the
    // 2nd-vector mode, and subtracts the size from m_SumFreeSize.
    9227 void VmaBlockMetadata_Linear::Alloc(
    9228  const VmaAllocationRequest& request,
    9229  VmaSuballocationType type,
    9230  VkDeviceSize allocSize,
    9231  bool upperAddress,
    9232  VmaAllocation hAllocation)
    9233 {
    9234  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
    9235 
    9236  if(upperAddress)
    9237  {
    // Upper-address allocation: 2nd vector serves as the upper stack.
    9238  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
    9239  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
    9240  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9241  suballocations2nd.push_back(newSuballoc);
    9242  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    9243  }
    9244  else
    9245  {
    9246  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9247 
    9248  // First allocation.
    9249  if(suballocations1st.empty())
    9250  {
    9251  suballocations1st.push_back(newSuballoc);
    9252  }
    9253  else
    9254  {
    9255  // New allocation at the end of 1st vector.
    9256  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
    9257  {
    9258  // Check if it fits before the end of the block.
    9259  VMA_ASSERT(request.offset + allocSize <= GetSize());
    9260  suballocations1st.push_back(newSuballoc);
    9261  }
    9262  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
    9263  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
    9264  {
    9265  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9266 
    // Entering (or continuing) ring-buffer mode; double-stack mode is
    // incompatible with it and asserts.
    9267  switch(m_2ndVectorMode)
    9268  {
    9269  case SECOND_VECTOR_EMPTY:
    9270  // First allocation from second part ring buffer.
    9271  VMA_ASSERT(suballocations2nd.empty());
    9272  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
    9273  break;
    9274  case SECOND_VECTOR_RING_BUFFER:
    9275  // 2-part ring buffer is already started.
    9276  VMA_ASSERT(!suballocations2nd.empty());
    9277  break;
    9278  case SECOND_VECTOR_DOUBLE_STACK:
    9279  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
    9280  break;
    9281  default:
    9282  VMA_ASSERT(0);
    9283  }
    9284 
    9285  suballocations2nd.push_back(newSuballoc);
    9286  }
    9287  else
    9288  {
    // The request's offset matches neither valid insertion point: internal bug.
    9289  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    9290  }
    9291  }
    9292  }
    9293 
    9294  m_SumFreeSize -= newSuballoc.size;
    9295 }
    9296 
    9297 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9298 {
    9299  FreeAtOffset(allocation->GetOffset());
    9300 }
    9301 
    // Frees the suballocation that starts at the given offset.
    // Tries the cheap O(1) cases first (first item of 1st vector, last item of
    // either vector), then falls back to binary search in the middle of each
    // vector. Asserts if the offset belongs to no suballocation.
    9302 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
    9303 {
    9304  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9305  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9306 
    9307  if(!suballocations1st.empty())
    9308  {
    9309  // First allocation: Mark it as next empty at the beginning.
    9310  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    9311  if(firstSuballoc.offset == offset)
    9312  {
    9313  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9314  firstSuballoc.hAllocation = VK_NULL_HANDLE;
    9315  m_SumFreeSize += firstSuballoc.size;
    9316  ++m_1stNullItemsBeginCount;
    9317  CleanupAfterFree();
    9318  return;
    9319  }
    9320  }
    9321 
    9322  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    9323  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
    9324  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9325  {
    9326  VmaSuballocation& lastSuballoc = suballocations2nd.back();
    9327  if(lastSuballoc.offset == offset)
    9328  {
    9329  m_SumFreeSize += lastSuballoc.size;
    9330  suballocations2nd.pop_back();
    9331  CleanupAfterFree();
    9332  return;
    9333  }
    9334  }
    9335  // Last allocation in 1st vector.
    9336  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    9337  {
    9338  VmaSuballocation& lastSuballoc = suballocations1st.back();
    9339  if(lastSuballoc.offset == offset)
    9340  {
    9341  m_SumFreeSize += lastSuballoc.size;
    9342  suballocations1st.pop_back();
    9343  CleanupAfterFree();
    9344  return;
    9345  }
    9346  }
    9347 
    9348  // Item from the middle of 1st vector.
    9349  {
    // Search key: only the offset is compared by VmaSuballocationOffsetLess.
    9350  VmaSuballocation refSuballoc;
    9351  refSuballoc.offset = offset;
    9352  // Rest of members stays uninitialized intentionally for better performance.
    9353  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
    9354  suballocations1st.begin() + m_1stNullItemsBeginCount,
    9355  suballocations1st.end(),
    9356  refSuballoc);
    9357  if(it != suballocations1st.end())
    9358  {
    // Freed items in the middle become null items; the vector is not erased
    // here, CleanupAfterFree() compacts it when worthwhile.
    9359  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    9360  it->hAllocation = VK_NULL_HANDLE;
    9361  ++m_1stNullItemsMiddleCount;
    9362  m_SumFreeSize += it->size;
    9363  CleanupAfterFree();
    9364  return;
    9365  }
    9366  }
    9367 
    9368  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    9369  {
    9370  // Item from the middle of 2nd vector.
    9371  VmaSuballocation refSuballoc;
    9372  refSuballoc.offset = offset;
    9373  // Rest of members stays uninitialized intentionally for better performance.
    // The 2nd vector is sorted ascending in ring-buffer mode but descending in
    // double-stack mode, hence the two comparators.
    9374  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
    9375  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
    9376  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
    9377  if(it != suballocations2nd.end())
    9378  {
    9379  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    9380  it->hAllocation = VK_NULL_HANDLE;
    9381  ++m_2ndNullItemsCount;
    9382  m_SumFreeSize += it->size;
    9383  CleanupAfterFree();
    9384  return;
    9385  }
    9386  }
    9387 
    9388  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
    9389 }
    9390 
    9391 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9392 {
    9393  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9394  const size_t suballocCount = AccessSuballocations1st().size();
    9395  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9396 }
    9397 
    // Restores the class invariants after one or more suballocations were
    // freed/made lost: trims and compacts null items, resets modes when a
    // vector empties, and swaps the vectors when the 1st one drains in
    // ring-buffer mode. Ends with a heavy validation.
    9398 void VmaBlockMetadata_Linear::CleanupAfterFree()
    9399 {
    9400  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9401  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9402 
    9403  if(IsEmpty())
    9404  {
    // Whole block free: reset everything to the initial state.
    9405  suballocations1st.clear();
    9406  suballocations2nd.clear();
    9407  m_1stNullItemsBeginCount = 0;
    9408  m_1stNullItemsMiddleCount = 0;
    9409  m_2ndNullItemsCount = 0;
    9410  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    9411  }
    9412  else
    9413  {
    9414  const size_t suballoc1stCount = suballocations1st.size();
    9415  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9416  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
    9417 
    9418  // Find more null items at the beginning of 1st vector.
    9419  while(m_1stNullItemsBeginCount < suballoc1stCount &&
    9420  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    9421  {
    9422  ++m_1stNullItemsBeginCount;
    9423  --m_1stNullItemsMiddleCount;
    9424  }
    9425 
    9426  // Find more null items at the end of 1st vector.
    9427  while(m_1stNullItemsMiddleCount > 0 &&
    9428  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
    9429  {
    9430  --m_1stNullItemsMiddleCount;
    9431  suballocations1st.pop_back();
    9432  }
    9433 
    9434  // Find more null items at the end of 2nd vector.
    9435  while(m_2ndNullItemsCount > 0 &&
    9436  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
    9437  {
    9438  --m_2ndNullItemsCount;
    9439  suballocations2nd.pop_back();
    9440  }
    9441 
    9442  if(ShouldCompact1st())
    9443  {
    // Compaction: shift all non-null items of the 1st vector to the front,
    // preserving their order, then shrink the vector to just those items.
    9444  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
    9445  size_t srcIndex = m_1stNullItemsBeginCount;
    9446  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
    9447  {
    9448  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
    9449  {
    9450  ++srcIndex;
    9451  }
    9452  if(dstIndex != srcIndex)
    9453  {
    9454  suballocations1st[dstIndex] = suballocations1st[srcIndex];
    9455  }
    9456  ++srcIndex;
    9457  }
    9458  suballocations1st.resize(nonNullItemCount);
    9459  m_1stNullItemsBeginCount = 0;
    9460  m_1stNullItemsMiddleCount = 0;
    9461  }
    9462 
    9463  // 2nd vector became empty.
    9464  if(suballocations2nd.empty())
    9465  {
    9466  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    9467  }
    9468 
    9469  // 1st vector became empty.
    9470  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
    9471  {
    9472  suballocations1st.clear();
    9473  m_1stNullItemsBeginCount = 0;
    9474 
    9475  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9476  {
    9477  // Swap 1st with 2nd. Now 2nd is empty.
    9478  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    // The former 2nd-vector null items become middle null items of the new
    // 1st vector; then leading nulls are re-counted into the begin count.
    9479  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
    9480  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
    9481  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    9482  {
    9483  ++m_1stNullItemsBeginCount;
    9484  --m_1stNullItemsMiddleCount;
    9485  }
    9486  m_2ndNullItemsCount = 0;
    // Flipping this index is what actually swaps which vector is "1st".
    9487  m_1stVectorIndex ^= 1;
    9488  }
    9489  }
    9490  }
    9491 
    9492  VMA_HEAVY_ASSERT(Validate());
    9493 }
    9494 
    9495 
    9497 // class VmaBlockMetadata_Buddy
    9498 
    // Constructs an empty buddy-allocator metadata object.
    // m_FreeCount starts at 1 because Init() will create the single free root
    // node; the free lists start empty.
    9499 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    9500  VmaBlockMetadata(hAllocator),
    9501  m_Root(VMA_NULL),
    9502  m_AllocationCount(0),
    9503  m_FreeCount(1),
    9504  m_SumFreeSize(0)
    9505 {
    9506  memset(m_FreeList, 0, sizeof(m_FreeList));
    9507 }
    9508 
    // Recursively deletes the whole buddy tree starting from the root.
    9509 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
    9510 {
    9511  DeleteNode(m_Root);
    9512 }
    9513 
    // Initializes the buddy allocator for a block of the given size.
    // The usable size is the size rounded down to a power of two; any
    // remaining tail is permanently unusable by this algorithm.
    9514 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    9515 {
    9516  VmaBlockMetadata::Init(size);
    9517 
    9518  m_UsableSize = VmaPrevPow2(size);
    9519  m_SumFreeSize = m_UsableSize;
    9520 
    9521  // Calculate m_LevelCount.
    // Levels halve the node size each step; stop before nodes would get
    // smaller than MIN_NODE_SIZE or exceed MAX_LEVELS.
    9522  m_LevelCount = 1;
    9523  while(m_LevelCount < MAX_LEVELS &&
    9524  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    9525  {
    9526  ++m_LevelCount;
    9527  }
    9528 
    // Single free root node covering the whole usable size.
    9529  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    9530  rootNode->offset = 0;
    9531  rootNode->type = Node::TYPE_FREE;
    9532  rootNode->parent = VMA_NULL;
    9533  rootNode->buddy = VMA_NULL;
    9534 
    9535  m_Root = rootNode;
    9536  AddToFreeListFront(0, rootNode);
    9537 }
    9538 
    // Full consistency check of the buddy allocator: validates the node tree,
    // the cached counters, and the doubly-linked free list of every level.
    // Returns true when everything is consistent (VMA_VALIDATE returns false
    // on the first failed condition).
    9539 bool VmaBlockMetadata_Buddy::Validate() const
    9540 {
    9541  // Validate tree.
    9542  ValidationContext ctx;
    9543  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    9544  {
    9545  VMA_VALIDATE(false && "ValidateNode failed.");
    9546  }
    // Counters recomputed by the tree walk must match the cached members.
    9547  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    9548  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
    9549 
    9550  // Validate free node lists.
    9551  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9552  {
    9553  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
    9554  m_FreeList[level].front->free.prev == VMA_NULL);
    9555 
    9556  for(Node* node = m_FreeList[level].front;
    9557  node != VMA_NULL;
    9558  node = node->free.next)
    9559  {
    9560  VMA_VALIDATE(node->type == Node::TYPE_FREE);
    9561 
    // Tail node must be the recorded back pointer; interior nodes must be
    // properly back-linked by their successor.
    9562  if(node->free.next == VMA_NULL)
    9563  {
    9564  VMA_VALIDATE(m_FreeList[level].back == node);
    9565  }
    9566  else
    9567  {
    9568  VMA_VALIDATE(node->free.next->free.prev == node);
    9569  }
    9570  }
    9571  }
    9572 
    9573  // Validate that free lists at higher levels are empty.
    9574  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    9575  {
    9576  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    9577  }
    9578 
    9579  return true;
    9580 }
    9581 
    9582 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9583 {
    9584  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9585  {
    9586  if(m_FreeList[level].front != VMA_NULL)
    9587  {
    9588  return LevelToNodeSize(level);
    9589  }
    9590  }
    9591  return 0;
    9592 }
    9593 
    // Fills outInfo with statistics for this single block by walking the
    // buddy tree, then accounts for the unusable tail (the part of the block
    // beyond the power-of-two usable size) as one extra unused range.
    9594 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9595 {
    9596  const VkDeviceSize unusableSize = GetUnusableSize();
    9597 
    9598  outInfo.blockCount = 1;
    9599 
    // Zero the accumulators; the tree walk below adds into them.
    9600  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    9601  outInfo.usedBytes = outInfo.unusedBytes = 0;
    9602 
    9603  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    9604  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    9605  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    9606 
    9607  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    9608 
    9609  if(unusableSize > 0)
    9610  {
    9611  ++outInfo.unusedRangeCount;
    9612  outInfo.unusedBytes += unusableSize;
    9613  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    9614  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    9615  }
    9616 }
    9617 
    9618 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9619 {
    9620  const VkDeviceSize unusableSize = GetUnusableSize();
    9621 
    9622  inoutStats.size += GetSize();
    9623  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9624  inoutStats.allocationCount += m_AllocationCount;
    9625  inoutStats.unusedRangeCount += m_FreeCount;
    9626  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9627 
    9628  if(unusableSize > 0)
    9629  {
    9630  ++inoutStats.unusedRangeCount;
    9631  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9632  }
    9633 }
    9634 
    9635 #if VMA_STATS_STRING_ENABLED
    9636 
    // Writes a detailed JSON map of this block: summary statistics, every
    // node of the buddy tree, and the unusable tail as a final unused range.
    9637 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    9638 {
    9639  // TODO optimize
    // Statistics are recomputed here via a full tree walk just for the header.
    9640  VmaStatInfo stat;
    9641  CalcAllocationStatInfo(stat);
    9642 
    9643  PrintDetailedMap_Begin(
    9644  json,
    9645  stat.unusedBytes,
    9646  stat.allocationCount,
    9647  stat.unusedRangeCount);
    9648 
    9649  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    9650 
    // The tail beyond the power-of-two usable size is reported as unused.
    9651  const VkDeviceSize unusableSize = GetUnusableSize();
    9652  if(unusableSize > 0)
    9653  {
    9654  PrintDetailedMap_UnusedRange(json,
    9655  m_UsableSize, // offset
    9656  unusableSize); // size
    9657  }
    9658 
    9659  PrintDetailedMap_End(json);
    9660 }
    9661 
    9662 #endif // #if VMA_STATS_STRING_ENABLED
    9663 
// Searches the free lists of the buddy tree for a node that can hold
// allocSize bytes at allocAlignment. On success fills *pAllocationRequest
// and returns true; the level of the chosen node is passed to Alloc()
// through customData. Lost allocations are not supported by this metadata
// type, so currentFrameIndex, frameInUseCount, canMakeOtherLost and
// strategy are not used here.
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    // A request larger than the whole usable block can never succeed.
    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // Scan from the deepest level whose node size still fits the request
    // (targetLevel) back up toward the root; note the `level--` form visits
    // targetLevel, targetLevel-1, ..., 0.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Remember at which level the node was found; Alloc() uses it.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    9714 
    9715 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9716  uint32_t currentFrameIndex,
    9717  uint32_t frameInUseCount,
    9718  VmaAllocationRequest* pAllocationRequest)
    9719 {
    9720  /*
    9721  Lost allocations are not supported in buddy allocator at the moment.
    9722  Support might be added in the future.
    9723  */
    9724  return pAllocationRequest->itemsToMakeLostCount == 0;
    9725 }
    9726 
    9727 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9728 {
    9729  /*
    9730  Lost allocations are not supported in buddy allocator at the moment.
    9731  Support might be added in the future.
    9732  */
    9733  return 0;
    9734 }
    9735 
// Commits a request previously produced by CreateAllocationRequest():
// finds the chosen free node (its level travels in request.customData),
// splits nodes downward until the target level is reached, then converts
// the final node into an allocation node holding hAllocation.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the free node selected by CreateAllocationRequest by its offset.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // (leftChild must end up at the front so the descent below picks it.)
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        // One free node was replaced by two: net +1 free node.
        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    // Update cached totals: one free node became an allocation node.
    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9810 
    9811 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9812 {
    9813  if(node->type == Node::TYPE_SPLIT)
    9814  {
    9815  DeleteNode(node->split.leftChild->buddy);
    9816  DeleteNode(node->split.leftChild);
    9817  }
    9818 
    9819  vma_delete(GetAllocationCallbacks(), node);
    9820 }
    9821 
    9822 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    9823 {
    9824  VMA_VALIDATE(level < m_LevelCount);
    9825  VMA_VALIDATE(curr->parent == parent);
    9826  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    9827  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    9828  switch(curr->type)
    9829  {
    9830  case Node::TYPE_FREE:
    9831  // curr->free.prev, next are validated separately.
    9832  ctx.calculatedSumFreeSize += levelNodeSize;
    9833  ++ctx.calculatedFreeCount;
    9834  break;
    9835  case Node::TYPE_ALLOCATION:
    9836  ++ctx.calculatedAllocationCount;
    9837  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    9838  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    9839  break;
    9840  case Node::TYPE_SPLIT:
    9841  {
    9842  const uint32_t childrenLevel = level + 1;
    9843  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    9844  const Node* const leftChild = curr->split.leftChild;
    9845  VMA_VALIDATE(leftChild != VMA_NULL);
    9846  VMA_VALIDATE(leftChild->offset == curr->offset);
    9847  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    9848  {
    9849  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    9850  }
    9851  const Node* const rightChild = leftChild->buddy;
    9852  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    9853  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    9854  {
    9855  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    9856  }
    9857  }
    9858  break;
    9859  default:
    9860  return false;
    9861  }
    9862 
    9863  return true;
    9864 }
    9865 
    9866 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9867 {
    9868  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9869  uint32_t level = 0;
    9870  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9871  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9872  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9873  {
    9874  ++level;
    9875  currLevelNodeSize = nextLevelNodeSize;
    9876  nextLevelNodeSize = currLevelNodeSize >> 1;
    9877  }
    9878  return level;
    9879 }
    9880 
// Frees the allocation that starts at the given offset: descends from the
// root to locate the owning node, converts it back into a free node, then
// repeatedly merges it with its buddy while the buddy is also free.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            // Target lies in the left half.
            node = node->split.leftChild;
        }
        else
        {
            // Target lies in the right half.
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    // NOTE(review): despite this assert permitting alloc == VK_NULL_HANDLE,
    // alloc is dereferenced below via GetSize() - verify that callers always
    // pass a valid allocation handle.
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    // (node itself was never on a free list, so only its buddy is removed.)
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
    9931 
    9932 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9933 {
    9934  switch(node->type)
    9935  {
    9936  case Node::TYPE_FREE:
    9937  ++outInfo.unusedRangeCount;
    9938  outInfo.unusedBytes += levelNodeSize;
    9939  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9940  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9941  break;
    9942  case Node::TYPE_ALLOCATION:
    9943  {
    9944  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9945  ++outInfo.allocationCount;
    9946  outInfo.usedBytes += allocSize;
    9947  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9948  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9949 
    9950  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9951  if(unusedRangeSize > 0)
    9952  {
    9953  ++outInfo.unusedRangeCount;
    9954  outInfo.unusedBytes += unusedRangeSize;
    9955  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9956  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9957  }
    9958  }
    9959  break;
    9960  case Node::TYPE_SPLIT:
    9961  {
    9962  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9963  const Node* const leftChild = node->split.leftChild;
    9964  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9965  const Node* const rightChild = leftChild->buddy;
    9966  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9967  }
    9968  break;
    9969  default:
    9970  VMA_ASSERT(0);
    9971  }
    9972 }
    9973 
    9974 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9975 {
    9976  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9977 
    9978  // List is empty.
    9979  Node* const frontNode = m_FreeList[level].front;
    9980  if(frontNode == VMA_NULL)
    9981  {
    9982  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9983  node->free.prev = node->free.next = VMA_NULL;
    9984  m_FreeList[level].front = m_FreeList[level].back = node;
    9985  }
    9986  else
    9987  {
    9988  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9989  node->free.prev = VMA_NULL;
    9990  node->free.next = frontNode;
    9991  frontNode->free.prev = node;
    9992  m_FreeList[level].front = node;
    9993  }
    9994 }
    9995 
    9996 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9997 {
    9998  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9999 
    10000  // It is at the front.
    10001  if(node->free.prev == VMA_NULL)
    10002  {
    10003  VMA_ASSERT(m_FreeList[level].front == node);
    10004  m_FreeList[level].front = node->free.next;
    10005  }
    10006  else
    10007  {
    10008  Node* const prevFreeNode = node->free.prev;
    10009  VMA_ASSERT(prevFreeNode->free.next == node);
    10010  prevFreeNode->free.next = node->free.next;
    10011  }
    10012 
    10013  // It is at the back.
    10014  if(node->free.next == VMA_NULL)
    10015  {
    10016  VMA_ASSERT(m_FreeList[level].back == node);
    10017  m_FreeList[level].back = node->free.prev;
    10018  }
    10019  else
    10020  {
    10021  Node* const nextFreeNode = node->free.next;
    10022  VMA_ASSERT(nextFreeNode->free.prev == node);
    10023  nextFreeNode->free.prev = node->free.prev;
    10024  }
    10025 }
    10026 
    10027 #if VMA_STATS_STRING_ENABLED
    10028 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    10029 {
    10030  switch(node->type)
    10031  {
    10032  case Node::TYPE_FREE:
    10033  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    10034  break;
    10035  case Node::TYPE_ALLOCATION:
    10036  {
    10037  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    10038  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10039  if(allocSize < levelNodeSize)
    10040  {
    10041  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    10042  }
    10043  }
    10044  break;
    10045  case Node::TYPE_SPLIT:
    10046  {
    10047  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    10048  const Node* const leftChild = node->split.leftChild;
    10049  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    10050  const Node* const rightChild = leftChild->buddy;
    10051  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    10052  }
    10053  break;
    10054  default:
    10055  VMA_ASSERT(0);
    10056  }
    10057 }
    10058 #endif // #if VMA_STATS_STRING_ENABLED
    10059 
    10060 
    10062 // class VmaDeviceMemoryBlock
    10063 
// Constructs an empty block object. Actual initialization (attaching the
// VkDeviceMemory and creating the metadata) happens later in Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0), // reference counter for Map()/Unmap()
    m_pMappedData(VMA_NULL)
{
}
    10073 
    10074 void VmaDeviceMemoryBlock::Init(
    10075  VmaAllocator hAllocator,
    10076  uint32_t newMemoryTypeIndex,
    10077  VkDeviceMemory newMemory,
    10078  VkDeviceSize newSize,
    10079  uint32_t id,
    10080  uint32_t algorithm)
    10081 {
    10082  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    10083 
    10084  m_MemoryTypeIndex = newMemoryTypeIndex;
    10085  m_Id = id;
    10086  m_hMemory = newMemory;
    10087 
    10088  switch(algorithm)
    10089  {
    10091  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    10092  break;
    10094  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    10095  break;
    10096  default:
    10097  VMA_ASSERT(0);
    10098  // Fall-through.
    10099  case 0:
    10100  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    10101  }
    10102  m_pMetadata->Init(newSize);
    10103 }
    10104 
    10105 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    10106 {
    10107  // This is the most important assert in the entire library.
    10108  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    10109  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    10110 
    10111  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    10112  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    10113  m_hMemory = VK_NULL_HANDLE;
    10114 
    10115  vma_delete(allocator, m_pMetadata);
    10116  m_pMetadata = VMA_NULL;
    10117 }
    10118 
    10119 bool VmaDeviceMemoryBlock::Validate() const
    10120 {
    10121  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    10122  (m_pMetadata->GetSize() != 0));
    10123 
    10124  return m_pMetadata->Validate();
    10125 }
    10126 
    10127 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    10128 {
    10129  void* pData = nullptr;
    10130  VkResult res = Map(hAllocator, 1, &pData);
    10131  if(res != VK_SUCCESS)
    10132  {
    10133  return res;
    10134  }
    10135 
    10136  res = m_pMetadata->CheckCorruption(pData);
    10137 
    10138  Unmap(hAllocator, 1);
    10139 
    10140  return res;
    10141 }
    10142 
// Maps the whole block into host address space, reference-counted:
// count is added to m_MapCount and vkMapMemory is called only on the
// transition from 0. On success, *ppData (if not null) receives the
// pointer to the beginning of the block.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    // Serializes against Unmap/Bind* calls on the same VkDeviceMemory.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped: just bump the reference count and reuse the pointer.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        // First reference: perform the actual vkMapMemory of the whole block.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            // The counter is set only on success, so a failed map leaves the
            // block in the unmapped state.
            m_MapCount = count;
        }
        return result;
    }
}
    10181 
    10182 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    10183 {
    10184  if(count == 0)
    10185  {
    10186  return;
    10187  }
    10188 
    10189  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10190  if(m_MapCount >= count)
    10191  {
    10192  m_MapCount -= count;
    10193  if(m_MapCount == 0)
    10194  {
    10195  m_pMappedData = VMA_NULL;
    10196  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    10197  }
    10198  }
    10199  else
    10200  {
    10201  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    10202  }
    10203 }
    10204 
    10205 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10206 {
    10207  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10208  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10209 
    10210  void* pData;
    10211  VkResult res = Map(hAllocator, 1, &pData);
    10212  if(res != VK_SUCCESS)
    10213  {
    10214  return res;
    10215  }
    10216 
    10217  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10218  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10219 
    10220  Unmap(hAllocator, 1);
    10221 
    10222  return VK_SUCCESS;
    10223 }
    10224 
// Checks the magic pattern in the debug margins around a freed allocation;
// an assert fires if either margin was overwritten. Returns a failure code
// only when the block could not be mapped - corruption itself is reported
// through VMA_ASSERT, not the return value.
VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    // Margin before the allocation; the second check runs only if this one passes.
    if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    }
    else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    }

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
    10250 
    10251 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10252  const VmaAllocator hAllocator,
    10253  const VmaAllocation hAllocation,
    10254  VkBuffer hBuffer)
    10255 {
    10256  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10257  hAllocation->GetBlock() == this);
    10258  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10259  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10260  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10261  hAllocator->m_hDevice,
    10262  hBuffer,
    10263  m_hMemory,
    10264  hAllocation->GetOffset());
    10265 }
    10266 
    10267 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10268  const VmaAllocator hAllocator,
    10269  const VmaAllocation hAllocation,
    10270  VkImage hImage)
    10271 {
    10272  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10273  hAllocation->GetBlock() == this);
    10274  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10275  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10276  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10277  hAllocator->m_hDevice,
    10278  hImage,
    10279  m_hMemory,
    10280  hAllocation->GetOffset());
    10281 }
    10282 
    10283 static void InitStatInfo(VmaStatInfo& outInfo)
    10284 {
    10285  memset(&outInfo, 0, sizeof(outInfo));
    10286  outInfo.allocationSizeMin = UINT64_MAX;
    10287  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10288 }
    10289 
    10290 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    10291 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    10292 {
    10293  inoutInfo.blockCount += srcInfo.blockCount;
    10294  inoutInfo.allocationCount += srcInfo.allocationCount;
    10295  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    10296  inoutInfo.usedBytes += srcInfo.usedBytes;
    10297  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    10298  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    10299  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    10300  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    10301  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    10302 }
    10303 
    10304 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10305 {
    10306  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10307  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10308  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10309  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10310 }
    10311 
// Creates the pool's underlying block vector from the user's create info.
// createInfo.blockSize == 0 means "let the library choose": preferredBlockSize
// is used and the block size is not treated as explicit.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // The granularity flag collapses bufferImageGranularity to 1 (i.e. ignored).
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10330 
    10331 VmaPool_T::~VmaPool_T()
    10332 {
    10333 }
    10334 
    10335 #if VMA_STATS_STRING_ENABLED
    10336 
    10337 #endif // #if VMA_STATS_STRING_ENABLED
    10338 
// Constructs an empty block vector; no VkDeviceMemory is allocated here
// (see CreateMinBlocks / Allocate for that).
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    // Block pointers are stored in a vector using the allocator's CPU callbacks.
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10366 
    10367 VmaBlockVector::~VmaBlockVector()
    10368 {
    10369  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10370 
    10371  for(size_t i = m_Blocks.size(); i--; )
    10372  {
    10373  m_Blocks[i]->Destroy(m_hAllocator);
    10374  vma_delete(m_hAllocator, m_Blocks[i]);
    10375  }
    10376 }
    10377 
    10378 VkResult VmaBlockVector::CreateMinBlocks()
    10379 {
    10380  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10381  {
    10382  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10383  if(res != VK_SUCCESS)
    10384  {
    10385  return res;
    10386  }
    10387  }
    10388  return VK_SUCCESS;
    10389 }
    10390 
    10391 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10392 {
    10393  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10394 
    10395  const size_t blockCount = m_Blocks.size();
    10396 
    10397  pStats->size = 0;
    10398  pStats->unusedSize = 0;
    10399  pStats->allocationCount = 0;
    10400  pStats->unusedRangeCount = 0;
    10401  pStats->unusedRangeSizeMax = 0;
    10402  pStats->blockCount = blockCount;
    10403 
    10404  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10405  {
    10406  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10407  VMA_ASSERT(pBlock);
    10408  VMA_HEAVY_ASSERT(pBlock->Validate());
    10409  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10410  }
    10411 }
    10412 
    10413 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10414 {
    10415  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10416  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10417  (VMA_DEBUG_MARGIN > 0) &&
    10418  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10419 }
    10420 
    10421 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10422 
    10423 VkResult VmaBlockVector::Allocate(
    10424  VmaPool hCurrentPool,
    10425  uint32_t currentFrameIndex,
    10426  VkDeviceSize size,
    10427  VkDeviceSize alignment,
    10428  const VmaAllocationCreateInfo& createInfo,
    10429  VmaSuballocationType suballocType,
    10430  VmaAllocation* pAllocation)
    10431 {
    10432  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10433  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10434  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10435  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10436  const bool canCreateNewBlock =
    10437  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10438  (m_Blocks.size() < m_MaxBlockCount);
    10439  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10440 
    10441  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10442  // Which in turn is available only when maxBlockCount = 1.
    10443  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10444  {
    10445  canMakeOtherLost = false;
    10446  }
    10447 
    10448  // Upper address can only be used with linear allocator and within single memory block.
    10449  if(isUpperAddress &&
    10450  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10451  {
    10452  return VK_ERROR_FEATURE_NOT_PRESENT;
    10453  }
    10454 
    10455  // Validate strategy.
    10456  switch(strategy)
    10457  {
    10458  case 0:
    10460  break;
    10464  break;
    10465  default:
    10466  return VK_ERROR_FEATURE_NOT_PRESENT;
    10467  }
    10468 
    10469  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10470  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10471  {
    10472  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10473  }
    10474 
    10475  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10476 
    10477  /*
    10478  Under certain condition, this whole section can be skipped for optimization, so
    10479  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10480  e.g. for custom pools with linear algorithm.
    10481  */
    10482  if(!canMakeOtherLost || canCreateNewBlock)
    10483  {
    10484  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10485  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10487 
    10488  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10489  {
    10490  // Use only last block.
    10491  if(!m_Blocks.empty())
    10492  {
    10493  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10494  VMA_ASSERT(pCurrBlock);
    10495  VkResult res = AllocateFromBlock(
    10496  pCurrBlock,
    10497  hCurrentPool,
    10498  currentFrameIndex,
    10499  size,
    10500  alignment,
    10501  allocFlagsCopy,
    10502  createInfo.pUserData,
    10503  suballocType,
    10504  strategy,
    10505  pAllocation);
    10506  if(res == VK_SUCCESS)
    10507  {
    10508  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10509  return VK_SUCCESS;
    10510  }
    10511  }
    10512  }
    10513  else
    10514  {
    10516  {
    10517  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10518  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10519  {
    10520  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10521  VMA_ASSERT(pCurrBlock);
    10522  VkResult res = AllocateFromBlock(
    10523  pCurrBlock,
    10524  hCurrentPool,
    10525  currentFrameIndex,
    10526  size,
    10527  alignment,
    10528  allocFlagsCopy,
    10529  createInfo.pUserData,
    10530  suballocType,
    10531  strategy,
    10532  pAllocation);
    10533  if(res == VK_SUCCESS)
    10534  {
    10535  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10536  return VK_SUCCESS;
    10537  }
    10538  }
    10539  }
    10540  else // WORST_FIT, FIRST_FIT
    10541  {
    10542  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10543  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10544  {
    10545  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10546  VMA_ASSERT(pCurrBlock);
    10547  VkResult res = AllocateFromBlock(
    10548  pCurrBlock,
    10549  hCurrentPool,
    10550  currentFrameIndex,
    10551  size,
    10552  alignment,
    10553  allocFlagsCopy,
    10554  createInfo.pUserData,
    10555  suballocType,
    10556  strategy,
    10557  pAllocation);
    10558  if(res == VK_SUCCESS)
    10559  {
    10560  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10561  return VK_SUCCESS;
    10562  }
    10563  }
    10564  }
    10565  }
    10566 
    10567  // 2. Try to create new block.
    10568  if(canCreateNewBlock)
    10569  {
    10570  // Calculate optimal size for new block.
    10571  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10572  uint32_t newBlockSizeShift = 0;
    10573  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10574 
    10575  if(!m_ExplicitBlockSize)
    10576  {
    10577  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10578  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10579  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10580  {
    10581  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10582  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10583  {
    10584  newBlockSize = smallerNewBlockSize;
    10585  ++newBlockSizeShift;
    10586  }
    10587  else
    10588  {
    10589  break;
    10590  }
    10591  }
    10592  }
    10593 
    10594  size_t newBlockIndex = 0;
    10595  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10596  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10597  if(!m_ExplicitBlockSize)
    10598  {
    10599  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10600  {
    10601  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10602  if(smallerNewBlockSize >= size)
    10603  {
    10604  newBlockSize = smallerNewBlockSize;
    10605  ++newBlockSizeShift;
    10606  res = CreateBlock(newBlockSize, &newBlockIndex);
    10607  }
    10608  else
    10609  {
    10610  break;
    10611  }
    10612  }
    10613  }
    10614 
    10615  if(res == VK_SUCCESS)
    10616  {
    10617  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10618  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10619 
    10620  res = AllocateFromBlock(
    10621  pBlock,
    10622  hCurrentPool,
    10623  currentFrameIndex,
    10624  size,
    10625  alignment,
    10626  allocFlagsCopy,
    10627  createInfo.pUserData,
    10628  suballocType,
    10629  strategy,
    10630  pAllocation);
    10631  if(res == VK_SUCCESS)
    10632  {
    10633  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10634  return VK_SUCCESS;
    10635  }
    10636  else
    10637  {
    10638  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10639  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10640  }
    10641  }
    10642  }
    10643  }
    10644 
    10645  // 3. Try to allocate from existing blocks with making other allocations lost.
    10646  if(canMakeOtherLost)
    10647  {
    10648  uint32_t tryIndex = 0;
    10649  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10650  {
    10651  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10652  VmaAllocationRequest bestRequest = {};
    10653  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10654 
    10655  // 1. Search existing allocations.
    10657  {
    10658  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10659  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10660  {
    10661  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10662  VMA_ASSERT(pCurrBlock);
    10663  VmaAllocationRequest currRequest = {};
    10664  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10665  currentFrameIndex,
    10666  m_FrameInUseCount,
    10667  m_BufferImageGranularity,
    10668  size,
    10669  alignment,
    10670  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10671  suballocType,
    10672  canMakeOtherLost,
    10673  strategy,
    10674  &currRequest))
    10675  {
    10676  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10677  if(pBestRequestBlock == VMA_NULL ||
    10678  currRequestCost < bestRequestCost)
    10679  {
    10680  pBestRequestBlock = pCurrBlock;
    10681  bestRequest = currRequest;
    10682  bestRequestCost = currRequestCost;
    10683 
    10684  if(bestRequestCost == 0)
    10685  {
    10686  break;
    10687  }
    10688  }
    10689  }
    10690  }
    10691  }
    10692  else // WORST_FIT, FIRST_FIT
    10693  {
    10694  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10695  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10696  {
    10697  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10698  VMA_ASSERT(pCurrBlock);
    10699  VmaAllocationRequest currRequest = {};
    10700  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10701  currentFrameIndex,
    10702  m_FrameInUseCount,
    10703  m_BufferImageGranularity,
    10704  size,
    10705  alignment,
    10706  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10707  suballocType,
    10708  canMakeOtherLost,
    10709  strategy,
    10710  &currRequest))
    10711  {
    10712  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10713  if(pBestRequestBlock == VMA_NULL ||
    10714  currRequestCost < bestRequestCost ||
    10716  {
    10717  pBestRequestBlock = pCurrBlock;
    10718  bestRequest = currRequest;
    10719  bestRequestCost = currRequestCost;
    10720 
    10721  if(bestRequestCost == 0 ||
    10723  {
    10724  break;
    10725  }
    10726  }
    10727  }
    10728  }
    10729  }
    10730 
    10731  if(pBestRequestBlock != VMA_NULL)
    10732  {
    10733  if(mapped)
    10734  {
    10735  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10736  if(res != VK_SUCCESS)
    10737  {
    10738  return res;
    10739  }
    10740  }
    10741 
    10742  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10743  currentFrameIndex,
    10744  m_FrameInUseCount,
    10745  &bestRequest))
    10746  {
    10747  // We no longer have an empty Allocation.
    10748  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10749  {
    10750  m_HasEmptyBlock = false;
    10751  }
    10752  // Allocate from this pBlock.
    10753  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10754  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10755  (*pAllocation)->InitBlockAllocation(
    10756  hCurrentPool,
    10757  pBestRequestBlock,
    10758  bestRequest.offset,
    10759  alignment,
    10760  size,
    10761  suballocType,
    10762  mapped,
    10763  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10764  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10765  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10766  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10767  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10768  {
    10769  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10770  }
    10771  if(IsCorruptionDetectionEnabled())
    10772  {
    10773  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10774  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10775  }
    10776  return VK_SUCCESS;
    10777  }
    10778  // else: Some allocations must have been touched while we are here. Next try.
    10779  }
    10780  else
    10781  {
    10782  // Could not find place in any of the blocks - break outer loop.
    10783  break;
    10784  }
    10785  }
    10786  /* Maximum number of tries exceeded - a very unlike event when many other
    10787  threads are simultaneously touching allocations making it impossible to make
    10788  lost at the same time as we try to allocate. */
    10789  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10790  {
    10791  return VK_ERROR_TOO_MANY_OBJECTS;
    10792  }
    10793  }
    10794 
    10795  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10796 }
    10797 
    10798 void VmaBlockVector::Free(
    10799  VmaAllocation hAllocation)
    10800 {
    10801  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10802 
    10803  // Scope for lock.
    10804  {
    10805  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10806 
    10807  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10808 
    10809  if(IsCorruptionDetectionEnabled())
    10810  {
    10811  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10812  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10813  }
    10814 
    10815  if(hAllocation->IsPersistentMap())
    10816  {
    10817  pBlock->Unmap(m_hAllocator, 1);
    10818  }
    10819 
    10820  pBlock->m_pMetadata->Free(hAllocation);
    10821  VMA_HEAVY_ASSERT(pBlock->Validate());
    10822 
    10823  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10824 
    10825  // pBlock became empty after this deallocation.
    10826  if(pBlock->m_pMetadata->IsEmpty())
    10827  {
    10828  // Already has empty Allocation. We don't want to have two, so delete this one.
    10829  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10830  {
    10831  pBlockToDelete = pBlock;
    10832  Remove(pBlock);
    10833  }
    10834  // We now have first empty block.
    10835  else
    10836  {
    10837  m_HasEmptyBlock = true;
    10838  }
    10839  }
    10840  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10841  // (This is optional, heuristics.)
    10842  else if(m_HasEmptyBlock)
    10843  {
    10844  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10845  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10846  {
    10847  pBlockToDelete = pLastBlock;
    10848  m_Blocks.pop_back();
    10849  m_HasEmptyBlock = false;
    10850  }
    10851  }
    10852 
    10853  IncrementallySortBlocks();
    10854  }
    10855 
    10856  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10857  // lock, for performance reason.
    10858  if(pBlockToDelete != VMA_NULL)
    10859  {
    10860  VMA_DEBUG_LOG(" Deleted empty allocation");
    10861  pBlockToDelete->Destroy(m_hAllocator);
    10862  vma_delete(m_hAllocator, pBlockToDelete);
    10863  }
    10864 }
    10865 
    10866 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10867 {
    10868  VkDeviceSize result = 0;
    10869  for(size_t i = m_Blocks.size(); i--; )
    10870  {
    10871  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10872  if(result >= m_PreferredBlockSize)
    10873  {
    10874  break;
    10875  }
    10876  }
    10877  return result;
    10878 }
    10879 
    10880 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10881 {
    10882  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10883  {
    10884  if(m_Blocks[blockIndex] == pBlock)
    10885  {
    10886  VmaVectorRemove(m_Blocks, blockIndex);
    10887  return;
    10888  }
    10889  }
    10890  VMA_ASSERT(0);
    10891 }
    10892 
    10893 void VmaBlockVector::IncrementallySortBlocks()
    10894 {
    10895  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10896  {
    10897  // Bubble sort only until first swap.
    10898  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10899  {
    10900  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10901  {
    10902  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10903  return;
    10904  }
    10905  }
    10906  }
    10907 }
    10908 
// Attempts a single allocation of `size` bytes (with `alignment`) from
// `pBlock`, never making other allocations lost. On success fills
// *pAllocation and returns VK_SUCCESS; returns VK_ERROR_OUT_OF_DEVICE_MEMORY
// if the block has no suitable space, or the Map() error if persistent
// mapping was requested and failed.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // "Can make other lost" requests are handled by the caller, not here.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Map first: if mapping fails we return before touching block metadata.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        // Commit the request: create the allocation object, record it in the
        // block metadata, then initialize its bookkeeping fields.
        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Debug feature: fill freshly allocated memory with a known pattern.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        // Debug feature: write guard values around the allocation so later
        // corruption checks can detect overruns.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10983 
    10984 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10985 {
    10986  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10987  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10988  allocInfo.allocationSize = blockSize;
    10989  VkDeviceMemory mem = VK_NULL_HANDLE;
    10990  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10991  if(res < 0)
    10992  {
    10993  return res;
    10994  }
    10995 
    10996  // New VkDeviceMemory successfully created.
    10997 
    10998  // Create new Allocation for it.
    10999  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    11000  pBlock->Init(
    11001  m_hAllocator,
    11002  m_MemoryTypeIndex,
    11003  mem,
    11004  allocInfo.allocationSize,
    11005  m_NextBlockId++,
    11006  m_Algorithm);
    11007 
    11008  m_Blocks.push_back(pBlock);
    11009  if(pNewBlockIndex != VMA_NULL)
    11010  {
    11011  *pNewBlockIndex = m_Blocks.size() - 1;
    11012  }
    11013 
    11014  return VK_SUCCESS;
    11015 }
    11016 
    11017 #if VMA_STATS_STRING_ENABLED
    11018 
    11019 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    11020 {
    11021  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11022 
    11023  json.BeginObject();
    11024 
    11025  if(m_IsCustomPool)
    11026  {
    11027  json.WriteString("MemoryTypeIndex");
    11028  json.WriteNumber(m_MemoryTypeIndex);
    11029 
    11030  json.WriteString("BlockSize");
    11031  json.WriteNumber(m_PreferredBlockSize);
    11032 
    11033  json.WriteString("BlockCount");
    11034  json.BeginObject(true);
    11035  if(m_MinBlockCount > 0)
    11036  {
    11037  json.WriteString("Min");
    11038  json.WriteNumber((uint64_t)m_MinBlockCount);
    11039  }
    11040  if(m_MaxBlockCount < SIZE_MAX)
    11041  {
    11042  json.WriteString("Max");
    11043  json.WriteNumber((uint64_t)m_MaxBlockCount);
    11044  }
    11045  json.WriteString("Cur");
    11046  json.WriteNumber((uint64_t)m_Blocks.size());
    11047  json.EndObject();
    11048 
    11049  if(m_FrameInUseCount > 0)
    11050  {
    11051  json.WriteString("FrameInUseCount");
    11052  json.WriteNumber(m_FrameInUseCount);
    11053  }
    11054 
    11055  if(m_Algorithm != 0)
    11056  {
    11057  json.WriteString("Algorithm");
    11058  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
    11059  }
    11060  }
    11061  else
    11062  {
    11063  json.WriteString("PreferredBlockSize");
    11064  json.WriteNumber(m_PreferredBlockSize);
    11065  }
    11066 
    11067  json.WriteString("Blocks");
    11068  json.BeginObject();
    11069  for(size_t i = 0; i < m_Blocks.size(); ++i)
    11070  {
    11071  json.BeginString();
    11072  json.ContinueString(m_Blocks[i]->GetId());
    11073  json.EndString();
    11074 
    11075  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    11076  }
    11077  json.EndObject();
    11078 
    11079  json.EndObject();
    11080 }
    11081 
    11082 #endif // #if VMA_STATS_STRING_ENABLED
    11083 
    11084 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    11085  VmaAllocator hAllocator,
    11086  uint32_t currentFrameIndex)
    11087 {
    11088  if(m_pDefragmentator == VMA_NULL)
    11089  {
    11090  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    11091  hAllocator,
    11092  this,
    11093  currentFrameIndex);
    11094  }
    11095 
    11096  return m_pDefragmentator;
    11097 }
    11098 
// Runs defragmentation for this block vector, if a defragmentator was
// created. maxBytesToMove / maxAllocationsToMove are in-out budgets: the
// amounts consumed here are subtracted so the remaining budget carries over
// to other block vectors. Afterwards frees blocks that became empty (keeping
// at least m_MinBlockCount) and records results in *pDefragmentationStats.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        // The defragmentator must not have exceeded the budgets it was given.
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Consume the budget for subsequent block vectors.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Iterate backwards so VmaVectorRemove does not shift
    // indices not yet visited.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Respect the minimum block count; surplus empty blocks are destroyed.
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Kept alive to honor m_MinBlockCount; remember it is empty.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    11155 
    11156 void VmaBlockVector::DestroyDefragmentator()
    11157 {
    11158  if(m_pDefragmentator != VMA_NULL)
    11159  {
    11160  vma_delete(m_hAllocator, m_pDefragmentator);
    11161  m_pDefragmentator = VMA_NULL;
    11162  }
    11163 }
    11164 
    11165 void VmaBlockVector::MakePoolAllocationsLost(
    11166  uint32_t currentFrameIndex,
    11167  size_t* pLostAllocationCount)
    11168 {
    11169  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11170  size_t lostAllocationCount = 0;
    11171  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11172  {
    11173  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11174  VMA_ASSERT(pBlock);
    11175  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    11176  }
    11177  if(pLostAllocationCount != VMA_NULL)
    11178  {
    11179  *pLostAllocationCount = lostAllocationCount;
    11180  }
    11181 }
    11182 
    11183 VkResult VmaBlockVector::CheckCorruption()
    11184 {
    11185  if(!IsCorruptionDetectionEnabled())
    11186  {
    11187  return VK_ERROR_FEATURE_NOT_PRESENT;
    11188  }
    11189 
    11190  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11191  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11192  {
    11193  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11194  VMA_ASSERT(pBlock);
    11195  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    11196  if(res != VK_SUCCESS)
    11197  {
    11198  return res;
    11199  }
    11200  }
    11201  return VK_SUCCESS;
    11202 }
    11203 
    11204 void VmaBlockVector::AddStats(VmaStats* pStats)
    11205 {
    11206  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11207  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11208 
    11209  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11210 
    11211  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11212  {
    11213  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11214  VMA_ASSERT(pBlock);
    11215  VMA_HEAVY_ASSERT(pBlock->Validate());
    11216  VmaStatInfo allocationStatInfo;
    11217  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11218  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11219  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11220  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11221  }
    11222 }
    11223 
    11225 // VmaDefragmentator members definition
    11226 
// Constructs a defragmentator bound to a single block vector. Move counters
// start at zero; the allocation and block lists use the allocator's
// user-supplied allocation callbacks.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Defragmentation is only supported for the default algorithm (0),
    // not for linear or other custom-pool algorithms.
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11241 
    11242 VmaDefragmentator::~VmaDefragmentator()
    11243 {
    11244  for(size_t i = m_Blocks.size(); i--; )
    11245  {
    11246  vma_delete(m_hAllocator, m_Blocks[i]);
    11247  }
    11248 }
    11249 
    11250 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11251 {
    11252  AllocationInfo allocInfo;
    11253  allocInfo.m_hAllocation = hAlloc;
    11254  allocInfo.m_pChanged = pChanged;
    11255  m_Allocations.push_back(allocInfo);
    11256 }
    11257 
    11258 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11259 {
    11260  // It has already been mapped for defragmentation.
    11261  if(m_pMappedDataForDefragmentation)
    11262  {
    11263  *ppMappedData = m_pMappedDataForDefragmentation;
    11264  return VK_SUCCESS;
    11265  }
    11266 
    11267  // It is originally mapped.
    11268  if(m_pBlock->GetMappedData())
    11269  {
    11270  *ppMappedData = m_pBlock->GetMappedData();
    11271  return VK_SUCCESS;
    11272  }
    11273 
    11274  // Map on first usage.
    11275  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11276  *ppMappedData = m_pMappedDataForDefragmentation;
    11277  return res;
    11278 }
    11279 
    11280 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11281 {
    11282  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11283  {
    11284  m_pBlock->Unmap(hAllocator, 1);
    11285  }
    11286 }
    11287 
    11288 VkResult VmaDefragmentator::DefragmentRound(
    11289  VkDeviceSize maxBytesToMove,
    11290  uint32_t maxAllocationsToMove)
    11291 {
    11292  if(m_Blocks.empty())
    11293  {
    11294  return VK_SUCCESS;
    11295  }
    11296 
    11297  size_t srcBlockIndex = m_Blocks.size() - 1;
    11298  size_t srcAllocIndex = SIZE_MAX;
    11299  for(;;)
    11300  {
    11301  // 1. Find next allocation to move.
    11302  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    11303  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
    11304  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    11305  {
    11306  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    11307  {
    11308  // Finished: no more allocations to process.
    11309  if(srcBlockIndex == 0)
    11310  {
    11311  return VK_SUCCESS;
    11312  }
    11313  else
    11314  {
    11315  --srcBlockIndex;
    11316  srcAllocIndex = SIZE_MAX;
    11317  }
    11318  }
    11319  else
    11320  {
    11321  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    11322  }
    11323  }
    11324 
    11325  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    11326  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    11327 
    11328  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    11329  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    11330  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    11331  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    11332 
    11333  // 2. Try to find new place for this allocation in preceding or current block.
    11334  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    11335  {
    11336  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    11337  VmaAllocationRequest dstAllocRequest;
    11338  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
    11339  m_CurrentFrameIndex,
    11340  m_pBlockVector->GetFrameInUseCount(),
    11341  m_pBlockVector->GetBufferImageGranularity(),
    11342  size,
    11343  alignment,
    11344  false, // upperAddress
    11345  suballocType,
    11346  false, // canMakeOtherLost
    11348  &dstAllocRequest) &&
    11349  MoveMakesSense(
    11350  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    11351  {
    11352  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    11353 
    11354  // Reached limit on number of allocations or bytes to move.
    11355  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    11356  (m_BytesMoved + size > maxBytesToMove))
    11357  {
    11358  return VK_INCOMPLETE;
    11359  }
    11360 
    11361  void* pDstMappedData = VMA_NULL;
    11362  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    11363  if(res != VK_SUCCESS)
    11364  {
    11365  return res;
    11366  }
    11367 
    11368  void* pSrcMappedData = VMA_NULL;
    11369  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    11370  if(res != VK_SUCCESS)
    11371  {
    11372  return res;
    11373  }
    11374 
    11375  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    11376  memcpy(
    11377  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    11378  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    11379  static_cast<size_t>(size));
    11380 
    11381  if(VMA_DEBUG_MARGIN > 0)
    11382  {
    11383  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
    11384  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
    11385  }
    11386 
    11387  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
    11388  dstAllocRequest,
    11389  suballocType,
    11390  size,
    11391  false, // upperAddress
    11392  allocInfo.m_hAllocation);
    11393  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
    11394 
    11395  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    11396 
    11397  if(allocInfo.m_pChanged != VMA_NULL)
    11398  {
    11399  *allocInfo.m_pChanged = VK_TRUE;
    11400  }
    11401 
    11402  ++m_AllocationsMoved;
    11403  m_BytesMoved += size;
    11404 
    11405  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    11406 
    11407  break;
    11408  }
    11409  }
    11410 
    11411  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
    11412 
    11413  if(srcAllocIndex > 0)
    11414  {
    11415  --srcAllocIndex;
    11416  }
    11417  else
    11418  {
    11419  if(srcBlockIndex > 0)
    11420  {
    11421  --srcBlockIndex;
    11422  srcAllocIndex = SIZE_MAX;
    11423  }
    11424  else
    11425  {
    11426  return VK_SUCCESS;
    11427  }
    11428  }
    11429  }
    11430 }
    11431 
/*
Performs defragmentation of all blocks registered in this defragmentator,
limited by maxBytesToMove / maxAllocationsToMove.

Returns VK_SUCCESS, or the first error returned by a defragmentation round.
Caller is expected to hold VmaBlockVector::m_Mutex (see comment inside).
*/
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value, so the binary search below can locate them.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation's block must be present in m_Blocks.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part). At most 2 rounds; stop early on failure.
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11499 
    11500 bool VmaDefragmentator::MoveMakesSense(
    11501  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11502  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11503 {
    11504  if(dstBlockIndex < srcBlockIndex)
    11505  {
    11506  return true;
    11507  }
    11508  if(dstBlockIndex > srcBlockIndex)
    11509  {
    11510  return false;
    11511  }
    11512  if(dstOffset < srcOffset)
    11513  {
    11514  return true;
    11515  }
    11516  return false;
    11517 }
    11518 
    11520 // VmaRecorder
    11521 
    11522 #if VMA_RECORDING_ENABLED
    11523 
// Constructs an inactive recorder: no file opened yet, timer fields hold
// sentinel values. Init() must be called before any Record* function.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11532 
/*
Opens the recording file and writes the format header.
Returns VK_ERROR_INITIALIZATION_FAILED if the file cannot be opened.
NOTE(review): uses Win32 QueryPerformance* and MSVC fopen_s - this code path
is Windows-only, consistent with VMA_RECORDING_ENABLED documentation.
*/
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Capture timer frequency and start counter; all record timestamps are
    // seconds relative to this point (see GetBasicParams).
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header: file type line, then format version "major,minor".
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,4");

    return VK_SUCCESS;
}
    11554 
    11555 VmaRecorder::~VmaRecorder()
    11556 {
    11557  if(m_File != VMA_NULL)
    11558  {
    11559  fclose(m_File);
    11560  }
    11561 }
    11562 
    11563 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    11564 {
    11565  CallParams callParams;
    11566  GetBasicParams(callParams);
    11567 
    11568  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11569  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11570  Flush();
    11571 }
    11572 
    11573 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    11574 {
    11575  CallParams callParams;
    11576  GetBasicParams(callParams);
    11577 
    11578  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11579  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11580  Flush();
    11581 }
    11582 
// Appends a vmaCreatePool record: pool creation parameters followed by the
// resulting pool handle.
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // size_t block counts are widened to uint64_t to match the %llu conversions.
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    11599 
    11600 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    11601 {
    11602  CallParams callParams;
    11603  GetBasicParams(callParams);
    11604 
    11605  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11606  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    11607  pool);
    11608  Flush();
    11609 }
    11610 
// Appends a vmaAllocateMemory record: memory requirements, allocation create
// parameters, the resulting allocation handle, and the user-data string.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // Formats pUserData either as a string or as a pointer, per createInfo.flags.
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11635 
// Appends a vmaAllocateMemoryForBuffer record. Same layout as
// RecordAllocateMemory plus the two dedicated-allocation hint flags (0/1).
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11664 
// Appends a vmaAllocateMemoryForImage record. Same layout as
// RecordAllocateMemoryForBuffer, differing only in the function name field.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11693 
    11694 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    11695  VmaAllocation allocation)
    11696 {
    11697  CallParams callParams;
    11698  GetBasicParams(callParams);
    11699 
    11700  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11701  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11702  allocation);
    11703  Flush();
    11704 }
    11705 
    11706 void VmaRecorder::RecordResizeAllocation(
    11707  uint32_t frameIndex,
    11708  VmaAllocation allocation,
    11709  VkDeviceSize newSize)
    11710 {
    11711  CallParams callParams;
    11712  GetBasicParams(callParams);
    11713 
    11714  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11715  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11716  allocation, newSize);
    11717  Flush();
    11718 }
    11719 
// Appends a vmaSetAllocationUserData record. The user data is rendered either
// as a string or as a pointer, depending on how the allocation stores it.
void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // Reconstruct the string-vs-pointer flag from the allocation itself,
    // since the original create flags are not available here.
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11736 
    11737 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    11738  VmaAllocation allocation)
    11739 {
    11740  CallParams callParams;
    11741  GetBasicParams(callParams);
    11742 
    11743  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11744  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11745  allocation);
    11746  Flush();
    11747 }
    11748 
    11749 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    11750  VmaAllocation allocation)
    11751 {
    11752  CallParams callParams;
    11753  GetBasicParams(callParams);
    11754 
    11755  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11756  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11757  allocation);
    11758  Flush();
    11759 }
    11760 
    11761 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    11762  VmaAllocation allocation)
    11763 {
    11764  CallParams callParams;
    11765  GetBasicParams(callParams);
    11766 
    11767  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11768  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11769  allocation);
    11770  Flush();
    11771 }
    11772 
    11773 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    11774  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11775 {
    11776  CallParams callParams;
    11777  GetBasicParams(callParams);
    11778 
    11779  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11780  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11781  allocation,
    11782  offset,
    11783  size);
    11784  Flush();
    11785 }
    11786 
    11787 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11788  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11789 {
    11790  CallParams callParams;
    11791  GetBasicParams(callParams);
    11792 
    11793  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11794  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11795  allocation,
    11796  offset,
    11797  size);
    11798  Flush();
    11799 }
    11800 
// Appends a vmaCreateBuffer record: buffer create parameters, allocation
// create parameters, the allocation handle, and the user-data string.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11826 
// Appends a vmaCreateImage record: the full set of image create parameters
// (enums written as their integer values), allocation create parameters,
// the allocation handle, and the user-data string.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11861 
    11862 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11863  VmaAllocation allocation)
    11864 {
    11865  CallParams callParams;
    11866  GetBasicParams(callParams);
    11867 
    11868  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11869  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11870  allocation);
    11871  Flush();
    11872 }
    11873 
    11874 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11875  VmaAllocation allocation)
    11876 {
    11877  CallParams callParams;
    11878  GetBasicParams(callParams);
    11879 
    11880  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11881  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11882  allocation);
    11883  Flush();
    11884 }
    11885 
    11886 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11887  VmaAllocation allocation)
    11888 {
    11889  CallParams callParams;
    11890  GetBasicParams(callParams);
    11891 
    11892  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11893  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11894  allocation);
    11895  Flush();
    11896 }
    11897 
    11898 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11899  VmaAllocation allocation)
    11900 {
    11901  CallParams callParams;
    11902  GetBasicParams(callParams);
    11903 
    11904  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11905  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11906  allocation);
    11907  Flush();
    11908 }
    11909 
    11910 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11911  VmaPool pool)
    11912 {
    11913  CallParams callParams;
    11914  GetBasicParams(callParams);
    11915 
    11916  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11917  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11918  pool);
    11919  Flush();
    11920 }
    11921 
/*
Produces the textual form of pUserData for a CSV record:
- the user-provided string itself when the COPY_STRING flag is set,
- the pointer value formatted as "%p" otherwise,
- an empty string when pUserData is null.
The pointer case writes into the member buffer m_PtrStr, so the resulting
m_Str remains valid for this object's lifetime.
*/
VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
{
    if(pUserData != VMA_NULL)
    {
        if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
        {
            m_Str = (const char*)pUserData;
        }
        else
        {
            // NOTE(review): sprintf_s is MSVC-specific, consistent with the
            // other Windows-only calls in the recorder.
            sprintf_s(m_PtrStr, "%p", pUserData);
            m_Str = m_PtrStr;
        }
    }
    else
    {
        m_Str = "";
    }
}
    11941 
/*
Writes the "Config,Begin" ... "Config,End" section of the recording file:
a snapshot of the physical device, its memory heaps/types, enabled extensions,
and the VMA_* debug macros in effect at record time, so a replayer can detect
environment differences. Called once from VmaAllocator_T::Init, before any
call records; does not take m_FileMutex (no concurrent records exist yet).
*/
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Basic physical-device identity.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that influence allocation layout.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heaps and types.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time debug configuration of this VMA build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11987 
// Fills the per-record fields common to every record line: the calling thread
// id and the time (in seconds, as double) elapsed since Init().
// NOTE(review): Win32-only (GetCurrentThreadId / QueryPerformanceCounter).
void VmaRecorder::GetBasicParams(CallParams& outParams)
{
    outParams.threadId = GetCurrentThreadId();

    LARGE_INTEGER counter;
    QueryPerformanceCounter(&counter);
    // Convert ticks since the start counter captured in Init() to seconds.
    outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
}
    11996 
    11997 void VmaRecorder::Flush()
    11998 {
    11999  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    12000  {
    12001  fflush(m_File);
    12002  }
    12003 }
    12004 
    12005 #endif // #if VMA_RECORDING_ENABLED
    12006 
    12008 // VmaAllocator_T
    12009 
    12010 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    12011  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    12012  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    12013  m_hDevice(pCreateInfo->device),
    12014  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    12015  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    12016  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    12017  m_PreferredLargeHeapBlockSize(0),
    12018  m_PhysicalDevice(pCreateInfo->physicalDevice),
    12019  m_CurrentFrameIndex(0),
    12020  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    12021  m_NextPoolId(0)
    12023  ,m_pRecorder(VMA_NULL)
    12024 #endif
    12025 {
    12026  if(VMA_DEBUG_DETECT_CORRUPTION)
    12027  {
    12028  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    12029  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    12030  }
    12031 
    12032  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    12033 
    12034 #if !(VMA_DEDICATED_ALLOCATION)
    12036  {
    12037  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    12038  }
    12039 #endif
    12040 
    12041  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    12042  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    12043  memset(&m_MemProps, 0, sizeof(m_MemProps));
    12044 
    12045  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    12046  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    12047 
    12048  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12049  {
    12050  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    12051  }
    12052 
    12053  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    12054  {
    12055  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    12056  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    12057  }
    12058 
    12059  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    12060 
    12061  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    12062  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    12063 
    12064  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    12065  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    12066  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    12067  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    12068 
    12069  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    12070  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    12071 
    12072  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    12073  {
    12074  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    12075  {
    12076  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    12077  if(limit != VK_WHOLE_SIZE)
    12078  {
    12079  m_HeapSizeLimit[heapIndex] = limit;
    12080  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    12081  {
    12082  m_MemProps.memoryHeaps[heapIndex].size = limit;
    12083  }
    12084  }
    12085  }
    12086  }
    12087 
    12088  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12089  {
    12090  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    12091 
    12092  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    12093  this,
    12094  memTypeIndex,
    12095  preferredBlockSize,
    12096  0,
    12097  SIZE_MAX,
    12098  GetBufferImageGranularity(),
    12099  pCreateInfo->frameInUseCount,
    12100  false, // isCustomPool
    12101  false, // explicitBlockSize
    12102  false); // linearAlgorithm
    12103  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    12104  // becase minBlockCount is 0.
    12105  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    12106 
    12107  }
    12108 }
    12109 
/*
Second-phase initialization: sets up call recording when requested via
pCreateInfo->pRecordSettings. Returns VK_SUCCESS, the recorder's Init()
error, or VK_ERROR_FEATURE_NOT_PRESENT when recording was requested but
VMA_RECORDING_ENABLED is not compiled in.
*/
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            return res;
        }
        // Write the environment snapshot, then the initial vmaCreateAllocator record.
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_UseKhrDedicatedAllocation);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

    return res;
}
    12137 
    12138 VmaAllocator_T::~VmaAllocator_T()
    12139 {
    12140 #if VMA_RECORDING_ENABLED
    12141  if(m_pRecorder != VMA_NULL)
    12142  {
    12143  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    12144  vma_delete(this, m_pRecorder);
    12145  }
    12146 #endif
    12147 
    12148  VMA_ASSERT(m_Pools.empty());
    12149 
    12150  for(size_t i = GetMemoryTypeCount(); i--; )
    12151  {
    12152  vma_delete(this, m_pDedicatedAllocations[i]);
    12153  vma_delete(this, m_pBlockVectors[i]);
    12154  }
    12155 }
    12156 
    12157 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    12158 {
    12159 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    12160  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    12161  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    12162  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    12163  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    12164  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    12165  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    12166  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    12167  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    12168  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    12169  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    12170  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    12171  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    12172  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    12173  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    12174  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    12175  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    12176 #if VMA_DEDICATED_ALLOCATION
    12177  if(m_UseKhrDedicatedAllocation)
    12178  {
    12179  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
    12180  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
    12181  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
    12182  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    12183  }
    12184 #endif // #if VMA_DEDICATED_ALLOCATION
    12185 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    12186 
    12187 #define VMA_COPY_IF_NOT_NULL(funcName) \
    12188  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    12189 
    12190  if(pVulkanFunctions != VMA_NULL)
    12191  {
    12192  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    12193  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    12194  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    12195  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    12196  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    12197  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    12198  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    12199  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    12200  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    12201  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    12202  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    12203  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    12204  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    12205  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    12206  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    12207  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    12208 #if VMA_DEDICATED_ALLOCATION
    12209  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    12210  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    12211 #endif
    12212  }
    12213 
    12214 #undef VMA_COPY_IF_NOT_NULL
    12215 
    12216  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    12217  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    12218  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    12219  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    12220  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    12221  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    12222  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    12223  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    12224  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    12225  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    12226  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    12227  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    12228  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    12229  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    12230  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    12231  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    12232  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    12233  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    12234 #if VMA_DEDICATED_ALLOCATION
    12235  if(m_UseKhrDedicatedAllocation)
    12236  {
    12237  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    12238  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    12239  }
    12240 #endif
    12241 }
    12242 
    12243 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12244 {
    12245  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12246  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12247  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12248  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12249 }
    12250 
    12251 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12252  VkDeviceSize size,
    12253  VkDeviceSize alignment,
    12254  bool dedicatedAllocation,
    12255  VkBuffer dedicatedBuffer,
    12256  VkImage dedicatedImage,
    12257  const VmaAllocationCreateInfo& createInfo,
    12258  uint32_t memTypeIndex,
    12259  VmaSuballocationType suballocType,
    12260  VmaAllocation* pAllocation)
    12261 {
    12262  VMA_ASSERT(pAllocation != VMA_NULL);
    12263  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12264 
    12265  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12266 
    12267  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12268  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12269  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12270  {
    12271  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12272  }
    12273 
    12274  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12275  VMA_ASSERT(blockVector);
    12276 
    12277  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12278  bool preferDedicatedMemory =
    12279  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12280  dedicatedAllocation ||
    12281  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12282  size > preferredBlockSize / 2;
    12283 
    12284  if(preferDedicatedMemory &&
    12285  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12286  finalCreateInfo.pool == VK_NULL_HANDLE)
    12287  {
    12289  }
    12290 
    12291  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12292  {
    12293  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12294  {
    12295  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12296  }
    12297  else
    12298  {
    12299  return AllocateDedicatedMemory(
    12300  size,
    12301  suballocType,
    12302  memTypeIndex,
    12303  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12304  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12305  finalCreateInfo.pUserData,
    12306  dedicatedBuffer,
    12307  dedicatedImage,
    12308  pAllocation);
    12309  }
    12310  }
    12311  else
    12312  {
    12313  VkResult res = blockVector->Allocate(
    12314  VK_NULL_HANDLE, // hCurrentPool
    12315  m_CurrentFrameIndex.load(),
    12316  size,
    12317  alignment,
    12318  finalCreateInfo,
    12319  suballocType,
    12320  pAllocation);
    12321  if(res == VK_SUCCESS)
    12322  {
    12323  return res;
    12324  }
    12325 
    12326  // 5. Try dedicated memory.
    12327  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12328  {
    12329  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12330  }
    12331  else
    12332  {
    12333  res = AllocateDedicatedMemory(
    12334  size,
    12335  suballocType,
    12336  memTypeIndex,
    12337  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12338  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12339  finalCreateInfo.pUserData,
    12340  dedicatedBuffer,
    12341  dedicatedImage,
    12342  pAllocation);
    12343  if(res == VK_SUCCESS)
    12344  {
    12345  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12346  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12347  return VK_SUCCESS;
    12348  }
    12349  else
    12350  {
    12351  // Everything failed: Return error code.
    12352  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12353  return res;
    12354  }
    12355  }
    12356  }
    12357 }
    12358 
// Allocates one dedicated VkDeviceMemory block (one allocation == one
// VkDeviceMemory), optionally maps it persistently, and registers the result
// in m_pDedicatedAllocations[memTypeIndex]. Cleans up on partial failure.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // With VK_KHR_dedicated_allocation enabled, chain
    // VkMemoryDedicatedAllocateInfoKHR pointing at the buffer or the image the
    // memory is dedicated to (at most one of them may be non-null).
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
        return res;
    }

    // Map persistently if requested (VMA_ALLOCATION_CREATE_MAPPED_BIT path).
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the memory allocated above before returning.
            VMA_DEBUG_LOG(" vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    // Create the bookkeeping object and attach user data to it.
    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        // Debug builds may fill fresh allocations with a known bit pattern.
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12441 
// Queries memory requirements for hBuffer. When VK_KHR_dedicated_allocation
// is in use, also reports whether the buffer requires or prefers its own
// dedicated VkDeviceMemory; otherwise both flags are reported as false.
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension path: chain VkMemoryDedicatedRequirementsKHR to receive
        // the dedicated-allocation hints alongside the requirements.
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        // Core Vulkan 1.0 path: no dedicated-allocation information available.
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
    12473 
// Queries memory requirements for hImage. Mirrors GetBufferMemoryRequirements:
// the VK_KHR_dedicated_allocation path additionally reports requires/prefers
// dedicated-allocation flags; the core path reports both as false.
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension path: chain VkMemoryDedicatedRequirementsKHR to receive
        // the dedicated-allocation hints alongside the requirements.
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        // Core Vulkan 1.0 path: no dedicated-allocation information available.
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
    12505 
    12506 VkResult VmaAllocator_T::AllocateMemory(
    12507  const VkMemoryRequirements& vkMemReq,
    12508  bool requiresDedicatedAllocation,
    12509  bool prefersDedicatedAllocation,
    12510  VkBuffer dedicatedBuffer,
    12511  VkImage dedicatedImage,
    12512  const VmaAllocationCreateInfo& createInfo,
    12513  VmaSuballocationType suballocType,
    12514  VmaAllocation* pAllocation)
    12515 {
    12516  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12517 
    12518  if(vkMemReq.size == 0)
    12519  {
    12520  return VK_ERROR_VALIDATION_FAILED_EXT;
    12521  }
    12522  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12523  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12524  {
    12525  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12526  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12527  }
    12528  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12530  {
    12531  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12532  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12533  }
    12534  if(requiresDedicatedAllocation)
    12535  {
    12536  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12537  {
    12538  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12539  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12540  }
    12541  if(createInfo.pool != VK_NULL_HANDLE)
    12542  {
    12543  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12544  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12545  }
    12546  }
    12547  if((createInfo.pool != VK_NULL_HANDLE) &&
    12548  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12549  {
    12550  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12551  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12552  }
    12553 
    12554  if(createInfo.pool != VK_NULL_HANDLE)
    12555  {
    12556  const VkDeviceSize alignmentForPool = VMA_MAX(
    12557  vkMemReq.alignment,
    12558  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12559  return createInfo.pool->m_BlockVector.Allocate(
    12560  createInfo.pool,
    12561  m_CurrentFrameIndex.load(),
    12562  vkMemReq.size,
    12563  alignmentForPool,
    12564  createInfo,
    12565  suballocType,
    12566  pAllocation);
    12567  }
    12568  else
    12569  {
    12570  // Bit mask of memory Vulkan types acceptable for this allocation.
    12571  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12572  uint32_t memTypeIndex = UINT32_MAX;
    12573  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12574  if(res == VK_SUCCESS)
    12575  {
    12576  VkDeviceSize alignmentForMemType = VMA_MAX(
    12577  vkMemReq.alignment,
    12578  GetMemoryTypeMinAlignment(memTypeIndex));
    12579 
    12580  res = AllocateMemoryOfType(
    12581  vkMemReq.size,
    12582  alignmentForMemType,
    12583  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12584  dedicatedBuffer,
    12585  dedicatedImage,
    12586  createInfo,
    12587  memTypeIndex,
    12588  suballocType,
    12589  pAllocation);
    12590  // Succeeded on first try.
    12591  if(res == VK_SUCCESS)
    12592  {
    12593  return res;
    12594  }
    12595  // Allocation from this memory type failed. Try other compatible memory types.
    12596  else
    12597  {
    12598  for(;;)
    12599  {
    12600  // Remove old memTypeIndex from list of possibilities.
    12601  memoryTypeBits &= ~(1u << memTypeIndex);
    12602  // Find alternative memTypeIndex.
    12603  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12604  if(res == VK_SUCCESS)
    12605  {
    12606  alignmentForMemType = VMA_MAX(
    12607  vkMemReq.alignment,
    12608  GetMemoryTypeMinAlignment(memTypeIndex));
    12609 
    12610  res = AllocateMemoryOfType(
    12611  vkMemReq.size,
    12612  alignmentForMemType,
    12613  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12614  dedicatedBuffer,
    12615  dedicatedImage,
    12616  createInfo,
    12617  memTypeIndex,
    12618  suballocType,
    12619  pAllocation);
    12620  // Allocation from this alternative memory type succeeded.
    12621  if(res == VK_SUCCESS)
    12622  {
    12623  return res;
    12624  }
    12625  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12626  }
    12627  // No other matching memory type index could be found.
    12628  else
    12629  {
    12630  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12631  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12632  }
    12633  }
    12634  }
    12635  }
    12636  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12637  else
    12638  return res;
    12639  }
    12640 }
    12641 
// Frees an allocation created by this allocator. If the allocation has
// already become lost, only its bookkeeping object is destroyed — the
// underlying memory was reclaimed when the allocation was lost.
void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
{
    VMA_ASSERT(allocation);

    // TouchAllocation returns false for a lost allocation, in which case
    // there is no backing memory left to release.
    if(TouchAllocation(allocation))
    {
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            // Debug builds overwrite freed memory with a pattern to help
            // surface use-after-free bugs.
            FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
        }

        switch(allocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // Return the suballocation to the block vector it came from:
                // a custom pool's vector, or the default one for its memory type.
                VmaBlockVector* pBlockVector = VMA_NULL;
                VmaPool hPool = allocation->GetPool();
                if(hPool != VK_NULL_HANDLE)
                {
                    pBlockVector = &hPool->m_BlockVector;
                }
                else
                {
                    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                    pBlockVector = m_pBlockVectors[memTypeIndex];
                }
                pBlockVector->Free(allocation);
            }
            break;
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            FreeDedicatedMemory(allocation);
            break;
        default:
            VMA_ASSERT(0);
        }
    }

    // Release user data (SetUserData may free a copied string) before
    // destroying the allocation object itself.
    allocation->SetUserData(this, VMA_NULL);
    vma_delete(this, allocation);
}
    12682 
    12683 VkResult VmaAllocator_T::ResizeAllocation(
    12684  const VmaAllocation alloc,
    12685  VkDeviceSize newSize)
    12686 {
    12687  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    12688  {
    12689  return VK_ERROR_VALIDATION_FAILED_EXT;
    12690  }
    12691  if(newSize == alloc->GetSize())
    12692  {
    12693  return VK_SUCCESS;
    12694  }
    12695 
    12696  switch(alloc->GetType())
    12697  {
    12698  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12699  return VK_ERROR_FEATURE_NOT_PRESENT;
    12700  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12701  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    12702  {
    12703  alloc->ChangeSize(newSize);
    12704  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    12705  return VK_SUCCESS;
    12706  }
    12707  else
    12708  {
    12709  return VK_ERROR_OUT_OF_POOL_MEMORY;
    12710  }
    12711  default:
    12712  VMA_ASSERT(0);
    12713  return VK_ERROR_VALIDATION_FAILED_EXT;
    12714  }
    12715 }
    12716 
// Aggregates statistics over everything the allocator owns: default
// per-memory-type block vectors, custom pools, and dedicated allocations.
// Results are accumulated into pStats and then postprocessed (avg/min/max).
void VmaAllocator_T::CalculateStats(VmaStats* pStats)
{
    // Initialize.
    InitStatInfo(pStats->total);
    for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
        InitStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        InitStatInfo(pStats->memoryHeap[i]);

    // Process default pools.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
        VMA_ASSERT(pBlockVector);
        pBlockVector->AddStats(pStats);
    }

    // Process custom pools. The pools mutex is held only for this scope.
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
        {
            m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
        }
    }

    // Process dedicated allocations. Each memory type has its own list,
    // guarded by its own mutex.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
        {
            VmaStatInfo allocationStatInfo;
            (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
            VmaAddStatInfo(pStats->total, allocationStatInfo);
            VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
            VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
        }
    }

    // Postprocess.
    VmaPostprocessCalcStatInfo(pStats->total);
    for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
}
    12767 
// PCI vendor ID of AMD: 4098 == 0x1002.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12769 
// Compacts the given allocations, moving them to reduce fragmentation within
// the limits in pDefragmentationInfo. Only block allocations in memory types
// that are both HOST_VISIBLE and HOST_COHERENT, and that are not lost, are
// eligible. Returns the first non-success result, after destroying all
// temporary defragmentator objects.
VkResult VmaAllocator_T::Defragment(
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo* pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    // Optional outputs start zeroed.
    if(pAllocationsChanged != VMA_NULL)
    {
        memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    }
    if(pDefragmentationStats != VMA_NULL)
    {
        memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    }

    const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();

    // The pools mutex is held for the entire operation.
    VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);

    const size_t poolCount = m_Pools.size();

    // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
        // DedicatedAlloc cannot be defragmented.
        const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
            ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVector* pAllocBlockVector = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with linear or buddy algorithm are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    pAllocBlockVector = &hAllocPool->m_BlockVector;
                }
            }
            // This allocation belongs to general pool.
            else
            {
                pAllocBlockVector = m_pBlockVectors[memTypeIndex];
            }

            if(pAllocBlockVector != VMA_NULL)
            {
                // Lazily create one defragmentator per block vector and
                // register this allocation with it.
                VmaDefragmentator* const pDefragmentator =
                    pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pDefragmentator->AddAllocation(hAlloc, pChanged);
            }
        }
    }

    VkResult result = VK_SUCCESS;

    // ======== Main processing.

    // No explicit limits means "unlimited".
    // NOTE(review): SIZE_MAX is only 32-bit on 32-bit targets while
    // VkDeviceSize is 64-bit; UINT64_MAX (or VK_WHOLE_SIZE) may be the
    // intended value here — confirm.
    VkDeviceSize maxBytesToMove = SIZE_MAX;
    uint32_t maxAllocationsToMove = UINT32_MAX;
    if(pDefragmentationInfo != VMA_NULL)
    {
        maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
        maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = 0;
        (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
        ++memTypeIndex)
    {
        // Only HOST_VISIBLE memory types can be defragmented.
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            result = m_pBlockVectors[memTypeIndex]->Defragment(
                pDefragmentationStats,
                maxBytesToMove,
                maxAllocationsToMove);
        }
    }

    // Process custom pools.
    for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    {
        result = m_Pools[poolIndex]->m_BlockVector.Defragment(
            pDefragmentationStats,
            maxBytesToMove,
            maxAllocationsToMove);
    }

    // ======== Destroy defragmentators.
    // Torn down in reverse order of processing, even if a Defragment call
    // above returned an error.

    // Process custom pools.
    for(size_t poolIndex = poolCount; poolIndex--; )
    {
        m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    {
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
        }
    }

    return result;
}
    12890 
// Fills *pAllocationInfo with the current state of hAllocation. For
// allocations that can become lost this also "touches" the allocation,
// advancing its last-use frame index to the current frame, or reports it as
// lost (memoryType == UINT32_MAX, null deviceMemory) if it already is.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        // Lock-free CAS loop: repeatedly try to advance the last-use frame
        // index to the current frame until we either succeed or observe that
        // the allocation became lost.
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: only its size and user data remain valid.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Touched in the current frame: report full info.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
                // NOTE(review): on failure, CompareExchangeLastUseFrameIndex
                // presumably refreshes localLastUseFrameIndex with the current
                // value (std::atomic compare_exchange semantics) — otherwise
                // this loop could spin forever; confirm in its definition.
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics builds still record the last-use frame for reporting,
        // using the same CAS loop; a non-lost-capable allocation can never
        // be in the lost state here (asserted below).
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12962 
// "Touches" the allocation: advances its last-use frame index to the current
// frame. Returns false if the allocation has become lost, true otherwise.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        // Lock-free CAS loop: try to advance the last-use frame index until
        // success, or until the allocation is observed to be lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics builds still record the last-use frame; a non-lost-capable
        // allocation can never be in the lost state (asserted below).
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // An allocation that cannot become lost is always considered touched.
        return true;
    }
}
    13014 
    13015 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    13016 {
    13017  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    13018 
    13019  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    13020 
    13021  if(newCreateInfo.maxBlockCount == 0)
    13022  {
    13023  newCreateInfo.maxBlockCount = SIZE_MAX;
    13024  }
    13025  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    13026  {
    13027  return VK_ERROR_INITIALIZATION_FAILED;
    13028  }
    13029 
    13030  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    13031 
    13032  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    13033 
    13034  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    13035  if(res != VK_SUCCESS)
    13036  {
    13037  vma_delete(this, *pPool);
    13038  *pPool = VMA_NULL;
    13039  return res;
    13040  }
    13041 
    13042  // Add to m_Pools.
    13043  {
    13044  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13045  (*pPool)->SetId(m_NextPoolId++);
    13046  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    13047  }
    13048 
    13049  return VK_SUCCESS;
    13050 }
    13051 
    13052 void VmaAllocator_T::DestroyPool(VmaPool pool)
    13053 {
    13054  // Remove from m_Pools.
    13055  {
    13056  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13057  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    13058  VMA_ASSERT(success && "Pool not found in Allocator.");
    13059  }
    13060 
    13061  vma_delete(this, pool);
    13062 }
    13063 
    13064 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    13065 {
    13066  pool->m_BlockVector.GetPoolStats(pPoolStats);
    13067 }
    13068 
    13069 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    13070 {
    13071  m_CurrentFrameIndex.store(frameIndex);
    13072 }
    13073 
    13074 void VmaAllocator_T::MakePoolAllocationsLost(
    13075  VmaPool hPool,
    13076  size_t* pLostAllocationCount)
    13077 {
    13078  hPool->m_BlockVector.MakePoolAllocationsLost(
    13079  m_CurrentFrameIndex.load(),
    13080  pLostAllocationCount);
    13081 }
    13082 
    13083 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
    13084 {
    13085  return hPool->m_BlockVector.CheckCorruption();
    13086 }
    13087 
    13088 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    13089 {
    13090  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    13091 
    13092  // Process default pools.
    13093  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13094  {
    13095  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    13096  {
    13097  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    13098  VMA_ASSERT(pBlockVector);
    13099  VkResult localRes = pBlockVector->CheckCorruption();
    13100  switch(localRes)
    13101  {
    13102  case VK_ERROR_FEATURE_NOT_PRESENT:
    13103  break;
    13104  case VK_SUCCESS:
    13105  finalRes = VK_SUCCESS;
    13106  break;
    13107  default:
    13108  return localRes;
    13109  }
    13110  }
    13111  }
    13112 
    13113  // Process custom pools.
    13114  {
    13115  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13116  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    13117  {
    13118  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    13119  {
    13120  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    13121  switch(localRes)
    13122  {
    13123  case VK_ERROR_FEATURE_NOT_PRESENT:
    13124  break;
    13125  case VK_SUCCESS:
    13126  finalRes = VK_SUCCESS;
    13127  break;
    13128  default:
    13129  return localRes;
    13130  }
    13131  }
    13132  }
    13133  }
    13134 
    13135  return finalRes;
    13136 }
    13137 
    13138 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    13139 {
    13140  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    13141  (*pAllocation)->InitLost();
    13142 }
    13143 
    13144 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    13145 {
    13146  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    13147 
    13148  VkResult res;
    13149  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    13150  {
    13151  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    13152  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    13153  {
    13154  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    13155  if(res == VK_SUCCESS)
    13156  {
    13157  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    13158  }
    13159  }
    13160  else
    13161  {
    13162  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    13163  }
    13164  }
    13165  else
    13166  {
    13167  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    13168  }
    13169 
    13170  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    13171  {
    13172  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    13173  }
    13174 
    13175  return res;
    13176 }
    13177 
    13178 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    13179 {
    13180  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    13181  {
    13182  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    13183  }
    13184 
    13185  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    13186 
    13187  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    13188  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    13189  {
    13190  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    13191  m_HeapSizeLimit[heapIndex] += size;
    13192  }
    13193 }
    13194 
    13195 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    13196 {
    13197  if(hAllocation->CanBecomeLost())
    13198  {
    13199  return VK_ERROR_MEMORY_MAP_FAILED;
    13200  }
    13201 
    13202  switch(hAllocation->GetType())
    13203  {
    13204  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13205  {
    13206  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13207  char *pBytes = VMA_NULL;
    13208  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    13209  if(res == VK_SUCCESS)
    13210  {
    13211  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    13212  hAllocation->BlockAllocMap();
    13213  }
    13214  return res;
    13215  }
    13216  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13217  return hAllocation->DedicatedAllocMap(this, ppData);
    13218  default:
    13219  VMA_ASSERT(0);
    13220  return VK_ERROR_MEMORY_MAP_FAILED;
    13221  }
    13222 }
    13223 
    13224 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    13225 {
    13226  switch(hAllocation->GetType())
    13227  {
    13228  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13229  {
    13230  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13231  hAllocation->BlockAllocUnmap();
    13232  pBlock->Unmap(this, 1);
    13233  }
    13234  break;
    13235  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13236  hAllocation->DedicatedAllocUnmap(this);
    13237  break;
    13238  default:
    13239  VMA_ASSERT(0);
    13240  }
    13241 }
    13242 
    13243 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    13244 {
    13245  VkResult res = VK_SUCCESS;
    13246  switch(hAllocation->GetType())
    13247  {
    13248  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13249  res = GetVulkanFunctions().vkBindBufferMemory(
    13250  m_hDevice,
    13251  hBuffer,
    13252  hAllocation->GetMemory(),
    13253  0); //memoryOffset
    13254  break;
    13255  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13256  {
    13257  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13258  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13259  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13260  break;
    13261  }
    13262  default:
    13263  VMA_ASSERT(0);
    13264  }
    13265  return res;
    13266 }
    13267 
    13268 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13269 {
    13270  VkResult res = VK_SUCCESS;
    13271  switch(hAllocation->GetType())
    13272  {
    13273  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13274  res = GetVulkanFunctions().vkBindImageMemory(
    13275  m_hDevice,
    13276  hImage,
    13277  hAllocation->GetMemory(),
    13278  0); //memoryOffset
    13279  break;
    13280  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13281  {
    13282  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13283  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13284  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13285  break;
    13286  }
    13287  default:
    13288  VMA_ASSERT(0);
    13289  }
    13290  return res;
    13291 }
    13292 
    13293 void VmaAllocator_T::FlushOrInvalidateAllocation(
    13294  VmaAllocation hAllocation,
    13295  VkDeviceSize offset, VkDeviceSize size,
    13296  VMA_CACHE_OPERATION op)
    13297 {
    13298  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    13299  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    13300  {
    13301  const VkDeviceSize allocationSize = hAllocation->GetSize();
    13302  VMA_ASSERT(offset <= allocationSize);
    13303 
    13304  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    13305 
    13306  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    13307  memRange.memory = hAllocation->GetMemory();
    13308 
    13309  switch(hAllocation->GetType())
    13310  {
    13311  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13312  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    13313  if(size == VK_WHOLE_SIZE)
    13314  {
    13315  memRange.size = allocationSize - memRange.offset;
    13316  }
    13317  else
    13318  {
    13319  VMA_ASSERT(offset + size <= allocationSize);
    13320  memRange.size = VMA_MIN(
    13321  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
    13322  allocationSize - memRange.offset);
    13323  }
    13324  break;
    13325 
    13326  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13327  {
    13328  // 1. Still within this allocation.
    13329  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    13330  if(size == VK_WHOLE_SIZE)
    13331  {
    13332  size = allocationSize - offset;
    13333  }
    13334  else
    13335  {
    13336  VMA_ASSERT(offset + size <= allocationSize);
    13337  }
    13338  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
    13339 
    13340  // 2. Adjust to whole block.
    13341  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
    13342  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
    13343  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
    13344  memRange.offset += allocationOffset;
    13345  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
    13346 
    13347  break;
    13348  }
    13349 
    13350  default:
    13351  VMA_ASSERT(0);
    13352  }
    13353 
    13354  switch(op)
    13355  {
    13356  case VMA_CACHE_FLUSH:
    13357  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
    13358  break;
    13359  case VMA_CACHE_INVALIDATE:
    13360  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
    13361  break;
    13362  default:
    13363  VMA_ASSERT(0);
    13364  }
    13365  }
    13366  // else: Just ignore this call.
    13367 }
    13368 
    13369 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    13370 {
    13371  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    13372 
    13373  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    13374  {
    13375  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13376  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    13377  VMA_ASSERT(pDedicatedAllocations);
    13378  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    13379  VMA_ASSERT(success);
    13380  }
    13381 
    13382  VkDeviceMemory hMemory = allocation->GetMemory();
    13383 
    13384  /*
    13385  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    13386  before vkFreeMemory.
    13387 
    13388  if(allocation->GetMappedData() != VMA_NULL)
    13389  {
    13390  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    13391  }
    13392  */
    13393 
    13394  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    13395 
    13396  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    13397 }
    13398 
    13399 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13400 {
    13401  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13402  !hAllocation->CanBecomeLost() &&
    13403  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13404  {
    13405  void* pData = VMA_NULL;
    13406  VkResult res = Map(hAllocation, &pData);
    13407  if(res == VK_SUCCESS)
    13408  {
    13409  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13410  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13411  Unmap(hAllocation);
    13412  }
    13413  else
    13414  {
    13415  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13416  }
    13417  }
    13418 }
    13419 
    13420 #if VMA_STATS_STRING_ENABLED
    13421 
    13422 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    13423 {
    13424  bool dedicatedAllocationsStarted = false;
    13425  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13426  {
    13427  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13428  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    13429  VMA_ASSERT(pDedicatedAllocVector);
    13430  if(pDedicatedAllocVector->empty() == false)
    13431  {
    13432  if(dedicatedAllocationsStarted == false)
    13433  {
    13434  dedicatedAllocationsStarted = true;
    13435  json.WriteString("DedicatedAllocations");
    13436  json.BeginObject();
    13437  }
    13438 
    13439  json.BeginString("Type ");
    13440  json.ContinueString(memTypeIndex);
    13441  json.EndString();
    13442 
    13443  json.BeginArray();
    13444 
    13445  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    13446  {
    13447  json.BeginObject(true);
    13448  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    13449  hAlloc->PrintParameters(json);
    13450  json.EndObject();
    13451  }
    13452 
    13453  json.EndArray();
    13454  }
    13455  }
    13456  if(dedicatedAllocationsStarted)
    13457  {
    13458  json.EndObject();
    13459  }
    13460 
    13461  {
    13462  bool allocationsStarted = false;
    13463  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13464  {
    13465  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    13466  {
    13467  if(allocationsStarted == false)
    13468  {
    13469  allocationsStarted = true;
    13470  json.WriteString("DefaultPools");
    13471  json.BeginObject();
    13472  }
    13473 
    13474  json.BeginString("Type ");
    13475  json.ContinueString(memTypeIndex);
    13476  json.EndString();
    13477 
    13478  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    13479  }
    13480  }
    13481  if(allocationsStarted)
    13482  {
    13483  json.EndObject();
    13484  }
    13485  }
    13486 
    13487  // Custom pools
    13488  {
    13489  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13490  const size_t poolCount = m_Pools.size();
    13491  if(poolCount > 0)
    13492  {
    13493  json.WriteString("Pools");
    13494  json.BeginObject();
    13495  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    13496  {
    13497  json.BeginString();
    13498  json.ContinueString(m_Pools[poolIndex]->GetId());
    13499  json.EndString();
    13500 
    13501  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    13502  }
    13503  json.EndObject();
    13504  }
    13505  }
    13506 }
    13507 
    13508 #endif // #if VMA_STATS_STRING_ENABLED
    13509 
    13511 // Public interface
    13512 
    13513 VkResult vmaCreateAllocator(
    13514  const VmaAllocatorCreateInfo* pCreateInfo,
    13515  VmaAllocator* pAllocator)
    13516 {
    13517  VMA_ASSERT(pCreateInfo && pAllocator);
    13518  VMA_DEBUG_LOG("vmaCreateAllocator");
    13519  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    13520  return (*pAllocator)->Init(pCreateInfo);
    13521 }
    13522 
    13523 void vmaDestroyAllocator(
    13524  VmaAllocator allocator)
    13525 {
    13526  if(allocator != VK_NULL_HANDLE)
    13527  {
    13528  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13529  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13530  vma_delete(&allocationCallbacks, allocator);
    13531  }
    13532 }
    13533 
    13535  VmaAllocator allocator,
    13536  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13537 {
    13538  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13539  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13540 }
    13541 
    13543  VmaAllocator allocator,
    13544  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13545 {
    13546  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13547  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13548 }
    13549 
    13551  VmaAllocator allocator,
    13552  uint32_t memoryTypeIndex,
    13553  VkMemoryPropertyFlags* pFlags)
    13554 {
    13555  VMA_ASSERT(allocator && pFlags);
    13556  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13557  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13558 }
    13559 
    13561  VmaAllocator allocator,
    13562  uint32_t frameIndex)
    13563 {
    13564  VMA_ASSERT(allocator);
    13565  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13566 
    13567  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13568 
    13569  allocator->SetCurrentFrameIndex(frameIndex);
    13570 }
    13571 
    13572 void vmaCalculateStats(
    13573  VmaAllocator allocator,
    13574  VmaStats* pStats)
    13575 {
    13576  VMA_ASSERT(allocator && pStats);
    13577  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13578  allocator->CalculateStats(pStats);
    13579 }
    13580 
    13581 #if VMA_STATS_STRING_ENABLED
    13582 
    13583 void vmaBuildStatsString(
    13584  VmaAllocator allocator,
    13585  char** ppStatsString,
    13586  VkBool32 detailedMap)
    13587 {
    13588  VMA_ASSERT(allocator && ppStatsString);
    13589  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13590 
    13591  VmaStringBuilder sb(allocator);
    13592  {
    13593  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    13594  json.BeginObject();
    13595 
    13596  VmaStats stats;
    13597  allocator->CalculateStats(&stats);
    13598 
    13599  json.WriteString("Total");
    13600  VmaPrintStatInfo(json, stats.total);
    13601 
    13602  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    13603  {
    13604  json.BeginString("Heap ");
    13605  json.ContinueString(heapIndex);
    13606  json.EndString();
    13607  json.BeginObject();
    13608 
    13609  json.WriteString("Size");
    13610  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    13611 
    13612  json.WriteString("Flags");
    13613  json.BeginArray(true);
    13614  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    13615  {
    13616  json.WriteString("DEVICE_LOCAL");
    13617  }
    13618  json.EndArray();
    13619 
    13620  if(stats.memoryHeap[heapIndex].blockCount > 0)
    13621  {
    13622  json.WriteString("Stats");
    13623  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    13624  }
    13625 
    13626  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    13627  {
    13628  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    13629  {
    13630  json.BeginString("Type ");
    13631  json.ContinueString(typeIndex);
    13632  json.EndString();
    13633 
    13634  json.BeginObject();
    13635 
    13636  json.WriteString("Flags");
    13637  json.BeginArray(true);
    13638  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    13639  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    13640  {
    13641  json.WriteString("DEVICE_LOCAL");
    13642  }
    13643  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13644  {
    13645  json.WriteString("HOST_VISIBLE");
    13646  }
    13647  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    13648  {
    13649  json.WriteString("HOST_COHERENT");
    13650  }
    13651  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    13652  {
    13653  json.WriteString("HOST_CACHED");
    13654  }
    13655  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    13656  {
    13657  json.WriteString("LAZILY_ALLOCATED");
    13658  }
    13659  json.EndArray();
    13660 
    13661  if(stats.memoryType[typeIndex].blockCount > 0)
    13662  {
    13663  json.WriteString("Stats");
    13664  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    13665  }
    13666 
    13667  json.EndObject();
    13668  }
    13669  }
    13670 
    13671  json.EndObject();
    13672  }
    13673  if(detailedMap == VK_TRUE)
    13674  {
    13675  allocator->PrintDetailedMap(json);
    13676  }
    13677 
    13678  json.EndObject();
    13679  }
    13680 
    13681  const size_t len = sb.GetLength();
    13682  char* const pChars = vma_new_array(allocator, char, len + 1);
    13683  if(len > 0)
    13684  {
    13685  memcpy(pChars, sb.GetData(), len);
    13686  }
    13687  pChars[len] = '\0';
    13688  *ppStatsString = pChars;
    13689 }
    13690 
    13691 void vmaFreeStatsString(
    13692  VmaAllocator allocator,
    13693  char* pStatsString)
    13694 {
    13695  if(pStatsString != VMA_NULL)
    13696  {
    13697  VMA_ASSERT(allocator);
    13698  size_t len = strlen(pStatsString);
    13699  vma_delete_array(allocator, pStatsString, len + 1);
    13700  }
    13701 }
    13702 
    13703 #endif // #if VMA_STATS_STRING_ENABLED
    13704 
    13705 /*
    13706 This function is not protected by any mutex because it just reads immutable data.
    13707 */
    13708 VkResult vmaFindMemoryTypeIndex(
    13709  VmaAllocator allocator,
    13710  uint32_t memoryTypeBits,
    13711  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13712  uint32_t* pMemoryTypeIndex)
    13713 {
    13714  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13715  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13716  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13717 
    13718  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13719  {
    13720  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13721  }
    13722 
    13723  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13724  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13725 
    13726  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13727  if(mapped)
    13728  {
    13729  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13730  }
    13731 
    13732  // Convert usage to requiredFlags and preferredFlags.
    13733  switch(pAllocationCreateInfo->usage)
    13734  {
    13736  break;
    13738  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13739  {
    13740  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13741  }
    13742  break;
    13744  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13745  break;
    13747  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13748  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13749  {
    13750  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13751  }
    13752  break;
    13754  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13755  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13756  break;
    13757  default:
    13758  break;
    13759  }
    13760 
    13761  *pMemoryTypeIndex = UINT32_MAX;
    13762  uint32_t minCost = UINT32_MAX;
    13763  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13764  memTypeIndex < allocator->GetMemoryTypeCount();
    13765  ++memTypeIndex, memTypeBit <<= 1)
    13766  {
    13767  // This memory type is acceptable according to memoryTypeBits bitmask.
    13768  if((memTypeBit & memoryTypeBits) != 0)
    13769  {
    13770  const VkMemoryPropertyFlags currFlags =
    13771  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13772  // This memory type contains requiredFlags.
    13773  if((requiredFlags & ~currFlags) == 0)
    13774  {
    13775  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13776  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13777  // Remember memory type with lowest cost.
    13778  if(currCost < minCost)
    13779  {
    13780  *pMemoryTypeIndex = memTypeIndex;
    13781  if(currCost == 0)
    13782  {
    13783  return VK_SUCCESS;
    13784  }
    13785  minCost = currCost;
    13786  }
    13787  }
    13788  }
    13789  }
    13790  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13791 }
    13792 
    13794  VmaAllocator allocator,
    13795  const VkBufferCreateInfo* pBufferCreateInfo,
    13796  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13797  uint32_t* pMemoryTypeIndex)
    13798 {
    13799  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13800  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13801  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13802  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13803 
    13804  const VkDevice hDev = allocator->m_hDevice;
    13805  VkBuffer hBuffer = VK_NULL_HANDLE;
    13806  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13807  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13808  if(res == VK_SUCCESS)
    13809  {
    13810  VkMemoryRequirements memReq = {};
    13811  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13812  hDev, hBuffer, &memReq);
    13813 
    13814  res = vmaFindMemoryTypeIndex(
    13815  allocator,
    13816  memReq.memoryTypeBits,
    13817  pAllocationCreateInfo,
    13818  pMemoryTypeIndex);
    13819 
    13820  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13821  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13822  }
    13823  return res;
    13824 }
    13825 
    13827  VmaAllocator allocator,
    13828  const VkImageCreateInfo* pImageCreateInfo,
    13829  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13830  uint32_t* pMemoryTypeIndex)
    13831 {
    13832  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13833  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13834  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13835  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13836 
    13837  const VkDevice hDev = allocator->m_hDevice;
    13838  VkImage hImage = VK_NULL_HANDLE;
    13839  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13840  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13841  if(res == VK_SUCCESS)
    13842  {
    13843  VkMemoryRequirements memReq = {};
    13844  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13845  hDev, hImage, &memReq);
    13846 
    13847  res = vmaFindMemoryTypeIndex(
    13848  allocator,
    13849  memReq.memoryTypeBits,
    13850  pAllocationCreateInfo,
    13851  pMemoryTypeIndex);
    13852 
    13853  allocator->GetVulkanFunctions().vkDestroyImage(
    13854  hDev, hImage, allocator->GetAllocationCallbacks());
    13855  }
    13856  return res;
    13857 }
    13858 
    13859 VkResult vmaCreatePool(
    13860  VmaAllocator allocator,
    13861  const VmaPoolCreateInfo* pCreateInfo,
    13862  VmaPool* pPool)
    13863 {
    13864  VMA_ASSERT(allocator && pCreateInfo && pPool);
    13865 
    13866  VMA_DEBUG_LOG("vmaCreatePool");
    13867 
    13868  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13869 
    13870  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    13871 
    13872 #if VMA_RECORDING_ENABLED
    13873  if(allocator->GetRecorder() != VMA_NULL)
    13874  {
    13875  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    13876  }
    13877 #endif
    13878 
    13879  return res;
    13880 }
    13881 
    13882 void vmaDestroyPool(
    13883  VmaAllocator allocator,
    13884  VmaPool pool)
    13885 {
    13886  VMA_ASSERT(allocator);
    13887 
    13888  if(pool == VK_NULL_HANDLE)
    13889  {
    13890  return;
    13891  }
    13892 
    13893  VMA_DEBUG_LOG("vmaDestroyPool");
    13894 
    13895  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13896 
    13897 #if VMA_RECORDING_ENABLED
    13898  if(allocator->GetRecorder() != VMA_NULL)
    13899  {
    13900  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    13901  }
    13902 #endif
    13903 
    13904  allocator->DestroyPool(pool);
    13905 }
    13906 
    13907 void vmaGetPoolStats(
    13908  VmaAllocator allocator,
    13909  VmaPool pool,
    13910  VmaPoolStats* pPoolStats)
    13911 {
    13912  VMA_ASSERT(allocator && pool && pPoolStats);
    13913 
    13914  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13915 
    13916  allocator->GetPoolStats(pool, pPoolStats);
    13917 }
    13918 
    13920  VmaAllocator allocator,
    13921  VmaPool pool,
    13922  size_t* pLostAllocationCount)
    13923 {
    13924  VMA_ASSERT(allocator && pool);
    13925 
    13926  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13927 
    13928 #if VMA_RECORDING_ENABLED
    13929  if(allocator->GetRecorder() != VMA_NULL)
    13930  {
    13931  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13932  }
    13933 #endif
    13934 
    13935  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13936 }
    13937 
    13938 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    13939 {
    13940  VMA_ASSERT(allocator && pool);
    13941 
    13942  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13943 
    13944  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    13945 
    13946  return allocator->CheckPoolCorruption(pool);
    13947 }
    13948 
    13949 VkResult vmaAllocateMemory(
    13950  VmaAllocator allocator,
    13951  const VkMemoryRequirements* pVkMemoryRequirements,
    13952  const VmaAllocationCreateInfo* pCreateInfo,
    13953  VmaAllocation* pAllocation,
    13954  VmaAllocationInfo* pAllocationInfo)
    13955 {
    13956  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    13957 
    13958  VMA_DEBUG_LOG("vmaAllocateMemory");
    13959 
    13960  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13961 
    13962  VkResult result = allocator->AllocateMemory(
    13963  *pVkMemoryRequirements,
    13964  false, // requiresDedicatedAllocation
    13965  false, // prefersDedicatedAllocation
    13966  VK_NULL_HANDLE, // dedicatedBuffer
    13967  VK_NULL_HANDLE, // dedicatedImage
    13968  *pCreateInfo,
    13969  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13970  pAllocation);
    13971 
    13972 #if VMA_RECORDING_ENABLED
    13973  if(allocator->GetRecorder() != VMA_NULL)
    13974  {
    13975  allocator->GetRecorder()->RecordAllocateMemory(
    13976  allocator->GetCurrentFrameIndex(),
    13977  *pVkMemoryRequirements,
    13978  *pCreateInfo,
    13979  *pAllocation);
    13980  }
    13981 #endif
    13982 
    13983  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13984  {
    13985  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13986  }
    13987 
    13988  return result;
    13989 }
    13990 
    13992  VmaAllocator allocator,
    13993  VkBuffer buffer,
    13994  const VmaAllocationCreateInfo* pCreateInfo,
    13995  VmaAllocation* pAllocation,
    13996  VmaAllocationInfo* pAllocationInfo)
    13997 {
    13998  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13999 
    14000  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    14001 
    14002  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14003 
    14004  VkMemoryRequirements vkMemReq = {};
    14005  bool requiresDedicatedAllocation = false;
    14006  bool prefersDedicatedAllocation = false;
    14007  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    14008  requiresDedicatedAllocation,
    14009  prefersDedicatedAllocation);
    14010 
    14011  VkResult result = allocator->AllocateMemory(
    14012  vkMemReq,
    14013  requiresDedicatedAllocation,
    14014  prefersDedicatedAllocation,
    14015  buffer, // dedicatedBuffer
    14016  VK_NULL_HANDLE, // dedicatedImage
    14017  *pCreateInfo,
    14018  VMA_SUBALLOCATION_TYPE_BUFFER,
    14019  pAllocation);
    14020 
    14021 #if VMA_RECORDING_ENABLED
    14022  if(allocator->GetRecorder() != VMA_NULL)
    14023  {
    14024  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    14025  allocator->GetCurrentFrameIndex(),
    14026  vkMemReq,
    14027  requiresDedicatedAllocation,
    14028  prefersDedicatedAllocation,
    14029  *pCreateInfo,
    14030  *pAllocation);
    14031  }
    14032 #endif
    14033 
    14034  if(pAllocationInfo && result == VK_SUCCESS)
    14035  {
    14036  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14037  }
    14038 
    14039  return result;
    14040 }
    14041 
    14042 VkResult vmaAllocateMemoryForImage(
    14043  VmaAllocator allocator,
    14044  VkImage image,
    14045  const VmaAllocationCreateInfo* pCreateInfo,
    14046  VmaAllocation* pAllocation,
    14047  VmaAllocationInfo* pAllocationInfo)
    14048 {
    14049  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    14050 
    14051  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    14052 
    14053  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14054 
    14055  VkMemoryRequirements vkMemReq = {};
    14056  bool requiresDedicatedAllocation = false;
    14057  bool prefersDedicatedAllocation = false;
    14058  allocator->GetImageMemoryRequirements(image, vkMemReq,
    14059  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14060 
    14061  VkResult result = allocator->AllocateMemory(
    14062  vkMemReq,
    14063  requiresDedicatedAllocation,
    14064  prefersDedicatedAllocation,
    14065  VK_NULL_HANDLE, // dedicatedBuffer
    14066  image, // dedicatedImage
    14067  *pCreateInfo,
    14068  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    14069  pAllocation);
    14070 
    14071 #if VMA_RECORDING_ENABLED
    14072  if(allocator->GetRecorder() != VMA_NULL)
    14073  {
    14074  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    14075  allocator->GetCurrentFrameIndex(),
    14076  vkMemReq,
    14077  requiresDedicatedAllocation,
    14078  prefersDedicatedAllocation,
    14079  *pCreateInfo,
    14080  *pAllocation);
    14081  }
    14082 #endif
    14083 
    14084  if(pAllocationInfo && result == VK_SUCCESS)
    14085  {
    14086  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14087  }
    14088 
    14089  return result;
    14090 }
    14091 
    14092 void vmaFreeMemory(
    14093  VmaAllocator allocator,
    14094  VmaAllocation allocation)
    14095 {
    14096  VMA_ASSERT(allocator);
    14097 
    14098  if(allocation == VK_NULL_HANDLE)
    14099  {
    14100  return;
    14101  }
    14102 
    14103  VMA_DEBUG_LOG("vmaFreeMemory");
    14104 
    14105  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14106 
    14107 #if VMA_RECORDING_ENABLED
    14108  if(allocator->GetRecorder() != VMA_NULL)
    14109  {
    14110  allocator->GetRecorder()->RecordFreeMemory(
    14111  allocator->GetCurrentFrameIndex(),
    14112  allocation);
    14113  }
    14114 #endif
    14115 
    14116  allocator->FreeMemory(allocation);
    14117 }
    14118 
    14119 VkResult vmaResizeAllocation(
    14120  VmaAllocator allocator,
    14121  VmaAllocation allocation,
    14122  VkDeviceSize newSize)
    14123 {
    14124  VMA_ASSERT(allocator && allocation);
    14125 
    14126  VMA_DEBUG_LOG("vmaResizeAllocation");
    14127 
    14128  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14129 
    14130 #if VMA_RECORDING_ENABLED
    14131  if(allocator->GetRecorder() != VMA_NULL)
    14132  {
    14133  allocator->GetRecorder()->RecordResizeAllocation(
    14134  allocator->GetCurrentFrameIndex(),
    14135  allocation,
    14136  newSize);
    14137  }
    14138 #endif
    14139 
    14140  return allocator->ResizeAllocation(allocation, newSize);
    14141 }
    14142 
    14144  VmaAllocator allocator,
    14145  VmaAllocation allocation,
    14146  VmaAllocationInfo* pAllocationInfo)
    14147 {
    14148  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    14149 
    14150  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14151 
    14152 #if VMA_RECORDING_ENABLED
    14153  if(allocator->GetRecorder() != VMA_NULL)
    14154  {
    14155  allocator->GetRecorder()->RecordGetAllocationInfo(
    14156  allocator->GetCurrentFrameIndex(),
    14157  allocation);
    14158  }
    14159 #endif
    14160 
    14161  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    14162 }
    14163 
    14164 VkBool32 vmaTouchAllocation(
    14165  VmaAllocator allocator,
    14166  VmaAllocation allocation)
    14167 {
    14168  VMA_ASSERT(allocator && allocation);
    14169 
    14170  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14171 
    14172 #if VMA_RECORDING_ENABLED
    14173  if(allocator->GetRecorder() != VMA_NULL)
    14174  {
    14175  allocator->GetRecorder()->RecordTouchAllocation(
    14176  allocator->GetCurrentFrameIndex(),
    14177  allocation);
    14178  }
    14179 #endif
    14180 
    14181  return allocator->TouchAllocation(allocation);
    14182 }
    14183 
    14185  VmaAllocator allocator,
    14186  VmaAllocation allocation,
    14187  void* pUserData)
    14188 {
    14189  VMA_ASSERT(allocator && allocation);
    14190 
    14191  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14192 
    14193  allocation->SetUserData(allocator, pUserData);
    14194 
    14195 #if VMA_RECORDING_ENABLED
    14196  if(allocator->GetRecorder() != VMA_NULL)
    14197  {
    14198  allocator->GetRecorder()->RecordSetAllocationUserData(
    14199  allocator->GetCurrentFrameIndex(),
    14200  allocation,
    14201  pUserData);
    14202  }
    14203 #endif
    14204 }
    14205 
    14207  VmaAllocator allocator,
    14208  VmaAllocation* pAllocation)
    14209 {
    14210  VMA_ASSERT(allocator && pAllocation);
    14211 
    14212  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    14213 
    14214  allocator->CreateLostAllocation(pAllocation);
    14215 
    14216 #if VMA_RECORDING_ENABLED
    14217  if(allocator->GetRecorder() != VMA_NULL)
    14218  {
    14219  allocator->GetRecorder()->RecordCreateLostAllocation(
    14220  allocator->GetCurrentFrameIndex(),
    14221  *pAllocation);
    14222  }
    14223 #endif
    14224 }
    14225 
    14226 VkResult vmaMapMemory(
    14227  VmaAllocator allocator,
    14228  VmaAllocation allocation,
    14229  void** ppData)
    14230 {
    14231  VMA_ASSERT(allocator && allocation && ppData);
    14232 
    14233  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14234 
    14235  VkResult res = allocator->Map(allocation, ppData);
    14236 
    14237 #if VMA_RECORDING_ENABLED
    14238  if(allocator->GetRecorder() != VMA_NULL)
    14239  {
    14240  allocator->GetRecorder()->RecordMapMemory(
    14241  allocator->GetCurrentFrameIndex(),
    14242  allocation);
    14243  }
    14244 #endif
    14245 
    14246  return res;
    14247 }
    14248 
    14249 void vmaUnmapMemory(
    14250  VmaAllocator allocator,
    14251  VmaAllocation allocation)
    14252 {
    14253  VMA_ASSERT(allocator && allocation);
    14254 
    14255  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14256 
    14257 #if VMA_RECORDING_ENABLED
    14258  if(allocator->GetRecorder() != VMA_NULL)
    14259  {
    14260  allocator->GetRecorder()->RecordUnmapMemory(
    14261  allocator->GetCurrentFrameIndex(),
    14262  allocation);
    14263  }
    14264 #endif
    14265 
    14266  allocator->Unmap(allocation);
    14267 }
    14268 
    14269 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14270 {
    14271  VMA_ASSERT(allocator && allocation);
    14272 
    14273  VMA_DEBUG_LOG("vmaFlushAllocation");
    14274 
    14275  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14276 
    14277  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    14278 
    14279 #if VMA_RECORDING_ENABLED
    14280  if(allocator->GetRecorder() != VMA_NULL)
    14281  {
    14282  allocator->GetRecorder()->RecordFlushAllocation(
    14283  allocator->GetCurrentFrameIndex(),
    14284  allocation, offset, size);
    14285  }
    14286 #endif
    14287 }
    14288 
    14289 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14290 {
    14291  VMA_ASSERT(allocator && allocation);
    14292 
    14293  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14294 
    14295  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14296 
    14297  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14298 
    14299 #if VMA_RECORDING_ENABLED
    14300  if(allocator->GetRecorder() != VMA_NULL)
    14301  {
    14302  allocator->GetRecorder()->RecordInvalidateAllocation(
    14303  allocator->GetCurrentFrameIndex(),
    14304  allocation, offset, size);
    14305  }
    14306 #endif
    14307 }
    14308 
    14309 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14310 {
    14311  VMA_ASSERT(allocator);
    14312 
    14313  VMA_DEBUG_LOG("vmaCheckCorruption");
    14314 
    14315  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14316 
    14317  return allocator->CheckCorruption(memoryTypeBits);
    14318 }
    14319 
    14320 VkResult vmaDefragment(
    14321  VmaAllocator allocator,
    14322  VmaAllocation* pAllocations,
    14323  size_t allocationCount,
    14324  VkBool32* pAllocationsChanged,
    14325  const VmaDefragmentationInfo *pDefragmentationInfo,
    14326  VmaDefragmentationStats* pDefragmentationStats)
    14327 {
    14328  VMA_ASSERT(allocator && pAllocations);
    14329 
    14330  VMA_DEBUG_LOG("vmaDefragment");
    14331 
    14332  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14333 
    14334  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14335 }
    14336 
    14337 VkResult vmaBindBufferMemory(
    14338  VmaAllocator allocator,
    14339  VmaAllocation allocation,
    14340  VkBuffer buffer)
    14341 {
    14342  VMA_ASSERT(allocator && allocation && buffer);
    14343 
    14344  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14345 
    14346  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14347 
    14348  return allocator->BindBufferMemory(allocation, buffer);
    14349 }
    14350 
    14351 VkResult vmaBindImageMemory(
    14352  VmaAllocator allocator,
    14353  VmaAllocation allocation,
    14354  VkImage image)
    14355 {
    14356  VMA_ASSERT(allocator && allocation && image);
    14357 
    14358  VMA_DEBUG_LOG("vmaBindImageMemory");
    14359 
    14360  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14361 
    14362  return allocator->BindImageMemory(allocation, image);
    14363 }
    14364 
    14365 VkResult vmaCreateBuffer(
    14366  VmaAllocator allocator,
    14367  const VkBufferCreateInfo* pBufferCreateInfo,
    14368  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14369  VkBuffer* pBuffer,
    14370  VmaAllocation* pAllocation,
    14371  VmaAllocationInfo* pAllocationInfo)
    14372 {
    14373  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14374 
    14375  if(pBufferCreateInfo->size == 0)
    14376  {
    14377  return VK_ERROR_VALIDATION_FAILED_EXT;
    14378  }
    14379 
    14380  VMA_DEBUG_LOG("vmaCreateBuffer");
    14381 
    14382  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14383 
    14384  *pBuffer = VK_NULL_HANDLE;
    14385  *pAllocation = VK_NULL_HANDLE;
    14386 
    14387  // 1. Create VkBuffer.
    14388  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14389  allocator->m_hDevice,
    14390  pBufferCreateInfo,
    14391  allocator->GetAllocationCallbacks(),
    14392  pBuffer);
    14393  if(res >= 0)
    14394  {
    14395  // 2. vkGetBufferMemoryRequirements.
    14396  VkMemoryRequirements vkMemReq = {};
    14397  bool requiresDedicatedAllocation = false;
    14398  bool prefersDedicatedAllocation = false;
    14399  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14400  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14401 
    14402  // Make sure alignment requirements for specific buffer usages reported
    14403  // in Physical Device Properties are included in alignment reported by memory requirements.
    14404  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14405  {
    14406  VMA_ASSERT(vkMemReq.alignment %
    14407  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14408  }
    14409  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14410  {
    14411  VMA_ASSERT(vkMemReq.alignment %
    14412  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14413  }
    14414  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14415  {
    14416  VMA_ASSERT(vkMemReq.alignment %
    14417  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14418  }
    14419 
    14420  // 3. Allocate memory using allocator.
    14421  res = allocator->AllocateMemory(
    14422  vkMemReq,
    14423  requiresDedicatedAllocation,
    14424  prefersDedicatedAllocation,
    14425  *pBuffer, // dedicatedBuffer
    14426  VK_NULL_HANDLE, // dedicatedImage
    14427  *pAllocationCreateInfo,
    14428  VMA_SUBALLOCATION_TYPE_BUFFER,
    14429  pAllocation);
    14430 
    14431 #if VMA_RECORDING_ENABLED
    14432  if(allocator->GetRecorder() != VMA_NULL)
    14433  {
    14434  allocator->GetRecorder()->RecordCreateBuffer(
    14435  allocator->GetCurrentFrameIndex(),
    14436  *pBufferCreateInfo,
    14437  *pAllocationCreateInfo,
    14438  *pAllocation);
    14439  }
    14440 #endif
    14441 
    14442  if(res >= 0)
    14443  {
    14444  // 3. Bind buffer with memory.
    14445  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14446  if(res >= 0)
    14447  {
    14448  // All steps succeeded.
    14449  #if VMA_STATS_STRING_ENABLED
    14450  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14451  #endif
    14452  if(pAllocationInfo != VMA_NULL)
    14453  {
    14454  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14455  }
    14456 
    14457  return VK_SUCCESS;
    14458  }
    14459  allocator->FreeMemory(*pAllocation);
    14460  *pAllocation = VK_NULL_HANDLE;
    14461  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14462  *pBuffer = VK_NULL_HANDLE;
    14463  return res;
    14464  }
    14465  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14466  *pBuffer = VK_NULL_HANDLE;
    14467  return res;
    14468  }
    14469  return res;
    14470 }
    14471 
    14472 void vmaDestroyBuffer(
    14473  VmaAllocator allocator,
    14474  VkBuffer buffer,
    14475  VmaAllocation allocation)
    14476 {
    14477  VMA_ASSERT(allocator);
    14478 
    14479  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14480  {
    14481  return;
    14482  }
    14483 
    14484  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14485 
    14486  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14487 
    14488 #if VMA_RECORDING_ENABLED
    14489  if(allocator->GetRecorder() != VMA_NULL)
    14490  {
    14491  allocator->GetRecorder()->RecordDestroyBuffer(
    14492  allocator->GetCurrentFrameIndex(),
    14493  allocation);
    14494  }
    14495 #endif
    14496 
    14497  if(buffer != VK_NULL_HANDLE)
    14498  {
    14499  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14500  }
    14501 
    14502  if(allocation != VK_NULL_HANDLE)
    14503  {
    14504  allocator->FreeMemory(allocation);
    14505  }
    14506 }
    14507 
    14508 VkResult vmaCreateImage(
    14509  VmaAllocator allocator,
    14510  const VkImageCreateInfo* pImageCreateInfo,
    14511  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14512  VkImage* pImage,
    14513  VmaAllocation* pAllocation,
    14514  VmaAllocationInfo* pAllocationInfo)
    14515 {
    14516  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14517 
    14518  if(pImageCreateInfo->extent.width == 0 ||
    14519  pImageCreateInfo->extent.height == 0 ||
    14520  pImageCreateInfo->extent.depth == 0 ||
    14521  pImageCreateInfo->mipLevels == 0 ||
    14522  pImageCreateInfo->arrayLayers == 0)
    14523  {
    14524  return VK_ERROR_VALIDATION_FAILED_EXT;
    14525  }
    14526 
    14527  VMA_DEBUG_LOG("vmaCreateImage");
    14528 
    14529  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14530 
    14531  *pImage = VK_NULL_HANDLE;
    14532  *pAllocation = VK_NULL_HANDLE;
    14533 
    14534  // 1. Create VkImage.
    14535  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14536  allocator->m_hDevice,
    14537  pImageCreateInfo,
    14538  allocator->GetAllocationCallbacks(),
    14539  pImage);
    14540  if(res >= 0)
    14541  {
    14542  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14543  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14544  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14545 
    14546  // 2. Allocate memory using allocator.
    14547  VkMemoryRequirements vkMemReq = {};
    14548  bool requiresDedicatedAllocation = false;
    14549  bool prefersDedicatedAllocation = false;
    14550  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14551  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14552 
    14553  res = allocator->AllocateMemory(
    14554  vkMemReq,
    14555  requiresDedicatedAllocation,
    14556  prefersDedicatedAllocation,
    14557  VK_NULL_HANDLE, // dedicatedBuffer
    14558  *pImage, // dedicatedImage
    14559  *pAllocationCreateInfo,
    14560  suballocType,
    14561  pAllocation);
    14562 
    14563 #if VMA_RECORDING_ENABLED
    14564  if(allocator->GetRecorder() != VMA_NULL)
    14565  {
    14566  allocator->GetRecorder()->RecordCreateImage(
    14567  allocator->GetCurrentFrameIndex(),
    14568  *pImageCreateInfo,
    14569  *pAllocationCreateInfo,
    14570  *pAllocation);
    14571  }
    14572 #endif
    14573 
    14574  if(res >= 0)
    14575  {
    14576  // 3. Bind image with memory.
    14577  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14578  if(res >= 0)
    14579  {
    14580  // All steps succeeded.
    14581  #if VMA_STATS_STRING_ENABLED
    14582  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14583  #endif
    14584  if(pAllocationInfo != VMA_NULL)
    14585  {
    14586  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14587  }
    14588 
    14589  return VK_SUCCESS;
    14590  }
    14591  allocator->FreeMemory(*pAllocation);
    14592  *pAllocation = VK_NULL_HANDLE;
    14593  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14594  *pImage = VK_NULL_HANDLE;
    14595  return res;
    14596  }
    14597  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14598  *pImage = VK_NULL_HANDLE;
    14599  return res;
    14600  }
    14601  return res;
    14602 }
    14603 
    14604 void vmaDestroyImage(
    14605  VmaAllocator allocator,
    14606  VkImage image,
    14607  VmaAllocation allocation)
    14608 {
    14609  VMA_ASSERT(allocator);
    14610 
    14611  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14612  {
    14613  return;
    14614  }
    14615 
    14616  VMA_DEBUG_LOG("vmaDestroyImage");
    14617 
    14618  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14619 
    14620 #if VMA_RECORDING_ENABLED
    14621  if(allocator->GetRecorder() != VMA_NULL)
    14622  {
    14623  allocator->GetRecorder()->RecordDestroyImage(
    14624  allocator->GetCurrentFrameIndex(),
    14625  allocation);
    14626  }
    14627 #endif
    14628 
    14629  if(image != VK_NULL_HANDLE)
    14630  {
    14631  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14632  }
    14633  if(allocation != VK_NULL_HANDLE)
    14634  {
    14635  allocator->FreeMemory(allocation);
    14636  }
    14637 }
    14638 
    14639 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1586
    -
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1888
    +Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1480 /*
    1481 Define this macro to 0/1 to disable/enable support for recording functionality,
    1482 available through VmaAllocatorCreateInfo::pRecordSettings.
    1483 */
    1484 #ifndef VMA_RECORDING_ENABLED
    1485  #ifdef _WIN32
    1486  #define VMA_RECORDING_ENABLED 1
    1487  #else
    1488  #define VMA_RECORDING_ENABLED 0
    1489  #endif
    1490 #endif
    1491 
    1492 #ifndef NOMINMAX
    1493  #define NOMINMAX // For windows.h
    1494 #endif
    1495 
    1496 #ifndef VULKAN_H_
    1497  #include <vulkan/vulkan.h>
    1498 #endif
    1499 
    1500 #if VMA_RECORDING_ENABLED
    1501  #include <windows.h>
    1502 #endif
    1503 
    1504 #if !defined(VMA_DEDICATED_ALLOCATION)
    1505  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1506  #define VMA_DEDICATED_ALLOCATION 1
    1507  #else
    1508  #define VMA_DEDICATED_ALLOCATION 0
    1509  #endif
    1510 #endif
    1511 
    1521 VK_DEFINE_HANDLE(VmaAllocator)
    1522 
    1523 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1525  VmaAllocator allocator,
    1526  uint32_t memoryType,
    1527  VkDeviceMemory memory,
    1528  VkDeviceSize size);
    1530 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1531  VmaAllocator allocator,
    1532  uint32_t memoryType,
    1533  VkDeviceMemory memory,
    1534  VkDeviceSize size);
    1535 
    1549 
    1579 
    1582 typedef VkFlags VmaAllocatorCreateFlags;
    1583 
    1588 typedef struct VmaVulkanFunctions {
    1589  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1590  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1591  PFN_vkAllocateMemory vkAllocateMemory;
    1592  PFN_vkFreeMemory vkFreeMemory;
    1593  PFN_vkMapMemory vkMapMemory;
    1594  PFN_vkUnmapMemory vkUnmapMemory;
    1595  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1596  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1597  PFN_vkBindBufferMemory vkBindBufferMemory;
    1598  PFN_vkBindImageMemory vkBindImageMemory;
    1599  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1600  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1601  PFN_vkCreateBuffer vkCreateBuffer;
    1602  PFN_vkDestroyBuffer vkDestroyBuffer;
    1603  PFN_vkCreateImage vkCreateImage;
    1604  PFN_vkDestroyImage vkDestroyImage;
    1605 #if VMA_DEDICATED_ALLOCATION
    1606  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1607  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1608 #endif
    1610 
    1612 typedef enum VmaRecordFlagBits {
    1619 
    1622 typedef VkFlags VmaRecordFlags;
    1623 
    1625 typedef struct VmaRecordSettings
    1626 {
    1636  const char* pFilePath;
    1638 
    1641 {
    1645 
    1646  VkPhysicalDevice physicalDevice;
    1648 
    1649  VkDevice device;
    1651 
    1654 
    1655  const VkAllocationCallbacks* pAllocationCallbacks;
    1657 
    1697  const VkDeviceSize* pHeapSizeLimit;
    1718 
    1720 VkResult vmaCreateAllocator(
    1721  const VmaAllocatorCreateInfo* pCreateInfo,
    1722  VmaAllocator* pAllocator);
    1723 
    1725 void vmaDestroyAllocator(
    1726  VmaAllocator allocator);
    1727 
    1733  VmaAllocator allocator,
    1734  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1735 
    1741  VmaAllocator allocator,
    1742  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1743 
    1751  VmaAllocator allocator,
    1752  uint32_t memoryTypeIndex,
    1753  VkMemoryPropertyFlags* pFlags);
    1754 
    1764  VmaAllocator allocator,
    1765  uint32_t frameIndex);
    1766 
    1769 typedef struct VmaStatInfo
    1770 {
    1772  uint32_t blockCount;
    1778  VkDeviceSize usedBytes;
    1780  VkDeviceSize unusedBytes;
    1783 } VmaStatInfo;
    1784 
    1786 typedef struct VmaStats
    1787 {
    1788  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1789  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1791 } VmaStats;
    1792 
    1794 void vmaCalculateStats(
    1795  VmaAllocator allocator,
    1796  VmaStats* pStats);
    1797 
    1798 #define VMA_STATS_STRING_ENABLED 1
    1799 
    1800 #if VMA_STATS_STRING_ENABLED
    1801 
    1803 
    1805 void vmaBuildStatsString(
    1806  VmaAllocator allocator,
    1807  char** ppStatsString,
    1808  VkBool32 detailedMap);
    1809 
    1810 void vmaFreeStatsString(
    1811  VmaAllocator allocator,
    1812  char* pStatsString);
    1813 
    1814 #endif // #if VMA_STATS_STRING_ENABLED
    1815 
    1824 VK_DEFINE_HANDLE(VmaPool)
    1825 
    1826 typedef enum VmaMemoryUsage
    1827 {
    1876 } VmaMemoryUsage;
    1877 
    1892 
    1947 
    1963 
    1973 
    1980 
    1984 
    1986 {
    1999  VkMemoryPropertyFlags requiredFlags;
    2004  VkMemoryPropertyFlags preferredFlags;
    2012  uint32_t memoryTypeBits;
    2025  void* pUserData;
    2027 
    2044 VkResult vmaFindMemoryTypeIndex(
    2045  VmaAllocator allocator,
    2046  uint32_t memoryTypeBits,
    2047  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2048  uint32_t* pMemoryTypeIndex);
    2049 
    2063  VmaAllocator allocator,
    2064  const VkBufferCreateInfo* pBufferCreateInfo,
    2065  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2066  uint32_t* pMemoryTypeIndex);
    2067 
    2081  VmaAllocator allocator,
    2082  const VkImageCreateInfo* pImageCreateInfo,
    2083  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2084  uint32_t* pMemoryTypeIndex);
    2085 
    2106 
    2123 
    2134 
    2140 
    2143 typedef VkFlags VmaPoolCreateFlags;
    2144 
    2147 typedef struct VmaPoolCreateInfo {
    2162  VkDeviceSize blockSize;
    2191 
    2194 typedef struct VmaPoolStats {
    2197  VkDeviceSize size;
    2200  VkDeviceSize unusedSize;
    2213  VkDeviceSize unusedRangeSizeMax;
    2216  size_t blockCount;
    2217 } VmaPoolStats;
    2218 
    2225 VkResult vmaCreatePool(
    2226  VmaAllocator allocator,
    2227  const VmaPoolCreateInfo* pCreateInfo,
    2228  VmaPool* pPool);
    2229 
    2232 void vmaDestroyPool(
    2233  VmaAllocator allocator,
    2234  VmaPool pool);
    2235 
    2242 void vmaGetPoolStats(
    2243  VmaAllocator allocator,
    2244  VmaPool pool,
    2245  VmaPoolStats* pPoolStats);
    2246 
    2254  VmaAllocator allocator,
    2255  VmaPool pool,
    2256  size_t* pLostAllocationCount);
    2257 
    2272 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2273 
    2298 VK_DEFINE_HANDLE(VmaAllocation)
    2299 
    2300 
    2302 typedef struct VmaAllocationInfo {
    2307  uint32_t memoryType;
    2316  VkDeviceMemory deviceMemory;
    2321  VkDeviceSize offset;
    2326  VkDeviceSize size;
    2340  void* pUserData;
    2342 
    2353 VkResult vmaAllocateMemory(
    2354  VmaAllocator allocator,
    2355  const VkMemoryRequirements* pVkMemoryRequirements,
    2356  const VmaAllocationCreateInfo* pCreateInfo,
    2357  VmaAllocation* pAllocation,
    2358  VmaAllocationInfo* pAllocationInfo);
    2359 
    2367  VmaAllocator allocator,
    2368  VkBuffer buffer,
    2369  const VmaAllocationCreateInfo* pCreateInfo,
    2370  VmaAllocation* pAllocation,
    2371  VmaAllocationInfo* pAllocationInfo);
    2372 
    2374 VkResult vmaAllocateMemoryForImage(
    2375  VmaAllocator allocator,
    2376  VkImage image,
    2377  const VmaAllocationCreateInfo* pCreateInfo,
    2378  VmaAllocation* pAllocation,
    2379  VmaAllocationInfo* pAllocationInfo);
    2380 
    2382 void vmaFreeMemory(
    2383  VmaAllocator allocator,
    2384  VmaAllocation allocation);
    2385 
    2406 VkResult vmaResizeAllocation(
    2407  VmaAllocator allocator,
    2408  VmaAllocation allocation,
    2409  VkDeviceSize newSize);
    2410 
    2428  VmaAllocator allocator,
    2429  VmaAllocation allocation,
    2430  VmaAllocationInfo* pAllocationInfo);
    2431 
    2446 VkBool32 vmaTouchAllocation(
    2447  VmaAllocator allocator,
    2448  VmaAllocation allocation);
    2449 
    2464  VmaAllocator allocator,
    2465  VmaAllocation allocation,
    2466  void* pUserData);
    2467 
    2479  VmaAllocator allocator,
    2480  VmaAllocation* pAllocation);
    2481 
    2516 VkResult vmaMapMemory(
    2517  VmaAllocator allocator,
    2518  VmaAllocation allocation,
    2519  void** ppData);
    2520 
    2525 void vmaUnmapMemory(
    2526  VmaAllocator allocator,
    2527  VmaAllocation allocation);
    2528 
    2541 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2542 
    2555 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2556 
    2573 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2574 
    2576 typedef struct VmaDefragmentationInfo {
    2581  VkDeviceSize maxBytesToMove;
    2588 
    2590 typedef struct VmaDefragmentationStats {
    2592  VkDeviceSize bytesMoved;
    2594  VkDeviceSize bytesFreed;
    2600 
    2639 VkResult vmaDefragment(
    2640  VmaAllocator allocator,
    2641  VmaAllocation* pAllocations,
    2642  size_t allocationCount,
    2643  VkBool32* pAllocationsChanged,
    2644  const VmaDefragmentationInfo *pDefragmentationInfo,
    2645  VmaDefragmentationStats* pDefragmentationStats);
    2646 
    2659 VkResult vmaBindBufferMemory(
    2660  VmaAllocator allocator,
    2661  VmaAllocation allocation,
    2662  VkBuffer buffer);
    2663 
    2676 VkResult vmaBindImageMemory(
    2677  VmaAllocator allocator,
    2678  VmaAllocation allocation,
    2679  VkImage image);
    2680 
    2707 VkResult vmaCreateBuffer(
    2708  VmaAllocator allocator,
    2709  const VkBufferCreateInfo* pBufferCreateInfo,
    2710  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2711  VkBuffer* pBuffer,
    2712  VmaAllocation* pAllocation,
    2713  VmaAllocationInfo* pAllocationInfo);
    2714 
    2726 void vmaDestroyBuffer(
    2727  VmaAllocator allocator,
    2728  VkBuffer buffer,
    2729  VmaAllocation allocation);
    2730 
    2732 VkResult vmaCreateImage(
    2733  VmaAllocator allocator,
    2734  const VkImageCreateInfo* pImageCreateInfo,
    2735  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2736  VkImage* pImage,
    2737  VmaAllocation* pAllocation,
    2738  VmaAllocationInfo* pAllocationInfo);
    2739 
    2751 void vmaDestroyImage(
    2752  VmaAllocator allocator,
    2753  VkImage image,
    2754  VmaAllocation allocation);
    2755 
    2756 #ifdef __cplusplus
    2757 }
    2758 #endif
    2759 
    2760 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2761 
    2762 // For Visual Studio IntelliSense.
    2763 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2764 #define VMA_IMPLEMENTATION
    2765 #endif
    2766 
    2767 #ifdef VMA_IMPLEMENTATION
    2768 #undef VMA_IMPLEMENTATION
    2769 
    2770 #include <cstdint>
    2771 #include <cstdlib>
    2772 #include <cstring>
    2773 
    2774 /*******************************************************************************
    2775 CONFIGURATION SECTION
    2776 
    2777 Define some of these macros before each #include of this header or change them
    2778 here if you need other then default behavior depending on your environment.
    2779 */
    2780 
    2781 /*
    2782 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2783 internally, like:
    2784 
    2785  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2786 
    2787 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2788 VmaAllocatorCreateInfo::pVulkanFunctions.
    2789 */
    2790 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2791 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2792 #endif
    2793 
    2794 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2795 //#define VMA_USE_STL_CONTAINERS 1
    2796 
    2797 /* Set this macro to 1 to make the library including and using STL containers:
    2798 std::pair, std::vector, std::list, std::unordered_map.
    2799 
    2800 Set it to 0 or undefined to make the library using its own implementation of
    2801 the containers.
    2802 */
    2803 #if VMA_USE_STL_CONTAINERS
    2804  #define VMA_USE_STL_VECTOR 1
    2805  #define VMA_USE_STL_UNORDERED_MAP 1
    2806  #define VMA_USE_STL_LIST 1
    2807 #endif
    2808 
    2809 #if VMA_USE_STL_VECTOR
    2810  #include <vector>
    2811 #endif
    2812 
    2813 #if VMA_USE_STL_UNORDERED_MAP
    2814  #include <unordered_map>
    2815 #endif
    2816 
    2817 #if VMA_USE_STL_LIST
    2818  #include <list>
    2819 #endif
    2820 
    2821 /*
    2822 Following headers are used in this CONFIGURATION section only, so feel free to
    2823 remove them if not needed.
    2824 */
    2825 #include <cassert> // for assert
    2826 #include <algorithm> // for min, max
    2827 #include <mutex> // for std::mutex
    2828 #include <atomic> // for std::atomic
    2829 
    2830 #ifndef VMA_NULL
    2831  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2832  #define VMA_NULL nullptr
    2833 #endif
    2834 
    2835 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    2836 #include <cstdlib>
// Replacement for C11 aligned_alloc(), which is missing on Android before API 16.
// Raises alignment to at least sizeof(void*) (required by memalign-style
// allocators) and returns null on failure.
// NOTE(review): memalign() is traditionally declared in <malloc.h> on Android —
// confirm <cstdlib> pulls in its declaration for the targeted NDK versions.
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
    2847 #elif defined(__APPLE__) || defined(__ANDROID__)
    2848 #include <cstdlib>
// Replacement for C11 aligned_alloc() on Apple and Android platforms where it
// is unavailable; implemented on top of posix_memalign(). Returns VMA_NULL on
// failure. posix_memalign requires alignment to be a multiple of sizeof(void*),
// hence the clamp below (it must also be a power of two — assumed by callers).
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
    2862 #endif
    2863 
    2864 // If your compiler is not compatible with C++11 and definition of
    2865 // aligned_alloc() function is missing, uncommeting following line may help:
    2866 
    2867 //#include <malloc.h>
    2868 
    2869 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2870 #ifndef VMA_ASSERT
    2871  #ifdef _DEBUG
    2872  #define VMA_ASSERT(expr) assert(expr)
    2873  #else
    2874  #define VMA_ASSERT(expr)
    2875  #endif
    2876 #endif
    2877 
    2878 // Assert that will be called very often, like inside data structures e.g. operator[].
    2879 // Making it non-empty can make program slow.
    2880 #ifndef VMA_HEAVY_ASSERT
    2881  #ifdef _DEBUG
    2882  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2883  #else
    2884  #define VMA_HEAVY_ASSERT(expr)
    2885  #endif
    2886 #endif
    2887 
    2888 #ifndef VMA_ALIGN_OF
    2889  #define VMA_ALIGN_OF(type) (__alignof(type))
    2890 #endif
    2891 
    2892 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2893  #if defined(_WIN32)
    2894  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2895  #else
    2896  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2897  #endif
    2898 #endif
    2899 
    2900 #ifndef VMA_SYSTEM_FREE
    2901  #if defined(_WIN32)
    2902  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2903  #else
    2904  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2905  #endif
    2906 #endif
    2907 
    2908 #ifndef VMA_MIN
    2909  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2910 #endif
    2911 
    2912 #ifndef VMA_MAX
    2913  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2914 #endif
    2915 
    2916 #ifndef VMA_SWAP
    2917  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2918 #endif
    2919 
    2920 #ifndef VMA_SORT
    2921  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2922 #endif
    2923 
    2924 #ifndef VMA_DEBUG_LOG
    2925  #define VMA_DEBUG_LOG(format, ...)
    2926  /*
    2927  #define VMA_DEBUG_LOG(format, ...) do { \
    2928  printf(format, __VA_ARGS__); \
    2929  printf("\n"); \
    2930  } while(false)
    2931  */
    2932 #endif
    2933 
    2934 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2935 #if VMA_STATS_STRING_ENABLED
    2936  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2937  {
    2938  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2939  }
    2940  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2941  {
    2942  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2943  }
// Formats pointer `ptr` into outStr using the implementation-defined "%p"
// representation; snprintf NUL-terminates as long as strLen > 0.
static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
{
    snprintf(outStr, strLen, "%p", ptr);
}
    2948 #endif
    2949 
    2950 #ifndef VMA_MUTEX
    2951  class VmaMutex
    2952  {
    2953  public:
    2954  VmaMutex() { }
    2955  ~VmaMutex() { }
    2956  void Lock() { m_Mutex.lock(); }
    2957  void Unlock() { m_Mutex.unlock(); }
    2958  private:
    2959  std::mutex m_Mutex;
    2960  };
    2961  #define VMA_MUTEX VmaMutex
    2962 #endif
    2963 
    2964 /*
    2965 If providing your own implementation, you need to implement a subset of std::atomic:
    2966 
    2967 - Constructor(uint32_t desired)
    2968 - uint32_t load() const
    2969 - void store(uint32_t desired)
    2970 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2971 */
    2972 #ifndef VMA_ATOMIC_UINT32
    2973  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2974 #endif
    2975 
    2976 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2977 
    2981  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2982 #endif
    2983 
    2984 #ifndef VMA_DEBUG_ALIGNMENT
    2985 
    2989  #define VMA_DEBUG_ALIGNMENT (1)
    2990 #endif
    2991 
    2992 #ifndef VMA_DEBUG_MARGIN
    2993 
    2997  #define VMA_DEBUG_MARGIN (0)
    2998 #endif
    2999 
    3000 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    3001 
    3005  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3006 #endif
    3007 
    3008 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3009 
    3014  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3015 #endif
    3016 
    3017 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3018 
    3022  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3023 #endif
    3024 
    3025 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3026 
    3030  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3031 #endif
    3032 
    3033 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3034  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3036 #endif
    3037 
    3038 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3039  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3041 #endif
    3042 
    3043 #ifndef VMA_CLASS_NO_COPY
    3044  #define VMA_CLASS_NO_COPY(className) \
    3045  private: \
    3046  className(const className&) = delete; \
    3047  className& operator=(const className&) = delete;
    3048 #endif
    3049 
    3050 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3051 
    3052 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3053 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3054 
    3055 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3056 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3057 
    3058 /*******************************************************************************
    3059 END OF CONFIGURATION
    3060 */
    3061 
    3062 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3063  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3064 
    3065 // Returns number of bits set to 1 in (v).
    3066 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3067 {
    3068  uint32_t c = v - ((v >> 1) & 0x55555555);
    3069  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3070  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3071  c = ((c >> 8) + c) & 0x00FF00FF;
    3072  c = ((c >> 16) + c) & 0x0000FFFF;
    3073  return c;
    3074 }
    3075 
    3076 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3077 // Use types like uint32_t, uint64_t as T.
    3078 template <typename T>
    3079 static inline T VmaAlignUp(T val, T align)
    3080 {
    3081  return (val + align - 1) / align * align;
    3082 }
    3083 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3084 // Use types like uint32_t, uint64_t as T.
    3085 template <typename T>
    3086 static inline T VmaAlignDown(T val, T align)
    3087 {
    3088  return val / align * align;
    3089 }
    3090 
    3091 // Division with mathematical rounding to nearest number.
    3092 template <typename T>
    3093 static inline T VmaRoundDiv(T x, T y)
    3094 {
    3095  return (x + (y / (T)2)) / y;
    3096 }
    3097 
    3098 /*
    3099 Returns true if given number is a power of two.
    3100 T must be unsigned integer number or signed integer but always nonnegative.
    3101 For 0 returns true.
    3102 */
    3103 template <typename T>
    3104 inline bool VmaIsPow2(T x)
    3105 {
    3106  return (x & (x-1)) == 0;
    3107 }
    3108 
    3109 // Returns smallest power of 2 greater or equal to v.
    3110 static inline uint32_t VmaNextPow2(uint32_t v)
    3111 {
    3112  v--;
    3113  v |= v >> 1;
    3114  v |= v >> 2;
    3115  v |= v >> 4;
    3116  v |= v >> 8;
    3117  v |= v >> 16;
    3118  v++;
    3119  return v;
    3120 }
    3121 static inline uint64_t VmaNextPow2(uint64_t v)
    3122 {
    3123  v--;
    3124  v |= v >> 1;
    3125  v |= v >> 2;
    3126  v |= v >> 4;
    3127  v |= v >> 8;
    3128  v |= v >> 16;
    3129  v |= v >> 32;
    3130  v++;
    3131  return v;
    3132 }
    3133 
    3134 // Returns largest power of 2 less or equal to v.
    3135 static inline uint32_t VmaPrevPow2(uint32_t v)
    3136 {
    3137  v |= v >> 1;
    3138  v |= v >> 2;
    3139  v |= v >> 4;
    3140  v |= v >> 8;
    3141  v |= v >> 16;
    3142  v = v ^ (v >> 1);
    3143  return v;
    3144 }
    3145 static inline uint64_t VmaPrevPow2(uint64_t v)
    3146 {
    3147  v |= v >> 1;
    3148  v |= v >> 2;
    3149  v |= v >> 4;
    3150  v |= v >> 8;
    3151  v |= v >> 16;
    3152  v |= v >> 32;
    3153  v = v ^ (v >> 1);
    3154  return v;
    3155 }
    3156 
    3157 static inline bool VmaStrIsEmpty(const char* pStr)
    3158 {
    3159  return pStr == VMA_NULL || *pStr == '\0';
    3160 }
    3161 
// Maps an algorithm flags value (as stored in pool create flags) to a
// human-readable name: 0 maps to "Default"; unrecognized values assert and
// return "".
// NOTE(review): the `case` labels guarding the "Linear" and "Buddy" branches
// are missing here — this looks like text lost when this listing was
// extracted; confirm against the upstream vk_mem_alloc.h before relying on
// this function.
static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
        return "Linear";
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}
    3177 
    3178 #ifndef VMA_SORT
    3179 
/*
Lomuto-style partition used by VmaQuickSort below (fallback when VMA_SORT is
not defined). Uses the last element, *(end - 1), as the pivot. On return, all
elements for which cmp(element, pivot) is true precede the returned iterator,
the pivot sits at the returned position, and the rest follow it.
cmp(a, b) must return true when a is ordered before b.
*/
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;   // pivot = last element
    Iterator insertIndex = beg;                  // next slot for a "less than pivot" element
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Move the pivot into its final sorted position.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
    3202 
/*
Recursive quicksort over [beg, end) built on VmaQuickSortPartition above.
Only compiled as the VMA_SORT fallback when that macro was not already defined.
NOTE(review): last-element pivot with unbounded recursion gives O(n^2) worst
case on presorted input — acceptable for the small arrays sorted here, but
not a general-purpose sort.
*/
template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}
    3213 
    3214 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3215 
    3216 #endif // #ifndef VMA_SORT
    3217 
    3218 /*
    3219 Returns true if two memory blocks occupy overlapping pages.
    3220 ResourceA must be in less memory offset than ResourceB.
    3221 
    3222 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3223 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3224 */
    3225 static inline bool VmaBlocksOnSamePage(
    3226  VkDeviceSize resourceAOffset,
    3227  VkDeviceSize resourceASize,
    3228  VkDeviceSize resourceBOffset,
    3229  VkDeviceSize pageSize)
    3230 {
    3231  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3232  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3233  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3234  VkDeviceSize resourceBStart = resourceBOffset;
    3235  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3236  return resourceAEndPage == resourceBStartPage;
    3237 }
    3238 
// Classifies what a suballocation holds. Used to decide whether two
// neighboring suballocations must be separated by bufferImageGranularity
// (see VmaIsBufferImageGranularityConflict below).
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,            // unused region of a block
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,         // contents unknown — conflict checks assume the worst
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,   // image with tiling not known
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
    3249 
/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // Order the pair so only the upper triangle of the type matrix is handled.
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false; // Free space conflicts with nothing.
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true; // Unknown contents: assume the worst.
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false; // Optimal vs optimal never needs the granularity gap.
    default:
        VMA_ASSERT(0);
        return true;
    }
}
    3290 
    3291 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3292 {
    3293  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3294  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3295  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3296  {
    3297  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3298  }
    3299 }
    3300 
    3301 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3302 {
    3303  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3304  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3305  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3306  {
    3307  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3308  {
    3309  return false;
    3310  }
    3311  }
    3312  return true;
    3313 }
    3314 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    // Locks `mutex` only when useMutex is true; otherwise the guard is a
    // no-op (m_pMutex stays null), letting callers opt out of internal
    // synchronization.
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    // Releases the mutex (if one was taken) when the guard leaves scope.
    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;  // null when synchronization was disabled
};
    3340 
    3341 #if VMA_DEBUG_GLOBAL_MUTEX
    3342  static VMA_MUTEX gDebugGlobalMutex;
    3343  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3344 #else
    3345  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3346 #endif
    3347 
    3348 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3349 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3350 
    3351 /*
    3352 Performs binary search and returns iterator to first element that is greater or
    3353 equal to (key), according to comparison (cmp).
    3354 
    3355 Cmp should return true if first argument is less than second argument.
    3356 
    3357 Returned value is the found element, if present in the collection or place where
    3358 new element with value (key) should be inserted.
    3359 */
    3360 template <typename CmpLess, typename IterT, typename KeyT>
    3361 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3362 {
    3363  size_t down = 0, up = (end - beg);
    3364  while(down < up)
    3365  {
    3366  const size_t mid = (down + up) / 2;
    3367  if(cmp(*(beg+mid), key))
    3368  {
    3369  down = mid + 1;
    3370  }
    3371  else
    3372  {
    3373  up = mid;
    3374  }
    3375  }
    3376  return beg + down;
    3377 }
    3378 
    3380 // Memory allocation
    3381 
    3382 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3383 {
    3384  if((pAllocationCallbacks != VMA_NULL) &&
    3385  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3386  {
    3387  return (*pAllocationCallbacks->pfnAllocation)(
    3388  pAllocationCallbacks->pUserData,
    3389  size,
    3390  alignment,
    3391  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3392  }
    3393  else
    3394  {
    3395  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3396  }
    3397 }
    3398 
    3399 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3400 {
    3401  if((pAllocationCallbacks != VMA_NULL) &&
    3402  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3403  {
    3404  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3405  }
    3406  else
    3407  {
    3408  VMA_SYSTEM_FREE(ptr);
    3409  }
    3410 }
    3411 
// Allocates raw storage for a single T (no constructor is run) with T's
// natural alignment, through VmaMalloc. Pair with vma_delete.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
    3417 
    3418 template<typename T>
    3419 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3420 {
    3421  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3422 }
    3423 
    3424 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3425 
    3426 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3427 
    3428 template<typename T>
    3429 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3430 {
    3431  ptr->~T();
    3432  VmaFree(pAllocationCallbacks, ptr);
    3433 }
    3434 
    3435 template<typename T>
    3436 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3437 {
    3438  if(ptr != VMA_NULL)
    3439  {
    3440  for(size_t i = count; i--; )
    3441  {
    3442  ptr[i].~T();
    3443  }
    3444  VmaFree(pAllocationCallbacks, ptr);
    3445  }
    3446 }
    3447 
// STL-compatible allocator that routes all (de)allocation through the
// user-provided VkAllocationCallbacks (or the system allocator when null).
// Stateful: two instances compare equal iff they share the same callbacks.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding converting constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    // Assignment is deleted because m_pCallbacks is const.
    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3475 
    3476 #if VMA_USE_STL_VECTOR
    3477 
    3478 #define VmaVector std::vector
    3479 
// Inserts `item` at position `index` of a std::vector — free-function shim so
// the same call syntax works for std::vector and the custom VmaVector.
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
    3485 
// Removes the element at position `index` of a std::vector — free-function
// shim matching the custom VmaVector's interface.
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
    3491 
    3492 #else // #if VMA_USE_STL_VECTOR
    3493 
    3494 /* Class with interface compatible with subset of std::vector.
    3495 T must be POD because constructors and destructors are not called and memcpy is
    3496 used for these objects. */
    3497 template<typename T, typename AllocatorT>
    3498 class VmaVector
    3499 {
    3500 public:
    3501  typedef T value_type;
    3502 
    3503  VmaVector(const AllocatorT& allocator) :
    3504  m_Allocator(allocator),
    3505  m_pArray(VMA_NULL),
    3506  m_Count(0),
    3507  m_Capacity(0)
    3508  {
    3509  }
    3510 
    3511  VmaVector(size_t count, const AllocatorT& allocator) :
    3512  m_Allocator(allocator),
    3513  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3514  m_Count(count),
    3515  m_Capacity(count)
    3516  {
    3517  }
    3518 
    3519  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3520  m_Allocator(src.m_Allocator),
    3521  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3522  m_Count(src.m_Count),
    3523  m_Capacity(src.m_Count)
    3524  {
    3525  if(m_Count != 0)
    3526  {
    3527  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3528  }
    3529  }
    3530 
    3531  ~VmaVector()
    3532  {
    3533  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3534  }
    3535 
    3536  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3537  {
    3538  if(&rhs != this)
    3539  {
    3540  resize(rhs.m_Count);
    3541  if(m_Count != 0)
    3542  {
    3543  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3544  }
    3545  }
    3546  return *this;
    3547  }
    3548 
    3549  bool empty() const { return m_Count == 0; }
    3550  size_t size() const { return m_Count; }
    3551  T* data() { return m_pArray; }
    3552  const T* data() const { return m_pArray; }
    3553 
    3554  T& operator[](size_t index)
    3555  {
    3556  VMA_HEAVY_ASSERT(index < m_Count);
    3557  return m_pArray[index];
    3558  }
    3559  const T& operator[](size_t index) const
    3560  {
    3561  VMA_HEAVY_ASSERT(index < m_Count);
    3562  return m_pArray[index];
    3563  }
    3564 
    3565  T& front()
    3566  {
    3567  VMA_HEAVY_ASSERT(m_Count > 0);
    3568  return m_pArray[0];
    3569  }
    3570  const T& front() const
    3571  {
    3572  VMA_HEAVY_ASSERT(m_Count > 0);
    3573  return m_pArray[0];
    3574  }
    3575  T& back()
    3576  {
    3577  VMA_HEAVY_ASSERT(m_Count > 0);
    3578  return m_pArray[m_Count - 1];
    3579  }
    3580  const T& back() const
    3581  {
    3582  VMA_HEAVY_ASSERT(m_Count > 0);
    3583  return m_pArray[m_Count - 1];
    3584  }
    3585 
    3586  void reserve(size_t newCapacity, bool freeMemory = false)
    3587  {
    3588  newCapacity = VMA_MAX(newCapacity, m_Count);
    3589 
    3590  if((newCapacity < m_Capacity) && !freeMemory)
    3591  {
    3592  newCapacity = m_Capacity;
    3593  }
    3594 
    3595  if(newCapacity != m_Capacity)
    3596  {
    3597  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3598  if(m_Count != 0)
    3599  {
    3600  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3601  }
    3602  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3603  m_Capacity = newCapacity;
    3604  m_pArray = newArray;
    3605  }
    3606  }
    3607 
    3608  void resize(size_t newCount, bool freeMemory = false)
    3609  {
    3610  size_t newCapacity = m_Capacity;
    3611  if(newCount > m_Capacity)
    3612  {
    3613  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3614  }
    3615  else if(freeMemory)
    3616  {
    3617  newCapacity = newCount;
    3618  }
    3619 
    3620  if(newCapacity != m_Capacity)
    3621  {
    3622  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3623  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3624  if(elementsToCopy != 0)
    3625  {
    3626  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3627  }
    3628  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3629  m_Capacity = newCapacity;
    3630  m_pArray = newArray;
    3631  }
    3632 
    3633  m_Count = newCount;
    3634  }
    3635 
    3636  void clear(bool freeMemory = false)
    3637  {
    3638  resize(0, freeMemory);
    3639  }
    3640 
    3641  void insert(size_t index, const T& src)
    3642  {
    3643  VMA_HEAVY_ASSERT(index <= m_Count);
    3644  const size_t oldCount = size();
    3645  resize(oldCount + 1);
    3646  if(index < oldCount)
    3647  {
    3648  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3649  }
    3650  m_pArray[index] = src;
    3651  }
    3652 
    3653  void remove(size_t index)
    3654  {
    3655  VMA_HEAVY_ASSERT(index < m_Count);
    3656  const size_t oldCount = size();
    3657  if(index < oldCount - 1)
    3658  {
    3659  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3660  }
    3661  resize(oldCount - 1);
    3662  }
    3663 
    3664  void push_back(const T& src)
    3665  {
    3666  const size_t newIndex = size();
    3667  resize(newIndex + 1);
    3668  m_pArray[newIndex] = src;
    3669  }
    3670 
    3671  void pop_back()
    3672  {
    3673  VMA_HEAVY_ASSERT(m_Count > 0);
    3674  resize(size() - 1);
    3675  }
    3676 
    3677  void push_front(const T& src)
    3678  {
    3679  insert(0, src);
    3680  }
    3681 
    3682  void pop_front()
    3683  {
    3684  VMA_HEAVY_ASSERT(m_Count > 0);
    3685  remove(0);
    3686  }
    3687 
    3688  typedef T* iterator;
    3689 
    3690  iterator begin() { return m_pArray; }
    3691  iterator end() { return m_pArray + m_Count; }
    3692 
    3693 private:
    3694  AllocatorT m_Allocator;
    3695  T* m_pArray;
    3696  size_t m_Count;
    3697  size_t m_Capacity;
    3698 };
    3699 
    3700 template<typename T, typename allocatorT>
    3701 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    3702 {
    3703  vec.insert(index, item);
    3704 }
    3705 
    3706 template<typename T, typename allocatorT>
    3707 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    3708 {
    3709  vec.remove(index);
    3710 }
    3711 
    3712 #endif // #if VMA_USE_STL_VECTOR
    3713 
    3714 template<typename CmpLess, typename VectorT>
    3715 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3716 {
    3717  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3718  vector.data(),
    3719  vector.data() + vector.size(),
    3720  value,
    3721  CmpLess()) - vector.data();
    3722  VmaVectorInsert(vector, indexToInsert, value);
    3723  return indexToInsert;
    3724 }
    3725 
    3726 template<typename CmpLess, typename VectorT>
    3727 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3728 {
    3729  CmpLess comparator;
    3730  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3731  vector.begin(),
    3732  vector.end(),
    3733  value,
    3734  comparator);
    3735  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3736  {
    3737  size_t indexToRemove = it - vector.begin();
    3738  VmaVectorRemove(vector, indexToRemove);
    3739  return true;
    3740  }
    3741  return false;
    3742 }
    3743 
    3744 template<typename CmpLess, typename IterT, typename KeyT>
    3745 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3746 {
    3747  CmpLess comparator;
    3748  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3749  beg, end, value, comparator);
    3750  if(it == end ||
    3751  (!comparator(*it, value) && !comparator(value, *it)))
    3752  {
    3753  return it;
    3754  }
    3755  return end;
    3756 }
    3757 
    3759 // class VmaPoolAllocator
    3760 
    3761 /*
    3762 Allocator for objects of type T using a list of arrays (pools) to speed up
    3763 allocation. Number of elements that can be allocated is not bounded because
    3764 allocator can create multiple blocks.
    3765 */
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees every block. Pointers previously returned by Alloc() become invalid.
    void Clear();
    // Returns storage for one T, reusing a free slot when any block has one,
    // creating a new block otherwise. The object is NOT constructed.
    T* Alloc();
    // Returns a slot obtained from Alloc() to its block's free list.
    void Free(T* ptr);

private:
    // A slot is either live (Value) or a link in its block's free list.
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // Fixed-size array of slots plus the head index of its intrusive free list.
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX when the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3796 
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    // A block must be able to hold at least one item.
    VMA_ASSERT(itemsPerBlock > 0);
}
    3805 
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    // Release every block owned by this allocator.
    Clear();
}
    3811 
    3812 template<typename T>
    3813 void VmaPoolAllocator<T>::Clear()
    3814 {
    3815  for(size_t i = m_ItemBlocks.size(); i--; )
    3816  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3817  m_ItemBlocks.clear();
    3818 }
    3819 
    3820 template<typename T>
    3821 T* VmaPoolAllocator<T>::Alloc()
    3822 {
    3823  for(size_t i = m_ItemBlocks.size(); i--; )
    3824  {
    3825  ItemBlock& block = m_ItemBlocks[i];
    3826  // This block has some free items: Use first one.
    3827  if(block.FirstFreeIndex != UINT32_MAX)
    3828  {
    3829  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3830  block.FirstFreeIndex = pItem->NextFreeIndex;
    3831  return &pItem->Value;
    3832  }
    3833  }
    3834 
    3835  // No block has free item: Create new one and use it.
    3836  ItemBlock& newBlock = CreateNewBlock();
    3837  Item* const pItem = &newBlock.pItems[0];
    3838  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3839  return &pItem->Value;
    3840 }
    3841 
    3842 template<typename T>
    3843 void VmaPoolAllocator<T>::Free(T* ptr)
    3844 {
    3845  // Search all memory blocks to find ptr.
    3846  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    3847  {
    3848  ItemBlock& block = m_ItemBlocks[i];
    3849 
    3850  // Casting to union.
    3851  Item* pItemPtr;
    3852  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    3853 
    3854  // Check if pItemPtr is in address range of this block.
    3855  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    3856  {
    3857  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    3858  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    3859  block.FirstFreeIndex = index;
    3860  return;
    3861  }
    3862  }
    3863  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    3864 }
    3865 
    3866 template<typename T>
    3867 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3868 {
    3869  ItemBlock newBlock = {
    3870  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3871 
    3872  m_ItemBlocks.push_back(newBlock);
    3873 
    3874  // Setup singly-linked list of all free items in this block.
    3875  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3876  newBlock.pItems[i].NextFreeIndex = i + 1;
    3877  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3878  return m_ItemBlocks.back();
    3879 }
    3880 
    3882 // class VmaRawList, VmaList
    3883 
    3884 #if VMA_USE_STL_LIST
    3885 
    3886 #define VmaList std::list
    3887 
    3888 #else // #if VMA_USE_STL_LIST
    3889 
// Node of the doubly linked list below: payload plus prev/next links.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Null for the front item.
    VmaListItem* pNext; // Null for the back item.
    T Value;
};
    3897 
// Doubly linked list. Nodes come from an internal VmaPoolAllocator, so
// individual pushes/pops do not hit the system allocator each time.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Append/prepend a node and return it; Value is left unassigned.
    ItemType* PushBack();
    ItemType* PushFront();
    // Append/prepend a node carrying a copy of 'value'.
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Backing storage for nodes.
    ItemType* m_pFront; // Null when the list is empty.
    ItemType* m_pBack;  // Null when the list is empty.
    size_t m_Count;
};
    3942 
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    // Pool nodes in chunks of 128 to amortize allocation cost.
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3952 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free:
    // the pool allocator frees all of its blocks in its own destructor.
}
    3959 
    3960 template<typename T>
    3961 void VmaRawList<T>::Clear()
    3962 {
    3963  if(IsEmpty() == false)
    3964  {
    3965  ItemType* pItem = m_pBack;
    3966  while(pItem != VMA_NULL)
    3967  {
    3968  ItemType* const pPrevItem = pItem->pPrev;
    3969  m_ItemAllocator.Free(pItem);
    3970  pItem = pPrevItem;
    3971  }
    3972  m_pFront = VMA_NULL;
    3973  m_pBack = VMA_NULL;
    3974  m_Count = 0;
    3975  }
    3976 }
    3977 
    3978 template<typename T>
    3979 VmaListItem<T>* VmaRawList<T>::PushBack()
    3980 {
    3981  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3982  pNewItem->pNext = VMA_NULL;
    3983  if(IsEmpty())
    3984  {
    3985  pNewItem->pPrev = VMA_NULL;
    3986  m_pFront = pNewItem;
    3987  m_pBack = pNewItem;
    3988  m_Count = 1;
    3989  }
    3990  else
    3991  {
    3992  pNewItem->pPrev = m_pBack;
    3993  m_pBack->pNext = pNewItem;
    3994  m_pBack = pNewItem;
    3995  ++m_Count;
    3996  }
    3997  return pNewItem;
    3998 }
    3999 
    4000 template<typename T>
    4001 VmaListItem<T>* VmaRawList<T>::PushFront()
    4002 {
    4003  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4004  pNewItem->pPrev = VMA_NULL;
    4005  if(IsEmpty())
    4006  {
    4007  pNewItem->pNext = VMA_NULL;
    4008  m_pFront = pNewItem;
    4009  m_pBack = pNewItem;
    4010  m_Count = 1;
    4011  }
    4012  else
    4013  {
    4014  pNewItem->pNext = m_pFront;
    4015  m_pFront->pPrev = pNewItem;
    4016  m_pFront = pNewItem;
    4017  ++m_Count;
    4018  }
    4019  return pNewItem;
    4020 }
    4021 
    4022 template<typename T>
    4023 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    4024 {
    4025  ItemType* const pNewItem = PushBack();
    4026  pNewItem->Value = value;
    4027  return pNewItem;
    4028 }
    4029 
    4030 template<typename T>
    4031 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    4032 {
    4033  ItemType* const pNewItem = PushFront();
    4034  pNewItem->Value = value;
    4035  return pNewItem;
    4036 }
    4037 
    4038 template<typename T>
    4039 void VmaRawList<T>::PopBack()
    4040 {
    4041  VMA_HEAVY_ASSERT(m_Count > 0);
    4042  ItemType* const pBackItem = m_pBack;
    4043  ItemType* const pPrevItem = pBackItem->pPrev;
    4044  if(pPrevItem != VMA_NULL)
    4045  {
    4046  pPrevItem->pNext = VMA_NULL;
    4047  }
    4048  m_pBack = pPrevItem;
    4049  m_ItemAllocator.Free(pBackItem);
    4050  --m_Count;
    4051 }
    4052 
    4053 template<typename T>
    4054 void VmaRawList<T>::PopFront()
    4055 {
    4056  VMA_HEAVY_ASSERT(m_Count > 0);
    4057  ItemType* const pFrontItem = m_pFront;
    4058  ItemType* const pNextItem = pFrontItem->pNext;
    4059  if(pNextItem != VMA_NULL)
    4060  {
    4061  pNextItem->pPrev = VMA_NULL;
    4062  }
    4063  m_pFront = pNextItem;
    4064  m_ItemAllocator.Free(pFrontItem);
    4065  --m_Count;
    4066 }
    4067 
    4068 template<typename T>
    4069 void VmaRawList<T>::Remove(ItemType* pItem)
    4070 {
    4071  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4072  VMA_HEAVY_ASSERT(m_Count > 0);
    4073 
    4074  if(pItem->pPrev != VMA_NULL)
    4075  {
    4076  pItem->pPrev->pNext = pItem->pNext;
    4077  }
    4078  else
    4079  {
    4080  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4081  m_pFront = pItem->pNext;
    4082  }
    4083 
    4084  if(pItem->pNext != VMA_NULL)
    4085  {
    4086  pItem->pNext->pPrev = pItem->pPrev;
    4087  }
    4088  else
    4089  {
    4090  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4091  m_pBack = pItem->pPrev;
    4092  }
    4093 
    4094  m_ItemAllocator.Free(pItem);
    4095  --m_Count;
    4096 }
    4097 
    4098 template<typename T>
    4099 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4100 {
    4101  if(pItem != VMA_NULL)
    4102  {
    4103  ItemType* const prevItem = pItem->pPrev;
    4104  ItemType* const newItem = m_ItemAllocator.Alloc();
    4105  newItem->pPrev = prevItem;
    4106  newItem->pNext = pItem;
    4107  pItem->pPrev = newItem;
    4108  if(prevItem != VMA_NULL)
    4109  {
    4110  prevItem->pNext = newItem;
    4111  }
    4112  else
    4113  {
    4114  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4115  m_pFront = newItem;
    4116  }
    4117  ++m_Count;
    4118  return newItem;
    4119  }
    4120  else
    4121  return PushBack();
    4122 }
    4123 
    4124 template<typename T>
    4125 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4126 {
    4127  if(pItem != VMA_NULL)
    4128  {
    4129  ItemType* const nextItem = pItem->pNext;
    4130  ItemType* const newItem = m_ItemAllocator.Alloc();
    4131  newItem->pNext = nextItem;
    4132  newItem->pPrev = pItem;
    4133  pItem->pNext = newItem;
    4134  if(nextItem != VMA_NULL)
    4135  {
    4136  nextItem->pPrev = newItem;
    4137  }
    4138  else
    4139  {
    4140  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4141  m_pBack = newItem;
    4142  }
    4143  ++m_Count;
    4144  return newItem;
    4145  }
    4146  else
    4147  return PushFront();
    4148 }
    4149 
    4150 template<typename T>
    4151 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4152 {
    4153  ItemType* const newItem = InsertBefore(pItem);
    4154  newItem->Value = value;
    4155  return newItem;
    4156 }
    4157 
    4158 template<typename T>
    4159 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4160 {
    4161  ItemType* const newItem = InsertAfter(pItem);
    4162  newItem->Value = value;
    4163  return newItem;
    4164 }
    4165 
// std::list-like wrapper over VmaRawList providing iterator/const_iterator.
// AllocatorT must expose an m_pCallbacks member (see VmaStlAllocator).
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Bidirectional iterator. A null m_pItem represents end().
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            // Stepping back from end() yields the last item of the list.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        // Comparing iterators from different lists is a usage error.
        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem; // Null means end().

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only counterpart of iterator; implicitly convertible from it.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            // Stepping back from cend() yields the last item of the list.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem; // Null means cend().

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts before 'it'; inserting before end() appends.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4350 
    4351 #endif // #if VMA_USE_STL_LIST
    4352 
    4354 // class VmaMap
    4355 
    4356 // Unused in this version.
    4357 #if 0
    4358 
    4359 #if VMA_USE_STL_UNORDERED_MAP
    4360 
    4361 #define VmaPair std::pair
    4362 
    4363 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4364  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4365 
    4366 #else // #if VMA_USE_STL_UNORDERED_MAP
    4367 
// Minimal stand-in for std::pair, used by VmaMap below.
// NOTE: this definition lives inside an `#if 0` block - currently unused.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4377 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Implemented as a VmaVector of pairs kept sorted by key, so find() is a
binary search. Currently disabled (#if 0) - unused in this version.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator; // Raw pointer into the backing vector.

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4400 
    4401 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4402 
// Orders pairs (or a pair against a bare key) by the 'first' member only.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4415 
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    // Binary-search the sorted vector for the insertion point, then shift-insert.
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    4426 
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    // Lower-bound search by key; returns end() when the key is absent.
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
    4444 
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    // Shift-erase from the backing vector; keeps the sorted order intact.
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4450 
    4451 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4452 
    4453 #endif // #if 0
    4454 
    4456 
    4457 class VmaDeviceMemoryBlock;
    4458 
// Selects which cache-maintenance operation to perform on a memory range.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4460 
// Internal representation of a single allocation. It is either a
// suballocation inside a VmaDeviceMemoryBlock (ALLOCATION_TYPE_BLOCK) or an
// allocation with its own VkDeviceMemory (ALLOCATION_TYPE_DEDICATED); the
// anonymous union below stores the per-type state, selected by m_Type.
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Bit of m_MapCount marking a persistently mapped allocation.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // Set when m_pUserData holds an owned string (see IsUserDataString()).
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        // Any bits outside the persistent-map flag are outstanding map refs.
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes this object as a suballocation inside 'block'.
    // Must be called on a freshly constructed object (type NONE).
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this object as an allocation that is born lost:
    // block-type with no block; m_LastUseFrameIndex must already be
    // VMA_FRAME_INDEX_LOST (asserted below).
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeSize(VkDeviceSize newSize);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for block allocations (asserted).
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with the statistics of this single dedicated allocation.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        // Min is UINT64_MAX / max is 0 so that merging stats works correctly
        // when there are no unused ranges.
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member is selected by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4679 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;       // Offset of this region from the beginning of the block.
    VkDeviceSize size;         // Size of this region in bytes.
    VmaAllocation hAllocation; // Allocation occupying this region; null items denote free/unused entries (see the null-item counters in VmaBlockMetadata_Linear).
    VmaSuballocationType type; // VmaSuballocationType of the region (free / buffer / image / ...).
};
    4691 
    4692 // Comparator for offsets.
    4693 struct VmaSuballocationOffsetLess
    4694 {
    4695  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4696  {
    4697  return lhs.offset < rhs.offset;
    4698  }
    4699 };
    4700 struct VmaSuballocationOffsetGreater
    4701 {
    4702  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4703  {
    4704  return lhs.offset > rhs.offset;
    4705  }
    4706 };
    4707 
// List of suballocations describing the entire content of one memory block,
// kept ordered by offset.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
// Used by VmaAllocationRequest::CalcCost() to weigh "make N allocations lost"
// against the byte overlap when comparing candidate allocation placements.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4712 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;      // Proposed offset of the new allocation inside the block.
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item; // See struct-level comment above for interpretation.
    size_t itemsToMakeLostCount;         // See struct-level comment above.
    void* customData; // Opaque slot for the metadata implementation that produced this request — TODO confirm which implementations use it.

    // Comparable "cost" of accepting this request: bytes of overlapped live
    // items plus a fixed per-lost-allocation penalty (VMA_LOST_ALLOCATION_COST).
    // Lower is better.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4740 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class; concrete strategies are VmaBlockMetadata_Generic,
VmaBlockMetadata_Linear and VmaBlockMetadata_Buddy below.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Always call after construction, before any other method.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Adds this block's statistics to inoutStats. Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    // Writes this block's layout as JSON for statistics dumps.
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations covered by pAllocationRequest, so that the
    // request can then be passed to Alloc(). Returns false on failure.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost every allocation in this block that can become lost; returns the count.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    // Checks margin magic values around allocations in mapped block data.
    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

    // Tries to resize (grow or shrink) space for given allocation, in place.
    // Default implementation does not support resizing and returns false.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Shared helpers for implementing PrintDetailedMap() in derived classes.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size; // Total size of the block; set by Init().
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4829 
// Helper for Validate() implementations: if cond is false, asserts with the
// stringized condition and makes the enclosing bool-returning function
// return false. do/while(false) makes it statement-safe.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4834 
/*
Default block metadata implementation: keeps a list of suballocations
(free and used, ordered by offset) plus a size-sorted index of large free
ranges for best-fit searches.
*/
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Used allocations = all suballocations minus the free ones.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

private:
    uint32_t m_FreeCount;        // Number of FREE suballocations in m_Suballocations.
    VkDeviceSize m_SumFreeSize;  // Sum of sizes of all FREE suballocations.
    VmaSuballocationList m_Suballocations; // All suballocations, ordered by offset.
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4927 
    4928 /*
    4929 Allocations and their references in internal data structure look like this:
    4930 
    4931 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4932 
    4933  0 +-------+
    4934  | |
    4935  | |
    4936  | |
    4937  +-------+
    4938  | Alloc | 1st[m_1stNullItemsBeginCount]
    4939  +-------+
    4940  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4941  +-------+
    4942  | ... |
    4943  +-------+
    4944  | Alloc | 1st[1st.size() - 1]
    4945  +-------+
    4946  | |
    4947  | |
    4948  | |
    4949 GetSize() +-------+
    4950 
    4951 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4952 
    4953  0 +-------+
    4954  | Alloc | 2nd[0]
    4955  +-------+
    4956  | Alloc | 2nd[1]
    4957  +-------+
    4958  | ... |
    4959  +-------+
    4960  | Alloc | 2nd[2nd.size() - 1]
    4961  +-------+
    4962  | |
    4963  | |
    4964  | |
    4965  +-------+
    4966  | Alloc | 1st[m_1stNullItemsBeginCount]
    4967  +-------+
    4968  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4969  +-------+
    4970  | ... |
    4971  +-------+
    4972  | Alloc | 1st[1st.size() - 1]
    4973  +-------+
    4974  | |
    4975 GetSize() +-------+
    4976 
    4977 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4978 
    4979  0 +-------+
    4980  | |
    4981  | |
    4982  | |
    4983  +-------+
    4984  | Alloc | 1st[m_1stNullItemsBeginCount]
    4985  +-------+
    4986  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4987  +-------+
    4988  | ... |
    4989  +-------+
    4990  | Alloc | 1st[1st.size() - 1]
    4991  +-------+
    4992  | |
    4993  | |
    4994  | |
    4995  +-------+
    4996  | Alloc | 2nd[2nd.size() - 1]
    4997  +-------+
    4998  | ... |
    4999  +-------+
    5000  | Alloc | 2nd[1]
    5001  +-------+
    5002  | Alloc | 2nd[0]
    5003 GetSize() +-------+
    5004 
    5005 */
/*
Linear (ring-buffer / stack / double-stack) block metadata implementation.
See the large diagram comment above for the three layouts selected by
m_2ndVectorMode.
*/
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    // NOTE: no ResizeAllocation override — inherits the base "unsupported" behavior.

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex; // Which of the two vectors currently plays the role of "1st".
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Accessors resolving the ping-pong indirection described above.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
    5104 
    5105 /*
    5106 - GetSize() is the original size of allocated memory block.
    5107 - m_UsableSize is this size aligned down to a power of two.
    5108  All allocations and calculations happen relative to m_UsableSize.
    5109 - GetUnusableSize() is the difference between them.
    5110  It is reported as a separate, unused range, not available for allocations.
    5111 
    5112 Node at level 0 has size = m_UsableSize.
    5113 Each next level contains nodes with size 2 times smaller than current level.
    5114 m_LevelCount is the maximum number of levels to use in the current object.
    5115 */
/*
Buddy-allocator block metadata implementation: a binary tree of nodes whose
sizes halve at each level, with per-level free lists. See the comment above
for the relationship between GetSize(), m_UsableSize and GetUnusableSize().
*/
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // The unusable tail (size not covered by the power-of-two m_UsableSize)
    // is counted as free for statistics purposes.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty when the root node is one big FREE node.
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    // Both public free entry points funnel into the private FreeAtOffset(alloc, offset).
    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32; // Nodes are never split below this size.
    static const size_t MAX_LEVELS = 30;          // Upper bound on tree depth / free-list count.

    // Accumulators filled while walking the tree in Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // One node of the buddy tree. Its size is implied by its level
    // (see LevelToNodeSize), not stored.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent; // Null for the root.
        Node* buddy;  // The sibling node covering the adjacent half of the parent.

        // Per-type payload; which member is active depends on `type`.
        union
        {
            struct
            {
                Node* prev;
                Node* next;
            } free;        // Intrusive links within m_FreeList[level].
            struct
            {
                VmaAllocation alloc;
            } allocation;  // The allocation occupying this node.
            struct
            {
                Node* leftChild; // Right child is leftChild->buddy.
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Doubly-linked free list per level.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Node size halves at each level down from m_UsableSize at level 0.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5252 
/*
Represents a single block of device memory (`VkDeviceMemory`) with all the
data about its regions (aka suballocations, #VmaAllocation), assigned and free.

Thread-safety: This class must be externally synchronized.
*/
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of the block's suballocations; concrete type chosen in Init()
    // based on the algorithm parameter.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); // Destroy() must have been called.
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Maps the block's memory, reference-counted by `count`. ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Write/verify corruption-detection magic values around a suballocation.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;   // Reference count of outstanding Map() calls.
    void* m_pMappedData;   // Non-null while the block is mapped.
};
    5321 
    5322 struct VmaPointerLess
    5323 {
    5324  bool operator()(const void* lhs, const void* rhs) const
    5325  {
    5326  return lhs < rhs;
    5327  }
    5328 };
    5329 
    5330 class VmaDefragmentator;
    5331 
/*
Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
Vulkan memory type.

Synchronized internally with a mutex.
*/
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Pre-creates m_MinBlockCount blocks. Call after construction.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates from an existing block or creates a new one, subject to
    // m_MaxBlockCount and the requested create flags.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Lazily creates (or returns the existing) defragmentator for this vector.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    bool m_HasEmptyBlock;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one block that is completely empty (tracked via
    m_HasEmptyBlock) - a hysteresis to avoid pessimistic case of alternating
    creation and destruction of a VkDeviceMemory. */
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId; // Id assigned to the next block created by CreateBlock().

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5450 
// Implementation behind the public VmaPool handle: a custom memory pool,
// which is essentially one VmaBlockVector configured from VmaPoolCreateInfo,
// plus a numeric id used in statistics/JSON dumps.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // May be set only once, after creation (asserts m_Id is still 0).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5473 
// Performs defragmentation of allocations within a single VmaBlockVector:
// moves registered allocations between blocks to compact memory usage.
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    // The block vector whose allocations are being defragmented.
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Statistics accumulated across defragmentation rounds.
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One allocation registered for defragmentation, plus an optional output
    // flag (*m_pChanged) reported to the caller when the allocation moves.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Comparator: orders AllocationInfo by allocation size, descending.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block bookkeeping used while defragmenting.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        // True when the block contains allocations that were NOT registered
        // for defragmentation (see CalcHasNonMovableAllocations).
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations when it holds more allocations
        // than were registered for defragmentation on it.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // Sorts registered allocations largest-first.
        // (Name typo "Descecnding" is preserved - it is part of the interface.)
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Orders BlockInfo pointers by their underlying block pointer. The
    // heterogeneous overload allows searching by a raw block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one pass of moves, bounded by the given budget.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    // Total bytes moved so far by this defragmentator.
    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    // Total number of allocations moved so far.
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation to be considered for defragmentation.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    // Runs defragmentation within the given byte/allocation budget.
    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5603 
    5604 #if VMA_RECORDING_ENABLED
    5605 
// Records a trace of allocator calls to a file for later analysis/replay.
// Compiled in only when VMA_RECORDING_ENABLED is nonzero.
class VmaRecorder
{
public:
    VmaRecorder();
    // Opens the output file per settings. useMutex enables locking around
    // writes so multiple threads can record safely.
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a header describing the physical device and memory configuration.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    // One Record* method per traced public allocator entry point.
    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordResizeAllocation(
        uint32_t frameIndex,
        VmaAllocation allocation,
        VkDeviceSize newSize);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Per-call context captured for every recorded entry.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Formats an allocation's pUserData either as the string it points to or
    // as a pointer value rendered into m_PtrStr.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        // Scratch buffer m_Str may point into when formatting a raw pointer.
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    VMA_MUTEX m_FileMutex;
    // Timer state used to produce CallParams::time.
    // NOTE(review): presumably a high-resolution counter frequency and its
    // value at Init - confirm against GetBasicParams implementation.
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5705 
    5706 #endif // #if VMA_RECORDING_ENABLED
    5707 
// Main allocator object. Implementation of the VmaAllocator handle.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    // True if internal synchronization (mutexes) is enabled.
    bool m_UseMutex;
    // True when the KHR dedicated allocation code paths are enabled
    // (see GetBufferMemoryRequirements / AllocateDedicatedMemory).
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    // Whether the user supplied custom CPU allocation callbacks.
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-provided CPU allocation callbacks, or null when defaults are used.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device bufferImageGranularity, raised to the debug minimum if configured.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent memory additionally requires nonCoherentAtomSize alignment.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Low-level VkDeviceMemory allocation/free wrappers.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    // Fills the allocation's memory with the given byte pattern.
    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5909 
    5911 // Memory allocation #2 after VmaAllocator_T definition
    5912 
    5913 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5914 {
    5915  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5916 }
    5917 
    5918 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5919 {
    5920  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5921 }
    5922 
    5923 template<typename T>
    5924 static T* VmaAllocate(VmaAllocator hAllocator)
    5925 {
    5926  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5927 }
    5928 
    5929 template<typename T>
    5930 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5931 {
    5932  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5933 }
    5934 
    5935 template<typename T>
    5936 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5937 {
    5938  if(ptr != VMA_NULL)
    5939  {
    5940  ptr->~T();
    5941  VmaFree(hAllocator, ptr);
    5942  }
    5943 }
    5944 
    5945 template<typename T>
    5946 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5947 {
    5948  if(ptr != VMA_NULL)
    5949  {
    5950  for(size_t i = count; i--; )
    5951  ptr[i].~T();
    5952  VmaFree(hAllocator, ptr);
    5953  }
    5954 }
    5955 
    5957 // VmaStringBuilder
    5958 
    5959 #if VMA_STATS_STRING_ENABLED
    5960 
// Minimal growable character buffer used to build the statistics string.
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    // Length in characters. NOTE(review): the buffer is not null-terminated
    // by the builder itself - callers should rely on GetLength(); confirm at
    // the final consumer.
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5978 
    5979 void VmaStringBuilder::Add(const char* pStr)
    5980 {
    5981  const size_t strLen = strlen(pStr);
    5982  if(strLen > 0)
    5983  {
    5984  const size_t oldCount = m_Data.size();
    5985  m_Data.resize(oldCount + strLen);
    5986  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5987  }
    5988 }
    5989 
    5990 void VmaStringBuilder::AddNumber(uint32_t num)
    5991 {
    5992  char buf[11];
    5993  VmaUint32ToStr(buf, sizeof(buf), num);
    5994  Add(buf);
    5995 }
    5996 
    5997 void VmaStringBuilder::AddNumber(uint64_t num)
    5998 {
    5999  char buf[21];
    6000  VmaUint64ToStr(buf, sizeof(buf), num);
    6001  Add(buf);
    6002 }
    6003 
    6004 void VmaStringBuilder::AddPointer(const void* ptr)
    6005 {
    6006  char buf[21];
    6007  VmaPtrToStr(buf, sizeof(buf), ptr);
    6008  Add(buf);
    6009 }
    6010 
    6011 #endif // #if VMA_STATS_STRING_ENABLED
    6012 
    6014 // VmaJsonWriter
    6015 
    6016 #if VMA_STATS_STRING_ENABLED
    6017 
// Emits JSON into a VmaStringBuilder. Maintains a stack of currently open
// objects/arrays so separators (", ", ": ") and indentation are inserted
// automatically; asserts on misuse (e.g. a non-string key inside an object).
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    // Asserts that every string and collection has been closed.
    ~VmaJsonWriter();

    // Opens "{". singleLine suppresses newlines/indentation inside it.
    void BeginObject(bool singleLine = false);
    void EndObject();

    // Opens "[". singleLine suppresses newlines/indentation inside it.
    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete quoted, escaped string value.
    void WriteString(const char* pStr);
    // Begin/Continue/End allow assembling one string value from pieces.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Values written so far in this collection. Inside an object, an even
        // count means the next value must be a key (a string).
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    // Emits separator/indentation before a new value; asserts keys are strings.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    6066 
// String emitted once per nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
    6068 
    6069 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    6070  m_SB(sb),
    6071  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    6072  m_InsideString(false)
    6073 {
    6074 }
    6075 
    6076 VmaJsonWriter::~VmaJsonWriter()
    6077 {
    6078  VMA_ASSERT(!m_InsideString);
    6079  VMA_ASSERT(m_Stack.empty());
    6080 }
    6081 
    6082 void VmaJsonWriter::BeginObject(bool singleLine)
    6083 {
    6084  VMA_ASSERT(!m_InsideString);
    6085 
    6086  BeginValue(false);
    6087  m_SB.Add('{');
    6088 
    6089  StackItem item;
    6090  item.type = COLLECTION_TYPE_OBJECT;
    6091  item.valueCount = 0;
    6092  item.singleLineMode = singleLine;
    6093  m_Stack.push_back(item);
    6094 }
    6095 
    6096 void VmaJsonWriter::EndObject()
    6097 {
    6098  VMA_ASSERT(!m_InsideString);
    6099 
    6100  WriteIndent(true);
    6101  m_SB.Add('}');
    6102 
    6103  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6104  m_Stack.pop_back();
    6105 }
    6106 
    6107 void VmaJsonWriter::BeginArray(bool singleLine)
    6108 {
    6109  VMA_ASSERT(!m_InsideString);
    6110 
    6111  BeginValue(false);
    6112  m_SB.Add('[');
    6113 
    6114  StackItem item;
    6115  item.type = COLLECTION_TYPE_ARRAY;
    6116  item.valueCount = 0;
    6117  item.singleLineMode = singleLine;
    6118  m_Stack.push_back(item);
    6119 }
    6120 
    6121 void VmaJsonWriter::EndArray()
    6122 {
    6123  VMA_ASSERT(!m_InsideString);
    6124 
    6125  WriteIndent(true);
    6126  m_SB.Add(']');
    6127 
    6128  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6129  m_Stack.pop_back();
    6130 }
    6131 
    6132 void VmaJsonWriter::WriteString(const char* pStr)
    6133 {
    6134  BeginString(pStr);
    6135  EndString();
    6136 }
    6137 
    6138 void VmaJsonWriter::BeginString(const char* pStr)
    6139 {
    6140  VMA_ASSERT(!m_InsideString);
    6141 
    6142  BeginValue(true);
    6143  m_SB.Add('"');
    6144  m_InsideString = true;
    6145  if(pStr != VMA_NULL && pStr[0] != '\0')
    6146  {
    6147  ContinueString(pStr);
    6148  }
    6149 }
    6150 
    6151 void VmaJsonWriter::ContinueString(const char* pStr)
    6152 {
    6153  VMA_ASSERT(m_InsideString);
    6154 
    6155  const size_t strLen = strlen(pStr);
    6156  for(size_t i = 0; i < strLen; ++i)
    6157  {
    6158  char ch = pStr[i];
    6159  if(ch == '\\')
    6160  {
    6161  m_SB.Add("\\\\");
    6162  }
    6163  else if(ch == '"')
    6164  {
    6165  m_SB.Add("\\\"");
    6166  }
    6167  else if(ch >= 32)
    6168  {
    6169  m_SB.Add(ch);
    6170  }
    6171  else switch(ch)
    6172  {
    6173  case '\b':
    6174  m_SB.Add("\\b");
    6175  break;
    6176  case '\f':
    6177  m_SB.Add("\\f");
    6178  break;
    6179  case '\n':
    6180  m_SB.Add("\\n");
    6181  break;
    6182  case '\r':
    6183  m_SB.Add("\\r");
    6184  break;
    6185  case '\t':
    6186  m_SB.Add("\\t");
    6187  break;
    6188  default:
    6189  VMA_ASSERT(0 && "Character not currently supported.");
    6190  break;
    6191  }
    6192  }
    6193 }
    6194 
    6195 void VmaJsonWriter::ContinueString(uint32_t n)
    6196 {
    6197  VMA_ASSERT(m_InsideString);
    6198  m_SB.AddNumber(n);
    6199 }
    6200 
    6201 void VmaJsonWriter::ContinueString(uint64_t n)
    6202 {
    6203  VMA_ASSERT(m_InsideString);
    6204  m_SB.AddNumber(n);
    6205 }
    6206 
    6207 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    6208 {
    6209  VMA_ASSERT(m_InsideString);
    6210  m_SB.AddPointer(ptr);
    6211 }
    6212 
    6213 void VmaJsonWriter::EndString(const char* pStr)
    6214 {
    6215  VMA_ASSERT(m_InsideString);
    6216  if(pStr != VMA_NULL && pStr[0] != '\0')
    6217  {
    6218  ContinueString(pStr);
    6219  }
    6220  m_SB.Add('"');
    6221  m_InsideString = false;
    6222 }
    6223 
    6224 void VmaJsonWriter::WriteNumber(uint32_t n)
    6225 {
    6226  VMA_ASSERT(!m_InsideString);
    6227  BeginValue(false);
    6228  m_SB.AddNumber(n);
    6229 }
    6230 
    6231 void VmaJsonWriter::WriteNumber(uint64_t n)
    6232 {
    6233  VMA_ASSERT(!m_InsideString);
    6234  BeginValue(false);
    6235  m_SB.AddNumber(n);
    6236 }
    6237 
    6238 void VmaJsonWriter::WriteBool(bool b)
    6239 {
    6240  VMA_ASSERT(!m_InsideString);
    6241  BeginValue(false);
    6242  m_SB.Add(b ? "true" : "false");
    6243 }
    6244 
    6245 void VmaJsonWriter::WriteNull()
    6246 {
    6247  VMA_ASSERT(!m_InsideString);
    6248  BeginValue(false);
    6249  m_SB.Add("null");
    6250 }
    6251 
    6252 void VmaJsonWriter::BeginValue(bool isString)
    6253 {
    6254  if(!m_Stack.empty())
    6255  {
    6256  StackItem& currItem = m_Stack.back();
    6257  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6258  currItem.valueCount % 2 == 0)
    6259  {
    6260  VMA_ASSERT(isString);
    6261  }
    6262 
    6263  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6264  currItem.valueCount % 2 != 0)
    6265  {
    6266  m_SB.Add(": ");
    6267  }
    6268  else if(currItem.valueCount > 0)
    6269  {
    6270  m_SB.Add(", ");
    6271  WriteIndent();
    6272  }
    6273  else
    6274  {
    6275  WriteIndent();
    6276  }
    6277  ++currItem.valueCount;
    6278  }
    6279 }
    6280 
    6281 void VmaJsonWriter::WriteIndent(bool oneLess)
    6282 {
    6283  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6284  {
    6285  m_SB.AddNewLine();
    6286 
    6287  size_t count = m_Stack.size();
    6288  if(count > 0 && oneLess)
    6289  {
    6290  --count;
    6291  }
    6292  for(size_t i = 0; i < count; ++i)
    6293  {
    6294  m_SB.Add(INDENT);
    6295  }
    6296  }
    6297 }
    6298 
    6299 #endif // #if VMA_STATS_STRING_ENABLED
    6300 
    6302 
    6303 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6304 {
    6305  if(IsUserDataString())
    6306  {
    6307  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6308 
    6309  FreeUserDataString(hAllocator);
    6310 
    6311  if(pUserData != VMA_NULL)
    6312  {
    6313  const char* const newStrSrc = (char*)pUserData;
    6314  const size_t newStrLen = strlen(newStrSrc);
    6315  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6316  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6317  m_pUserData = newStrDst;
    6318  }
    6319  }
    6320  else
    6321  {
    6322  m_pUserData = pUserData;
    6323  }
    6324 }
    6325 
// Re-points this block allocation at a new block/offset (e.g. after the
// allocation's contents were moved). Transfers the mapping reference count
// from the old block to the new one so mapped pointers stay balanced.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        // Map count without the persistent-map flag bit...
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        // ...plus one reference if the allocation is persistently mapped.
        if(IsPersistentMap())
            ++mapRefCount;
        // Order matters: release references on the old block first, then
        // acquire the same number on the new block.
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    6347 
    6348 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
    6349 {
    6350  VMA_ASSERT(newSize > 0);
    6351  m_Size = newSize;
    6352 }
    6353 
    6354 VkDeviceSize VmaAllocation_T::GetOffset() const
    6355 {
    6356  switch(m_Type)
    6357  {
    6358  case ALLOCATION_TYPE_BLOCK:
    6359  return m_BlockAllocation.m_Offset;
    6360  case ALLOCATION_TYPE_DEDICATED:
    6361  return 0;
    6362  default:
    6363  VMA_ASSERT(0);
    6364  return 0;
    6365  }
    6366 }
    6367 
    6368 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6369 {
    6370  switch(m_Type)
    6371  {
    6372  case ALLOCATION_TYPE_BLOCK:
    6373  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6374  case ALLOCATION_TYPE_DEDICATED:
    6375  return m_DedicatedAllocation.m_hMemory;
    6376  default:
    6377  VMA_ASSERT(0);
    6378  return VK_NULL_HANDLE;
    6379  }
    6380 }
    6381 
    6382 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6383 {
    6384  switch(m_Type)
    6385  {
    6386  case ALLOCATION_TYPE_BLOCK:
    6387  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6388  case ALLOCATION_TYPE_DEDICATED:
    6389  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6390  default:
    6391  VMA_ASSERT(0);
    6392  return UINT32_MAX;
    6393  }
    6394 }
    6395 
    6396 void* VmaAllocation_T::GetMappedData() const
    6397 {
    6398  switch(m_Type)
    6399  {
    6400  case ALLOCATION_TYPE_BLOCK:
    6401  if(m_MapCount != 0)
    6402  {
    6403  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6404  VMA_ASSERT(pBlockData != VMA_NULL);
    6405  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6406  }
    6407  else
    6408  {
    6409  return VMA_NULL;
    6410  }
    6411  break;
    6412  case ALLOCATION_TYPE_DEDICATED:
    6413  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6414  return m_DedicatedAllocation.m_pMappedData;
    6415  default:
    6416  VMA_ASSERT(0);
    6417  return VMA_NULL;
    6418  }
    6419 }
    6420 
    6421 bool VmaAllocation_T::CanBecomeLost() const
    6422 {
    6423  switch(m_Type)
    6424  {
    6425  case ALLOCATION_TYPE_BLOCK:
    6426  return m_BlockAllocation.m_CanBecomeLost;
    6427  case ALLOCATION_TYPE_DEDICATED:
    6428  return false;
    6429  default:
    6430  VMA_ASSERT(0);
    6431  return false;
    6432  }
    6433 }
    6434 
    // Returns the custom pool this allocation belongs to.
    // Valid only for block allocations; asserts otherwise.
    6435 VmaPool VmaAllocation_T::GetPool() const
    6436 {
    6437  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    6438  return m_BlockAllocation.m_hPool;
    6439 }
    6440 
    // Tries to mark this allocation as lost. Succeeds (returns true) only if the
    // allocation's last use is older than frameInUseCount frames relative to
    // currentFrameIndex; the transition is performed with an atomic compare-exchange.
    6441 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    6442 {
    6443  VMA_ASSERT(CanBecomeLost());
    6444 
    6445  /*
    6446  Warning: This is a carefully designed algorithm.
    6447  Do not modify unless you really know what you're doing :)
    6448  */
    6449  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    6450  for(;;)
    6451  {
    6452  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    6453  {
    // Already lost — callers should not ask again; treated as a bug.
    6454  VMA_ASSERT(0);
    6455  return false;
    6456  }
    6457  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    6458  {
    // Still (potentially) in use by the GPU within the in-use window.
    6459  return false;
    6460  }
    6461  else // Last use time earlier than current time.
    6462  {
    // NOTE(review): presumably CompareExchangeLastUseFrameIndex refreshes
    // localLastUseFrameIndex with the current value on failure, CAS-style,
    // so the loop re-evaluates with up-to-date state — confirm in its definition.
    6463  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    6464  {
    6465  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    6466  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    6467  return true;
    6468  }
    6469  }
    6470  }
    6471 }
    6472 
    6473 #if VMA_STATS_STRING_ENABLED
    6474 
    6475 // Correspond to values of enum VmaSuballocationType.
    // Indexed directly by the enum value (see PrintParameters); this array must
    // stay in the same order and of the same length as the enum.
    6476 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    6477  "FREE",
    6478  "UNKNOWN",
    6479  "BUFFER",
    6480  "IMAGE_UNKNOWN",
    6481  "IMAGE_LINEAR",
    6482  "IMAGE_OPTIMAL",
    6483 };
    6484 
    // Emits this allocation's parameters as JSON key/value pairs into an object
    // the caller has already opened (no BeginObject/EndObject here).
    6485 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
    6486 {
    6487  json.WriteString("Type");
    6488  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
    6489 
    6490  json.WriteString("Size");
    6491  json.WriteNumber(m_Size);
    6492 
    6493  if(m_pUserData != VMA_NULL)
    6494  {
    6495  json.WriteString("UserData");
    6496  if(IsUserDataString())
    6497  {
    // User data is an owned string copy — print it verbatim.
    6498  json.WriteString((const char*)m_pUserData);
    6499  }
    6500  else
    6501  {
    // Opaque pointer — print its numeric value instead.
    6502  json.BeginString();
    6503  json.ContinueString_Pointer(m_pUserData);
    6504  json.EndString();
    6505  }
    6506  }
    6507 
    6508  json.WriteString("CreationFrameIndex");
    6509  json.WriteNumber(m_CreationFrameIndex);
    6510 
    6511  json.WriteString("LastUseFrameIndex");
    6512  json.WriteNumber(GetLastUseFrameIndex());
    6513 
    6514  if(m_BufferImageUsage != 0)
    6515  {
    6516  json.WriteString("Usage");
    6517  json.WriteNumber(m_BufferImageUsage);
    6518  }
    6519 }
    6520 
    6521 #endif
    6522 
    // Releases the heap-allocated copy of the user-data string (made when the
    // USER_DATA_COPY_STRING flag is in effect) and resets the pointer.
    6523 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6524 {
    6525  VMA_ASSERT(IsUserDataString());
    6526  if(m_pUserData != VMA_NULL)
    6527  {
    6528  char* const oldStr = (char*)m_pUserData;
    6529  const size_t oldStrLen = strlen(oldStr);
    // +1 to free the NUL terminator that was allocated along with the characters.
    6530  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6531  m_pUserData = VMA_NULL;
    6532  }
    6533 }
    6534 
    6535 void VmaAllocation_T::BlockAllocMap()
    6536 {
    6537  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6538 
    6539  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6540  {
    6541  ++m_MapCount;
    6542  }
    6543  else
    6544  {
    6545  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6546  }
    6547 }
    6548 
    6549 void VmaAllocation_T::BlockAllocUnmap()
    6550 {
    6551  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6552 
    6553  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6554  {
    6555  --m_MapCount;
    6556  }
    6557  else
    6558  {
    6559  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6560  }
    6561 }
    6562 
    // Maps a dedicated allocation. First map calls vkMapMemory over the whole
    // range; subsequent maps just bump the reference count and return the cached
    // pointer. Returns VK_SUCCESS or the vkMapMemory error / map-count overflow error.
    6563 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    6564 {
    6565  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6566 
    6567  if(m_MapCount != 0)
    6568  {
    // Already mapped: reuse cached pointer if the 7-bit count has room.
    6569  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6570  {
    6571  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    6572  *ppData = m_DedicatedAllocation.m_pMappedData;
    6573  ++m_MapCount;
    6574  return VK_SUCCESS;
    6575  }
    6576  else
    6577  {
    6578  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    6579  return VK_ERROR_MEMORY_MAP_FAILED;
    6580  }
    6581  }
    6582  else
    6583  {
    // First map: call through the dispatch table so custom function pointers are honored.
    6584  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    6585  hAllocator->m_hDevice,
    6586  m_DedicatedAllocation.m_hMemory,
    6587  0, // offset
    6588  VK_WHOLE_SIZE,
    6589  0, // flags
    6590  ppData);
    6591  if(result == VK_SUCCESS)
    6592  {
    6593  m_DedicatedAllocation.m_pMappedData = *ppData;
    6594  m_MapCount = 1;
    6595  }
    6596  return result;
    6597  }
    6598 }
    6599 
    // Unmaps a dedicated allocation: decrements the map count and calls
    // vkUnmapMemory only when the count reaches zero. Underflow is asserted.
    6600 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    6601 {
    6602  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6603 
    6604  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6605  {
    6606  --m_MapCount;
    6607  if(m_MapCount == 0)
    6608  {
    // Clear the cached pointer before the actual unmap.
    6609  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    6610  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    6611  hAllocator->m_hDevice,
    6612  m_DedicatedAllocation.m_hMemory);
    6613  }
    6614  }
    6615  else
    6616  {
    6617  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.")
;
    6618  }
    6619 }
    6620 
    6621 #if VMA_STATS_STRING_ENABLED
    6622 
    // Serializes one VmaStatInfo record as a JSON object.
    // Min/Avg/Max sub-objects are emitted only when there is more than one
    // allocation / unused range, since with 0 or 1 they add no information.
    6623 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    6624 {
    6625  json.BeginObject();
    6626 
    6627  json.WriteString("Blocks");
    6628  json.WriteNumber(stat.blockCount);
    6629 
    6630  json.WriteString("Allocations");
    6631  json.WriteNumber(stat.allocationCount);
    6632 
    6633  json.WriteString("UnusedRanges");
    6634  json.WriteNumber(stat.unusedRangeCount);
    6635 
    6636  json.WriteString("UsedBytes");
    6637  json.WriteNumber(stat.usedBytes);
    6638 
    6639  json.WriteString("UnusedBytes");
    6640  json.WriteNumber(stat.unusedBytes);
    6641 
    6642  if(stat.allocationCount > 1)
    6643  {
    6644  json.WriteString("AllocationSize");
    6645  json.BeginObject(true);
    6646  json.WriteString("Min");
    6647  json.WriteNumber(stat.allocationSizeMin);
    6648  json.WriteString("Avg");
    6649  json.WriteNumber(stat.allocationSizeAvg);
    6650  json.WriteString("Max");
    6651  json.WriteNumber(stat.allocationSizeMax);
    6652  json.EndObject();
    6653  }
    6654 
    6655  if(stat.unusedRangeCount > 1)
    6656  {
    6657  json.WriteString("UnusedRangeSize");
    6658  json.BeginObject(true);
    6659  json.WriteString("Min");
    6660  json.WriteNumber(stat.unusedRangeSizeMin);
    6661  json.WriteString("Avg");
    6662  json.WriteNumber(stat.unusedRangeSizeAvg);
    6663  json.WriteString("Max");
    6664  json.WriteNumber(stat.unusedRangeSizeMax);
    6665  json.EndObject();
    6666  }
    6667 
    6668  json.EndObject();
    6669 }
    6670 
    6671 #endif // #if VMA_STATS_STRING_ENABLED
    6672 
    // Comparator ordering suballocation-list iterators by ascending suballocation
    // size. The heterogeneous (iterator, VkDeviceSize) overload enables binary
    // search for "first item not smaller than a given size" (see VmaBinaryFindFirstNotLess).
    6673 struct VmaSuballocationItemSizeLess
    6674 {
    6675  bool operator()(
    6676  const VmaSuballocationList::iterator lhs,
    6677  const VmaSuballocationList::iterator rhs) const
    6678  {
    6679  return lhs->size < rhs->size;
    6680  }
    6681  bool operator()(
    6682  const VmaSuballocationList::iterator lhs,
    6683  VkDeviceSize rhsSize) const
    6684  {
    6685  return lhs->size < rhsSize;
    6686  }
    6687 };
    6688 
    6689 
    6691 // class VmaBlockMetadata
    6692 
    // Base metadata constructor: size is set later via Init(); allocation
    // callbacks are captured from the owning allocator for internal containers.
    6693 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    6694  m_Size(0),
    6695  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
    6696 {
    6697 }
    6698 
    6699 #if VMA_STATS_STRING_ENABLED
    6700 
    // Opens the JSON object for one block's detailed map: writes summary fields,
    // then starts the "Suballocations" array. Must be paired with PrintDetailedMap_End.
    6701 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    6702  VkDeviceSize unusedBytes,
    6703  size_t allocationCount,
    6704  size_t unusedRangeCount) const
    6705 {
    6706  json.BeginObject();
    6707 
    6708  json.WriteString("TotalBytes");
    6709  json.WriteNumber(GetSize());
    6710 
    6711  json.WriteString("UnusedBytes");
    6712  json.WriteNumber(unusedBytes);
    6713 
    6714  json.WriteString("Allocations");
    6715  json.WriteNumber((uint64_t)allocationCount);
    6716 
    6717  json.WriteString("UnusedRanges");
    6718  json.WriteNumber((uint64_t)unusedRangeCount);
    6719 
    6720  json.WriteString("Suballocations");
    6721  json.BeginArray();
    6722 }
    6723 
    // Writes one used suballocation as a single-line JSON object inside the
    // "Suballocations" array opened by PrintDetailedMap_Begin.
    6724 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    6725  VkDeviceSize offset,
    6726  VmaAllocation hAllocation) const
    6727 {
    6728  json.BeginObject(true);
    6729 
    6730  json.WriteString("Offset");
    6731  json.WriteNumber(offset);
    6732 
    // The allocation prints its own Type/Size/UserData/... fields.
    6733  hAllocation->PrintParameters(json);
    6734 
    6735  json.EndObject();
    6736 }
    6737 
    // Writes one free range as a single-line JSON object with type "FREE",
    // mirroring the shape of the entries written by PrintDetailedMap_Allocation.
    6738 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    6739  VkDeviceSize offset,
    6740  VkDeviceSize size) const
    6741 {
    6742  json.BeginObject(true);
    6743 
    6744  json.WriteString("Offset");
    6745  json.WriteNumber(offset);
    6746 
    6747  json.WriteString("Type");
    6748  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    6749 
    6750  json.WriteString("Size");
    6751  json.WriteNumber(size);
    6752 
    6753  json.EndObject();
    6754 }
    6755 
    // Closes the "Suballocations" array and the block object opened by
    // PrintDetailedMap_Begin.
    6756 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
    6757 {
    6758  json.EndArray();
    6759  json.EndObject();
    6760 }
    6761 
    6762 #endif // #if VMA_STATS_STRING_ENABLED
    6763 
    6765 // class VmaBlockMetadata_Generic
    6766 
    // Generic (free-list based) metadata: suballocation list plus a by-size
    // index of free ranges, both using the allocator's CPU allocation callbacks.
    6767 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    6768  VmaBlockMetadata(hAllocator),
    6769  m_FreeCount(0),
    6770  m_SumFreeSize(0),
    6771  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    6772  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    6773 {
    6774 }
    6775 
    // Intentionally empty: members clean up via their own destructors.
    6776 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
    6777 {
    6778 }
    6779 
    // Initializes the metadata to represent one block-sized free range:
    // a single FREE suballocation covering [0, size), registered in the by-size index.
    6780 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    6781 {
    6782  VmaBlockMetadata::Init(size);
    6783 
    6784  m_FreeCount = 1;
    6785  m_SumFreeSize = size;
    6786 
    6787  VmaSuballocation suballoc = {};
    6788  suballoc.offset = 0;
    6789  suballoc.size = size;
    6790  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    6791  suballoc.hAllocation = VK_NULL_HANDLE;
    6792 
    // Whole-block free range is always large enough to be registered by size.
    6793  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    6794  m_Suballocations.push_back(suballoc);
    6795  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    6796  --suballocItem;
    6797  m_FreeSuballocationsBySize.push_back(suballocItem);
    6798 }
    6799 
    // Full consistency check of this block's metadata: contiguous coverage by
    // offset, no adjacent free ranges, free/used flags consistent with handles,
    // by-size index sorted and complete, and cached totals matching recomputed ones.
    // Returns true when everything holds (VMA_VALIDATE is expected to bail out of
    // this function on the first failed check — confirm macro definition).
    6800 bool VmaBlockMetadata_Generic::Validate() const
    6801 {
    6802  VMA_VALIDATE(!m_Suballocations.empty());
    6803 
    6804  // Expected offset of new suballocation as calculated from previous ones.
    6805  VkDeviceSize calculatedOffset = 0;
    6806  // Expected number of free suballocations as calculated from traversing their list.
    6807  uint32_t calculatedFreeCount = 0;
    6808  // Expected sum size of free suballocations as calculated from traversing their list.
    6809  VkDeviceSize calculatedSumFreeSize = 0;
    6810  // Expected number of free suballocations that should be registered in
    6811  // m_FreeSuballocationsBySize calculated from traversing their list.
    6812  size_t freeSuballocationsToRegister = 0;
    6813  // True if previous visited suballocation was free.
    6814  bool prevFree = false;
    6815 
    6816  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6817  suballocItem != m_Suballocations.cend();
    6818  ++suballocItem)
    6819  {
    6820  const VmaSuballocation& subAlloc = *suballocItem;
    6821 
    6822  // Actual offset of this suballocation doesn't match expected one.
    6823  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
    6824 
    6825  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    6826  // Two adjacent free suballocations are invalid. They should be merged.
    6827  VMA_VALIDATE(!prevFree || !currFree);
    6828 
    6829  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
    6830 
    6831  if(currFree)
    6832  {
    6833  calculatedSumFreeSize += subAlloc.size;
    6834  ++calculatedFreeCount;
    6835  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6836  {
    6837  ++freeSuballocationsToRegister;
    6838  }
    6839 
    6840  // Margin required between allocations - every free space must be at least that large.
    6841  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
    6842  }
    6843  else
    6844  {
    6845  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
    6846  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
    6847 
    6848  // Margin required between allocations - previous allocation must be free.
    6849  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
    6850  }
    6851 
    6852  calculatedOffset += subAlloc.size;
    6853  prevFree = currFree;
    6854  }
    6855 
    6856  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    6857  // match expected one.
    6858  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
    6859 
    6860  VkDeviceSize lastSize = 0;
    6861  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    6862  {
    6863  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    6864 
    6865  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    6866  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    6867  // They must be sorted by size ascending.
    6868  VMA_VALIDATE(suballocItem->size >= lastSize);
    6869 
    6870  lastSize = suballocItem->size;
    6871  }
    6872 
    6873  // Check if totals match calculated values.
    6874  VMA_VALIDATE(ValidateFreeSuballocationList());
    6875  VMA_VALIDATE(calculatedOffset == GetSize());
    6876  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    6877  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
    6878 
    6879  return true;
    6880 }
    6881 
    6882 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6883 {
    6884  if(!m_FreeSuballocationsBySize.empty())
    6885  {
    6886  return m_FreeSuballocationsBySize.back()->size;
    6887  }
    6888  else
    6889  {
    6890  return 0;
    6891  }
    6892 }
    6893 
    // The block is empty when its only suballocation is the single free range
    // covering the whole block (the state produced by Init()).
    6894 bool VmaBlockMetadata_Generic::IsEmpty() const
    6895 {
    6896  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    6897 }
    6898 
    // Fills outInfo with statistics for this single block. Min fields start at
    // UINT64_MAX and max fields at 0 so the MIN/MAX folds below work; note the
    // Avg fields are left for the caller to compute (they are not set here).
    6899 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    6900 {
    6901  outInfo.blockCount = 1;
    6902 
    6903  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6904  outInfo.allocationCount = rangeCount - m_FreeCount;
    6905  outInfo.unusedRangeCount = m_FreeCount;
    6906 
    6907  outInfo.unusedBytes = m_SumFreeSize;
    6908  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    6909 
    6910  outInfo.allocationSizeMin = UINT64_MAX;
    6911  outInfo.allocationSizeMax = 0;
    6912  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6913  outInfo.unusedRangeSizeMax = 0;
    6914 
    6915  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6916  suballocItem != m_Suballocations.cend();
    6917  ++suballocItem)
    6918  {
    6919  const VmaSuballocation& suballoc = *suballocItem;
    6920  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    6921  {
    6922  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    6923  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    6924  }
    6925  else
    6926  {
    6927  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    6928  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    6929  }
    6930  }
    6931 }
    6932 
    6933 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    6934 {
    6935  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6936 
    6937  inoutStats.size += GetSize();
    6938  inoutStats.unusedSize += m_SumFreeSize;
    6939  inoutStats.allocationCount += rangeCount - m_FreeCount;
    6940  inoutStats.unusedRangeCount += m_FreeCount;
    6941  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    6942 }
    6943 
    6944 #if VMA_STATS_STRING_ENABLED
    6945 
    // Dumps this block's full suballocation map as JSON: summary header, then one
    // entry per suballocation in address order (free ranges and allocations interleaved).
    6946 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6947 {
    6948  PrintDetailedMap_Begin(json,
    6949  m_SumFreeSize, // unusedBytes
    6950  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6951  m_FreeCount); // unusedRangeCount
    6952 
    6953  size_t i = 0;
    6954  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6955  suballocItem != m_Suballocations.cend();
    6956  ++suballocItem, ++i)
    6957  {
    6958  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6959  {
    6960  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6961  }
    6962  else
    6963  {
    6964  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6965  }
    6966  }
    6967 
    6968  PrintDetailedMap_End(json);
    6969 }
    6970 
    6971 #endif // #if VMA_STATS_STRING_ENABLED
    6972 
    // Searches this block for a place to put a new allocation of allocSize bytes
    // with the given alignment/type, honoring bufferImageGranularity and the
    // requested strategy. On success fills *pAllocationRequest (nothing is
    // committed; Alloc() applies the request later) and returns true.
    // If canMakeOtherLost, it may additionally propose evicting lost-enabled
    // allocations, choosing the candidate with the lowest CalcCost().
    6973 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6974  uint32_t currentFrameIndex,
    6975  uint32_t frameInUseCount,
    6976  VkDeviceSize bufferImageGranularity,
    6977  VkDeviceSize allocSize,
    6978  VkDeviceSize allocAlignment,
    6979  bool upperAddress,
    6980  VmaSuballocationType allocType,
    6981  bool canMakeOtherLost,
    6982  uint32_t strategy,
    6983  VmaAllocationRequest* pAllocationRequest)
    6984 {
    6985  VMA_ASSERT(allocSize > 0);
    6986  VMA_ASSERT(!upperAddress);
    6987  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6988  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6989  VMA_HEAVY_ASSERT(Validate());
    6990 
    6991  // There is not enough total free space in this block to fulfill the request: Early return.
    6992  if(canMakeOtherLost == false &&
    6993  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6994  {
    6995  return false;
    6996  }
    6997 
    6998  // New algorithm, efficiently searching freeSuballocationsBySize.
    6999  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    7000  if(freeSuballocCount > 0)
    7001  {
    // NOTE(review): a line is missing from this generated listing here (original
    // line 7002) — presumably the `strategy` condition selecting best-fit search;
    // consult the real vk_mem_alloc.h before relying on this listing.
    7003  {
    7004  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    7005  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7006  m_FreeSuballocationsBySize.data(),
    7007  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7008  allocSize + 2 * VMA_DEBUG_MARGIN,
    7009  VmaSuballocationItemSizeLess());
    7010  size_t index = it - m_FreeSuballocationsBySize.data();
    7011  for(; index < freeSuballocCount; ++index)
    7012  {
    7013  if(CheckAllocation(
    7014  currentFrameIndex,
    7015  frameInUseCount,
    7016  bufferImageGranularity,
    7017  allocSize,
    7018  allocAlignment,
    7019  allocType,
    7020  m_FreeSuballocationsBySize[index],
    7021  false, // canMakeOtherLost
    7022  &pAllocationRequest->offset,
    7023  &pAllocationRequest->itemsToMakeLostCount,
    7024  &pAllocationRequest->sumFreeSize,
    7025  &pAllocationRequest->sumItemSize))
    7026  {
    7027  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7028  return true;
    7029  }
    7030  }
    7031  }
    7032  else // WORST_FIT, FIRST_FIT
    7033  {
    7034  // Search starting from biggest suballocations.
    7035  for(size_t index = freeSuballocCount; index--; )
    7036  {
    7037  if(CheckAllocation(
    7038  currentFrameIndex,
    7039  frameInUseCount,
    7040  bufferImageGranularity,
    7041  allocSize,
    7042  allocAlignment,
    7043  allocType,
    7044  m_FreeSuballocationsBySize[index],
    7045  false, // canMakeOtherLost
    7046  &pAllocationRequest->offset,
    7047  &pAllocationRequest->itemsToMakeLostCount,
    7048  &pAllocationRequest->sumFreeSize,
    7049  &pAllocationRequest->sumItemSize))
    7050  {
    7051  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7052  return true;
    7053  }
    7054  }
    7055  }
    7056  }
    7057 
    7058  if(canMakeOtherLost)
    7059  {
    7060  // Brute-force algorithm. TODO: Come up with something better.
    7061 
    // VK_WHOLE_SIZE used as "no candidate yet" sentinel for the cost comparison below.
    7062  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    7063  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    7064 
    7065  VmaAllocationRequest tmpAllocRequest = {};
    7066  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7067  suballocIt != m_Suballocations.end();
    7068  ++suballocIt)
    7069  {
    7070  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7071  suballocIt->hAllocation->CanBecomeLost())
    7072  {
    7073  if(CheckAllocation(
    7074  currentFrameIndex,
    7075  frameInUseCount,
    7076  bufferImageGranularity,
    7077  allocSize,
    7078  allocAlignment,
    7079  allocType,
    7080  suballocIt,
    7081  canMakeOtherLost,
    7082  &tmpAllocRequest.offset,
    7083  &tmpAllocRequest.itemsToMakeLostCount,
    7084  &tmpAllocRequest.sumFreeSize,
    7085  &tmpAllocRequest.sumItemSize))
    7086  {
    7087  tmpAllocRequest.item = suballocIt;
    7088 
    7089  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    // NOTE(review): a line is missing from this generated listing here (original
    // line 7090) — presumably the second operand of the `||` (a first-fit strategy
    // check); consult the real vk_mem_alloc.h.
    7091  {
    7092  *pAllocationRequest = tmpAllocRequest;
    7093  }
    7094  }
    7095  }
    7096  }
    7097 
    7098  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7099  {
    7100  return true;
    7101  }
    7102  }
    7103 
    7104  return false;
    7105 }
    7106 
    // Evicts the allocations that a prior CreateAllocationRequest(canMakeOtherLost)
    // marked for eviction. Walks forward from pAllocationRequest->item, making each
    // used suballocation lost and freeing it (FreeSuballocation may merge free
    // neighbors and returns the resulting iterator). Returns false if any
    // allocation could no longer be made lost; true when the request's item ends
    // up pointing at a free range ready for Alloc().
    7107 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    7108  uint32_t currentFrameIndex,
    7109  uint32_t frameInUseCount,
    7110  VmaAllocationRequest* pAllocationRequest)
    7111 {
    7112  while(pAllocationRequest->itemsToMakeLostCount > 0)
    7113  {
    // Skip over an already-free range to reach the next victim.
    7114  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    7115  {
    7116  ++pAllocationRequest->item;
    7117  }
    7118  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    7119  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    7120  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    7121  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7122  {
    7123  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    7124  --pAllocationRequest->itemsToMakeLostCount;
    7125  }
    7126  else
    7127  {
    7128  return false;
    7129  }
    7130  }
    7131 
    7132  VMA_HEAVY_ASSERT(Validate());
    7133  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    7134  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7135 
    7136  return true;
    7137 }
    7138 
    // Makes lost every allocation in this block that can become lost and whose
    // last use is outside the in-use frame window. Returns how many were evicted.
    7139 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7140 {
    7141  uint32_t lostAllocationCount = 0;
    7142  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7143  it != m_Suballocations.end();
    7144  ++it)
    7145  {
    7146  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7147  it->hAllocation->CanBecomeLost() &&
    7148  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7149  {
    // FreeSuballocation may merge with neighbors; continue from the returned iterator.
    7150  it = FreeSuballocation(it);
    7151  ++lostAllocationCount;
    7152  }
    7153  }
    7154  return lostAllocationCount;
    7155 }
    7156 
    // Verifies the debug-margin guard bytes before and after every used
    // suballocation in the mapped block data. Returns VK_ERROR_VALIDATION_FAILED_EXT
    // on the first corrupted guard, VK_SUCCESS otherwise.
    7157 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7158 {
    7159  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7160  it != m_Suballocations.end();
    7161  ++it)
    7162  {
    7163  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7164  {
    // Guard bytes live in the free margin just before the allocation...
    7165  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7166  {
    7167  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7168  return VK_ERROR_VALIDATION_FAILED_EXT;
    7169  }
    // ...and immediately after its end.
    7170  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7171  {
    7172  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7173  return VK_ERROR_VALIDATION_FAILED_EXT;
    7174  }
    7175  }
    7176  }
    7177 
    7178  return VK_SUCCESS;
    7179 }
    7180 
    // Commits a previously computed allocation request: carves allocSize bytes at
    // request.offset out of the free suballocation request.item, splitting off
    // leading/trailing free padding into new free suballocations, and updates
    // m_FreeCount / m_SumFreeSize accordingly.
    7181 void VmaBlockMetadata_Generic::Alloc(
    7182  const VmaAllocationRequest& request,
    7183  VmaSuballocationType type,
    7184  VkDeviceSize allocSize,
    7185  bool upperAddress,
    7186  VmaAllocation hAllocation)
    7187 {
    7188  VMA_ASSERT(!upperAddress);
    7189  VMA_ASSERT(request.item != m_Suballocations.end());
    7190  VmaSuballocation& suballoc = *request.item;
    7191  // Given suballocation is a free block.
    7192  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7193  // Given offset is inside this suballocation.
    7194  VMA_ASSERT(request.offset >= suballoc.offset);
    7195  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    7196  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    7197  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    7198 
    7199  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    7200  // it to become used.
    7201  UnregisterFreeSuballocation(request.item);
    7202 
    7203  suballoc.offset = request.offset;
    7204  suballoc.size = allocSize;
    7205  suballoc.type = type;
    7206  suballoc.hAllocation = hAllocation;
    7207 
    7208  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    7209  if(paddingEnd)
    7210  {
    7211  VmaSuballocation paddingSuballoc = {};
    7212  paddingSuballoc.offset = request.offset + allocSize;
    7213  paddingSuballoc.size = paddingEnd;
    7214  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7215  VmaSuballocationList::iterator next = request.item;
    7216  ++next;
    7217  const VmaSuballocationList::iterator paddingEndItem =
    7218  m_Suballocations.insert(next, paddingSuballoc);
    7219  RegisterFreeSuballocation(paddingEndItem);
    7220  }
    7221 
    7222  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    7223  if(paddingBegin)
    7224  {
    7225  VmaSuballocation paddingSuballoc = {};
    7226  paddingSuballoc.offset = request.offset - paddingBegin;
    7227  paddingSuballoc.size = paddingBegin;
    7228  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7229  const VmaSuballocationList::iterator paddingBeginItem =
    7230  m_Suballocations.insert(request.item, paddingSuballoc);
    7231  RegisterFreeSuballocation(paddingBeginItem);
    7232  }
    7233 
    7234  // Update totals.
    // One free range consumed; each padding split adds one back.
    7235  m_FreeCount = m_FreeCount - 1;
    7236  if(paddingBegin > 0)
    7237  {
    7238  ++m_FreeCount;
    7239  }
    7240  if(paddingEnd > 0)
    7241  {
    7242  ++m_FreeCount;
    7243  }
    7244  m_SumFreeSize -= allocSize;
    7245 }
    7246 
    // Frees the suballocation holding the given allocation handle.
    // Linear search over the suballocation list; asserts if the handle is not found.
    7247 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7248 {
    7249  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7250  suballocItem != m_Suballocations.end();
    7251  ++suballocItem)
    7252  {
    7253  VmaSuballocation& suballoc = *suballocItem;
    7254  if(suballoc.hAllocation == allocation)
    7255  {
    7256  FreeSuballocation(suballocItem);
    7257  VMA_HEAVY_ASSERT(Validate());
    7258  return;
    7259  }
    7260  }
    7261  VMA_ASSERT(0 && "Not found!");
    7262 }
    7263 
    // Frees the suballocation that starts at the given offset.
    // Linear search, mirroring Free(); asserts if no suballocation starts there.
    7264 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7265 {
    7266  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7267  suballocItem != m_Suballocations.end();
    7268  ++suballocItem)
    7269  {
    7270  VmaSuballocation& suballoc = *suballocItem;
    7271  if(suballoc.offset == offset)
    7272  {
    7273  FreeSuballocation(suballocItem);
    7274  return;
    7275  }
    7276  }
    7277  VMA_ASSERT(0 && "Not found!");
    7278 }
    7279 
    // Tries to resize an existing allocation in place. Shrinking always succeeds
    // (the reclaimed tail is merged into or created as a free range). Growing
    // succeeds only if the immediately following suballocation is a free range
    // large enough (including VMA_DEBUG_MARGIN). Returns true on success; the
    // caller is responsible for updating the allocation object's own size.
    7280 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
    7281 {
    7282  typedef VmaSuballocationList::iterator iter_type;
    7283  for(iter_type suballocItem = m_Suballocations.begin();
    7284  suballocItem != m_Suballocations.end();
    7285  ++suballocItem)
    7286  {
    7287  VmaSuballocation& suballoc = *suballocItem;
    7288  if(suballoc.hAllocation == alloc)
    7289  {
    7290  iter_type nextItem = suballocItem;
    7291  ++nextItem;
    7292 
    7293  // Should have been ensured on higher level.
    7294  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
    7295 
    7296  // Shrinking.
    7297  if(newSize < alloc->GetSize())
    7298  {
    7299  const VkDeviceSize sizeDiff = suballoc.size - newSize;
    7300 
    7301  // There is next item.
    7302  if(nextItem != m_Suballocations.end())
    7303  {
    7304  // Next item is free.
    7305  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7306  {
    7307  // Grow this next item backward.
    // Re-register because the by-size index is keyed on size, which changes here.
    7308  UnregisterFreeSuballocation(nextItem);
    7309  nextItem->offset -= sizeDiff;
    7310  nextItem->size += sizeDiff;
    7311  RegisterFreeSuballocation(nextItem);
    7312  }
    7313  // Next item is not free.
    7314  else
    7315  {
    7316  // Create free item after current one.
    7317  VmaSuballocation newFreeSuballoc;
    7318  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
    7319  newFreeSuballoc.offset = suballoc.offset + newSize;
    7320  newFreeSuballoc.size = sizeDiff;
    7321  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7322  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
    7323  RegisterFreeSuballocation(newFreeSuballocIt);
    7324 
    7325  ++m_FreeCount;
    7326  }
    7327  }
    7328  // This is the last item.
    7329  else
    7330  {
    7331  // Create free item at the end.
    7332  VmaSuballocation newFreeSuballoc;
    7333  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
    7334  newFreeSuballoc.offset = suballoc.offset + newSize;
    7335  newFreeSuballoc.size = sizeDiff;
    7336  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7337  m_Suballocations.push_back(newFreeSuballoc);
    7338 
    7339  iter_type newFreeSuballocIt = m_Suballocations.end();
    7340  RegisterFreeSuballocation(--newFreeSuballocIt);
    7341 
    7342  ++m_FreeCount;
    7343  }
    7344 
    7345  suballoc.size = newSize;
    7346  m_SumFreeSize += sizeDiff;
    7347  }
    7348  // Growing.
    7349  else
    7350  {
    7351  const VkDeviceSize sizeDiff = newSize - suballoc.size;
    7352 
    7353  // There is next item.
    7354  if(nextItem != m_Suballocations.end())
    7355  {
    7356  // Next item is free.
    7357  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7358  {
    7359  // There is not enough free space, including margin.
    7360  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
    7361  {
    7362  return false;
    7363  }
    7364 
    7365  // There is more free space than required.
    7366  if(nextItem->size > sizeDiff)
    7367  {
    7368  // Move and shrink this next item.
    7369  UnregisterFreeSuballocation(nextItem);
    7370  nextItem->offset += sizeDiff;
    7371  nextItem->size -= sizeDiff;
    7372  RegisterFreeSuballocation(nextItem);
    7373  }
    7374  // There is exactly the amount of free space required.
    7375  else
    7376  {
    7377  // Remove this next free item.
    7378  UnregisterFreeSuballocation(nextItem);
    7379  m_Suballocations.erase(nextItem);
    7380  --m_FreeCount;
    7381  }
    7382  }
    7383  // Next item is not free - there is no space to grow.
    7384  else
    7385  {
    7386  return false;
    7387  }
    7388  }
    7389  // This is the last item - there is no space to grow.
    7390  else
    7391  {
    7392  return false;
    7393  }
    7394 
    7395  suballoc.size = newSize;
    7396  m_SumFreeSize -= sizeDiff;
    7397  }
    7398 
    7399  // We cannot call Validate() here because alloc object is updated to new size outside of this call.
    7400  return true;
    7401  }
    7402  }
    7403  VMA_ASSERT(0 && "Not found!");
    7404  return false;
    7405 }
    7406 
    7407 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7408 {
    7409  VkDeviceSize lastSize = 0;
    7410  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7411  {
    7412  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7413 
    7414  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7415  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7416  VMA_VALIDATE(it->size >= lastSize);
    7417  lastSize = it->size;
    7418  }
    7419  return true;
    7420 }
    7421 
/*
Checks whether an allocation of allocSize / allocAlignment / allocType can be
placed starting at suballocItem, honoring VMA_DEBUG_MARGIN and
bufferImageGranularity.

On success returns true and fills *pOffset with the final, aligned offset.

If canMakeOtherLost is true, occupied suballocations that are eligible to be
made lost (CanBecomeLost() and older than frameInUseCount frames) may be
consumed by the request: *itemsToMakeLostCount receives how many, and
*pSumFreeSize / *pSumItemSize receive the free and used bytes scanned over.
If canMakeOtherLost is false, suballocItem must be a single FREE suballocation
large enough for the whole request by itself.
*/
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // Occupied starting item is only usable if it can be made lost now.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    // Occupied item in the way: usable only if it can be made lost.
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple case: the request must fit entirely inside this one free item.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7695 
    7696 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7697 {
    7698  VMA_ASSERT(item != m_Suballocations.end());
    7699  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7700 
    7701  VmaSuballocationList::iterator nextItem = item;
    7702  ++nextItem;
    7703  VMA_ASSERT(nextItem != m_Suballocations.end());
    7704  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7705 
    7706  item->size += nextItem->size;
    7707  --m_FreeCount;
    7708  m_Suballocations.erase(nextItem);
    7709 }
    7710 
/*
Releases the allocation held by suballocItem: marks the suballocation free,
updates m_FreeCount / m_SumFreeSize, and coalesces it with adjacent free
neighbors. Returns an iterator to the resulting (possibly merged) free
suballocation, already registered in m_FreeSuballocationsBySize.
*/
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // nextItem is consumed by the merge - remove it from the by-size
        // registry before it is erased.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem absorbs this item; unregister it first because its size
        // changes, then re-register it with the new, larger size.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7762 
    7763 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7764 {
    7765  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7766  VMA_ASSERT(item->size > 0);
    7767 
    7768  // You may want to enable this validation at the beginning or at the end of
    7769  // this function, depending on what do you want to check.
    7770  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7771 
    7772  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7773  {
    7774  if(m_FreeSuballocationsBySize.empty())
    7775  {
    7776  m_FreeSuballocationsBySize.push_back(item);
    7777  }
    7778  else
    7779  {
    7780  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7781  }
    7782  }
    7783 
    7784  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7785 }
    7786 
    7787 
/*
Removes `item` from m_FreeSuballocationsBySize. Must be called before the
item's size changes or before it stops being free. Items smaller than
VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER were never registered, so they are
skipped here.
*/
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Binary-search the first entry not smaller than item, then scan
        // forward through the run of equal-sized entries for the exact
        // iterator - multiple free ranges may share the same size.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Leaving the equal-size run without a match means the registry
            // is inconsistent.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
    7820 
    7822 // class VmaBlockMetadata_Linear
    7823 
// Constructs empty linear metadata. Both suballocation vectors allocate their
// CPU memory through the allocator's callbacks; vector 0 starts as the "1st"
// vector and the 2nd vector starts unused (SECOND_VECTOR_EMPTY).
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7836 
// Nothing to release explicitly - the suballocation vectors clean up through
// their VmaStlAllocator.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7840 
// Initializes metadata for a block of the given size; the entire block starts
// as free space.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7846 
/*
Validates internal consistency of the linear metadata: vector-mode invariants,
null-item bookkeeping, strictly increasing offsets with VMA_DEBUG_MARGIN
between items, agreement between suballocations and their VmaAllocation
handles, and the m_SumFreeSize total. Traversal is in increasing address
order: 2nd vector first when it is the lower ring-buffer part, then 1st
vector, then 2nd vector backwards when it is the upper stack.
Returns true on success; VMA_VALIDATE returns false on the first violation.
*/
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // Ring-buffer mode: 2nd vector occupies addresses below the 1st vector.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // The leading run of 1st-vector items must all be freed (null) items.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // Double-stack mode: 2nd vector grows downward from the end of the block,
    // so iterate it backwards to go in increasing address order.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7973 
    7974 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7975 {
    7976  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7977  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7978 }
    7979 
    7980 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    7981 {
    7982  const VkDeviceSize size = GetSize();
    7983 
    7984  /*
    7985  We don't consider gaps inside allocation vectors with freed allocations because
    7986  they are not suitable for reuse in linear allocator. We consider only space that
    7987  is available for new allocations.
    7988  */
    7989  if(IsEmpty())
    7990  {
    7991  return size;
    7992  }
    7993 
    7994  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7995 
    7996  switch(m_2ndVectorMode)
    7997  {
    7998  case SECOND_VECTOR_EMPTY:
    7999  /*
    8000  Available space is after end of 1st, as well as before beginning of 1st (which
    8001  whould make it a ring buffer).
    8002  */
    8003  {
    8004  const size_t suballocations1stCount = suballocations1st.size();
    8005  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    8006  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    8007  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    8008  return VMA_MAX(
    8009  firstSuballoc.offset,
    8010  size - (lastSuballoc.offset + lastSuballoc.size));
    8011  }
    8012  break;
    8013 
    8014  case SECOND_VECTOR_RING_BUFFER:
    8015  /*
    8016  Available space is only between end of 2nd and beginning of 1st.
    8017  */
    8018  {
    8019  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8020  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    8021  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    8022  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    8023  }
    8024  break;
    8025 
    8026  case SECOND_VECTOR_DOUBLE_STACK:
    8027  /*
    8028  Available space is only between end of 1st and top of 2nd.
    8029  */
    8030  {
    8031  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8032  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    8033  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    8034  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    8035  }
    8036  break;
    8037 
    8038  default:
    8039  VMA_ASSERT(0);
    8040  return 0;
    8041  }
    8042 }
    8043 
    8044 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    8045 {
    8046  const VkDeviceSize size = GetSize();
    8047  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8048  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8049  const size_t suballoc1stCount = suballocations1st.size();
    8050  const size_t suballoc2ndCount = suballocations2nd.size();
    8051 
    8052  outInfo.blockCount = 1;
    8053  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    8054  outInfo.unusedRangeCount = 0;
    8055  outInfo.usedBytes = 0;
    8056  outInfo.allocationSizeMin = UINT64_MAX;
    8057  outInfo.allocationSizeMax = 0;
    8058  outInfo.unusedRangeSizeMin = UINT64_MAX;
    8059  outInfo.unusedRangeSizeMax = 0;
    8060 
    8061  VkDeviceSize lastOffset = 0;
    8062 
    8063  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8064  {
    8065  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8066  size_t nextAlloc2ndIndex = 0;
    8067  while(lastOffset < freeSpace2ndTo1stEnd)
    8068  {
    8069  // Find next non-null allocation or move nextAllocIndex to the end.
    8070  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8071  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8072  {
    8073  ++nextAlloc2ndIndex;
    8074  }
    8075 
    8076  // Found non-null allocation.
    8077  if(nextAlloc2ndIndex < suballoc2ndCount)
    8078  {
    8079  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8080 
    8081  // 1. Process free space before this allocation.
    8082  if(lastOffset < suballoc.offset)
    8083  {
    8084  // There is free space from lastOffset to suballoc.offset.
    8085  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8086  ++outInfo.unusedRangeCount;
    8087  outInfo.unusedBytes += unusedRangeSize;
    8088  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8089  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8090  }
    8091 
    8092  // 2. Process this allocation.
    8093  // There is allocation with suballoc.offset, suballoc.size.
    8094  outInfo.usedBytes += suballoc.size;
    8095  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8096  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8097 
    8098  // 3. Prepare for next iteration.
    8099  lastOffset = suballoc.offset + suballoc.size;
    8100  ++nextAlloc2ndIndex;
    8101  }
    8102  // We are at the end.
    8103  else
    8104  {
    8105  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8106  if(lastOffset < freeSpace2ndTo1stEnd)
    8107  {
    8108  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8109  ++outInfo.unusedRangeCount;
    8110  outInfo.unusedBytes += unusedRangeSize;
    8111  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8112  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8113  }
    8114 
    8115  // End of loop.
    8116  lastOffset = freeSpace2ndTo1stEnd;
    8117  }
    8118  }
    8119  }
    8120 
    8121  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8122  const VkDeviceSize freeSpace1stTo2ndEnd =
    8123  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8124  while(lastOffset < freeSpace1stTo2ndEnd)
    8125  {
    8126  // Find next non-null allocation or move nextAllocIndex to the end.
    8127  while(nextAlloc1stIndex < suballoc1stCount &&
    8128  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8129  {
    8130  ++nextAlloc1stIndex;
    8131  }
    8132 
    8133  // Found non-null allocation.
    8134  if(nextAlloc1stIndex < suballoc1stCount)
    8135  {
    8136  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8137 
    8138  // 1. Process free space before this allocation.
    8139  if(lastOffset < suballoc.offset)
    8140  {
    8141  // There is free space from lastOffset to suballoc.offset.
    8142  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8143  ++outInfo.unusedRangeCount;
    8144  outInfo.unusedBytes += unusedRangeSize;
    8145  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8146  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8147  }
    8148 
    8149  // 2. Process this allocation.
    8150  // There is allocation with suballoc.offset, suballoc.size.
    8151  outInfo.usedBytes += suballoc.size;
    8152  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8153  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8154 
    8155  // 3. Prepare for next iteration.
    8156  lastOffset = suballoc.offset + suballoc.size;
    8157  ++nextAlloc1stIndex;
    8158  }
    8159  // We are at the end.
    8160  else
    8161  {
    8162  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8163  if(lastOffset < freeSpace1stTo2ndEnd)
    8164  {
    8165  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8166  ++outInfo.unusedRangeCount;
    8167  outInfo.unusedBytes += unusedRangeSize;
    8168  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8169  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8170  }
    8171 
    8172  // End of loop.
    8173  lastOffset = freeSpace1stTo2ndEnd;
    8174  }
    8175  }
    8176 
    8177  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8178  {
    8179  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8180  while(lastOffset < size)
    8181  {
    8182  // Find next non-null allocation or move nextAllocIndex to the end.
    8183  while(nextAlloc2ndIndex != SIZE_MAX &&
    8184  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8185  {
    8186  --nextAlloc2ndIndex;
    8187  }
    8188 
    8189  // Found non-null allocation.
    8190  if(nextAlloc2ndIndex != SIZE_MAX)
    8191  {
    8192  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8193 
    8194  // 1. Process free space before this allocation.
    8195  if(lastOffset < suballoc.offset)
    8196  {
    8197  // There is free space from lastOffset to suballoc.offset.
    8198  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8199  ++outInfo.unusedRangeCount;
    8200  outInfo.unusedBytes += unusedRangeSize;
    8201  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8202  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8203  }
    8204 
    8205  // 2. Process this allocation.
    8206  // There is allocation with suballoc.offset, suballoc.size.
    8207  outInfo.usedBytes += suballoc.size;
    8208  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8209  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8210 
    8211  // 3. Prepare for next iteration.
    8212  lastOffset = suballoc.offset + suballoc.size;
    8213  --nextAlloc2ndIndex;
    8214  }
    8215  // We are at the end.
    8216  else
    8217  {
    8218  // There is free space from lastOffset to size.
    8219  if(lastOffset < size)
    8220  {
    8221  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8222  ++outInfo.unusedRangeCount;
    8223  outInfo.unusedBytes += unusedRangeSize;
    8224  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8225  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8226  }
    8227 
    8228  // End of loop.
    8229  lastOffset = size;
    8230  }
    8231  }
    8232  }
    8233 
    8234  outInfo.unusedBytes = size - outInfo.usedBytes;
    8235 }
    8236 
    8237 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8238 {
    8239  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8240  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8241  const VkDeviceSize size = GetSize();
    8242  const size_t suballoc1stCount = suballocations1st.size();
    8243  const size_t suballoc2ndCount = suballocations2nd.size();
    8244 
    8245  inoutStats.size += size;
    8246 
    8247  VkDeviceSize lastOffset = 0;
    8248 
    8249  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8250  {
    8251  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8252  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8253  while(lastOffset < freeSpace2ndTo1stEnd)
    8254  {
    8255  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8256  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8257  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8258  {
    8259  ++nextAlloc2ndIndex;
    8260  }
    8261 
    8262  // Found non-null allocation.
    8263  if(nextAlloc2ndIndex < suballoc2ndCount)
    8264  {
    8265  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8266 
    8267  // 1. Process free space before this allocation.
    8268  if(lastOffset < suballoc.offset)
    8269  {
    8270  // There is free space from lastOffset to suballoc.offset.
    8271  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8272  inoutStats.unusedSize += unusedRangeSize;
    8273  ++inoutStats.unusedRangeCount;
    8274  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8275  }
    8276 
    8277  // 2. Process this allocation.
    8278  // There is allocation with suballoc.offset, suballoc.size.
    8279  ++inoutStats.allocationCount;
    8280 
    8281  // 3. Prepare for next iteration.
    8282  lastOffset = suballoc.offset + suballoc.size;
    8283  ++nextAlloc2ndIndex;
    8284  }
    8285  // We are at the end.
    8286  else
    8287  {
    8288  if(lastOffset < freeSpace2ndTo1stEnd)
    8289  {
    8290  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8291  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8292  inoutStats.unusedSize += unusedRangeSize;
    8293  ++inoutStats.unusedRangeCount;
    8294  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8295  }
    8296 
    8297  // End of loop.
    8298  lastOffset = freeSpace2ndTo1stEnd;
    8299  }
    8300  }
    8301  }
    8302 
    8303  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8304  const VkDeviceSize freeSpace1stTo2ndEnd =
    8305  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8306  while(lastOffset < freeSpace1stTo2ndEnd)
    8307  {
    8308  // Find next non-null allocation or move nextAllocIndex to the end.
    8309  while(nextAlloc1stIndex < suballoc1stCount &&
    8310  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8311  {
    8312  ++nextAlloc1stIndex;
    8313  }
    8314 
    8315  // Found non-null allocation.
    8316  if(nextAlloc1stIndex < suballoc1stCount)
    8317  {
    8318  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8319 
    8320  // 1. Process free space before this allocation.
    8321  if(lastOffset < suballoc.offset)
    8322  {
    8323  // There is free space from lastOffset to suballoc.offset.
    8324  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8325  inoutStats.unusedSize += unusedRangeSize;
    8326  ++inoutStats.unusedRangeCount;
    8327  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8328  }
    8329 
    8330  // 2. Process this allocation.
    8331  // There is allocation with suballoc.offset, suballoc.size.
    8332  ++inoutStats.allocationCount;
    8333 
    8334  // 3. Prepare for next iteration.
    8335  lastOffset = suballoc.offset + suballoc.size;
    8336  ++nextAlloc1stIndex;
    8337  }
    8338  // We are at the end.
    8339  else
    8340  {
    8341  if(lastOffset < freeSpace1stTo2ndEnd)
    8342  {
    8343  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8344  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8345  inoutStats.unusedSize += unusedRangeSize;
    8346  ++inoutStats.unusedRangeCount;
    8347  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8348  }
    8349 
    8350  // End of loop.
    8351  lastOffset = freeSpace1stTo2ndEnd;
    8352  }
    8353  }
    8354 
    8355  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8356  {
    8357  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8358  while(lastOffset < size)
    8359  {
    8360  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8361  while(nextAlloc2ndIndex != SIZE_MAX &&
    8362  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8363  {
    8364  --nextAlloc2ndIndex;
    8365  }
    8366 
    8367  // Found non-null allocation.
    8368  if(nextAlloc2ndIndex != SIZE_MAX)
    8369  {
    8370  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8371 
    8372  // 1. Process free space before this allocation.
    8373  if(lastOffset < suballoc.offset)
    8374  {
    8375  // There is free space from lastOffset to suballoc.offset.
    8376  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8377  inoutStats.unusedSize += unusedRangeSize;
    8378  ++inoutStats.unusedRangeCount;
    8379  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8380  }
    8381 
    8382  // 2. Process this allocation.
    8383  // There is allocation with suballoc.offset, suballoc.size.
    8384  ++inoutStats.allocationCount;
    8385 
    8386  // 3. Prepare for next iteration.
    8387  lastOffset = suballoc.offset + suballoc.size;
    8388  --nextAlloc2ndIndex;
    8389  }
    8390  // We are at the end.
    8391  else
    8392  {
    8393  if(lastOffset < size)
    8394  {
    8395  // There is free space from lastOffset to size.
    8396  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8397  inoutStats.unusedSize += unusedRangeSize;
    8398  ++inoutStats.unusedRangeCount;
    8399  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8400  }
    8401 
    8402  // End of loop.
    8403  lastOffset = size;
    8404  }
    8405  }
    8406  }
    8407 }
    8408 
    8409 #if VMA_STATS_STRING_ENABLED
    8410 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    8411 {
    8412  const VkDeviceSize size = GetSize();
    8413  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8414  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8415  const size_t suballoc1stCount = suballocations1st.size();
    8416  const size_t suballoc2ndCount = suballocations2nd.size();
    8417 
    8418  // FIRST PASS
    8419 
    8420  size_t unusedRangeCount = 0;
    8421  VkDeviceSize usedBytes = 0;
    8422 
    8423  VkDeviceSize lastOffset = 0;
    8424 
    8425  size_t alloc2ndCount = 0;
    8426  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8427  {
    8428  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8429  size_t nextAlloc2ndIndex = 0;
    8430  while(lastOffset < freeSpace2ndTo1stEnd)
    8431  {
    8432  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8433  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8434  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8435  {
    8436  ++nextAlloc2ndIndex;
    8437  }
    8438 
    8439  // Found non-null allocation.
    8440  if(nextAlloc2ndIndex < suballoc2ndCount)
    8441  {
    8442  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8443 
    8444  // 1. Process free space before this allocation.
    8445  if(lastOffset < suballoc.offset)
    8446  {
    8447  // There is free space from lastOffset to suballoc.offset.
    8448  ++unusedRangeCount;
    8449  }
    8450 
    8451  // 2. Process this allocation.
    8452  // There is allocation with suballoc.offset, suballoc.size.
    8453  ++alloc2ndCount;
    8454  usedBytes += suballoc.size;
    8455 
    8456  // 3. Prepare for next iteration.
    8457  lastOffset = suballoc.offset + suballoc.size;
    8458  ++nextAlloc2ndIndex;
    8459  }
    8460  // We are at the end.
    8461  else
    8462  {
    8463  if(lastOffset < freeSpace2ndTo1stEnd)
    8464  {
    8465  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8466  ++unusedRangeCount;
    8467  }
    8468 
    8469  // End of loop.
    8470  lastOffset = freeSpace2ndTo1stEnd;
    8471  }
    8472  }
    8473  }
    8474 
    8475  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8476  size_t alloc1stCount = 0;
    8477  const VkDeviceSize freeSpace1stTo2ndEnd =
    8478  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8479  while(lastOffset < freeSpace1stTo2ndEnd)
    8480  {
    8481  // Find next non-null allocation or move nextAllocIndex to the end.
    8482  while(nextAlloc1stIndex < suballoc1stCount &&
    8483  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8484  {
    8485  ++nextAlloc1stIndex;
    8486  }
    8487 
    8488  // Found non-null allocation.
    8489  if(nextAlloc1stIndex < suballoc1stCount)
    8490  {
    8491  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8492 
    8493  // 1. Process free space before this allocation.
    8494  if(lastOffset < suballoc.offset)
    8495  {
    8496  // There is free space from lastOffset to suballoc.offset.
    8497  ++unusedRangeCount;
    8498  }
    8499 
    8500  // 2. Process this allocation.
    8501  // There is allocation with suballoc.offset, suballoc.size.
    8502  ++alloc1stCount;
    8503  usedBytes += suballoc.size;
    8504 
    8505  // 3. Prepare for next iteration.
    8506  lastOffset = suballoc.offset + suballoc.size;
    8507  ++nextAlloc1stIndex;
    8508  }
    8509  // We are at the end.
    8510  else
    8511  {
    8512  if(lastOffset < size)
    8513  {
    8514  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8515  ++unusedRangeCount;
    8516  }
    8517 
    8518  // End of loop.
    8519  lastOffset = freeSpace1stTo2ndEnd;
    8520  }
    8521  }
    8522 
    8523  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8524  {
    8525  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8526  while(lastOffset < size)
    8527  {
    8528  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8529  while(nextAlloc2ndIndex != SIZE_MAX &&
    8530  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8531  {
    8532  --nextAlloc2ndIndex;
    8533  }
    8534 
    8535  // Found non-null allocation.
    8536  if(nextAlloc2ndIndex != SIZE_MAX)
    8537  {
    8538  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8539 
    8540  // 1. Process free space before this allocation.
    8541  if(lastOffset < suballoc.offset)
    8542  {
    8543  // There is free space from lastOffset to suballoc.offset.
    8544  ++unusedRangeCount;
    8545  }
    8546 
    8547  // 2. Process this allocation.
    8548  // There is allocation with suballoc.offset, suballoc.size.
    8549  ++alloc2ndCount;
    8550  usedBytes += suballoc.size;
    8551 
    8552  // 3. Prepare for next iteration.
    8553  lastOffset = suballoc.offset + suballoc.size;
    8554  --nextAlloc2ndIndex;
    8555  }
    8556  // We are at the end.
    8557  else
    8558  {
    8559  if(lastOffset < size)
    8560  {
    8561  // There is free space from lastOffset to size.
    8562  ++unusedRangeCount;
    8563  }
    8564 
    8565  // End of loop.
    8566  lastOffset = size;
    8567  }
    8568  }
    8569  }
    8570 
    8571  const VkDeviceSize unusedBytes = size - usedBytes;
    8572  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    8573 
    8574  // SECOND PASS
    8575  lastOffset = 0;
    8576 
    8577  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8578  {
    8579  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8580  size_t nextAlloc2ndIndex = 0;
    8581  while(lastOffset < freeSpace2ndTo1stEnd)
    8582  {
    8583  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8584  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8585  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8586  {
    8587  ++nextAlloc2ndIndex;
    8588  }
    8589 
    8590  // Found non-null allocation.
    8591  if(nextAlloc2ndIndex < suballoc2ndCount)
    8592  {
    8593  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8594 
    8595  // 1. Process free space before this allocation.
    8596  if(lastOffset < suballoc.offset)
    8597  {
    8598  // There is free space from lastOffset to suballoc.offset.
    8599  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8600  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8601  }
    8602 
    8603  // 2. Process this allocation.
    8604  // There is allocation with suballoc.offset, suballoc.size.
    8605  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8606 
    8607  // 3. Prepare for next iteration.
    8608  lastOffset = suballoc.offset + suballoc.size;
    8609  ++nextAlloc2ndIndex;
    8610  }
    8611  // We are at the end.
    8612  else
    8613  {
    8614  if(lastOffset < freeSpace2ndTo1stEnd)
    8615  {
    8616  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8617  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8618  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8619  }
    8620 
    8621  // End of loop.
    8622  lastOffset = freeSpace2ndTo1stEnd;
    8623  }
    8624  }
    8625  }
    8626 
    8627  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8628  while(lastOffset < freeSpace1stTo2ndEnd)
    8629  {
    8630  // Find next non-null allocation or move nextAllocIndex to the end.
    8631  while(nextAlloc1stIndex < suballoc1stCount &&
    8632  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8633  {
    8634  ++nextAlloc1stIndex;
    8635  }
    8636 
    8637  // Found non-null allocation.
    8638  if(nextAlloc1stIndex < suballoc1stCount)
    8639  {
    8640  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8641 
    8642  // 1. Process free space before this allocation.
    8643  if(lastOffset < suballoc.offset)
    8644  {
    8645  // There is free space from lastOffset to suballoc.offset.
    8646  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8647  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8648  }
    8649 
    8650  // 2. Process this allocation.
    8651  // There is allocation with suballoc.offset, suballoc.size.
    8652  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8653 
    8654  // 3. Prepare for next iteration.
    8655  lastOffset = suballoc.offset + suballoc.size;
    8656  ++nextAlloc1stIndex;
    8657  }
    8658  // We are at the end.
    8659  else
    8660  {
    8661  if(lastOffset < freeSpace1stTo2ndEnd)
    8662  {
    8663  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8664  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8665  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8666  }
    8667 
    8668  // End of loop.
    8669  lastOffset = freeSpace1stTo2ndEnd;
    8670  }
    8671  }
    8672 
    8673  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8674  {
    8675  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8676  while(lastOffset < size)
    8677  {
    8678  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8679  while(nextAlloc2ndIndex != SIZE_MAX &&
    8680  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8681  {
    8682  --nextAlloc2ndIndex;
    8683  }
    8684 
    8685  // Found non-null allocation.
    8686  if(nextAlloc2ndIndex != SIZE_MAX)
    8687  {
    8688  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8689 
    8690  // 1. Process free space before this allocation.
    8691  if(lastOffset < suballoc.offset)
    8692  {
    8693  // There is free space from lastOffset to suballoc.offset.
    8694  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8695  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8696  }
    8697 
    8698  // 2. Process this allocation.
    8699  // There is allocation with suballoc.offset, suballoc.size.
    8700  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8701 
    8702  // 3. Prepare for next iteration.
    8703  lastOffset = suballoc.offset + suballoc.size;
    8704  --nextAlloc2ndIndex;
    8705  }
    8706  // We are at the end.
    8707  else
    8708  {
    8709  if(lastOffset < size)
    8710  {
    8711  // There is free space from lastOffset to size.
    8712  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8713  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8714  }
    8715 
    8716  // End of loop.
    8717  lastOffset = size;
    8718  }
    8719  }
    8720  }
    8721 
    8722  PrintDetailedMap_End(json);
    8723 }
    8724 #endif // #if VMA_STATS_STRING_ENABLED
    8725 
/*
Tries to find a place for a new allocation of `allocSize` bytes aligned to
`allocAlignment` inside this linear block, without modifying any metadata.
On success fills *pAllocationRequest (offset, sumFreeSize, sumItemSize,
itemsToMakeLostCount) and returns true; returns false when the allocation
cannot fit.

Placement strategy, driven by `upperAddress` and m_2ndVectorMode:
- upperAddress == true: allocate at the top, growing the 2nd vector downward
  (double-stack mode). Invalid while the block is used as a ring buffer.
- upperAddress == false: first try appending after the end of the 1st vector;
  if that fails and the 2nd vector is empty or already a ring buffer, try
  wrapping around to the low end (before the start of the 1st vector),
  optionally making old allocations lost when `canMakeOtherLost` is set.

`bufferImageGranularity` conflicts between buffers and images that would share
a page are resolved by enlarging the effective alignment or rejecting the spot.
NOTE(review): the `strategy` parameter is accepted but not consulted anywhere
in this implementation — the linear algorithm has only one placement policy.
*/
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        // Upper-address allocations turn the 2nd vector into a down-growing stack;
        // that is mutually exclusive with ring-buffer usage.
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            // NOTE(review): this subtraction may wrap (VkDeviceSize is unsigned) when
            // allocSize > lastSuballoc.offset, but the guard below returns before the
            // wrapped value is used, so the wrap is harmless (unsigned wrap is defined).
            resultBaseOffset = lastSuballoc.offset - allocSize;
            if(allocSize > lastSuballoc.offset)
            {
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end.
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            // Walk 2nd vector from back (lowest offsets first, closest to resultOffset).
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Align down to a whole granularity page to separate the conflicting resources.
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            // Free span is [endOf1st, resultBaseOffset + allocSize).
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            // In double-stack mode, free space ends where the down-growing 2nd stack begins.
            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                // Advance index1st past every 1st-vector suballocation that collides with
                // the proposed range, counting those that can be made lost; fail if any
                // colliding allocation cannot become lost this frame.
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            // NOTE(review): the first clause uses strict `< size` while the second uses
            // `<=` against the next 1st-vector offset; the strict comparison rejects an
            // allocation that would exactly fill the block — confirm whether that
            // asymmetry is intentional.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                // Free size = span up to the next surviving 1st-vector item (or block end),
                // minus the bytes still occupied by items that will be made lost.
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    return false;
}
    9098 
/*
Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
walking the 1st suballocation vector starting at its first used item.
Returns true when all of them were made lost (or none were needed),
false as soon as one allocation refuses to become lost.
On success, member counters (m_SumFreeSize, m_1stNullItemsMiddleCount) are
updated and CleanupAfterFree() compacts the vectors.
*/
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Items to make lost are only ever requested from the 1st vector
    // (ring-buffer head); double-stack mode never produces such requests.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount; // First potentially-used item.
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        // Free items are skipped; they don't count towards itemsToMakeLostCount.
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the item into a free hole and account for it.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
    9143 
    9144 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9145 {
    9146  uint32_t lostAllocationCount = 0;
    9147 
    9148  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9149  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9150  {
    9151  VmaSuballocation& suballoc = suballocations1st[i];
    9152  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9153  suballoc.hAllocation->CanBecomeLost() &&
    9154  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9155  {
    9156  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9157  suballoc.hAllocation = VK_NULL_HANDLE;
    9158  ++m_1stNullItemsMiddleCount;
    9159  m_SumFreeSize += suballoc.size;
    9160  ++lostAllocationCount;
    9161  }
    9162  }
    9163 
    9164  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9165  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9166  {
    9167  VmaSuballocation& suballoc = suballocations2nd[i];
    9168  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9169  suballoc.hAllocation->CanBecomeLost() &&
    9170  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9171  {
    9172  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9173  suballoc.hAllocation = VK_NULL_HANDLE;
    9174  ++m_2ndNullItemsCount;
    9175  ++lostAllocationCount;
    9176  }
    9177  }
    9178 
    9179  if(lostAllocationCount)
    9180  {
    9181  CleanupAfterFree();
    9182  }
    9183 
    9184  return lostAllocationCount;
    9185 }
    9186 
    9187 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    9188 {
    9189  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9190  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9191  {
    9192  const VmaSuballocation& suballoc = suballocations1st[i];
    9193  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9194  {
    9195  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9196  {
    9197  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9198  return VK_ERROR_VALIDATION_FAILED_EXT;
    9199  }
    9200  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9201  {
    9202  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9203  return VK_ERROR_VALIDATION_FAILED_EXT;
    9204  }
    9205  }
    9206  }
    9207 
    9208  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9209  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9210  {
    9211  const VmaSuballocation& suballoc = suballocations2nd[i];
    9212  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9213  {
    9214  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9215  {
    9216  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9217  return VK_ERROR_VALIDATION_FAILED_EXT;
    9218  }
    9219  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9220  {
    9221  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9222  return VK_ERROR_VALIDATION_FAILED_EXT;
    9223  }
    9224  }
    9225  }
    9226 
    9227  return VK_SUCCESS;
    9228 }
    9229 
/*
Commits an allocation at the position previously found by
CreateAllocationRequest. upperAddress selects the upper (double-stack) side;
otherwise the item is appended to the 1st vector or wraps into the 2nd vector
(ring-buffer mode), depending on where request.offset falls.
*/
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Upper-address allocations force double-stack mode; mixing with
        // ring-buffer usage is a usage error.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // Offset matches neither the end of 1st nor a valid wrap position.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
    9299 
    9300 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9301 {
    9302  FreeAtOffset(allocation->GetOffset());
    9303 }
    9304 
/*
Frees the suballocation located at the given offset. Fast paths handle the
first item of the 1st vector and the last item of either vector; otherwise a
binary search over the sorted vectors locates the item. Asserts if no
suballocation with this offset exists.
*/
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    // NOTE(review): assumes suballocations2nd is non-empty in these modes —
    // CleanupAfterFree() appears to reset the mode to EMPTY when 2nd drains; confirm.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Mark as a free hole; it will be compacted away by CleanupAfterFree().
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // 2nd vector is sorted ascending in ring-buffer mode, descending in
        // double-stack mode, hence the two comparator variants.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9393 
    9394 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9395 {
    9396  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9397  const size_t suballocCount = AccessSuballocations1st().size();
    9398  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9399 }
    9400 
/*
Housekeeping run after every free / make-lost operation:
- resets everything when the block became empty;
- extends the null-item runs at the beginning and end of the 1st vector and at
  the end of the 2nd vector;
- optionally compacts the 1st vector (see ShouldCompact1st);
- when the 1st vector drains while the 2nd holds ring-buffer items, swaps the
  two vectors so the 2nd becomes the new 1st.
Statement order is significant throughout.
*/
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Slide all non-null items to the front, preserving order, then shrink.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Leading null items of the swapped-in vector become the new
                // begin-count, trimmed off the middle-count.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                m_1stVectorIndex ^= 1; // Actual swap: flip which buffer is "1st".
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9497 
    9498 
    9500 // class VmaBlockMetadata_Buddy
    9501 
// Constructs empty buddy metadata; the actual tree is built in Init().
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1), // The root node created in Init() counts as one free node.
    m_SumFreeSize(0)
{
    // Zero all per-level free lists (front/back pointers become null).
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9511 
// Recursively destroys the whole node tree.
// NOTE(review): DeleteNode dereferences its argument, so this assumes Init()
// was called (m_Root != VMA_NULL) — confirm metadata is always initialized.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
    9516 
/*
Initializes the buddy tree for a block of the given size. The buddy allocator
manages only the largest power-of-2 portion of the block; the remainder is
unusable. Creates the root node as the single free node at level 0.
*/
void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    // Round down to a power of 2; the tail beyond m_UsableSize is wasted.
    m_UsableSize = VmaPrevPow2(size);
    m_SumFreeSize = m_UsableSize;

    // Calculate m_LevelCount: add levels while nodes stay >= MIN_NODE_SIZE,
    // capped at MAX_LEVELS.
    m_LevelCount = 1;
    while(m_LevelCount < MAX_LEVELS &&
        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    {
        ++m_LevelCount;
    }

    // Root node spans the whole usable size.
    Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    rootNode->offset = 0;
    rootNode->type = Node::TYPE_FREE;
    rootNode->parent = VMA_NULL;
    rootNode->buddy = VMA_NULL;

    m_Root = rootNode;
    AddToFreeListFront(0, rootNode);
}
    9541 
/*
Debug validation of the whole buddy structure: the node tree (via
ValidateNode), the aggregate counters, the doubly-linked free lists per level,
and that free lists beyond m_LevelCount are unused. Returns true when valid;
VMA_VALIDATE reports and fails otherwise.
*/
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // Head of each list has no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                // Tail of the list must be the recorded back pointer.
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                // Forward/backward links must agree.
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9584 
    9585 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9586 {
    9587  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9588  {
    9589  if(m_FreeList[level].front != VMA_NULL)
    9590  {
    9591  return LevelToNodeSize(level);
    9592  }
    9593  }
    9594  return 0;
    9595 }
    9596 
/*
Fills outInfo with statistics for this block: resets all fields, accumulates
over the node tree via CalcAllocationStatInfoNode, then accounts the unusable
tail (the non-power-of-2 remainder) as one extra unused range.
*/
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    // Min fields start at UINT64_MAX so the first sample always lowers them.
    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
    9620 
    9621 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9622 {
    9623  const VkDeviceSize unusableSize = GetUnusableSize();
    9624 
    9625  inoutStats.size += GetSize();
    9626  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9627  inoutStats.allocationCount += m_AllocationCount;
    9628  inoutStats.unusedRangeCount += m_FreeCount;
    9629  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9630 
    9631  if(unusableSize > 0)
    9632  {
    9633  ++inoutStats.unusedRangeCount;
    9634  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9635  }
    9636 }
    9637 
    9638 #if VMA_STATS_STRING_ENABLED
    9639 
/*
Writes a detailed JSON map of this block: summary header, every node of the
tree (via PrintDetailedMapNode), and the unusable tail as a final unused range.
*/
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize: computes full stats just for the header counts.
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
    9664 
    9665 #endif // #if VMA_STATS_STRING_ENABLED
    9666 
/*
Searches the free lists for a node that can hold allocSize with allocAlignment.
Scans from the target level up towards level 0 (larger nodes) and takes the
first suitably aligned free node; Alloc() later splits it down to the target
level. Lost allocations are not supported by the buddy algorithm, so
currentFrameIndex, frameInUseCount, canMakeOtherLost and strategy are unused.
Returns true and fills *pAllocationRequest on success.
*/
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Iterates level = targetLevel, targetLevel-1, ..., 0 (the "level--" in the
    // condition both tests and decrements), i.e. target node size and larger.
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Level of the chosen node, smuggled to Alloc() via customData.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    9717 
    9718 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9719  uint32_t currentFrameIndex,
    9720  uint32_t frameInUseCount,
    9721  VmaAllocationRequest* pAllocationRequest)
    9722 {
    9723  /*
    9724  Lost allocations are not supported in buddy allocator at the moment.
    9725  Support might be added in the future.
    9726  */
    9727  return pAllocationRequest->itemsToMakeLostCount == 0;
    9728 }
    9729 
/*
Lost allocations are not supported in buddy allocator at the moment.
Support might be added in the future.
Always reports zero allocations made lost.
*/
uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    return 0;
}
    9738 
/*
Commits an allocation found by CreateAllocationRequest: locates the chosen
free node (its level travels in request.customData), splits it repeatedly
until the node size matches the target level, then converts the final node
into an allocation node and updates counters.
*/
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Find the free node with the requested offset in its level's free list.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // (leftChild is pushed last so it ends up at the front.)
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fulfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9813 
    9814 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9815 {
    9816  if(node->type == Node::TYPE_SPLIT)
    9817  {
    9818  DeleteNode(node->split.leftChild->buddy);
    9819  DeleteNode(node->split.leftChild);
    9820  }
    9821 
    9822  vma_delete(GetAllocationCallbacks(), node);
    9823 }
    9824 
/*
Recursive per-node validation: checks parent/buddy links, accumulates
free-size and allocation counts into ctx, and descends into split nodes.
Returns true when the subtree rooted at curr is consistent.
*/
bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
{
    VMA_VALIDATE(level < m_LevelCount);
    VMA_VALIDATE(curr->parent == parent);
    // Only the root has no buddy, and only the root has no parent.
    VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    switch(curr->type)
    {
    case Node::TYPE_FREE:
        // curr->free.prev, next are validated separately.
        ctx.calculatedSumFreeSize += levelNodeSize;
        ++ctx.calculatedFreeCount;
        break;
    case Node::TYPE_ALLOCATION:
        ++ctx.calculatedAllocationCount;
        // Internal fragmentation: the node may be larger than the allocation.
        ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
        VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
        break;
    case Node::TYPE_SPLIT:
        {
            const uint32_t childrenLevel = level + 1;
            const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
            const Node* const leftChild = curr->split.leftChild;
            VMA_VALIDATE(leftChild != VMA_NULL);
            VMA_VALIDATE(leftChild->offset == curr->offset);
            if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for left child failed.");
            }
            const Node* const rightChild = leftChild->buddy;
            VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
            if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for right child failed.");
            }
        }
        break;
    default:
        return false;
    }

    return true;
}
    9868 
    9869 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9870 {
    9871  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9872  uint32_t level = 0;
    9873  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9874  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9875  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9876  {
    9877  ++level;
    9878  currLevelNodeSize = nextLevelNodeSize;
    9879  nextLevelNodeSize = currLevelNodeSize >> 1;
    9880  }
    9881  return level;
    9882 }
    9883 
    9884 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
    9885 {
    9886  // Find node and level.
    9887  Node* node = m_Root;
    9888  VkDeviceSize nodeOffset = 0;
    9889  uint32_t level = 0;
    9890  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    9891  while(node->type == Node::TYPE_SPLIT)
    9892  {
    9893  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
    9894  if(offset < nodeOffset + nextLevelSize)
    9895  {
    9896  node = node->split.leftChild;
    9897  }
    9898  else
    9899  {
    9900  node = node->split.leftChild->buddy;
    9901  nodeOffset += nextLevelSize;
    9902  }
    9903  ++level;
    9904  levelNodeSize = nextLevelSize;
    9905  }
    9906 
    9907  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    9908  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
    9909 
    9910  ++m_FreeCount;
    9911  --m_AllocationCount;
    9912  m_SumFreeSize += alloc->GetSize();
    9913 
    9914  node->type = Node::TYPE_FREE;
    9915 
    9916  // Join free nodes if possible.
    9917  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    9918  {
    9919  RemoveFromFreeList(level, node->buddy);
    9920  Node* const parent = node->parent;
    9921 
    9922  vma_delete(GetAllocationCallbacks(), node->buddy);
    9923  vma_delete(GetAllocationCallbacks(), node);
    9924  parent->type = Node::TYPE_FREE;
    9925 
    9926  node = parent;
    9927  --level;
    9928  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
    9929  --m_FreeCount;
    9930  }
    9931 
    9932  AddToFreeListFront(level, node);
    9933 }
    9934 
    9935 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9936 {
    9937  switch(node->type)
    9938  {
    9939  case Node::TYPE_FREE:
    9940  ++outInfo.unusedRangeCount;
    9941  outInfo.unusedBytes += levelNodeSize;
    9942  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9943  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9944  break;
    9945  case Node::TYPE_ALLOCATION:
    9946  {
    9947  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9948  ++outInfo.allocationCount;
    9949  outInfo.usedBytes += allocSize;
    9950  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9951  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9952 
    9953  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9954  if(unusedRangeSize > 0)
    9955  {
    9956  ++outInfo.unusedRangeCount;
    9957  outInfo.unusedBytes += unusedRangeSize;
    9958  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9959  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9960  }
    9961  }
    9962  break;
    9963  case Node::TYPE_SPLIT:
    9964  {
    9965  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9966  const Node* const leftChild = node->split.leftChild;
    9967  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9968  const Node* const rightChild = leftChild->buddy;
    9969  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9970  }
    9971  break;
    9972  default:
    9973  VMA_ASSERT(0);
    9974  }
    9975 }
    9976 
    9977 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9978 {
    9979  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9980 
    9981  // List is empty.
    9982  Node* const frontNode = m_FreeList[level].front;
    9983  if(frontNode == VMA_NULL)
    9984  {
    9985  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9986  node->free.prev = node->free.next = VMA_NULL;
    9987  m_FreeList[level].front = m_FreeList[level].back = node;
    9988  }
    9989  else
    9990  {
    9991  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9992  node->free.prev = VMA_NULL;
    9993  node->free.next = frontNode;
    9994  frontNode->free.prev = node;
    9995  m_FreeList[level].front = node;
    9996  }
    9997 }
    9998 
    9999 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    10000 {
    10001  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    10002 
    10003  // It is at the front.
    10004  if(node->free.prev == VMA_NULL)
    10005  {
    10006  VMA_ASSERT(m_FreeList[level].front == node);
    10007  m_FreeList[level].front = node->free.next;
    10008  }
    10009  else
    10010  {
    10011  Node* const prevFreeNode = node->free.prev;
    10012  VMA_ASSERT(prevFreeNode->free.next == node);
    10013  prevFreeNode->free.next = node->free.next;
    10014  }
    10015 
    10016  // It is at the back.
    10017  if(node->free.next == VMA_NULL)
    10018  {
    10019  VMA_ASSERT(m_FreeList[level].back == node);
    10020  m_FreeList[level].back = node->free.prev;
    10021  }
    10022  else
    10023  {
    10024  Node* const nextFreeNode = node->free.next;
    10025  VMA_ASSERT(nextFreeNode->free.prev == node);
    10026  nextFreeNode->free.prev = node->free.prev;
    10027  }
    10028 }
    10029 
    10030 #if VMA_STATS_STRING_ENABLED
    10031 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    10032 {
    10033  switch(node->type)
    10034  {
    10035  case Node::TYPE_FREE:
    10036  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    10037  break;
    10038  case Node::TYPE_ALLOCATION:
    10039  {
    10040  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    10041  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10042  if(allocSize < levelNodeSize)
    10043  {
    10044  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    10045  }
    10046  }
    10047  break;
    10048  case Node::TYPE_SPLIT:
    10049  {
    10050  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    10051  const Node* const leftChild = node->split.leftChild;
    10052  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    10053  const Node* const rightChild = leftChild->buddy;
    10054  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    10055  }
    10056  break;
    10057  default:
    10058  VMA_ASSERT(0);
    10059  }
    10060 }
    10061 #endif // #if VMA_STATS_STRING_ENABLED
    10062 
    10063 
    10065 // class VmaDeviceMemoryBlock
    10066 
    // Constructs an empty block: no metadata, no VkDeviceMemory, not mapped.
    // Real initialization happens later in Init().
    10067 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    10068  m_pMetadata(VMA_NULL),
    10069  m_MemoryTypeIndex(UINT32_MAX),
    10070  m_Id(0),
    10071  m_hMemory(VK_NULL_HANDLE),
    10072  m_MapCount(0),
    10073  m_pMappedData(VMA_NULL)
    10074 {
    10075 }
    10076 
    10077 void VmaDeviceMemoryBlock::Init(
    10078  VmaAllocator hAllocator,
    10079  uint32_t newMemoryTypeIndex,
    10080  VkDeviceMemory newMemory,
    10081  VkDeviceSize newSize,
    10082  uint32_t id,
    10083  uint32_t algorithm)
    10084 {
    10085  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    10086 
    10087  m_MemoryTypeIndex = newMemoryTypeIndex;
    10088  m_Id = id;
    10089  m_hMemory = newMemory;
    10090 
    10091  switch(algorithm)
    10092  {
    10094  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    10095  break;
    10097  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    10098  break;
    10099  default:
    10100  VMA_ASSERT(0);
    10101  // Fall-through.
    10102  case 0:
    10103  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    10104  }
    10105  m_pMetadata->Init(newSize);
    10106 }
    10107 
    10108 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    10109 {
    10110  // This is the most important assert in the entire library.
    10111  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    10112  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    10113 
    10114  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    10115  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    10116  m_hMemory = VK_NULL_HANDLE;
    10117 
    10118  vma_delete(allocator, m_pMetadata);
    10119  m_pMetadata = VMA_NULL;
    10120 }
    10121 
    10122 bool VmaDeviceMemoryBlock::Validate() const
    10123 {
    10124  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    10125  (m_pMetadata->GetSize() != 0));
    10126 
    10127  return m_pMetadata->Validate();
    10128 }
    10129 
    10130 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    10131 {
    10132  void* pData = nullptr;
    10133  VkResult res = Map(hAllocator, 1, &pData);
    10134  if(res != VK_SUCCESS)
    10135  {
    10136  return res;
    10137  }
    10138 
    10139  res = m_pMetadata->CheckCorruption(pData);
    10140 
    10141  Unmap(hAllocator, 1);
    10142 
    10143  return res;
    10144 }
    10145 
    10146 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    10147 {
    10148  if(count == 0)
    10149  {
    10150  return VK_SUCCESS;
    10151  }
    10152 
    10153  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10154  if(m_MapCount != 0)
    10155  {
    10156  m_MapCount += count;
    10157  VMA_ASSERT(m_pMappedData != VMA_NULL);
    10158  if(ppData != VMA_NULL)
    10159  {
    10160  *ppData = m_pMappedData;
    10161  }
    10162  return VK_SUCCESS;
    10163  }
    10164  else
    10165  {
    10166  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    10167  hAllocator->m_hDevice,
    10168  m_hMemory,
    10169  0, // offset
    10170  VK_WHOLE_SIZE,
    10171  0, // flags
    10172  &m_pMappedData);
    10173  if(result == VK_SUCCESS)
    10174  {
    10175  if(ppData != VMA_NULL)
    10176  {
    10177  *ppData = m_pMappedData;
    10178  }
    10179  m_MapCount = count;
    10180  }
    10181  return result;
    10182  }
    10183 }
    10184 
    10185 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    10186 {
    10187  if(count == 0)
    10188  {
    10189  return;
    10190  }
    10191 
    10192  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10193  if(m_MapCount >= count)
    10194  {
    10195  m_MapCount -= count;
    10196  if(m_MapCount == 0)
    10197  {
    10198  m_pMappedData = VMA_NULL;
    10199  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    10200  }
    10201  }
    10202  else
    10203  {
    10204  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    10205  }
    10206 }
    10207 
    10208 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10209 {
    10210  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10211  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10212 
    10213  void* pData;
    10214  VkResult res = Map(hAllocator, 1, &pData);
    10215  if(res != VK_SUCCESS)
    10216  {
    10217  return res;
    10218  }
    10219 
    10220  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10221  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10222 
    10223  Unmap(hAllocator, 1);
    10224 
    10225  return VK_SUCCESS;
    10226 }
    10227 
    10228 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10229 {
    10230  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10231  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10232 
    10233  void* pData;
    10234  VkResult res = Map(hAllocator, 1, &pData);
    10235  if(res != VK_SUCCESS)
    10236  {
    10237  return res;
    10238  }
    10239 
    10240  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10241  {
    10242  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10243  }
    10244  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10245  {
    10246  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10247  }
    10248 
    10249  Unmap(hAllocator, 1);
    10250 
    10251  return VK_SUCCESS;
    10252 }
    10253 
    10254 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10255  const VmaAllocator hAllocator,
    10256  const VmaAllocation hAllocation,
    10257  VkBuffer hBuffer)
    10258 {
    10259  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10260  hAllocation->GetBlock() == this);
    10261  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10262  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10263  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10264  hAllocator->m_hDevice,
    10265  hBuffer,
    10266  m_hMemory,
    10267  hAllocation->GetOffset());
    10268 }
    10269 
    10270 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10271  const VmaAllocator hAllocator,
    10272  const VmaAllocation hAllocation,
    10273  VkImage hImage)
    10274 {
    10275  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10276  hAllocation->GetBlock() == this);
    10277  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10278  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10279  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10280  hAllocator->m_hDevice,
    10281  hImage,
    10282  m_hMemory,
    10283  hAllocation->GetOffset());
    10284 }
    10285 
    // Resets all statistics counters to zero. The min-trackers are seeded
    // with UINT64_MAX so the first sample folded in (see VmaAddStatInfo)
    // always replaces them; averages are derived later by
    // VmaPostprocessCalcStatInfo.
    10286 static void InitStatInfo(VmaStatInfo& outInfo)
    10287 {
    10288  memset(&outInfo, 0, sizeof(outInfo));
    10289  outInfo.allocationSizeMin = UINT64_MAX;
    10290  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10291 }
    10292 
    10293 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    // Counts and byte totals are summed; min/max trackers are combined
    // element-wise with VMA_MIN/VMA_MAX (min fields start at UINT64_MAX,
    // see InitStatInfo).
    10294 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    10295 {
    10296  inoutInfo.blockCount += srcInfo.blockCount;
    10297  inoutInfo.allocationCount += srcInfo.allocationCount;
    10298  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    10299  inoutInfo.usedBytes += srcInfo.usedBytes;
    10300  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    10301  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    10302  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    10303  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    10304  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    10305 }
    10306 
    10307 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10308 {
    10309  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10310  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10311  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10312  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10313 }
    10314 
    // Constructs a custom pool; all real work is delegated to the member
    // block vector configured from createInfo.
    10315 VmaPool_T::VmaPool_T(
    10316  VmaAllocator hAllocator,
    10317  const VmaPoolCreateInfo& createInfo,
    10318  VkDeviceSize preferredBlockSize) :
    10319  m_BlockVector(
    10320  hAllocator,
    10321  createInfo.memoryTypeIndex,
    // blockSize == 0 means "use the allocator's preferred default block size".
    10322  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
    10323  createInfo.minBlockCount,
    10324  createInfo.maxBlockCount,
    // Granularity of 1 effectively disables buffer/image granularity handling.
    10325  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    10326  createInfo.frameInUseCount,
    10327  true, // isCustomPool
    10328  createInfo.blockSize != 0, // explicitBlockSize
    10329  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    10330  m_Id(0)
    10331 {
    10332 }
    10333 
    // Empty destructor: the m_BlockVector member tears itself down via its
    // own destructor.
    10334 VmaPool_T::~VmaPool_T()
    10335 {
    10336 }
    10337 
    10338 #if VMA_STATS_STRING_ENABLED
    10339 
    10340 #endif // #if VMA_STATS_STRING_ENABLED
    10341 
    // Stores configuration only; starts with an empty set of blocks. Blocks
    // are created later via CreateMinBlocks() or on demand in Allocate().
    10342 VmaBlockVector::VmaBlockVector(
    10343  VmaAllocator hAllocator,
    10344  uint32_t memoryTypeIndex,
    10345  VkDeviceSize preferredBlockSize,
    10346  size_t minBlockCount,
    10347  size_t maxBlockCount,
    10348  VkDeviceSize bufferImageGranularity,
    10349  uint32_t frameInUseCount,
    10350  bool isCustomPool,
    10351  bool explicitBlockSize,
    10352  uint32_t algorithm) :
    10353  m_hAllocator(hAllocator),
    10354  m_MemoryTypeIndex(memoryTypeIndex),
    10355  m_PreferredBlockSize(preferredBlockSize),
    10356  m_MinBlockCount(minBlockCount),
    10357  m_MaxBlockCount(maxBlockCount),
    10358  m_BufferImageGranularity(bufferImageGranularity),
    10359  m_FrameInUseCount(frameInUseCount),
    10360  m_IsCustomPool(isCustomPool),
    10361  m_ExplicitBlockSize(explicitBlockSize),
    10362  m_Algorithm(algorithm),
    10363  m_HasEmptyBlock(false),
    10364  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    10365  m_pDefragmentator(VMA_NULL),
    10366  m_NextBlockId(0)
    10367 {
    10368 }
    10369 
    10370 VmaBlockVector::~VmaBlockVector()
    10371 {
    10372  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10373 
    10374  for(size_t i = m_Blocks.size(); i--; )
    10375  {
    10376  m_Blocks[i]->Destroy(m_hAllocator);
    10377  vma_delete(m_hAllocator, m_Blocks[i]);
    10378  }
    10379 }
    10380 
    10381 VkResult VmaBlockVector::CreateMinBlocks()
    10382 {
    10383  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10384  {
    10385  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10386  if(res != VK_SUCCESS)
    10387  {
    10388  return res;
    10389  }
    10390  }
    10391  return VK_SUCCESS;
    10392 }
    10393 
    10394 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10395 {
    10396  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10397 
    10398  const size_t blockCount = m_Blocks.size();
    10399 
    10400  pStats->size = 0;
    10401  pStats->unusedSize = 0;
    10402  pStats->allocationCount = 0;
    10403  pStats->unusedRangeCount = 0;
    10404  pStats->unusedRangeSizeMax = 0;
    10405  pStats->blockCount = blockCount;
    10406 
    10407  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10408  {
    10409  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10410  VMA_ASSERT(pBlock);
    10411  VMA_HEAVY_ASSERT(pBlock->Validate());
    10412  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10413  }
    10414 }
    10415 
    10416 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10417 {
    10418  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10419  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10420  (VMA_DEBUG_MARGIN > 0) &&
    10421  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10422 }
    10423 
    // Upper bound on retries of the canMakeOtherLost loop in
    // VmaBlockVector::Allocate; exceeding it returns VK_ERROR_TOO_MANY_OBJECTS.
    10424 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10425 
    10426 VkResult VmaBlockVector::Allocate(
    10427  VmaPool hCurrentPool,
    10428  uint32_t currentFrameIndex,
    10429  VkDeviceSize size,
    10430  VkDeviceSize alignment,
    10431  const VmaAllocationCreateInfo& createInfo,
    10432  VmaSuballocationType suballocType,
    10433  VmaAllocation* pAllocation)
    10434 {
    10435  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10436  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10437  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10438  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10439  const bool canCreateNewBlock =
    10440  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10441  (m_Blocks.size() < m_MaxBlockCount);
    10442  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10443 
    10444  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10445  // Which in turn is available only when maxBlockCount = 1.
    10446  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10447  {
    10448  canMakeOtherLost = false;
    10449  }
    10450 
    10451  // Upper address can only be used with linear allocator and within single memory block.
    10452  if(isUpperAddress &&
    10453  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10454  {
    10455  return VK_ERROR_FEATURE_NOT_PRESENT;
    10456  }
    10457 
    10458  // Validate strategy.
    10459  switch(strategy)
    10460  {
    10461  case 0:
    10463  break;
    10467  break;
    10468  default:
    10469  return VK_ERROR_FEATURE_NOT_PRESENT;
    10470  }
    10471 
    10472  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10473  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10474  {
    10475  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10476  }
    10477 
    10478  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10479 
    10480  /*
    10481  Under certain condition, this whole section can be skipped for optimization, so
    10482  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10483  e.g. for custom pools with linear algorithm.
    10484  */
    10485  if(!canMakeOtherLost || canCreateNewBlock)
    10486  {
    10487  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10488  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10490 
    10491  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10492  {
    10493  // Use only last block.
    10494  if(!m_Blocks.empty())
    10495  {
    10496  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10497  VMA_ASSERT(pCurrBlock);
    10498  VkResult res = AllocateFromBlock(
    10499  pCurrBlock,
    10500  hCurrentPool,
    10501  currentFrameIndex,
    10502  size,
    10503  alignment,
    10504  allocFlagsCopy,
    10505  createInfo.pUserData,
    10506  suballocType,
    10507  strategy,
    10508  pAllocation);
    10509  if(res == VK_SUCCESS)
    10510  {
    10511  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10512  return VK_SUCCESS;
    10513  }
    10514  }
    10515  }
    10516  else
    10517  {
    10519  {
    10520  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10521  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10522  {
    10523  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10524  VMA_ASSERT(pCurrBlock);
    10525  VkResult res = AllocateFromBlock(
    10526  pCurrBlock,
    10527  hCurrentPool,
    10528  currentFrameIndex,
    10529  size,
    10530  alignment,
    10531  allocFlagsCopy,
    10532  createInfo.pUserData,
    10533  suballocType,
    10534  strategy,
    10535  pAllocation);
    10536  if(res == VK_SUCCESS)
    10537  {
    10538  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10539  return VK_SUCCESS;
    10540  }
    10541  }
    10542  }
    10543  else // WORST_FIT, FIRST_FIT
    10544  {
    10545  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10546  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10547  {
    10548  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10549  VMA_ASSERT(pCurrBlock);
    10550  VkResult res = AllocateFromBlock(
    10551  pCurrBlock,
    10552  hCurrentPool,
    10553  currentFrameIndex,
    10554  size,
    10555  alignment,
    10556  allocFlagsCopy,
    10557  createInfo.pUserData,
    10558  suballocType,
    10559  strategy,
    10560  pAllocation);
    10561  if(res == VK_SUCCESS)
    10562  {
    10563  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10564  return VK_SUCCESS;
    10565  }
    10566  }
    10567  }
    10568  }
    10569 
    10570  // 2. Try to create new block.
    10571  if(canCreateNewBlock)
    10572  {
    10573  // Calculate optimal size for new block.
    10574  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10575  uint32_t newBlockSizeShift = 0;
    10576  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10577 
    10578  if(!m_ExplicitBlockSize)
    10579  {
    10580  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10581  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10582  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10583  {
    10584  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10585  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10586  {
    10587  newBlockSize = smallerNewBlockSize;
    10588  ++newBlockSizeShift;
    10589  }
    10590  else
    10591  {
    10592  break;
    10593  }
    10594  }
    10595  }
    10596 
    10597  size_t newBlockIndex = 0;
    10598  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10599  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10600  if(!m_ExplicitBlockSize)
    10601  {
    10602  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10603  {
    10604  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10605  if(smallerNewBlockSize >= size)
    10606  {
    10607  newBlockSize = smallerNewBlockSize;
    10608  ++newBlockSizeShift;
    10609  res = CreateBlock(newBlockSize, &newBlockIndex);
    10610  }
    10611  else
    10612  {
    10613  break;
    10614  }
    10615  }
    10616  }
    10617 
    10618  if(res == VK_SUCCESS)
    10619  {
    10620  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10621  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10622 
    10623  res = AllocateFromBlock(
    10624  pBlock,
    10625  hCurrentPool,
    10626  currentFrameIndex,
    10627  size,
    10628  alignment,
    10629  allocFlagsCopy,
    10630  createInfo.pUserData,
    10631  suballocType,
    10632  strategy,
    10633  pAllocation);
    10634  if(res == VK_SUCCESS)
    10635  {
    10636  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10637  return VK_SUCCESS;
    10638  }
    10639  else
    10640  {
    10641  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10642  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10643  }
    10644  }
    10645  }
    10646  }
    10647 
    10648  // 3. Try to allocate from existing blocks with making other allocations lost.
    10649  if(canMakeOtherLost)
    10650  {
    10651  uint32_t tryIndex = 0;
    10652  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10653  {
    10654  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10655  VmaAllocationRequest bestRequest = {};
    10656  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10657 
    10658  // 1. Search existing allocations.
    10660  {
    10661  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10662  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10663  {
    10664  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10665  VMA_ASSERT(pCurrBlock);
    10666  VmaAllocationRequest currRequest = {};
    10667  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10668  currentFrameIndex,
    10669  m_FrameInUseCount,
    10670  m_BufferImageGranularity,
    10671  size,
    10672  alignment,
    10673  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10674  suballocType,
    10675  canMakeOtherLost,
    10676  strategy,
    10677  &currRequest))
    10678  {
    10679  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10680  if(pBestRequestBlock == VMA_NULL ||
    10681  currRequestCost < bestRequestCost)
    10682  {
    10683  pBestRequestBlock = pCurrBlock;
    10684  bestRequest = currRequest;
    10685  bestRequestCost = currRequestCost;
    10686 
    10687  if(bestRequestCost == 0)
    10688  {
    10689  break;
    10690  }
    10691  }
    10692  }
    10693  }
    10694  }
    10695  else // WORST_FIT, FIRST_FIT
    10696  {
    10697  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10698  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10699  {
    10700  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10701  VMA_ASSERT(pCurrBlock);
    10702  VmaAllocationRequest currRequest = {};
    10703  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10704  currentFrameIndex,
    10705  m_FrameInUseCount,
    10706  m_BufferImageGranularity,
    10707  size,
    10708  alignment,
    10709  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10710  suballocType,
    10711  canMakeOtherLost,
    10712  strategy,
    10713  &currRequest))
    10714  {
    10715  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10716  if(pBestRequestBlock == VMA_NULL ||
    10717  currRequestCost < bestRequestCost ||
    10719  {
    10720  pBestRequestBlock = pCurrBlock;
    10721  bestRequest = currRequest;
    10722  bestRequestCost = currRequestCost;
    10723 
    10724  if(bestRequestCost == 0 ||
    10726  {
    10727  break;
    10728  }
    10729  }
    10730  }
    10731  }
    10732  }
    10733 
    10734  if(pBestRequestBlock != VMA_NULL)
    10735  {
    10736  if(mapped)
    10737  {
    10738  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10739  if(res != VK_SUCCESS)
    10740  {
    10741  return res;
    10742  }
    10743  }
    10744 
    10745  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10746  currentFrameIndex,
    10747  m_FrameInUseCount,
    10748  &bestRequest))
    10749  {
    10750  // We no longer have an empty Allocation.
    10751  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10752  {
    10753  m_HasEmptyBlock = false;
    10754  }
    10755  // Allocate from this pBlock.
    10756  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10757  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10758  (*pAllocation)->InitBlockAllocation(
    10759  hCurrentPool,
    10760  pBestRequestBlock,
    10761  bestRequest.offset,
    10762  alignment,
    10763  size,
    10764  suballocType,
    10765  mapped,
    10766  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10767  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10768  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10769  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10770  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10771  {
    10772  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10773  }
    10774  if(IsCorruptionDetectionEnabled())
    10775  {
    10776  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10777  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10778  }
    10779  return VK_SUCCESS;
    10780  }
    10781  // else: Some allocations must have been touched while we are here. Next try.
    10782  }
    10783  else
    10784  {
    10785  // Could not find place in any of the blocks - break outer loop.
    10786  break;
    10787  }
    10788  }
    10789  /* Maximum number of tries exceeded - a very unlike event when many other
    10790  threads are simultaneously touching allocations making it impossible to make
    10791  lost at the same time as we try to allocate. */
    10792  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10793  {
    10794  return VK_ERROR_TOO_MANY_OBJECTS;
    10795  }
    10796  }
    10797 
    10798  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10799 }
    10800 
    10801 void VmaBlockVector::Free(
    10802  VmaAllocation hAllocation)
    10803 {
    10804  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10805 
    10806  // Scope for lock.
    10807  {
    10808  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10809 
    10810  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10811 
    10812  if(IsCorruptionDetectionEnabled())
    10813  {
    10814  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10815  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10816  }
    10817 
    10818  if(hAllocation->IsPersistentMap())
    10819  {
    10820  pBlock->Unmap(m_hAllocator, 1);
    10821  }
    10822 
    10823  pBlock->m_pMetadata->Free(hAllocation);
    10824  VMA_HEAVY_ASSERT(pBlock->Validate());
    10825 
    10826  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10827 
    10828  // pBlock became empty after this deallocation.
    10829  if(pBlock->m_pMetadata->IsEmpty())
    10830  {
    10831  // Already has empty Allocation. We don't want to have two, so delete this one.
    10832  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10833  {
    10834  pBlockToDelete = pBlock;
    10835  Remove(pBlock);
    10836  }
    10837  // We now have first empty block.
    10838  else
    10839  {
    10840  m_HasEmptyBlock = true;
    10841  }
    10842  }
    10843  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10844  // (This is optional, heuristics.)
    10845  else if(m_HasEmptyBlock)
    10846  {
    10847  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10848  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10849  {
    10850  pBlockToDelete = pLastBlock;
    10851  m_Blocks.pop_back();
    10852  m_HasEmptyBlock = false;
    10853  }
    10854  }
    10855 
    10856  IncrementallySortBlocks();
    10857  }
    10858 
    10859  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10860  // lock, for performance reason.
    10861  if(pBlockToDelete != VMA_NULL)
    10862  {
    10863  VMA_DEBUG_LOG(" Deleted empty allocation");
    10864  pBlockToDelete->Destroy(m_hAllocator);
    10865  vma_delete(m_hAllocator, pBlockToDelete);
    10866  }
    10867 }
    10868 
    10869 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10870 {
    10871  VkDeviceSize result = 0;
    10872  for(size_t i = m_Blocks.size(); i--; )
    10873  {
    10874  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10875  if(result >= m_PreferredBlockSize)
    10876  {
    10877  break;
    10878  }
    10879  }
    10880  return result;
    10881 }
    10882 
    10883 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10884 {
    10885  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10886  {
    10887  if(m_Blocks[blockIndex] == pBlock)
    10888  {
    10889  VmaVectorRemove(m_Blocks, blockIndex);
    10890  return;
    10891  }
    10892  }
    10893  VMA_ASSERT(0);
    10894 }
    10895 
    10896 void VmaBlockVector::IncrementallySortBlocks()
    10897 {
    10898  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10899  {
    10900  // Bubble sort only until first swap.
    10901  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10902  {
    10903  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10904  {
    10905  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10906  return;
    10907  }
    10908  }
    10909  }
    10910 }
    10911 
    10912 VkResult VmaBlockVector::AllocateFromBlock(
    10913  VmaDeviceMemoryBlock* pBlock,
    10914  VmaPool hCurrentPool,
    10915  uint32_t currentFrameIndex,
    10916  VkDeviceSize size,
    10917  VkDeviceSize alignment,
    10918  VmaAllocationCreateFlags allocFlags,
    10919  void* pUserData,
    10920  VmaSuballocationType suballocType,
    10921  uint32_t strategy,
    10922  VmaAllocation* pAllocation)
    10923 {
    10924  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    10925  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10926  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10927  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10928 
    10929  VmaAllocationRequest currRequest = {};
    10930  if(pBlock->m_pMetadata->CreateAllocationRequest(
    10931  currentFrameIndex,
    10932  m_FrameInUseCount,
    10933  m_BufferImageGranularity,
    10934  size,
    10935  alignment,
    10936  isUpperAddress,
    10937  suballocType,
    10938  false, // canMakeOtherLost
    10939  strategy,
    10940  &currRequest))
    10941  {
    10942  // Allocate from pCurrBlock.
    10943  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    10944 
    10945  if(mapped)
    10946  {
    10947  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    10948  if(res != VK_SUCCESS)
    10949  {
    10950  return res;
    10951  }
    10952  }
    10953 
    10954  // We no longer have an empty Allocation.
    10955  if(pBlock->m_pMetadata->IsEmpty())
    10956  {
    10957  m_HasEmptyBlock = false;
    10958  }
    10959 
    10960  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10961  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
    10962  (*pAllocation)->InitBlockAllocation(
    10963  hCurrentPool,
    10964  pBlock,
    10965  currRequest.offset,
    10966  alignment,
    10967  size,
    10968  suballocType,
    10969  mapped,
    10970  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10971  VMA_HEAVY_ASSERT(pBlock->Validate());
    10972  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    10973  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10974  {
    10975  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10976  }
    10977  if(IsCorruptionDetectionEnabled())
    10978  {
    10979  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
    10980  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10981  }
    10982  return VK_SUCCESS;
    10983  }
    10984  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10985 }
    10986 
    10987 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10988 {
    10989  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10990  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10991  allocInfo.allocationSize = blockSize;
    10992  VkDeviceMemory mem = VK_NULL_HANDLE;
    10993  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10994  if(res < 0)
    10995  {
    10996  return res;
    10997  }
    10998 
    10999  // New VkDeviceMemory successfully created.
    11000 
    11001  // Create new Allocation for it.
    11002  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    11003  pBlock->Init(
    11004  m_hAllocator,
    11005  m_MemoryTypeIndex,
    11006  mem,
    11007  allocInfo.allocationSize,
    11008  m_NextBlockId++,
    11009  m_Algorithm);
    11010 
    11011  m_Blocks.push_back(pBlock);
    11012  if(pNewBlockIndex != VMA_NULL)
    11013  {
    11014  *pNewBlockIndex = m_Blocks.size() - 1;
    11015  }
    11016 
    11017  return VK_SUCCESS;
    11018 }
    11019 
    11020 #if VMA_STATS_STRING_ENABLED
    11021 
    11022 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    11023 {
    11024  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11025 
    11026  json.BeginObject();
    11027 
    11028  if(m_IsCustomPool)
    11029  {
    11030  json.WriteString("MemoryTypeIndex");
    11031  json.WriteNumber(m_MemoryTypeIndex);
    11032 
    11033  json.WriteString("BlockSize");
    11034  json.WriteNumber(m_PreferredBlockSize);
    11035 
    11036  json.WriteString("BlockCount");
    11037  json.BeginObject(true);
    11038  if(m_MinBlockCount > 0)
    11039  {
    11040  json.WriteString("Min");
    11041  json.WriteNumber((uint64_t)m_MinBlockCount);
    11042  }
    11043  if(m_MaxBlockCount < SIZE_MAX)
    11044  {
    11045  json.WriteString("Max");
    11046  json.WriteNumber((uint64_t)m_MaxBlockCount);
    11047  }
    11048  json.WriteString("Cur");
    11049  json.WriteNumber((uint64_t)m_Blocks.size());
    11050  json.EndObject();
    11051 
    11052  if(m_FrameInUseCount > 0)
    11053  {
    11054  json.WriteString("FrameInUseCount");
    11055  json.WriteNumber(m_FrameInUseCount);
    11056  }
    11057 
    11058  if(m_Algorithm != 0)
    11059  {
    11060  json.WriteString("Algorithm");
    11061  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
    11062  }
    11063  }
    11064  else
    11065  {
    11066  json.WriteString("PreferredBlockSize");
    11067  json.WriteNumber(m_PreferredBlockSize);
    11068  }
    11069 
    11070  json.WriteString("Blocks");
    11071  json.BeginObject();
    11072  for(size_t i = 0; i < m_Blocks.size(); ++i)
    11073  {
    11074  json.BeginString();
    11075  json.ContinueString(m_Blocks[i]->GetId());
    11076  json.EndString();
    11077 
    11078  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    11079  }
    11080  json.EndObject();
    11081 
    11082  json.EndObject();
    11083 }
    11084 
    11085 #endif // #if VMA_STATS_STRING_ENABLED
    11086 
    11087 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    11088  VmaAllocator hAllocator,
    11089  uint32_t currentFrameIndex)
    11090 {
    11091  if(m_pDefragmentator == VMA_NULL)
    11092  {
    11093  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    11094  hAllocator,
    11095  this,
    11096  currentFrameIndex);
    11097  }
    11098 
    11099  return m_pDefragmentator;
    11100 }
    11101 
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    /*
    Runs the defragmentator created via EnsureDefragmentator() under the block
    vector mutex. Accumulates results into *pDefragmentationStats (if not
    null), decreases the remaining maxBytesToMove / maxAllocationsToMove
    budgets by the amounts consumed, and destroys blocks that became empty
    (keeping at least m_MinBlockCount blocks alive).
    */
    if(m_pDefragmentator == VMA_NULL)
    {
        // No defragmentator was ever requested - nothing to do.
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        // The defragmentator must not exceed the budgets it was given.
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Iterate backwards so VmaVectorRemove does not
    // invalidate indices not yet visited.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // At or below the minimum block count - keep the block but
                // remember an empty one exists.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    11158 
    11159 void VmaBlockVector::DestroyDefragmentator()
    11160 {
    11161  if(m_pDefragmentator != VMA_NULL)
    11162  {
    11163  vma_delete(m_hAllocator, m_pDefragmentator);
    11164  m_pDefragmentator = VMA_NULL;
    11165  }
    11166 }
    11167 
    11168 void VmaBlockVector::MakePoolAllocationsLost(
    11169  uint32_t currentFrameIndex,
    11170  size_t* pLostAllocationCount)
    11171 {
    11172  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11173  size_t lostAllocationCount = 0;
    11174  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11175  {
    11176  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11177  VMA_ASSERT(pBlock);
    11178  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    11179  }
    11180  if(pLostAllocationCount != VMA_NULL)
    11181  {
    11182  *pLostAllocationCount = lostAllocationCount;
    11183  }
    11184 }
    11185 
    11186 VkResult VmaBlockVector::CheckCorruption()
    11187 {
    11188  if(!IsCorruptionDetectionEnabled())
    11189  {
    11190  return VK_ERROR_FEATURE_NOT_PRESENT;
    11191  }
    11192 
    11193  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11194  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11195  {
    11196  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11197  VMA_ASSERT(pBlock);
    11198  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    11199  if(res != VK_SUCCESS)
    11200  {
    11201  return res;
    11202  }
    11203  }
    11204  return VK_SUCCESS;
    11205 }
    11206 
    11207 void VmaBlockVector::AddStats(VmaStats* pStats)
    11208 {
    11209  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11210  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11211 
    11212  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11213 
    11214  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11215  {
    11216  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11217  VMA_ASSERT(pBlock);
    11218  VMA_HEAVY_ASSERT(pBlock->Validate());
    11219  VmaStatInfo allocationStatInfo;
    11220  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11221  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11222  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11223  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11224  }
    11225 }
    11226 
    11228 // VmaDefragmentator members definition
    11229 
// Constructs a defragmentator bound to one block vector. Per-allocation and
// per-block bookkeeping vectors use the allocator's CPU allocation callbacks.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Defragmentation is supported only for block vectors using the default
    // algorithm (GetAlgorithm() == 0).
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11244 
    11245 VmaDefragmentator::~VmaDefragmentator()
    11246 {
    11247  for(size_t i = m_Blocks.size(); i--; )
    11248  {
    11249  vma_delete(m_hAllocator, m_Blocks[i]);
    11250  }
    11251 }
    11252 
    11253 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11254 {
    11255  AllocationInfo allocInfo;
    11256  allocInfo.m_hAllocation = hAlloc;
    11257  allocInfo.m_pChanged = pChanged;
    11258  m_Allocations.push_back(allocInfo);
    11259 }
    11260 
    11261 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11262 {
    11263  // It has already been mapped for defragmentation.
    11264  if(m_pMappedDataForDefragmentation)
    11265  {
    11266  *ppMappedData = m_pMappedDataForDefragmentation;
    11267  return VK_SUCCESS;
    11268  }
    11269 
    11270  // It is originally mapped.
    11271  if(m_pBlock->GetMappedData())
    11272  {
    11273  *ppMappedData = m_pBlock->GetMappedData();
    11274  return VK_SUCCESS;
    11275  }
    11276 
    11277  // Map on first usage.
    11278  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11279  *ppMappedData = m_pMappedDataForDefragmentation;
    11280  return res;
    11281 }
    11282 
    11283 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11284 {
    11285  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11286  {
    11287  m_pBlock->Unmap(hAllocator, 1);
    11288  }
    11289 }
    11290 
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    /*
    Performs one round of defragmentation: walks candidate allocations from
    the most "source" block/largest allocation downwards and tries to move
    each into an earlier position. Returns VK_SUCCESS when all candidates are
    processed, VK_INCOMPLETE when a byte/count budget is reached, or an error
    from mapping block memory.
    */
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // srcAllocIndex == SIZE_MAX is a sentinel meaning "start at the last
    // allocation of the current block" (handled by the while loop below).
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            // NOTE(review): compared with the CreateAllocationRequest call in
            // VmaBlockVector::AllocateFromBlock, this call appears to be
            // missing the 'strategy' argument (a listing line seems lost
            // between "canMakeOtherLost" and "&dstAllocRequest") - TODO
            // confirm against the canonical vk_mem_alloc.h.
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
                MoveMakesSense(
                    dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                if(VMA_DEBUG_MARGIN > 0)
                {
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Commit the move in the metadata: allocate at destination,
                // free at source, and repoint the allocation handle.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    11434 
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    /*
    Top-level defragmentation driver for the allocations registered via
    AddAllocation(). Builds per-block bookkeeping, buckets allocations into
    their blocks, runs up to two rounds of DefragmentRound(), then unmaps any
    memory that was mapped only for defragmentation.
    */
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            // Binary search is valid because m_Blocks was sorted by pointer above.
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to a known block.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11502 
    11503 bool VmaDefragmentator::MoveMakesSense(
    11504  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11505  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11506 {
    11507  if(dstBlockIndex < srcBlockIndex)
    11508  {
    11509  return true;
    11510  }
    11511  if(dstBlockIndex > srcBlockIndex)
    11512  {
    11513  return false;
    11514  }
    11515  if(dstOffset < srcOffset)
    11516  {
    11517  return true;
    11518  }
    11519  return false;
    11520 }
    11521 
    11523 // VmaRecorder
    11524 
    11525 #if VMA_RECORDING_ENABLED
    11526 
// Constructs a recorder in a not-yet-initialized state; Init() must be called
// before any Record* function. INT64_MAX appears to act as a "not yet
// calibrated" sentinel for the performance-counter fields - confirm against
// GetBasicParams().
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11535 
    11536 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    11537 {
    11538  m_UseMutex = useMutex;
    11539  m_Flags = settings.flags;
    11540 
    11541  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    11542  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    11543 
    11544  // Open file for writing.
    11545  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    11546  if(err != 0)
    11547  {
    11548  return VK_ERROR_INITIALIZATION_FAILED;
    11549  }
    11550 
    11551  // Write header.
    11552  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    11553  fprintf(m_File, "%s\n", "1,4");
    11554 
    11555  return VK_SUCCESS;
    11556 }
    11557 
    11558 VmaRecorder::~VmaRecorder()
    11559 {
    11560  if(m_File != VMA_NULL)
    11561  {
    11562  fclose(m_File);
    11563  }
    11564 }
    11565 
    11566 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    11567 {
    11568  CallParams callParams;
    11569  GetBasicParams(callParams);
    11570 
    11571  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11572  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11573  Flush();
    11574 }
    11575 
    11576 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    11577 {
    11578  CallParams callParams;
    11579  GetBasicParams(callParams);
    11580 
    11581  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11582  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11583  Flush();
    11584 }
    11585 
    11586 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    11587 {
    11588  CallParams callParams;
    11589  GetBasicParams(callParams);
    11590 
    11591  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11592  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    11593  createInfo.memoryTypeIndex,
    11594  createInfo.flags,
    11595  createInfo.blockSize,
    11596  (uint64_t)createInfo.minBlockCount,
    11597  (uint64_t)createInfo.maxBlockCount,
    11598  createInfo.frameInUseCount,
    11599  pool);
    11600  Flush();
    11601 }
    11602 
    11603 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    11604 {
    11605  CallParams callParams;
    11606  GetBasicParams(callParams);
    11607 
    11608  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11609  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    11610  pool);
    11611  Flush();
    11612 }
    11613 
    11614 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    11615  const VkMemoryRequirements& vkMemReq,
    11616  const VmaAllocationCreateInfo& createInfo,
    11617  VmaAllocation allocation)
    11618 {
    11619  CallParams callParams;
    11620  GetBasicParams(callParams);
    11621 
    11622  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11623  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11624  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11625  vkMemReq.size,
    11626  vkMemReq.alignment,
    11627  vkMemReq.memoryTypeBits,
    11628  createInfo.flags,
    11629  createInfo.usage,
    11630  createInfo.requiredFlags,
    11631  createInfo.preferredFlags,
    11632  createInfo.memoryTypeBits,
    11633  createInfo.pool,
    11634  allocation,
    11635  userDataStr.GetString());
    11636  Flush();
    11637 }
    11638 
    11639 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    11640  const VkMemoryRequirements& vkMemReq,
    11641  bool requiresDedicatedAllocation,
    11642  bool prefersDedicatedAllocation,
    11643  const VmaAllocationCreateInfo& createInfo,
    11644  VmaAllocation allocation)
    11645 {
    11646  CallParams callParams;
    11647  GetBasicParams(callParams);
    11648 
    11649  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11650  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11651  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11652  vkMemReq.size,
    11653  vkMemReq.alignment,
    11654  vkMemReq.memoryTypeBits,
    11655  requiresDedicatedAllocation ? 1 : 0,
    11656  prefersDedicatedAllocation ? 1 : 0,
    11657  createInfo.flags,
    11658  createInfo.usage,
    11659  createInfo.requiredFlags,
    11660  createInfo.preferredFlags,
    11661  createInfo.memoryTypeBits,
    11662  createInfo.pool,
    11663  allocation,
    11664  userDataStr.GetString());
    11665  Flush();
    11666 }
    11667 
    11668 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    11669  const VkMemoryRequirements& vkMemReq,
    11670  bool requiresDedicatedAllocation,
    11671  bool prefersDedicatedAllocation,
    11672  const VmaAllocationCreateInfo& createInfo,
    11673  VmaAllocation allocation)
    11674 {
    11675  CallParams callParams;
    11676  GetBasicParams(callParams);
    11677 
    11678  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11679  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11680  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11681  vkMemReq.size,
    11682  vkMemReq.alignment,
    11683  vkMemReq.memoryTypeBits,
    11684  requiresDedicatedAllocation ? 1 : 0,
    11685  prefersDedicatedAllocation ? 1 : 0,
    11686  createInfo.flags,
    11687  createInfo.usage,
    11688  createInfo.requiredFlags,
    11689  createInfo.preferredFlags,
    11690  createInfo.memoryTypeBits,
    11691  createInfo.pool,
    11692  allocation,
    11693  userDataStr.GetString());
    11694  Flush();
    11695 }
    11696 
    11697 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    11698  VmaAllocation allocation)
    11699 {
    11700  CallParams callParams;
    11701  GetBasicParams(callParams);
    11702 
    11703  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11704  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11705  allocation);
    11706  Flush();
    11707 }
    11708 
    11709 void VmaRecorder::RecordResizeAllocation(
    11710  uint32_t frameIndex,
    11711  VmaAllocation allocation,
    11712  VkDeviceSize newSize)
    11713 {
    11714  CallParams callParams;
    11715  GetBasicParams(callParams);
    11716 
    11717  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11718  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11719  allocation, newSize);
    11720  Flush();
    11721 }
    11722 
    11723 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    11724  VmaAllocation allocation,
    11725  const void* pUserData)
    11726 {
    11727  CallParams callParams;
    11728  GetBasicParams(callParams);
    11729 
    11730  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11731  UserDataString userDataStr(
    11732  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    11733  pUserData);
    11734  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11735  allocation,
    11736  userDataStr.GetString());
    11737  Flush();
    11738 }
    11739 
    11740 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    11741  VmaAllocation allocation)
    11742 {
    11743  CallParams callParams;
    11744  GetBasicParams(callParams);
    11745 
    11746  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11747  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11748  allocation);
    11749  Flush();
    11750 }
    11751 
    11752 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    11753  VmaAllocation allocation)
    11754 {
    11755  CallParams callParams;
    11756  GetBasicParams(callParams);
    11757 
    11758  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11759  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11760  allocation);
    11761  Flush();
    11762 }
    11763 
    11764 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    11765  VmaAllocation allocation)
    11766 {
    11767  CallParams callParams;
    11768  GetBasicParams(callParams);
    11769 
    11770  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11771  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11772  allocation);
    11773  Flush();
    11774 }
    11775 
    11776 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    11777  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11778 {
    11779  CallParams callParams;
    11780  GetBasicParams(callParams);
    11781 
    11782  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11783  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11784  allocation,
    11785  offset,
    11786  size);
    11787  Flush();
    11788 }
    11789 
    11790 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11791  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11792 {
    11793  CallParams callParams;
    11794  GetBasicParams(callParams);
    11795 
    11796  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11797  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11798  allocation,
    11799  offset,
    11800  size);
    11801  Flush();
    11802 }
    11803 
    11804 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    11805  const VkBufferCreateInfo& bufCreateInfo,
    11806  const VmaAllocationCreateInfo& allocCreateInfo,
    11807  VmaAllocation allocation)
    11808 {
    11809  CallParams callParams;
    11810  GetBasicParams(callParams);
    11811 
    11812  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11813  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11814  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11815  bufCreateInfo.flags,
    11816  bufCreateInfo.size,
    11817  bufCreateInfo.usage,
    11818  bufCreateInfo.sharingMode,
    11819  allocCreateInfo.flags,
    11820  allocCreateInfo.usage,
    11821  allocCreateInfo.requiredFlags,
    11822  allocCreateInfo.preferredFlags,
    11823  allocCreateInfo.memoryTypeBits,
    11824  allocCreateInfo.pool,
    11825  allocation,
    11826  userDataStr.GetString());
    11827  Flush();
    11828 }
    11829 
    11830 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    11831  const VkImageCreateInfo& imageCreateInfo,
    11832  const VmaAllocationCreateInfo& allocCreateInfo,
    11833  VmaAllocation allocation)
    11834 {
    11835  CallParams callParams;
    11836  GetBasicParams(callParams);
    11837 
    11838  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11839  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11840  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11841  imageCreateInfo.flags,
    11842  imageCreateInfo.imageType,
    11843  imageCreateInfo.format,
    11844  imageCreateInfo.extent.width,
    11845  imageCreateInfo.extent.height,
    11846  imageCreateInfo.extent.depth,
    11847  imageCreateInfo.mipLevels,
    11848  imageCreateInfo.arrayLayers,
    11849  imageCreateInfo.samples,
    11850  imageCreateInfo.tiling,
    11851  imageCreateInfo.usage,
    11852  imageCreateInfo.sharingMode,
    11853  imageCreateInfo.initialLayout,
    11854  allocCreateInfo.flags,
    11855  allocCreateInfo.usage,
    11856  allocCreateInfo.requiredFlags,
    11857  allocCreateInfo.preferredFlags,
    11858  allocCreateInfo.memoryTypeBits,
    11859  allocCreateInfo.pool,
    11860  allocation,
    11861  userDataStr.GetString());
    11862  Flush();
    11863 }
    11864 
    11865 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11866  VmaAllocation allocation)
    11867 {
    11868  CallParams callParams;
    11869  GetBasicParams(callParams);
    11870 
    11871  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11872  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11873  allocation);
    11874  Flush();
    11875 }
    11876 
    11877 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11878  VmaAllocation allocation)
    11879 {
    11880  CallParams callParams;
    11881  GetBasicParams(callParams);
    11882 
    11883  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11884  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11885  allocation);
    11886  Flush();
    11887 }
    11888 
    11889 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11890  VmaAllocation allocation)
    11891 {
    11892  CallParams callParams;
    11893  GetBasicParams(callParams);
    11894 
    11895  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11896  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11897  allocation);
    11898  Flush();
    11899 }
    11900 
    11901 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11902  VmaAllocation allocation)
    11903 {
    11904  CallParams callParams;
    11905  GetBasicParams(callParams);
    11906 
    11907  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11908  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11909  allocation);
    11910  Flush();
    11911 }
    11912 
    11913 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11914  VmaPool pool)
    11915 {
    11916  CallParams callParams;
    11917  GetBasicParams(callParams);
    11918 
    11919  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11920  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11921  pool);
    11922  Flush();
    11923 }
    11924 
    11925 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11926 {
    11927  if(pUserData != VMA_NULL)
    11928  {
    11929  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11930  {
    11931  m_Str = (const char*)pUserData;
    11932  }
    11933  else
    11934  {
    11935  sprintf_s(m_PtrStr, "%p", pUserData);
    11936  m_Str = m_PtrStr;
    11937  }
    11938  }
    11939  else
    11940  {
    11941  m_Str = "";
    11942  }
    11943 }
    11944 
    11945 void VmaRecorder::WriteConfiguration(
    11946  const VkPhysicalDeviceProperties& devProps,
    11947  const VkPhysicalDeviceMemoryProperties& memProps,
    11948  bool dedicatedAllocationExtensionEnabled)
    11949 {
    11950  fprintf(m_File, "Config,Begin\n");
    11951 
    11952  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    11953  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    11954  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    11955  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    11956  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    11957  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
    11958 
    11959  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    11960  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    11961  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
    11962 
    11963  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    11964  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    11965  {
    11966  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
    11967  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    11968  }
    11969  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    11970  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    11971  {
    11972  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
    11973  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    11974  }
    11975 
    11976  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
    11977 
    11978  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    11979  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    11980  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    11981  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    11982  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    11983  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    11984  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    11985  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    11986  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    11987 
    11988  fprintf(m_File, "Config,End\n");
    11989 }
    11990 
    11991 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11992 {
    11993  outParams.threadId = GetCurrentThreadId();
    11994 
    11995  LARGE_INTEGER counter;
    11996  QueryPerformanceCounter(&counter);
    11997  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11998 }
    11999 
    12000 void VmaRecorder::Flush()
    12001 {
    12002  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    12003  {
    12004  fflush(m_File);
    12005  }
    12006 }
    12007 
    12008 #endif // #if VMA_RECORDING_ENABLED
    12009 
    12011 // VmaAllocator_T
    12012 
    12013 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    12014  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    12015  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    12016  m_hDevice(pCreateInfo->device),
    12017  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    12018  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    12019  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    12020  m_PreferredLargeHeapBlockSize(0),
    12021  m_PhysicalDevice(pCreateInfo->physicalDevice),
    12022  m_CurrentFrameIndex(0),
    12023  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    12024  m_NextPoolId(0)
    12026  ,m_pRecorder(VMA_NULL)
    12027 #endif
    12028 {
    12029  if(VMA_DEBUG_DETECT_CORRUPTION)
    12030  {
    12031  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    12032  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    12033  }
    12034 
    12035  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    12036 
    12037 #if !(VMA_DEDICATED_ALLOCATION)
    12039  {
    12040  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    12041  }
    12042 #endif
    12043 
    12044  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    12045  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    12046  memset(&m_MemProps, 0, sizeof(m_MemProps));
    12047 
    12048  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    12049  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    12050 
    12051  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12052  {
    12053  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    12054  }
    12055 
    12056  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    12057  {
    12058  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    12059  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    12060  }
    12061 
    12062  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    12063 
    12064  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    12065  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    12066 
    12067  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    12068  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    12069  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    12070  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    12071 
    12072  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    12073  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    12074 
    12075  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    12076  {
    12077  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    12078  {
    12079  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    12080  if(limit != VK_WHOLE_SIZE)
    12081  {
    12082  m_HeapSizeLimit[heapIndex] = limit;
    12083  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    12084  {
    12085  m_MemProps.memoryHeaps[heapIndex].size = limit;
    12086  }
    12087  }
    12088  }
    12089  }
    12090 
    12091  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12092  {
    12093  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    12094 
    12095  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    12096  this,
    12097  memTypeIndex,
    12098  preferredBlockSize,
    12099  0,
    12100  SIZE_MAX,
    12101  GetBufferImageGranularity(),
    12102  pCreateInfo->frameInUseCount,
    12103  false, // isCustomPool
    12104  false, // explicitBlockSize
    12105  false); // linearAlgorithm
    12106  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    12107  // becase minBlockCount is 0.
    12108  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    12109 
    12110  }
    12111 }
    12112 
    12113 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    12114 {
    12115  VkResult res = VK_SUCCESS;
    12116 
    12117  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    12118  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    12119  {
    12120 #if VMA_RECORDING_ENABLED
    12121  m_pRecorder = vma_new(this, VmaRecorder)();
    12122  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    12123  if(res != VK_SUCCESS)
    12124  {
    12125  return res;
    12126  }
    12127  m_pRecorder->WriteConfiguration(
    12128  m_PhysicalDeviceProperties,
    12129  m_MemProps,
    12130  m_UseKhrDedicatedAllocation);
    12131  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    12132 #else
    12133  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    12134  return VK_ERROR_FEATURE_NOT_PRESENT;
    12135 #endif
    12136  }
    12137 
    12138  return res;
    12139 }
    12140 
    12141 VmaAllocator_T::~VmaAllocator_T()
    12142 {
    12143 #if VMA_RECORDING_ENABLED
    12144  if(m_pRecorder != VMA_NULL)
    12145  {
    12146  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    12147  vma_delete(this, m_pRecorder);
    12148  }
    12149 #endif
    12150 
    12151  VMA_ASSERT(m_Pools.empty());
    12152 
    12153  for(size_t i = GetMemoryTypeCount(); i--; )
    12154  {
    12155  vma_delete(this, m_pDedicatedAllocations[i]);
    12156  vma_delete(this, m_pBlockVectors[i]);
    12157  }
    12158 }
    12159 
    12160 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    12161 {
    12162 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    12163  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    12164  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    12165  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    12166  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    12167  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    12168  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    12169  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    12170  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    12171  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    12172  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    12173  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    12174  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    12175  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    12176  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    12177  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    12178  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    12179 #if VMA_DEDICATED_ALLOCATION
    12180  if(m_UseKhrDedicatedAllocation)
    12181  {
    12182  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
    12183  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
    12184  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
    12185  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    12186  }
    12187 #endif // #if VMA_DEDICATED_ALLOCATION
    12188 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    12189 
    12190 #define VMA_COPY_IF_NOT_NULL(funcName) \
    12191  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    12192 
    12193  if(pVulkanFunctions != VMA_NULL)
    12194  {
    12195  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    12196  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    12197  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    12198  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    12199  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    12200  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    12201  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    12202  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    12203  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    12204  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    12205  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    12206  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    12207  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    12208  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    12209  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    12210  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    12211 #if VMA_DEDICATED_ALLOCATION
    12212  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    12213  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    12214 #endif
    12215  }
    12216 
    12217 #undef VMA_COPY_IF_NOT_NULL
    12218 
    12219  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    12220  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    12221  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    12222  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    12223  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    12224  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    12225  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    12226  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    12227  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    12228  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    12229  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    12230  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    12231  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    12232  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    12233  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    12234  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    12235  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    12236  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    12237 #if VMA_DEDICATED_ALLOCATION
    12238  if(m_UseKhrDedicatedAllocation)
    12239  {
    12240  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    12241  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    12242  }
    12243 #endif
    12244 }
    12245 
    12246 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12247 {
    12248  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12249  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12250  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12251  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12252 }
    12253 
    12254 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12255  VkDeviceSize size,
    12256  VkDeviceSize alignment,
    12257  bool dedicatedAllocation,
    12258  VkBuffer dedicatedBuffer,
    12259  VkImage dedicatedImage,
    12260  const VmaAllocationCreateInfo& createInfo,
    12261  uint32_t memTypeIndex,
    12262  VmaSuballocationType suballocType,
    12263  VmaAllocation* pAllocation)
    12264 {
    12265  VMA_ASSERT(pAllocation != VMA_NULL);
    12266  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12267 
    12268  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12269 
    12270  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12271  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12272  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12273  {
    12274  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12275  }
    12276 
    12277  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12278  VMA_ASSERT(blockVector);
    12279 
    12280  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12281  bool preferDedicatedMemory =
    12282  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12283  dedicatedAllocation ||
    12284  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12285  size > preferredBlockSize / 2;
    12286 
    12287  if(preferDedicatedMemory &&
    12288  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12289  finalCreateInfo.pool == VK_NULL_HANDLE)
    12290  {
    12292  }
    12293 
    12294  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12295  {
    12296  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12297  {
    12298  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12299  }
    12300  else
    12301  {
    12302  return AllocateDedicatedMemory(
    12303  size,
    12304  suballocType,
    12305  memTypeIndex,
    12306  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12307  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12308  finalCreateInfo.pUserData,
    12309  dedicatedBuffer,
    12310  dedicatedImage,
    12311  pAllocation);
    12312  }
    12313  }
    12314  else
    12315  {
    12316  VkResult res = blockVector->Allocate(
    12317  VK_NULL_HANDLE, // hCurrentPool
    12318  m_CurrentFrameIndex.load(),
    12319  size,
    12320  alignment,
    12321  finalCreateInfo,
    12322  suballocType,
    12323  pAllocation);
    12324  if(res == VK_SUCCESS)
    12325  {
    12326  return res;
    12327  }
    12328 
    12329  // 5. Try dedicated memory.
    12330  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12331  {
    12332  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12333  }
    12334  else
    12335  {
    12336  res = AllocateDedicatedMemory(
    12337  size,
    12338  suballocType,
    12339  memTypeIndex,
    12340  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12341  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12342  finalCreateInfo.pUserData,
    12343  dedicatedBuffer,
    12344  dedicatedImage,
    12345  pAllocation);
    12346  if(res == VK_SUCCESS)
    12347  {
    12348  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12349  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12350  return VK_SUCCESS;
    12351  }
    12352  else
    12353  {
    12354  // Everything failed: Return error code.
    12355  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12356  return res;
    12357  }
    12358  }
    12359  }
    12360 }
    12361 
    12362 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    12363  VkDeviceSize size,
    12364  VmaSuballocationType suballocType,
    12365  uint32_t memTypeIndex,
    12366  bool map,
    12367  bool isUserDataString,
    12368  void* pUserData,
    12369  VkBuffer dedicatedBuffer,
    12370  VkImage dedicatedImage,
    12371  VmaAllocation* pAllocation)
    12372 {
    12373  VMA_ASSERT(pAllocation);
    12374 
    12375  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    12376  allocInfo.memoryTypeIndex = memTypeIndex;
    12377  allocInfo.allocationSize = size;
    12378 
    12379 #if VMA_DEDICATED_ALLOCATION
    12380  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    12381  if(m_UseKhrDedicatedAllocation)
    12382  {
    12383  if(dedicatedBuffer != VK_NULL_HANDLE)
    12384  {
    12385  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    12386  dedicatedAllocInfo.buffer = dedicatedBuffer;
    12387  allocInfo.pNext = &dedicatedAllocInfo;
    12388  }
    12389  else if(dedicatedImage != VK_NULL_HANDLE)
    12390  {
    12391  dedicatedAllocInfo.image = dedicatedImage;
    12392  allocInfo.pNext = &dedicatedAllocInfo;
    12393  }
    12394  }
    12395 #endif // #if VMA_DEDICATED_ALLOCATION
    12396 
    12397  // Allocate VkDeviceMemory.
    12398  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    12399  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    12400  if(res < 0)
    12401  {
    12402  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12403  return res;
    12404  }
    12405 
    12406  void* pMappedData = VMA_NULL;
    12407  if(map)
    12408  {
    12409  res = (*m_VulkanFunctions.vkMapMemory)(
    12410  m_hDevice,
    12411  hMemory,
    12412  0,
    12413  VK_WHOLE_SIZE,
    12414  0,
    12415  &pMappedData);
    12416  if(res < 0)
    12417  {
    12418  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    12419  FreeVulkanMemory(memTypeIndex, size, hMemory);
    12420  return res;
    12421  }
    12422  }
    12423 
    12424  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    12425  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    12426  (*pAllocation)->SetUserData(this, pUserData);
    12427  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12428  {
    12429  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    12430  }
    12431 
    12432  // Register it in m_pDedicatedAllocations.
    12433  {
    12434  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12435  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    12436  VMA_ASSERT(pDedicatedAllocations);
    12437  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    12438  }
    12439 
    12440  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
    12441 
    12442  return VK_SUCCESS;
    12443 }
    12444 
    // Fills memReq with the buffer's memory requirements. When the
    // VK_KHR_dedicated_allocation path is compiled in AND enabled at runtime,
    // queries via vkGetBufferMemoryRequirements2KHR and also reports whether a
    // dedicated allocation is required/preferred; otherwise falls back to the
    // core-1.0 query and reports both flags as false.
    12445 void VmaAllocator_T::GetBufferMemoryRequirements(
    12446  VkBuffer hBuffer,
    12447  VkMemoryRequirements& memReq,
    12448  bool& requiresDedicatedAllocation,
    12449  bool& prefersDedicatedAllocation) const
    12450 {
    12451 #if VMA_DEDICATED_ALLOCATION
    12452  if(m_UseKhrDedicatedAllocation)
    12453  {
    // Chain VkMemoryDedicatedRequirementsKHR into the pNext of the query so the
    // driver can report the dedicated-allocation hints alongside the requirements.
    12454  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12455  memReqInfo.buffer = hBuffer;
    12456 
    12457  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12458 
    12459  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12460  memReq2.pNext = &memDedicatedReq;
    12461 
    12462  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12463 
    // Convert VkBool32 to bool explicitly (any nonzero value means "true").
    12464  memReq = memReq2.memoryRequirements;
    12465  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12466  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12467  }
    12468  else
    12469 #endif // #if VMA_DEDICATED_ALLOCATION
    12470  {
    12471  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    12472  requiresDedicatedAllocation = false;
    12473  prefersDedicatedAllocation = false;
    12474  }
    12475 }
    12476 
    // Image counterpart of GetBufferMemoryRequirements: fills memReq for hImage
    // and, on the VK_KHR_dedicated_allocation path, reports whether a dedicated
    // allocation is required/preferred. On the fallback path both flags are false.
    12477 void VmaAllocator_T::GetImageMemoryRequirements(
    12478  VkImage hImage,
    12479  VkMemoryRequirements& memReq,
    12480  bool& requiresDedicatedAllocation,
    12481  bool& prefersDedicatedAllocation) const
    12482 {
    12483 #if VMA_DEDICATED_ALLOCATION
    12484  if(m_UseKhrDedicatedAllocation)
    12485  {
    // Chain the dedicated-requirements struct into the extended query's pNext.
    12486  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12487  memReqInfo.image = hImage;
    12488 
    12489  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12490 
    12491  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12492  memReq2.pNext = &memDedicatedReq;
    12493 
    12494  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12495 
    12496  memReq = memReq2.memoryRequirements;
    12497  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12498  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12499  }
    12500  else
    12501 #endif // #if VMA_DEDICATED_ALLOCATION
    12502  {
    12503  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    12504  requiresDedicatedAllocation = false;
    12505  prefersDedicatedAllocation = false;
    12506  }
    12507 }
    12508 
    12509 VkResult VmaAllocator_T::AllocateMemory(
    12510  const VkMemoryRequirements& vkMemReq,
    12511  bool requiresDedicatedAllocation,
    12512  bool prefersDedicatedAllocation,
    12513  VkBuffer dedicatedBuffer,
    12514  VkImage dedicatedImage,
    12515  const VmaAllocationCreateInfo& createInfo,
    12516  VmaSuballocationType suballocType,
    12517  VmaAllocation* pAllocation)
    12518 {
    12519  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12520 
    12521  if(vkMemReq.size == 0)
    12522  {
    12523  return VK_ERROR_VALIDATION_FAILED_EXT;
    12524  }
    12525  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12526  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12527  {
    12528  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12529  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12530  }
    12531  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12533  {
    12534  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12535  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12536  }
    12537  if(requiresDedicatedAllocation)
    12538  {
    12539  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12540  {
    12541  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12542  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12543  }
    12544  if(createInfo.pool != VK_NULL_HANDLE)
    12545  {
    12546  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12547  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12548  }
    12549  }
    12550  if((createInfo.pool != VK_NULL_HANDLE) &&
    12551  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12552  {
    12553  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12554  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12555  }
    12556 
    12557  if(createInfo.pool != VK_NULL_HANDLE)
    12558  {
    12559  const VkDeviceSize alignmentForPool = VMA_MAX(
    12560  vkMemReq.alignment,
    12561  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12562  return createInfo.pool->m_BlockVector.Allocate(
    12563  createInfo.pool,
    12564  m_CurrentFrameIndex.load(),
    12565  vkMemReq.size,
    12566  alignmentForPool,
    12567  createInfo,
    12568  suballocType,
    12569  pAllocation);
    12570  }
    12571  else
    12572  {
    12573  // Bit mask of memory Vulkan types acceptable for this allocation.
    12574  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12575  uint32_t memTypeIndex = UINT32_MAX;
    12576  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12577  if(res == VK_SUCCESS)
    12578  {
    12579  VkDeviceSize alignmentForMemType = VMA_MAX(
    12580  vkMemReq.alignment,
    12581  GetMemoryTypeMinAlignment(memTypeIndex));
    12582 
    12583  res = AllocateMemoryOfType(
    12584  vkMemReq.size,
    12585  alignmentForMemType,
    12586  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12587  dedicatedBuffer,
    12588  dedicatedImage,
    12589  createInfo,
    12590  memTypeIndex,
    12591  suballocType,
    12592  pAllocation);
    12593  // Succeeded on first try.
    12594  if(res == VK_SUCCESS)
    12595  {
    12596  return res;
    12597  }
    12598  // Allocation from this memory type failed. Try other compatible memory types.
    12599  else
    12600  {
    12601  for(;;)
    12602  {
    12603  // Remove old memTypeIndex from list of possibilities.
    12604  memoryTypeBits &= ~(1u << memTypeIndex);
    12605  // Find alternative memTypeIndex.
    12606  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12607  if(res == VK_SUCCESS)
    12608  {
    12609  alignmentForMemType = VMA_MAX(
    12610  vkMemReq.alignment,
    12611  GetMemoryTypeMinAlignment(memTypeIndex));
    12612 
    12613  res = AllocateMemoryOfType(
    12614  vkMemReq.size,
    12615  alignmentForMemType,
    12616  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12617  dedicatedBuffer,
    12618  dedicatedImage,
    12619  createInfo,
    12620  memTypeIndex,
    12621  suballocType,
    12622  pAllocation);
    12623  // Allocation from this alternative memory type succeeded.
    12624  if(res == VK_SUCCESS)
    12625  {
    12626  return res;
    12627  }
    12628  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12629  }
    12630  // No other matching memory type index could be found.
    12631  else
    12632  {
    12633  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12634  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12635  }
    12636  }
    12637  }
    12638  }
    12639  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12640  else
    12641  return res;
    12642  }
    12643 }
    12644 
    // Releases an allocation. TouchAllocation() returns false for allocations
    // already lost, in which case there is no underlying memory to free; the
    // VmaAllocation_T object itself is destroyed either way.
    12645 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    12646 {
    12647  VMA_ASSERT(allocation);
    12648 
    12649  if(TouchAllocation(allocation))
    12650  {
    // Optionally overwrite freed memory with a debug pattern to catch use-after-free.
    12651  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12652  {
    12653  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    12654  }
    12655 
    12656  switch(allocation->GetType())
    12657  {
    12658  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12659  {
    // Route the free to the owning block vector: the custom pool's if the
    // allocation came from one, else the default vector for its memory type.
    12660  VmaBlockVector* pBlockVector = VMA_NULL;
    12661  VmaPool hPool = allocation->GetPool();
    12662  if(hPool != VK_NULL_HANDLE)
    12663  {
    12664  pBlockVector = &hPool->m_BlockVector;
    12665  }
    12666  else
    12667  {
    12668  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    12669  pBlockVector = m_pBlockVectors[memTypeIndex];
    12670  }
    12671  pBlockVector->Free(allocation);
    12672  }
    12673  break;
    12674  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12675  FreeDedicatedMemory(allocation);
    12676  break;
    12677  default:
    12678  VMA_ASSERT(0);
    12679  }
    12680  }
    12681 
    // Clear user data (frees a copied string if isUserDataString) before deleting.
    12682  allocation->SetUserData(this, VMA_NULL);
    12683  vma_delete(this, allocation);
    12684 }
    12685 
    // Attempts to change an allocation's size in place. Only block allocations
    // support this, and only if the owning block's metadata can accommodate the
    // new size; dedicated allocations return VK_ERROR_FEATURE_NOT_PRESENT.
    // newSize == 0 or a lost allocation is invalid usage.
    12686 VkResult VmaAllocator_T::ResizeAllocation(
    12687  const VmaAllocation alloc,
    12688  VkDeviceSize newSize)
    12689 {
    12690  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    12691  {
    12692  return VK_ERROR_VALIDATION_FAILED_EXT;
    12693  }
    // Resizing to the current size is a no-op.
    12694  if(newSize == alloc->GetSize())
    12695  {
    12696  return VK_SUCCESS;
    12697  }
    12698 
    12699  switch(alloc->GetType())
    12700  {
    12701  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12702  return VK_ERROR_FEATURE_NOT_PRESENT;
    12703  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    // Metadata resize must succeed first; only then is the allocation's own
    // recorded size updated, keeping the two views consistent.
    12704  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    12705  {
    12706  alloc->ChangeSize(newSize);
    12707  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    12708  return VK_SUCCESS;
    12709  }
    12710  else
    12711  {
    12712  return VK_ERROR_OUT_OF_POOL_MEMORY;
    12713  }
    12714  default:
    12715  VMA_ASSERT(0);
    12716  return VK_ERROR_VALIDATION_FAILED_EXT;
    12717  }
    12718 }
    12719 
    // Aggregates statistics over default block vectors, custom pools, and
    // dedicated allocations into per-type, per-heap, and total VmaStatInfo,
    // then derives averages in the postprocess step.
    12720 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12721 {
    // Initialize.
    12723  InitStatInfo(pStats->total);
    12724  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12725  InitStatInfo(pStats->memoryType[i]);
    12726  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12727  InitStatInfo(pStats->memoryHeap[i]);
    12728 
    // Process default pools.
    12730  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12731  {
    12732  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12733  VMA_ASSERT(pBlockVector);
    12734  pBlockVector->AddStats(pStats);
    12735  }
    12736 
    // Process custom pools (under the pools mutex).
    12738  {
    12739  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12740  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12741  {
    12742  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12743  }
    12744  }
    12745 
    // Process dedicated allocations (per-type mutex held while iterating).
    12747  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12748  {
    12749  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12750  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12751  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12752  VMA_ASSERT(pDedicatedAllocVector);
    12753  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12754  {
    12755  VmaStatInfo allocationStatInfo;
    12756  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12757  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12758  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12759  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12760  }
    12761  }
    12762 
    // Postprocess: compute derived values (e.g. averages) for each StatInfo.
    12764  VmaPostprocessCalcStatInfo(pStats->total);
    12765  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12766  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12767  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12768  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12769 }
    12770 
    // 4098 == 0x1002, the PCI vendor ID of AMD.
    12771 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12772 
    // Defragments the given allocations: registers each eligible allocation with
    // a per-block-vector defragmentator, runs defragmentation on default vectors
    // and custom pools within the optional byte/move budgets, then destroys all
    // defragmentators. pAllocationsChanged (optional) receives a per-allocation
    // "was moved" flag; pDefragmentationStats (optional) receives totals.
    12773 VkResult VmaAllocator_T::Defragment(
    12774  VmaAllocation* pAllocations,
    12775  size_t allocationCount,
    12776  VkBool32* pAllocationsChanged,
    12777  const VmaDefragmentationInfo* pDefragmentationInfo,
    12778  VmaDefragmentationStats* pDefragmentationStats)
    12779 {
    12780  if(pAllocationsChanged != VMA_NULL)
    12781  {
    12782  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    12783  }
    12784  if(pDefragmentationStats != VMA_NULL)
    12785  {
    12786  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    12787  }
    12788 
    12789  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    12790 
    // Pools mutex is held for the whole operation so m_Pools cannot change.
    12791  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    12792 
    12793  const size_t poolCount = m_Pools.size();
    12794 
    // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    12796  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    12797  {
    12798  VmaAllocation hAlloc = pAllocations[allocIndex];
    12799  VMA_ASSERT(hAlloc);
    12800  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    // DedicatedAlloc cannot be defragmented.
    12802  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    12803  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
    12805  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
    // Lost allocation cannot be defragmented.
    12807  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    12808  {
    12809  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    12810 
    12811  const VmaPool hAllocPool = hAlloc->GetPool();
    // This allocation belongs to custom pool.
    12813  if(hAllocPool != VK_NULL_HANDLE)
    12814  {
    // Pools with linear or buddy algorithm are not defragmented.
    12816  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    12817  {
    12818  pAllocBlockVector = &hAllocPool->m_BlockVector;
    12819  }
    12820  }
    // This allocation belongs to general pool.
    12822  else
    12823  {
    12824  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
    12825  }
    12826 
    12827  if(pAllocBlockVector != VMA_NULL)
    12828  {
    12829  VmaDefragmentator* const pDefragmentator =
    12830  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    12831  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    12832  &pAllocationsChanged[allocIndex] : VMA_NULL;
    12833  pDefragmentator->AddAllocation(hAlloc, pChanged);
    12834  }
    12835  }
    12836  }
    12837 
    12838  VkResult result = VK_SUCCESS;
    12839 
    // ======== Main processing.
    12841 
    // Budgets default to "unlimited" when no VmaDefragmentationInfo is given.
    12842  VkDeviceSize maxBytesToMove = SIZE_MAX;
    12843  uint32_t maxAllocationsToMove = UINT32_MAX;
    12844  if(pDefragmentationInfo != VMA_NULL)
    12845  {
    12846  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    12847  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    12848  }
    12849 
    // Process standard memory.
    12851  for(uint32_t memTypeIndex = 0;
    12852  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    12853  ++memTypeIndex)
    12854  {
    // Only HOST_VISIBLE memory types can be defragmented.
    12856  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12857  {
    12858  result = m_pBlockVectors[memTypeIndex]->Defragment(
    12859  pDefragmentationStats,
    12860  maxBytesToMove,
    12861  maxAllocationsToMove);
    12862  }
    12863  }
    12864 
    // Process custom pools.
    12866  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    12867  {
    12868  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
    12869  pDefragmentationStats,
    12870  maxBytesToMove,
    12871  maxAllocationsToMove);
    12872  }
    12873 
    // ======== Destroy defragmentators.
    12875 
    // Process custom pools (destroyed even if processing stopped early on error).
    12877  for(size_t poolIndex = poolCount; poolIndex--; )
    12878  {
    12879  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    12880  }
    12881 
    // Process standard memory.
    12883  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    12884  {
    12885  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12886  {
    12887  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
    12888  }
    12889  }
    12890 
    12891  return result;
    12892 }
    12893 
    // Fills pAllocationInfo for hAllocation. For allocations that can become
    // lost, this also acts as a "touch": a lock-free compare-exchange loop
    // advances the allocation's last-use frame index to the current frame, or
    // reports it as lost (null memory, UINT32_MAX type) if it already is.
    12894 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    12895 {
    12896  if(hAllocation->CanBecomeLost())
    12897  {
    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    12902  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    12903  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    12904  for(;;)
    12905  {
    // Allocation is lost: report placeholder values (size/userData remain valid).
    12906  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    12907  {
    12908  pAllocationInfo->memoryType = UINT32_MAX;
    12909  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    12910  pAllocationInfo->offset = 0;
    12911  pAllocationInfo->size = hAllocation->GetSize();
    12912  pAllocationInfo->pMappedData = VMA_NULL;
    12913  pAllocationInfo->pUserData = hAllocation->GetUserData();
    12914  return;
    12915  }
    // Already touched this frame: report real data. pMappedData is always null
    // here because a lost-capable allocation cannot be persistently mapped.
    12916  else if(localLastUseFrameIndex == localCurrFrameIndex)
    12917  {
    12918  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    12919  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    12920  pAllocationInfo->offset = hAllocation->GetOffset();
    12921  pAllocationInfo->size = hAllocation->GetSize();
    12922  pAllocationInfo->pMappedData = VMA_NULL;
    12923  pAllocationInfo->pUserData = hAllocation->GetUserData();
    12924  return;
    12925  }
    else // Last use time earlier than current time.
    12927  {
    // CAS to claim the current frame; on failure the loop re-reads the updated
    // value (CompareExchange refreshes localLastUseFrameIndex) and retries.
    12928  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    12929  {
    12930  localLastUseFrameIndex = localCurrFrameIndex;
    12931  }
    12932  }
    12933  }
    12934  }
    12935  else
    12936  {
    // Stats-only bookkeeping: keep last-use frame current so statistics are
    // meaningful; compiled out when VMA_STATS_STRING_ENABLED is 0.
    12937 #if VMA_STATS_STRING_ENABLED
    12938  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    12939  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    12940  for(;;)
    12941  {
    12942  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    12943  if(localLastUseFrameIndex == localCurrFrameIndex)
    12944  {
    12945  break;
    12946  }
    else // Last use time earlier than current time.
    12948  {
    12949  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    12950  {
    12951  localLastUseFrameIndex = localCurrFrameIndex;
    12952  }
    12953  }
    12954  }
    12955 #endif
    12956 
    12957  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    12958  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    12959  pAllocationInfo->offset = hAllocation->GetOffset();
    12960  pAllocationInfo->size = hAllocation->GetSize();
    12961  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    12962  pAllocationInfo->pUserData = hAllocation->GetUserData();
    12963  }
    12964 }
    12965 
    // Marks hAllocation as used in the current frame. Returns false only when the
    // allocation can become lost and already is lost; otherwise returns true.
    // Same CAS loop as GetAllocationInfo, minus the info filling.
    12966 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
    12967 {
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    12969  if(hAllocation->CanBecomeLost())
    12970  {
    12971  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    12972  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    12973  for(;;)
    12974  {
    12975  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    12976  {
    12977  return false;
    12978  }
    12979  else if(localLastUseFrameIndex == localCurrFrameIndex)
    12980  {
    12981  return true;
    12982  }
    else // Last use time earlier than current time.
    12984  {
    // CAS refreshes localLastUseFrameIndex on failure, so the loop converges.
    12985  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    12986  {
    12987  localLastUseFrameIndex = localCurrFrameIndex;
    12988  }
    12989  }
    12990  }
    12991  }
    12992  else
    12993  {
    // Stats-only: keep last-use frame current when stats strings are enabled.
    12994 #if VMA_STATS_STRING_ENABLED
    12995  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    12996  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    12997  for(;;)
    12998  {
    12999  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    13000  if(localLastUseFrameIndex == localCurrFrameIndex)
    13001  {
    13002  break;
    13003  }
    else // Last use time earlier than current time.
    13005  {
    13006  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    13007  {
    13008  localLastUseFrameIndex = localCurrFrameIndex;
    13009  }
    13010  }
    13011  }
    13012 #endif
    13013 
    13014  return true;
    13015  }
    13016 }
    13017 
    // Creates a custom pool: normalizes maxBlockCount (0 means unlimited),
    // validates min <= max, preallocates the minimum block count, and registers
    // the pool (with a fresh id) in the sorted m_Pools list.
    13018 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    13019 {
    13020  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    13021 
    // Work on a local copy so the caller's struct is never modified.
    13022  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    13023 
    // maxBlockCount == 0 means "no limit".
    13024  if(newCreateInfo.maxBlockCount == 0)
    13025  {
    13026  newCreateInfo.maxBlockCount = SIZE_MAX;
    13027  }
    13028  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    13029  {
    13030  return VK_ERROR_INITIALIZATION_FAILED;
    13031  }
    13032 
    13033  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    13034 
    13035  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    13036 
    // Pre-create minBlockCount blocks; on failure the half-built pool is destroyed.
    13037  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    13038  if(res != VK_SUCCESS)
    13039  {
    13040  vma_delete(this, *pPool);
    13041  *pPool = VMA_NULL;
    13042  return res;
    13043  }
    13044 
    // Add to m_Pools.
    13046  {
    13047  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13048  (*pPool)->SetId(m_NextPoolId++);
    13049  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    13050  }
    13051 
    13052  return VK_SUCCESS;
    13053 }
    13054 
    // Unregisters the pool from m_Pools (under the pools mutex) and destroys it.
    13055 void VmaAllocator_T::DestroyPool(VmaPool pool)
    13056 {
    // Remove from m_Pools.
    13058  {
    13059  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13060  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    13061  VMA_ASSERT(success && "Pool not found in Allocator.");
    13062  }
    13063 
    13064  vma_delete(this, pool);
    13065 }
    13066 
    // Thin forwarder: statistics for a custom pool come from its block vector.
    13067 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    13068 {
    13069  pool->m_BlockVector.GetPoolStats(pPoolStats);
    13070 }
    13071 
    // Atomically publishes the application's current frame index, used by the
    // lost-allocation and defragmentation machinery.
    13072 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    13073 {
    13074  m_CurrentFrameIndex.store(frameIndex);
    13075 }
    13076 
    // Forwards to the pool's block vector, which marks eligible allocations as
    // lost relative to the current frame index. pLostAllocationCount (optional)
    // receives the number of allocations made lost.
    13077 void VmaAllocator_T::MakePoolAllocationsLost(
    13078  VmaPool hPool,
    13079  size_t* pLostAllocationCount)
    13080 {
    13081  hPool->m_BlockVector.MakePoolAllocationsLost(
    13082  m_CurrentFrameIndex.load(),
    13083  pLostAllocationCount);
    13084 }
    13085 
    // Thin forwarder: corruption check for a custom pool is delegated to its
    // block vector.
    13086 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
    13087 {
    13088  return hPool->m_BlockVector.CheckCorruption();
    13089 }
    13090 
    // Runs corruption checks over default block vectors and custom pools whose
    // memory type is selected by memoryTypeBits. Returns VK_SUCCESS if at least
    // one vector was actually checked and clean, VK_ERROR_FEATURE_NOT_PRESENT if
    // nothing supported the check, or the first hard error encountered.
    13091 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    13092 {
    13093  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    13094 
    // Process default pools.
    13096  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13097  {
    13098  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    13099  {
    13100  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    13101  VMA_ASSERT(pBlockVector);
    13102  VkResult localRes = pBlockVector->CheckCorruption();
    13103  switch(localRes)
    13104  {
    // FEATURE_NOT_PRESENT means "check unsupported here" — not a failure.
    13105  case VK_ERROR_FEATURE_NOT_PRESENT:
    13106  break;
    13107  case VK_SUCCESS:
    13108  finalRes = VK_SUCCESS;
    13109  break;
    // Any real error aborts the scan immediately.
    13110  default:
    13111  return localRes;
    13112  }
    13113  }
    13114  }
    13115 
    // Process custom pools.
    13117  {
    13118  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13119  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    13120  {
    13121  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    13122  {
    13123  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    13124  switch(localRes)
    13125  {
    13126  case VK_ERROR_FEATURE_NOT_PRESENT:
    13127  break;
    13128  case VK_SUCCESS:
    13129  finalRes = VK_SUCCESS;
    13130  break;
    13131  default:
    13132  return localRes;
    13133  }
    13134  }
    13135  }
    13136  }
    13137 
    13138  return finalRes;
    13139 }
    13140 
    // Creates an allocation object that is born lost (frame index
    // VMA_FRAME_INDEX_LOST, no user-data string, no backing memory).
    13141 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    13142 {
    13143  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    13144  (*pAllocation)->InitLost();
    13145 }
    13146 
    // Low-level wrapper over vkAllocateMemory that enforces the user-configured
    // per-heap size limit (m_HeapSizeLimit) and invokes the pfnAllocate device
    // memory callback on success.
    13147 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    13148 {
    13149  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    13150 
    13151  VkResult res;
    // VK_WHOLE_SIZE in m_HeapSizeLimit means "no limit configured for this heap".
    13152  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    13153  {
    // The mutex guards the check-then-subtract on the remaining budget.
    13154  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    13155  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    13156  {
    13157  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    13158  if(res == VK_SUCCESS)
    13159  {
    13160  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    13161  }
    13162  }
    13163  else
    13164  {
    // Budget exhausted: fail without calling the driver.
    13165  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    13166  }
    13167  }
    13168  else
    13169  {
    13170  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    13171  }
    13172 
    // Notify the user's device-memory allocation callback, if installed.
    13173  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    13174  {
    13175  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    13176  }
    13177 
    13178  return res;
    13179 }
    13180 
    // Counterpart of AllocateVulkanMemory: invokes the pfnFree callback, frees
    // the device memory, and returns the freed size to the heap budget if a
    // per-heap limit is configured.
    13181 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    13182 {
    13183  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    13184  {
    13185  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    13186  }
    13187 
    13188  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    13189 
    // Give the freed bytes back to the heap's remaining budget.
    13190  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    13191  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    13192  {
    13193  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    13194  m_HeapSizeLimit[heapIndex] += size;
    13195  }
    13196 }
    13197 
    // Maps the allocation and returns a pointer to its data in *ppData.
    // Allocations that can become lost are not mappable. For block allocations
    // the whole block is mapped (reference-counted) and the returned pointer is
    // offset to this allocation's start; dedicated allocations map directly.
    13198 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    13199 {
    13200  if(hAllocation->CanBecomeLost())
    13201  {
    13202  return VK_ERROR_MEMORY_MAP_FAILED;
    13203  }
    13204 
    13205  switch(hAllocation->GetType())
    13206  {
    13207  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13208  {
    13209  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13210  char *pBytes = VMA_NULL;
    13211  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    13212  if(res == VK_SUCCESS)
    13213  {
    // Offset into the shared block mapping, and count this allocation's map.
    13214  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    13215  hAllocation->BlockAllocMap();
    13216  }
    13217  return res;
    13218  }
    13219  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13220  return hAllocation->DedicatedAllocMap(this, ppData);
    13221  default:
    13222  VMA_ASSERT(0);
    13223  return VK_ERROR_MEMORY_MAP_FAILED;
    13224  }
    13225 }
    13226 
    // Reverses Map(): decrements the allocation's map count and releases one
    // reference on the block's (or dedicated allocation's) mapping.
    13227 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    13228 {
    13229  switch(hAllocation->GetType())
    13230  {
    13231  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13232  {
    13233  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13234  hAllocation->BlockAllocUnmap();
    13235  pBlock->Unmap(this, 1);
    13236  }
    13237  break;
    13238  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13239  hAllocation->DedicatedAllocUnmap(this);
    13240  break;
    13241  default:
    13242  VMA_ASSERT(0);
    13243  }
    13244 }
    13245 
    // Binds hBuffer to the allocation's memory. Dedicated allocations bind at
    // offset 0 of their own VkDeviceMemory; block allocations delegate to the
    // block, which applies the suballocation offset (and serializes binds).
    13246 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    13247 {
    13248  VkResult res = VK_SUCCESS;
    13249  switch(hAllocation->GetType())
    13250  {
    13251  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13252  res = GetVulkanFunctions().vkBindBufferMemory(
    13253  m_hDevice,
    13254  hBuffer,
    13255  hAllocation->GetMemory(),
    0); //memoryOffset
    13257  break;
    13258  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13259  {
    13260  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13261  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13262  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13263  break;
    13264  }
    13265  default:
    13266  VMA_ASSERT(0);
    13267  }
    13268  return res;
    13269 }
    13270 
    // Image counterpart of BindBufferMemory: binds hImage to the allocation's
    // memory, either directly at offset 0 (dedicated) or via the owning block.
    13271 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13272 {
    13273  VkResult res = VK_SUCCESS;
    13274  switch(hAllocation->GetType())
    13275  {
    13276  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13277  res = GetVulkanFunctions().vkBindImageMemory(
    13278  m_hDevice,
    13279  hImage,
    13280  hAllocation->GetMemory(),
    0); //memoryOffset
    13282  break;
    13283  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13284  {
    13285  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13286  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13287  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13288  break;
    13289  }
    13290  default:
    13291  VMA_ASSERT(0);
    13292  }
    13293  return res;
    13294 }
    13295 
    13296 void VmaAllocator_T::FlushOrInvalidateAllocation(
    13297  VmaAllocation hAllocation,
    13298  VkDeviceSize offset, VkDeviceSize size,
    13299  VMA_CACHE_OPERATION op)
    13300 {
    13301  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    13302  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    13303  {
    13304  const VkDeviceSize allocationSize = hAllocation->GetSize();
    13305  VMA_ASSERT(offset <= allocationSize);
    13306 
    13307  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    13308 
    13309  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    13310  memRange.memory = hAllocation->GetMemory();
    13311 
    13312  switch(hAllocation->GetType())
    13313  {
    13314  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13315  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    13316  if(size == VK_WHOLE_SIZE)
    13317  {
    13318  memRange.size = allocationSize - memRange.offset;
    13319  }
    13320  else
    13321  {
    13322  VMA_ASSERT(offset + size <= allocationSize);
    13323  memRange.size = VMA_MIN(
    13324  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
    13325  allocationSize - memRange.offset);
    13326  }
    13327  break;
    13328 
    13329  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13330  {
    13331  // 1. Still within this allocation.
    13332  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    13333  if(size == VK_WHOLE_SIZE)
    13334  {
    13335  size = allocationSize - offset;
    13336  }
    13337  else
    13338  {
    13339  VMA_ASSERT(offset + size <= allocationSize);
    13340  }
    13341  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
    13342 
    13343  // 2. Adjust to whole block.
    13344  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
    13345  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
    13346  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
    13347  memRange.offset += allocationOffset;
    13348  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
    13349 
    13350  break;
    13351  }
    13352 
    13353  default:
    13354  VMA_ASSERT(0);
    13355  }
    13356 
    13357  switch(op)
    13358  {
    13359  case VMA_CACHE_FLUSH:
    13360  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
    13361  break;
    13362  case VMA_CACHE_INVALIDATE:
    13363  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
    13364  break;
    13365  default:
    13366  VMA_ASSERT(0);
    13367  }
    13368  }
    13369  // else: Just ignore this call.
    13370 }
    13371 
    13372 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    13373 {
    13374  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    13375 
    13376  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    13377  {
    13378  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13379  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    13380  VMA_ASSERT(pDedicatedAllocations);
    13381  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    13382  VMA_ASSERT(success);
    13383  }
    13384 
    13385  VkDeviceMemory hMemory = allocation->GetMemory();
    13386 
    13387  /*
    13388  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    13389  before vkFreeMemory.
    13390 
    13391  if(allocation->GetMappedData() != VMA_NULL)
    13392  {
    13393  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    13394  }
    13395  */
    13396 
    13397  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    13398 
    13399  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    13400 }
    13401 
    13402 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13403 {
    13404  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13405  !hAllocation->CanBecomeLost() &&
    13406  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13407  {
    13408  void* pData = VMA_NULL;
    13409  VkResult res = Map(hAllocation, &pData);
    13410  if(res == VK_SUCCESS)
    13411  {
    13412  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13413  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13414  Unmap(hAllocation);
    13415  }
    13416  else
    13417  {
    13418  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13419  }
    13420  }
    13421 }
    13422 
    13423 #if VMA_STATS_STRING_ENABLED
    13424 
    13425 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    13426 {
    13427  bool dedicatedAllocationsStarted = false;
    13428  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13429  {
    13430  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13431  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    13432  VMA_ASSERT(pDedicatedAllocVector);
    13433  if(pDedicatedAllocVector->empty() == false)
    13434  {
    13435  if(dedicatedAllocationsStarted == false)
    13436  {
    13437  dedicatedAllocationsStarted = true;
    13438  json.WriteString("DedicatedAllocations");
    13439  json.BeginObject();
    13440  }
    13441 
    13442  json.BeginString("Type ");
    13443  json.ContinueString(memTypeIndex);
    13444  json.EndString();
    13445 
    13446  json.BeginArray();
    13447 
    13448  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    13449  {
    13450  json.BeginObject(true);
    13451  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    13452  hAlloc->PrintParameters(json);
    13453  json.EndObject();
    13454  }
    13455 
    13456  json.EndArray();
    13457  }
    13458  }
    13459  if(dedicatedAllocationsStarted)
    13460  {
    13461  json.EndObject();
    13462  }
    13463 
    13464  {
    13465  bool allocationsStarted = false;
    13466  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13467  {
    13468  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    13469  {
    13470  if(allocationsStarted == false)
    13471  {
    13472  allocationsStarted = true;
    13473  json.WriteString("DefaultPools");
    13474  json.BeginObject();
    13475  }
    13476 
    13477  json.BeginString("Type ");
    13478  json.ContinueString(memTypeIndex);
    13479  json.EndString();
    13480 
    13481  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    13482  }
    13483  }
    13484  if(allocationsStarted)
    13485  {
    13486  json.EndObject();
    13487  }
    13488  }
    13489 
    13490  // Custom pools
    13491  {
    13492  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13493  const size_t poolCount = m_Pools.size();
    13494  if(poolCount > 0)
    13495  {
    13496  json.WriteString("Pools");
    13497  json.BeginObject();
    13498  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    13499  {
    13500  json.BeginString();
    13501  json.ContinueString(m_Pools[poolIndex]->GetId());
    13502  json.EndString();
    13503 
    13504  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    13505  }
    13506  json.EndObject();
    13507  }
    13508  }
    13509 }
    13510 
    13511 #endif // #if VMA_STATS_STRING_ENABLED
    13512 
    13514 // Public interface
    13515 
    13516 VkResult vmaCreateAllocator(
    13517  const VmaAllocatorCreateInfo* pCreateInfo,
    13518  VmaAllocator* pAllocator)
    13519 {
    13520  VMA_ASSERT(pCreateInfo && pAllocator);
    13521  VMA_DEBUG_LOG("vmaCreateAllocator");
    13522  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    13523  return (*pAllocator)->Init(pCreateInfo);
    13524 }
    13525 
    13526 void vmaDestroyAllocator(
    13527  VmaAllocator allocator)
    13528 {
    13529  if(allocator != VK_NULL_HANDLE)
    13530  {
    13531  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13532  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13533  vma_delete(&allocationCallbacks, allocator);
    13534  }
    13535 }
    13536 
    13538  VmaAllocator allocator,
    13539  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13540 {
    13541  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13542  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13543 }
    13544 
    13546  VmaAllocator allocator,
    13547  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13548 {
    13549  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13550  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13551 }
    13552 
    13554  VmaAllocator allocator,
    13555  uint32_t memoryTypeIndex,
    13556  VkMemoryPropertyFlags* pFlags)
    13557 {
    13558  VMA_ASSERT(allocator && pFlags);
    13559  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13560  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13561 }
    13562 
    13564  VmaAllocator allocator,
    13565  uint32_t frameIndex)
    13566 {
    13567  VMA_ASSERT(allocator);
    13568  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13569 
    13570  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13571 
    13572  allocator->SetCurrentFrameIndex(frameIndex);
    13573 }
    13574 
    13575 void vmaCalculateStats(
    13576  VmaAllocator allocator,
    13577  VmaStats* pStats)
    13578 {
    13579  VMA_ASSERT(allocator && pStats);
    13580  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13581  allocator->CalculateStats(pStats);
    13582 }
    13583 
    13584 #if VMA_STATS_STRING_ENABLED
    13585 
    13586 void vmaBuildStatsString(
    13587  VmaAllocator allocator,
    13588  char** ppStatsString,
    13589  VkBool32 detailedMap)
    13590 {
    13591  VMA_ASSERT(allocator && ppStatsString);
    13592  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13593 
    13594  VmaStringBuilder sb(allocator);
    13595  {
    13596  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    13597  json.BeginObject();
    13598 
    13599  VmaStats stats;
    13600  allocator->CalculateStats(&stats);
    13601 
    13602  json.WriteString("Total");
    13603  VmaPrintStatInfo(json, stats.total);
    13604 
    13605  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    13606  {
    13607  json.BeginString("Heap ");
    13608  json.ContinueString(heapIndex);
    13609  json.EndString();
    13610  json.BeginObject();
    13611 
    13612  json.WriteString("Size");
    13613  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    13614 
    13615  json.WriteString("Flags");
    13616  json.BeginArray(true);
    13617  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    13618  {
    13619  json.WriteString("DEVICE_LOCAL");
    13620  }
    13621  json.EndArray();
    13622 
    13623  if(stats.memoryHeap[heapIndex].blockCount > 0)
    13624  {
    13625  json.WriteString("Stats");
    13626  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    13627  }
    13628 
    13629  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    13630  {
    13631  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    13632  {
    13633  json.BeginString("Type ");
    13634  json.ContinueString(typeIndex);
    13635  json.EndString();
    13636 
    13637  json.BeginObject();
    13638 
    13639  json.WriteString("Flags");
    13640  json.BeginArray(true);
    13641  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    13642  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    13643  {
    13644  json.WriteString("DEVICE_LOCAL");
    13645  }
    13646  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13647  {
    13648  json.WriteString("HOST_VISIBLE");
    13649  }
    13650  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    13651  {
    13652  json.WriteString("HOST_COHERENT");
    13653  }
    13654  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    13655  {
    13656  json.WriteString("HOST_CACHED");
    13657  }
    13658  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    13659  {
    13660  json.WriteString("LAZILY_ALLOCATED");
    13661  }
    13662  json.EndArray();
    13663 
    13664  if(stats.memoryType[typeIndex].blockCount > 0)
    13665  {
    13666  json.WriteString("Stats");
    13667  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    13668  }
    13669 
    13670  json.EndObject();
    13671  }
    13672  }
    13673 
    13674  json.EndObject();
    13675  }
    13676  if(detailedMap == VK_TRUE)
    13677  {
    13678  allocator->PrintDetailedMap(json);
    13679  }
    13680 
    13681  json.EndObject();
    13682  }
    13683 
    13684  const size_t len = sb.GetLength();
    13685  char* const pChars = vma_new_array(allocator, char, len + 1);
    13686  if(len > 0)
    13687  {
    13688  memcpy(pChars, sb.GetData(), len);
    13689  }
    13690  pChars[len] = '\0';
    13691  *ppStatsString = pChars;
    13692 }
    13693 
    13694 void vmaFreeStatsString(
    13695  VmaAllocator allocator,
    13696  char* pStatsString)
    13697 {
    13698  if(pStatsString != VMA_NULL)
    13699  {
    13700  VMA_ASSERT(allocator);
    13701  size_t len = strlen(pStatsString);
    13702  vma_delete_array(allocator, pStatsString, len + 1);
    13703  }
    13704 }
    13705 
    13706 #endif // #if VMA_STATS_STRING_ENABLED
    13707 
    13708 /*
    13709 This function is not protected by any mutex because it just reads immutable data.
    13710 */
    13711 VkResult vmaFindMemoryTypeIndex(
    13712  VmaAllocator allocator,
    13713  uint32_t memoryTypeBits,
    13714  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13715  uint32_t* pMemoryTypeIndex)
    13716 {
    13717  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13718  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13719  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13720 
    13721  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13722  {
    13723  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13724  }
    13725 
    13726  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13727  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13728 
    13729  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13730  if(mapped)
    13731  {
    13732  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13733  }
    13734 
    13735  // Convert usage to requiredFlags and preferredFlags.
    13736  switch(pAllocationCreateInfo->usage)
    13737  {
    13739  break;
    13741  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13742  {
    13743  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13744  }
    13745  break;
    13747  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13748  break;
    13750  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13751  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13752  {
    13753  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13754  }
    13755  break;
    13757  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13758  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13759  break;
    13760  default:
    13761  break;
    13762  }
    13763 
    13764  *pMemoryTypeIndex = UINT32_MAX;
    13765  uint32_t minCost = UINT32_MAX;
    13766  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13767  memTypeIndex < allocator->GetMemoryTypeCount();
    13768  ++memTypeIndex, memTypeBit <<= 1)
    13769  {
    13770  // This memory type is acceptable according to memoryTypeBits bitmask.
    13771  if((memTypeBit & memoryTypeBits) != 0)
    13772  {
    13773  const VkMemoryPropertyFlags currFlags =
    13774  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13775  // This memory type contains requiredFlags.
    13776  if((requiredFlags & ~currFlags) == 0)
    13777  {
    13778  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13779  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13780  // Remember memory type with lowest cost.
    13781  if(currCost < minCost)
    13782  {
    13783  *pMemoryTypeIndex = memTypeIndex;
    13784  if(currCost == 0)
    13785  {
    13786  return VK_SUCCESS;
    13787  }
    13788  minCost = currCost;
    13789  }
    13790  }
    13791  }
    13792  }
    13793  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13794 }
    13795 
    13797  VmaAllocator allocator,
    13798  const VkBufferCreateInfo* pBufferCreateInfo,
    13799  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13800  uint32_t* pMemoryTypeIndex)
    13801 {
    13802  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13803  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13804  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13805  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13806 
    13807  const VkDevice hDev = allocator->m_hDevice;
    13808  VkBuffer hBuffer = VK_NULL_HANDLE;
    13809  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13810  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13811  if(res == VK_SUCCESS)
    13812  {
    13813  VkMemoryRequirements memReq = {};
    13814  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13815  hDev, hBuffer, &memReq);
    13816 
    13817  res = vmaFindMemoryTypeIndex(
    13818  allocator,
    13819  memReq.memoryTypeBits,
    13820  pAllocationCreateInfo,
    13821  pMemoryTypeIndex);
    13822 
    13823  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13824  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13825  }
    13826  return res;
    13827 }
    13828 
    13830  VmaAllocator allocator,
    13831  const VkImageCreateInfo* pImageCreateInfo,
    13832  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13833  uint32_t* pMemoryTypeIndex)
    13834 {
    13835  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13836  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13837  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13838  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13839 
    13840  const VkDevice hDev = allocator->m_hDevice;
    13841  VkImage hImage = VK_NULL_HANDLE;
    13842  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13843  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13844  if(res == VK_SUCCESS)
    13845  {
    13846  VkMemoryRequirements memReq = {};
    13847  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13848  hDev, hImage, &memReq);
    13849 
    13850  res = vmaFindMemoryTypeIndex(
    13851  allocator,
    13852  memReq.memoryTypeBits,
    13853  pAllocationCreateInfo,
    13854  pMemoryTypeIndex);
    13855 
    13856  allocator->GetVulkanFunctions().vkDestroyImage(
    13857  hDev, hImage, allocator->GetAllocationCallbacks());
    13858  }
    13859  return res;
    13860 }
    13861 
    13862 VkResult vmaCreatePool(
    13863  VmaAllocator allocator,
    13864  const VmaPoolCreateInfo* pCreateInfo,
    13865  VmaPool* pPool)
    13866 {
    13867  VMA_ASSERT(allocator && pCreateInfo && pPool);
    13868 
    13869  VMA_DEBUG_LOG("vmaCreatePool");
    13870 
    13871  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13872 
    13873  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    13874 
    13875 #if VMA_RECORDING_ENABLED
    13876  if(allocator->GetRecorder() != VMA_NULL)
    13877  {
    13878  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    13879  }
    13880 #endif
    13881 
    13882  return res;
    13883 }
    13884 
    13885 void vmaDestroyPool(
    13886  VmaAllocator allocator,
    13887  VmaPool pool)
    13888 {
    13889  VMA_ASSERT(allocator);
    13890 
    13891  if(pool == VK_NULL_HANDLE)
    13892  {
    13893  return;
    13894  }
    13895 
    13896  VMA_DEBUG_LOG("vmaDestroyPool");
    13897 
    13898  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13899 
    13900 #if VMA_RECORDING_ENABLED
    13901  if(allocator->GetRecorder() != VMA_NULL)
    13902  {
    13903  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    13904  }
    13905 #endif
    13906 
    13907  allocator->DestroyPool(pool);
    13908 }
    13909 
    13910 void vmaGetPoolStats(
    13911  VmaAllocator allocator,
    13912  VmaPool pool,
    13913  VmaPoolStats* pPoolStats)
    13914 {
    13915  VMA_ASSERT(allocator && pool && pPoolStats);
    13916 
    13917  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13918 
    13919  allocator->GetPoolStats(pool, pPoolStats);
    13920 }
    13921 
    13923  VmaAllocator allocator,
    13924  VmaPool pool,
    13925  size_t* pLostAllocationCount)
    13926 {
    13927  VMA_ASSERT(allocator && pool);
    13928 
    13929  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13930 
    13931 #if VMA_RECORDING_ENABLED
    13932  if(allocator->GetRecorder() != VMA_NULL)
    13933  {
    13934  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13935  }
    13936 #endif
    13937 
    13938  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13939 }
    13940 
    13941 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    13942 {
    13943  VMA_ASSERT(allocator && pool);
    13944 
    13945  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13946 
    13947  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    13948 
    13949  return allocator->CheckPoolCorruption(pool);
    13950 }
    13951 
    13952 VkResult vmaAllocateMemory(
    13953  VmaAllocator allocator,
    13954  const VkMemoryRequirements* pVkMemoryRequirements,
    13955  const VmaAllocationCreateInfo* pCreateInfo,
    13956  VmaAllocation* pAllocation,
    13957  VmaAllocationInfo* pAllocationInfo)
    13958 {
    13959  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    13960 
    13961  VMA_DEBUG_LOG("vmaAllocateMemory");
    13962 
    13963  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13964 
    13965  VkResult result = allocator->AllocateMemory(
    13966  *pVkMemoryRequirements,
    13967  false, // requiresDedicatedAllocation
    13968  false, // prefersDedicatedAllocation
    13969  VK_NULL_HANDLE, // dedicatedBuffer
    13970  VK_NULL_HANDLE, // dedicatedImage
    13971  *pCreateInfo,
    13972  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13973  pAllocation);
    13974 
    13975 #if VMA_RECORDING_ENABLED
    13976  if(allocator->GetRecorder() != VMA_NULL)
    13977  {
    13978  allocator->GetRecorder()->RecordAllocateMemory(
    13979  allocator->GetCurrentFrameIndex(),
    13980  *pVkMemoryRequirements,
    13981  *pCreateInfo,
    13982  *pAllocation);
    13983  }
    13984 #endif
    13985 
    13986  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13987  {
    13988  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13989  }
    13990 
    13991  return result;
    13992 }
    13993 
    13995  VmaAllocator allocator,
    13996  VkBuffer buffer,
    13997  const VmaAllocationCreateInfo* pCreateInfo,
    13998  VmaAllocation* pAllocation,
    13999  VmaAllocationInfo* pAllocationInfo)
    14000 {
    14001  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    14002 
    14003  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    14004 
    14005  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14006 
    14007  VkMemoryRequirements vkMemReq = {};
    14008  bool requiresDedicatedAllocation = false;
    14009  bool prefersDedicatedAllocation = false;
    14010  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    14011  requiresDedicatedAllocation,
    14012  prefersDedicatedAllocation);
    14013 
    14014  VkResult result = allocator->AllocateMemory(
    14015  vkMemReq,
    14016  requiresDedicatedAllocation,
    14017  prefersDedicatedAllocation,
    14018  buffer, // dedicatedBuffer
    14019  VK_NULL_HANDLE, // dedicatedImage
    14020  *pCreateInfo,
    14021  VMA_SUBALLOCATION_TYPE_BUFFER,
    14022  pAllocation);
    14023 
    14024 #if VMA_RECORDING_ENABLED
    14025  if(allocator->GetRecorder() != VMA_NULL)
    14026  {
    14027  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    14028  allocator->GetCurrentFrameIndex(),
    14029  vkMemReq,
    14030  requiresDedicatedAllocation,
    14031  prefersDedicatedAllocation,
    14032  *pCreateInfo,
    14033  *pAllocation);
    14034  }
    14035 #endif
    14036 
    14037  if(pAllocationInfo && result == VK_SUCCESS)
    14038  {
    14039  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14040  }
    14041 
    14042  return result;
    14043 }
    14044 
    14045 VkResult vmaAllocateMemoryForImage(
    14046  VmaAllocator allocator,
    14047  VkImage image,
    14048  const VmaAllocationCreateInfo* pCreateInfo,
    14049  VmaAllocation* pAllocation,
    14050  VmaAllocationInfo* pAllocationInfo)
    14051 {
    14052  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    14053 
    14054  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    14055 
    14056  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14057 
    14058  VkMemoryRequirements vkMemReq = {};
    14059  bool requiresDedicatedAllocation = false;
    14060  bool prefersDedicatedAllocation = false;
    14061  allocator->GetImageMemoryRequirements(image, vkMemReq,
    14062  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14063 
    14064  VkResult result = allocator->AllocateMemory(
    14065  vkMemReq,
    14066  requiresDedicatedAllocation,
    14067  prefersDedicatedAllocation,
    14068  VK_NULL_HANDLE, // dedicatedBuffer
    14069  image, // dedicatedImage
    14070  *pCreateInfo,
    14071  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    14072  pAllocation);
    14073 
    14074 #if VMA_RECORDING_ENABLED
    14075  if(allocator->GetRecorder() != VMA_NULL)
    14076  {
    14077  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    14078  allocator->GetCurrentFrameIndex(),
    14079  vkMemReq,
    14080  requiresDedicatedAllocation,
    14081  prefersDedicatedAllocation,
    14082  *pCreateInfo,
    14083  *pAllocation);
    14084  }
    14085 #endif
    14086 
    14087  if(pAllocationInfo && result == VK_SUCCESS)
    14088  {
    14089  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14090  }
    14091 
    14092  return result;
    14093 }
    14094 
    14095 void vmaFreeMemory(
    14096  VmaAllocator allocator,
    14097  VmaAllocation allocation)
    14098 {
    14099  VMA_ASSERT(allocator);
    14100 
    14101  if(allocation == VK_NULL_HANDLE)
    14102  {
    14103  return;
    14104  }
    14105 
    14106  VMA_DEBUG_LOG("vmaFreeMemory");
    14107 
    14108  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14109 
    14110 #if VMA_RECORDING_ENABLED
    14111  if(allocator->GetRecorder() != VMA_NULL)
    14112  {
    14113  allocator->GetRecorder()->RecordFreeMemory(
    14114  allocator->GetCurrentFrameIndex(),
    14115  allocation);
    14116  }
    14117 #endif
    14118 
    14119  allocator->FreeMemory(allocation);
    14120 }
    14121 
    14122 VkResult vmaResizeAllocation(
    14123  VmaAllocator allocator,
    14124  VmaAllocation allocation,
    14125  VkDeviceSize newSize)
    14126 {
    14127  VMA_ASSERT(allocator && allocation);
    14128 
    14129  VMA_DEBUG_LOG("vmaResizeAllocation");
    14130 
    14131  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14132 
    14133 #if VMA_RECORDING_ENABLED
    14134  if(allocator->GetRecorder() != VMA_NULL)
    14135  {
    14136  allocator->GetRecorder()->RecordResizeAllocation(
    14137  allocator->GetCurrentFrameIndex(),
    14138  allocation,
    14139  newSize);
    14140  }
    14141 #endif
    14142 
    14143  return allocator->ResizeAllocation(allocation, newSize);
    14144 }
    14145 
    14147  VmaAllocator allocator,
    14148  VmaAllocation allocation,
    14149  VmaAllocationInfo* pAllocationInfo)
    14150 {
    14151  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    14152 
    14153  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14154 
    14155 #if VMA_RECORDING_ENABLED
    14156  if(allocator->GetRecorder() != VMA_NULL)
    14157  {
    14158  allocator->GetRecorder()->RecordGetAllocationInfo(
    14159  allocator->GetCurrentFrameIndex(),
    14160  allocation);
    14161  }
    14162 #endif
    14163 
    14164  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    14165 }
    14166 
    14167 VkBool32 vmaTouchAllocation(
    14168  VmaAllocator allocator,
    14169  VmaAllocation allocation)
    14170 {
    14171  VMA_ASSERT(allocator && allocation);
    14172 
    14173  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14174 
    14175 #if VMA_RECORDING_ENABLED
    14176  if(allocator->GetRecorder() != VMA_NULL)
    14177  {
    14178  allocator->GetRecorder()->RecordTouchAllocation(
    14179  allocator->GetCurrentFrameIndex(),
    14180  allocation);
    14181  }
    14182 #endif
    14183 
    14184  return allocator->TouchAllocation(allocation);
    14185 }
    14186 
    14188  VmaAllocator allocator,
    14189  VmaAllocation allocation,
    14190  void* pUserData)
    14191 {
    14192  VMA_ASSERT(allocator && allocation);
    14193 
    14194  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14195 
    14196  allocation->SetUserData(allocator, pUserData);
    14197 
    14198 #if VMA_RECORDING_ENABLED
    14199  if(allocator->GetRecorder() != VMA_NULL)
    14200  {
    14201  allocator->GetRecorder()->RecordSetAllocationUserData(
    14202  allocator->GetCurrentFrameIndex(),
    14203  allocation,
    14204  pUserData);
    14205  }
    14206 #endif
    14207 }
    14208 
    14210  VmaAllocator allocator,
    14211  VmaAllocation* pAllocation)
    14212 {
    14213  VMA_ASSERT(allocator && pAllocation);
    14214 
    14215  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    14216 
    14217  allocator->CreateLostAllocation(pAllocation);
    14218 
    14219 #if VMA_RECORDING_ENABLED
    14220  if(allocator->GetRecorder() != VMA_NULL)
    14221  {
    14222  allocator->GetRecorder()->RecordCreateLostAllocation(
    14223  allocator->GetCurrentFrameIndex(),
    14224  *pAllocation);
    14225  }
    14226 #endif
    14227 }
    14228 
    14229 VkResult vmaMapMemory(
    14230  VmaAllocator allocator,
    14231  VmaAllocation allocation,
    14232  void** ppData)
    14233 {
    14234  VMA_ASSERT(allocator && allocation && ppData);
    14235 
    14236  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14237 
    14238  VkResult res = allocator->Map(allocation, ppData);
    14239 
    14240 #if VMA_RECORDING_ENABLED
    14241  if(allocator->GetRecorder() != VMA_NULL)
    14242  {
    14243  allocator->GetRecorder()->RecordMapMemory(
    14244  allocator->GetCurrentFrameIndex(),
    14245  allocation);
    14246  }
    14247 #endif
    14248 
    14249  return res;
    14250 }
    14251 
    14252 void vmaUnmapMemory(
    14253  VmaAllocator allocator,
    14254  VmaAllocation allocation)
    14255 {
    14256  VMA_ASSERT(allocator && allocation);
    14257 
    14258  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14259 
    14260 #if VMA_RECORDING_ENABLED
    14261  if(allocator->GetRecorder() != VMA_NULL)
    14262  {
    14263  allocator->GetRecorder()->RecordUnmapMemory(
    14264  allocator->GetCurrentFrameIndex(),
    14265  allocation);
    14266  }
    14267 #endif
    14268 
    14269  allocator->Unmap(allocation);
    14270 }
    14271 
    14272 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14273 {
    14274  VMA_ASSERT(allocator && allocation);
    14275 
    14276  VMA_DEBUG_LOG("vmaFlushAllocation");
    14277 
    14278  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14279 
    14280  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    14281 
    14282 #if VMA_RECORDING_ENABLED
    14283  if(allocator->GetRecorder() != VMA_NULL)
    14284  {
    14285  allocator->GetRecorder()->RecordFlushAllocation(
    14286  allocator->GetCurrentFrameIndex(),
    14287  allocation, offset, size);
    14288  }
    14289 #endif
    14290 }
    14291 
    14292 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14293 {
    14294  VMA_ASSERT(allocator && allocation);
    14295 
    14296  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14297 
    14298  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14299 
    14300  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14301 
    14302 #if VMA_RECORDING_ENABLED
    14303  if(allocator->GetRecorder() != VMA_NULL)
    14304  {
    14305  allocator->GetRecorder()->RecordInvalidateAllocation(
    14306  allocator->GetCurrentFrameIndex(),
    14307  allocation, offset, size);
    14308  }
    14309 #endif
    14310 }
    14311 
    14312 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14313 {
    14314  VMA_ASSERT(allocator);
    14315 
    14316  VMA_DEBUG_LOG("vmaCheckCorruption");
    14317 
    14318  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14319 
    14320  return allocator->CheckCorruption(memoryTypeBits);
    14321 }
    14322 
    14323 VkResult vmaDefragment(
    14324  VmaAllocator allocator,
    14325  VmaAllocation* pAllocations,
    14326  size_t allocationCount,
    14327  VkBool32* pAllocationsChanged,
    14328  const VmaDefragmentationInfo *pDefragmentationInfo,
    14329  VmaDefragmentationStats* pDefragmentationStats)
    14330 {
    14331  VMA_ASSERT(allocator && pAllocations);
    14332 
    14333  VMA_DEBUG_LOG("vmaDefragment");
    14334 
    14335  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14336 
    14337  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14338 }
    14339 
    14340 VkResult vmaBindBufferMemory(
    14341  VmaAllocator allocator,
    14342  VmaAllocation allocation,
    14343  VkBuffer buffer)
    14344 {
    14345  VMA_ASSERT(allocator && allocation && buffer);
    14346 
    14347  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14348 
    14349  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14350 
    14351  return allocator->BindBufferMemory(allocation, buffer);
    14352 }
    14353 
    14354 VkResult vmaBindImageMemory(
    14355  VmaAllocator allocator,
    14356  VmaAllocation allocation,
    14357  VkImage image)
    14358 {
    14359  VMA_ASSERT(allocator && allocation && image);
    14360 
    14361  VMA_DEBUG_LOG("vmaBindImageMemory");
    14362 
    14363  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14364 
    14365  return allocator->BindImageMemory(allocation, image);
    14366 }
    14367 
    14368 VkResult vmaCreateBuffer(
    14369  VmaAllocator allocator,
    14370  const VkBufferCreateInfo* pBufferCreateInfo,
    14371  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14372  VkBuffer* pBuffer,
    14373  VmaAllocation* pAllocation,
    14374  VmaAllocationInfo* pAllocationInfo)
    14375 {
    14376  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14377 
    14378  if(pBufferCreateInfo->size == 0)
    14379  {
    14380  return VK_ERROR_VALIDATION_FAILED_EXT;
    14381  }
    14382 
    14383  VMA_DEBUG_LOG("vmaCreateBuffer");
    14384 
    14385  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14386 
    14387  *pBuffer = VK_NULL_HANDLE;
    14388  *pAllocation = VK_NULL_HANDLE;
    14389 
    14390  // 1. Create VkBuffer.
    14391  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14392  allocator->m_hDevice,
    14393  pBufferCreateInfo,
    14394  allocator->GetAllocationCallbacks(),
    14395  pBuffer);
    14396  if(res >= 0)
    14397  {
    14398  // 2. vkGetBufferMemoryRequirements.
    14399  VkMemoryRequirements vkMemReq = {};
    14400  bool requiresDedicatedAllocation = false;
    14401  bool prefersDedicatedAllocation = false;
    14402  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14403  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14404 
    14405  // Make sure alignment requirements for specific buffer usages reported
    14406  // in Physical Device Properties are included in alignment reported by memory requirements.
    14407  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14408  {
    14409  VMA_ASSERT(vkMemReq.alignment %
    14410  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14411  }
    14412  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14413  {
    14414  VMA_ASSERT(vkMemReq.alignment %
    14415  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14416  }
    14417  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14418  {
    14419  VMA_ASSERT(vkMemReq.alignment %
    14420  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14421  }
    14422 
    14423  // 3. Allocate memory using allocator.
    14424  res = allocator->AllocateMemory(
    14425  vkMemReq,
    14426  requiresDedicatedAllocation,
    14427  prefersDedicatedAllocation,
    14428  *pBuffer, // dedicatedBuffer
    14429  VK_NULL_HANDLE, // dedicatedImage
    14430  *pAllocationCreateInfo,
    14431  VMA_SUBALLOCATION_TYPE_BUFFER,
    14432  pAllocation);
    14433 
    14434 #if VMA_RECORDING_ENABLED
    14435  if(allocator->GetRecorder() != VMA_NULL)
    14436  {
    14437  allocator->GetRecorder()->RecordCreateBuffer(
    14438  allocator->GetCurrentFrameIndex(),
    14439  *pBufferCreateInfo,
    14440  *pAllocationCreateInfo,
    14441  *pAllocation);
    14442  }
    14443 #endif
    14444 
    14445  if(res >= 0)
    14446  {
    14447  // 3. Bind buffer with memory.
    14448  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14449  if(res >= 0)
    14450  {
    14451  // All steps succeeded.
    14452  #if VMA_STATS_STRING_ENABLED
    14453  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14454  #endif
    14455  if(pAllocationInfo != VMA_NULL)
    14456  {
    14457  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14458  }
    14459 
    14460  return VK_SUCCESS;
    14461  }
    14462  allocator->FreeMemory(*pAllocation);
    14463  *pAllocation = VK_NULL_HANDLE;
    14464  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14465  *pBuffer = VK_NULL_HANDLE;
    14466  return res;
    14467  }
    14468  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14469  *pBuffer = VK_NULL_HANDLE;
    14470  return res;
    14471  }
    14472  return res;
    14473 }
    14474 
    14475 void vmaDestroyBuffer(
    14476  VmaAllocator allocator,
    14477  VkBuffer buffer,
    14478  VmaAllocation allocation)
    14479 {
    14480  VMA_ASSERT(allocator);
    14481 
    14482  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14483  {
    14484  return;
    14485  }
    14486 
    14487  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14488 
    14489  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14490 
    14491 #if VMA_RECORDING_ENABLED
    14492  if(allocator->GetRecorder() != VMA_NULL)
    14493  {
    14494  allocator->GetRecorder()->RecordDestroyBuffer(
    14495  allocator->GetCurrentFrameIndex(),
    14496  allocation);
    14497  }
    14498 #endif
    14499 
    14500  if(buffer != VK_NULL_HANDLE)
    14501  {
    14502  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14503  }
    14504 
    14505  if(allocation != VK_NULL_HANDLE)
    14506  {
    14507  allocator->FreeMemory(allocation);
    14508  }
    14509 }
    14510 
    14511 VkResult vmaCreateImage(
    14512  VmaAllocator allocator,
    14513  const VkImageCreateInfo* pImageCreateInfo,
    14514  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14515  VkImage* pImage,
    14516  VmaAllocation* pAllocation,
    14517  VmaAllocationInfo* pAllocationInfo)
    14518 {
    14519  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14520 
    14521  if(pImageCreateInfo->extent.width == 0 ||
    14522  pImageCreateInfo->extent.height == 0 ||
    14523  pImageCreateInfo->extent.depth == 0 ||
    14524  pImageCreateInfo->mipLevels == 0 ||
    14525  pImageCreateInfo->arrayLayers == 0)
    14526  {
    14527  return VK_ERROR_VALIDATION_FAILED_EXT;
    14528  }
    14529 
    14530  VMA_DEBUG_LOG("vmaCreateImage");
    14531 
    14532  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14533 
    14534  *pImage = VK_NULL_HANDLE;
    14535  *pAllocation = VK_NULL_HANDLE;
    14536 
    14537  // 1. Create VkImage.
    14538  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14539  allocator->m_hDevice,
    14540  pImageCreateInfo,
    14541  allocator->GetAllocationCallbacks(),
    14542  pImage);
    14543  if(res >= 0)
    14544  {
    14545  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14546  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14547  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14548 
    14549  // 2. Allocate memory using allocator.
    14550  VkMemoryRequirements vkMemReq = {};
    14551  bool requiresDedicatedAllocation = false;
    14552  bool prefersDedicatedAllocation = false;
    14553  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14554  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14555 
    14556  res = allocator->AllocateMemory(
    14557  vkMemReq,
    14558  requiresDedicatedAllocation,
    14559  prefersDedicatedAllocation,
    14560  VK_NULL_HANDLE, // dedicatedBuffer
    14561  *pImage, // dedicatedImage
    14562  *pAllocationCreateInfo,
    14563  suballocType,
    14564  pAllocation);
    14565 
    14566 #if VMA_RECORDING_ENABLED
    14567  if(allocator->GetRecorder() != VMA_NULL)
    14568  {
    14569  allocator->GetRecorder()->RecordCreateImage(
    14570  allocator->GetCurrentFrameIndex(),
    14571  *pImageCreateInfo,
    14572  *pAllocationCreateInfo,
    14573  *pAllocation);
    14574  }
    14575 #endif
    14576 
    14577  if(res >= 0)
    14578  {
    14579  // 3. Bind image with memory.
    14580  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14581  if(res >= 0)
    14582  {
    14583  // All steps succeeded.
    14584  #if VMA_STATS_STRING_ENABLED
    14585  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14586  #endif
    14587  if(pAllocationInfo != VMA_NULL)
    14588  {
    14589  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14590  }
    14591 
    14592  return VK_SUCCESS;
    14593  }
    14594  allocator->FreeMemory(*pAllocation);
    14595  *pAllocation = VK_NULL_HANDLE;
    14596  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14597  *pImage = VK_NULL_HANDLE;
    14598  return res;
    14599  }
    14600  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14601  *pImage = VK_NULL_HANDLE;
    14602  return res;
    14603  }
    14604  return res;
    14605 }
    14606 
    14607 void vmaDestroyImage(
    14608  VmaAllocator allocator,
    14609  VkImage image,
    14610  VmaAllocation allocation)
    14611 {
    14612  VMA_ASSERT(allocator);
    14613 
    14614  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14615  {
    14616  return;
    14617  }
    14618 
    14619  VMA_DEBUG_LOG("vmaDestroyImage");
    14620 
    14621  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14622 
    14623 #if VMA_RECORDING_ENABLED
    14624  if(allocator->GetRecorder() != VMA_NULL)
    14625  {
    14626  allocator->GetRecorder()->RecordDestroyImage(
    14627  allocator->GetCurrentFrameIndex(),
    14628  allocation);
    14629  }
    14630 #endif
    14631 
    14632  if(image != VK_NULL_HANDLE)
    14633  {
    14634  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14635  }
    14636  if(allocation != VK_NULL_HANDLE)
    14637  {
    14638  allocator->FreeMemory(allocation);
    14639  }
    14640 }
    14641 
    14642 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1589
    +
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1891
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
    -
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1643
    +
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1646
    VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
    Compacts memory by moving allocations.
    void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Invalidates memory of given allocation.
    Represents single memory allocation.
    -
    Definition: vk_mem_alloc.h:1617
    -
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2213
    -
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1598
    +
    Definition: vk_mem_alloc.h:1620
    +
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2216
    +
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1601
    void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
    struct VmaStats VmaStats
    General statistics from current state of Allocator.
    -
    Definition: vk_mem_alloc.h:1845
    -
    Definition: vk_mem_alloc.h:1948
    -
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1590
    -
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2313
    -
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1640
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2583
    -
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2102
    -
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1487
    +
    Definition: vk_mem_alloc.h:1848
    +
    Definition: vk_mem_alloc.h:1951
    +
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1593
    +
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2316
    +
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1643
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2586
    +
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2105
    +
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1488
    void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
    Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
    -
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2194
    -
    Definition: vk_mem_alloc.h:1925
    -
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1579
    -
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:2001
    -
    Definition: vk_mem_alloc.h:1872
    -
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1652
    -
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2130
    +
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2197
    +
    Definition: vk_mem_alloc.h:1928
    +
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1582
    +
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:2004
    +
    Definition: vk_mem_alloc.h:1875
    +
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1655
    +
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2133
    void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
    Retrieves statistics from current state of the Allocator.
    -
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1706
    -
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1637
    +
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1709
    +
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1640
    void vmaDestroyAllocator(VmaAllocator allocator)
    Destroys allocator object.
    -
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1876
    +
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1879
    void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
    Returns current information about specified allocation and atomically marks it as used in current fra...
    -
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1778
    -
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1595
    -
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1777
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2587
    +
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1781
    +
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1598
    +
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1780
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2590
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1669
    -
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1787
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2595
    -
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1985
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2578
    -
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1596
    -
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1521
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1672
    +
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1790
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2598
    +
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1988
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2581
    +
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1599
    +
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1524
    Represents main object of this library initialized.
    -
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1646
    +
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1649
    VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
    Binds buffer to allocation.
    -
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2144
    -
    Definition: vk_mem_alloc.h:2138
    -
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1713
    -
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2323
    +
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2147
    +
    Definition: vk_mem_alloc.h:2141
    +
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1716
    +
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2326
    void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
    Given Memory Type Index, returns Property Flags of this memory type.
    -
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1591
    -
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1615
    -
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2022
    -
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2164
    -
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2200
    +
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1594
    +
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1618
    +
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2025
    +
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2167
    +
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2203
    struct VmaVulkanFunctions VmaVulkanFunctions
    Pointers to some Vulkan functions - a subset used by the library.
    -
    Definition: vk_mem_alloc.h:1577
    -
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2147
    +
    Definition: vk_mem_alloc.h:1580
    +
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2150
    VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
    -
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1823
    +
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1826
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Flushes memory of given allocation.
    -
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2573
    +
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2576
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2591
    -
    Definition: vk_mem_alloc.h:1862
    -
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2009
    -
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1594
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2594
    +
    Definition: vk_mem_alloc.h:1865
    +
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2012
    +
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1597
    Represents custom memory pool.
    void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
    Retrieves statistics of existing VmaPool object.
    struct VmaDefragmentationInfo VmaDefragmentationInfo
    Optional configuration parameters to be passed to function vmaDefragment().
    -
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1783
    -
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1527
    +
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1786
    +
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1530
    void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
    Sets pUserData in given allocation to new value.
    - +
    VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
    Allocates Vulkan device memory and creates VmaPool object.
    -
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1548
    +
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1551
    VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
    Binds image to allocation.
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    -
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1619
    -
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1553
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2593
    +
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1622
    +
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1556
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2596
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    -
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1996
    -
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2210
    +
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1999
    +
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2213
    void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
    Builds and returns statistics as string in JSON format.
    -
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1587
    -
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1766
    -
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2159
    -
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1540
    -
    Definition: vk_mem_alloc.h:2134
    +
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1590
    +
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1769
    +
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2162
    +
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1543
    +
    Definition: vk_mem_alloc.h:2137
    VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:1932
    -
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1779
    -
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1544
    -
    Definition: vk_mem_alloc.h:1959
    -
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2150
    -
    Definition: vk_mem_alloc.h:1871
    -
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1593
    +
    Definition: vk_mem_alloc.h:1935
    +
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1782
    +
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1547
    +
    Definition: vk_mem_alloc.h:1962
    +
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2153
    +
    Definition: vk_mem_alloc.h:1874
    +
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1596
    struct VmaPoolStats VmaPoolStats
    Describes parameter of existing VmaPool.
    VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaCreateBuffer().
    -
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1991
    -
    Definition: vk_mem_alloc.h:1982
    +
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1994
    +
    Definition: vk_mem_alloc.h:1985
    VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
    -
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1769
    -
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1589
    -
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2172
    -
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1655
    -
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2203
    -
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1980
    -
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2015
    +
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1772
    +
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1592
    +
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2175
    +
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1658
    +
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2206
    +
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1983
    +
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2018
    void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
    -
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1694
    -
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1785
    -
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1912
    -
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1778
    +
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1697
    +
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1788
    +
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1915
    +
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1781
    VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
    -
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1600
    -
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1625
    -
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1542
    -
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1599
    +
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1603
    +
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1628
    +
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1545
    +
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1602
    VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
    Maps memory represented by given allocation and returns pointer to it.
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2186
    -
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1592
    -
    Definition: vk_mem_alloc.h:1943
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2189
    +
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1595
    +
    Definition: vk_mem_alloc.h:1946
    VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaAllocateMemoryForBuffer().
    struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
    Description of a Allocator to be created.
    -
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1633
    -
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2337
    -
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1649
    -
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1778
    -
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1775
    +
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1636
    +
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2340
    +
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1652
    +
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1781
    +
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1778
    struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    Checks magic number in margins around all allocations in given memory types (in both default and cust...
    -
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2191
    +
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2194
    VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    Checks magic number in margins around all allocations in given memory pool in search for corruptions...
    -
    Definition: vk_mem_alloc.h:1952
    -
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2318
    -
    Definition: vk_mem_alloc.h:1966
    -
    Definition: vk_mem_alloc.h:1978
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2589
    -
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1585
    +
    Definition: vk_mem_alloc.h:1955
    +
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2321
    +
    Definition: vk_mem_alloc.h:1969
    +
    Definition: vk_mem_alloc.h:1981
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2592
    +
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1588
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
    -
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1773
    -
    Definition: vk_mem_alloc.h:1828
    -
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2140
    +
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1776
    +
    Definition: vk_mem_alloc.h:1831
    +
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2143
    void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    -
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1622
    -
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1771
    -
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1597
    -
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1601
    -
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1899
    -
    Definition: vk_mem_alloc.h:1973
    -
    Definition: vk_mem_alloc.h:1855
    -
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2332
    +
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1625
    +
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1774
    +
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1600
    +
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1604
    +
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1902
    +
    Definition: vk_mem_alloc.h:1976
    +
    Definition: vk_mem_alloc.h:1858
    +
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2335
    void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
    Destroys Vulkan image and frees allocated memory.
    -
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1575
    +
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1578
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    -
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1588
    -
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2119
    +
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1591
    +
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2122
    VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
    Tries to resize an allocation in place, if there is enough free memory after it.
    -
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2299
    +
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2302
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation.
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
    struct VmaAllocationCreateInfo VmaAllocationCreateInfo
    VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:1963
    -
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2084
    -
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1779
    +
    Definition: vk_mem_alloc.h:1966
    +
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2087
    +
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1782
    VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
    Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
    - -
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1609
    -
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1786
    + +
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1612
    +
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1789
    void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
    Destroys Vulkan buffer and frees allocated memory.
    -
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2197
    -
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1779
    +
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2200
    +
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1782
    struct VmaRecordSettings VmaRecordSettings
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    -
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2304
    +
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2307