From c005a84e37f5e5465269e540058eab4da645c9bd Mon Sep 17 00:00:00 2001
From: Adam Sawicki
Date: Tue, 17 Oct 2017 11:57:56 +0200
Subject: [PATCH] Added documentation for VK_KHR_dedicated_allocation extension.
---
 .../_v_k__k_h_r_dedicated_allocation.html |  98 ++++++++
 docs/html/index.html                      |   5 +-
 docs/html/search/all_e.js                 |   1 +
 docs/html/search/pages_6.js               |   3 +-
 docs/html/vk__mem__alloc_8h_source.html   | 218 +++++++++---------
 src/vk_mem_alloc.h                        |  65 ++++++
 6 files changed, 279 insertions(+), 111 deletions(-)
 create mode 100644 docs/html/_v_k__k_h_r_dedicated_allocation.html

diff --git a/docs/html/_v_k__k_h_r_dedicated_allocation.html b/docs/html/_v_k__k_h_r_dedicated_allocation.html
new file mode 100644
index 0000000..ca38bb3
--- /dev/null
+++ b/docs/html/_v_k__k_h_r_dedicated_allocation.html
@@ -0,0 +1,98 @@
Vulkan Memory Allocator: VK_KHR_dedicated_allocation
+
+
+
VK_KHR_dedicated_allocation
+
+
+

VK_KHR_dedicated_allocation is a Vulkan extension that can improve performance on some GPUs. It augments the Vulkan API with the ability to ask the driver whether a particular buffer or image prefers to have its own, dedicated allocation (a separate VkDeviceMemory block), so that the driver can apply internal optimizations.
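For reference, this is roughly what the query introduced by this extension (together with VK_KHR_get_memory_requirements2) looks like in raw Vulkan. A minimal sketch, assuming device and buffer are a valid VkDevice and VkBuffer and vkGetBufferMemoryRequirements2KHR was fetched with vkGetDeviceProcAddr():

VkMemoryDedicatedRequirementsKHR dedicatedReqs = {};
dedicatedReqs.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR;

VkMemoryRequirements2KHR memReqs2 = {};
memReqs2.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR;
memReqs2.pNext = &dedicatedReqs;

VkBufferMemoryRequirementsInfo2KHR bufferReqsInfo = {};
bufferReqsInfo.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR;
bufferReqsInfo.buffer = buffer;

vkGetBufferMemoryRequirements2KHR(device, &bufferReqsInfo, &memReqs2);

if(dedicatedReqs.prefersDedicatedAllocation)
{
    // The driver reports that this buffer may be more efficient
    // in its own VkDeviceMemory block.
}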

The extension is supported by this library. It will be used automatically when enabled. To enable it:


1. When creating your Vulkan device, check whether the following two device extensions are supported (call vkEnumerateDeviceExtensionProperties()). If they are, enable them (fill VkDeviceCreateInfo::ppEnabledExtensionNames), as shown in the sketch after this list:

  • VK_KHR_get_memory_requirements2
  • VK_KHR_dedicated_allocation

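A minimal sketch of this check, assuming physicalDevice is your chosen VkPhysicalDevice and that <vector> and <cstring> are included (variable names are illustrative):

uint32_t extCount = 0;
vkEnumerateDeviceExtensionProperties(physicalDevice, nullptr, &extCount, nullptr);
std::vector<VkExtensionProperties> extensions(extCount);
vkEnumerateDeviceExtensionProperties(physicalDevice, nullptr, &extCount, extensions.data());

bool memReq2Supported = false, dedicatedAllocSupported = false;
for(const VkExtensionProperties& ext : extensions)
{
    if(strcmp(ext.extensionName, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME) == 0)
        memReq2Supported = true;
    else if(strcmp(ext.extensionName, VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME) == 0)
        dedicatedAllocSupported = true;
}

const char* enabledExtensionNames[] = {
    VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
    VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME,
};
VkDeviceCreateInfo deviceCreateInfo = { VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO };
// Fill queue create infos, enabled features etc. as usual...
if(memReq2Supported && dedicatedAllocSupported)
{
    deviceCreateInfo.enabledExtensionCount = 2;
    deviceCreateInfo.ppEnabledExtensionNames = enabledExtensionNames;
}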

If you enabled these extensions:


2. Query the device for pointers to the following two extension functions, using vkGetDeviceProcAddr(). Pass them in the VmaVulkanFunctions structure while creating your VmaAllocator:

  • vkGetBufferMemoryRequirements2KHR
  • vkGetImageMemoryRequirements2KHR


Other members of this structure can be null as long as you leave VMA_STATIC_VULKAN_FUNCTIONS defined to 1, which is the default.

VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(device, "vkGetBufferMemoryRequirements2KHR");
vulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(device, "vkGetImageMemoryRequirements2KHR");
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
// Fill other members of allocatorInfo...
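If you instead fetch all Vulkan function pointers dynamically yourself, a sketch of the alternative, based on the CONFIGURATION section of this header: define VMA_STATIC_VULKAN_FUNCTIONS to 0 before including the library implementation and fill all members of VmaVulkanFunctions via VmaAllocatorCreateInfo::pVulkanFunctions.

#define VMA_IMPLEMENTATION
#define VMA_STATIC_VULKAN_FUNCTIONS 0
#include "vk_mem_alloc.h"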

3. Use the VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating your VmaAllocator to inform the library that you enabled the required extensions and want it to use them.

allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;

vmaCreateAllocator(&allocatorInfo, &allocator);

That's all. The extension will be used automatically whenever you create a buffer using vmaCreateBuffer() or an image using vmaCreateImage().

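For example, a typical buffer creation needs no changes to benefit from the extension; a sketch, assuming allocator was created as above (size and usage flags are illustrative):

VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = 65536;
bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
// If the driver prefers a dedicated allocation for this buffer, the library
// transparently allocates a separate VkDeviceMemory block for it.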

To learn more about this extension, see:

  • VK_KHR_dedicated_allocation in the Vulkan specification
  • VK_KHR_dedicated_allocation unofficial manual by Adam Sawicki
diff --git a/docs/html/index.html b/docs/html/index.html
index 572e27b..34f866f 100644
--- a/docs/html/index.html
+++ b/docs/html/index.html
@@ -76,7 +76,10 @@ License: MIT

   • Lost allocations
-  • Configuration
+  • Configuration
+  • VK_KHR_dedicated_allocation
   • Thread safety
   • See also:

diff --git a/docs/html/search/all_e.js b/docs/html/search/all_e.js
index 7b28f60..718f72e 100644
--- a/docs/html/search/all_e.js
+++ b/docs/html/search/all_e.js
@@ -1,6 +1,7 @@
 var searchData=
 [
   ['vulkan_20memory_20allocator',['Vulkan Memory Allocator',['../index.html',1,'']]],
+  ['vk_5fkhr_5fdedicated_5fallocation',['VK_KHR_dedicated_allocation',['../_v_k__k_h_r_dedicated_allocation.html',1,'index']]],
   ['vk_5fmem_5falloc_2eh',['vk_mem_alloc.h',['../vk__mem__alloc_8h.html',1,'']]],
   ['vkallocatememory',['vkAllocateMemory',['../struct_vma_vulkan_functions.html#a2943bf99dfd784a0e8f599d987e22e6c',1,'VmaVulkanFunctions']]],
   ['vkbindbuffermemory',['vkBindBufferMemory',['../struct_vma_vulkan_functions.html#a94fc4f3a605d9880bb3c0ba2c2fc80b2',1,'VmaVulkanFunctions']]],
diff --git a/docs/html/search/pages_6.js b/docs/html/search/pages_6.js
index 61d5dd4..e2d4aaf 100644
--- a/docs/html/search/pages_6.js
+++ b/docs/html/search/pages_6.js
@@ -1,4 +1,5 @@
 var searchData=
 [
-  ['vulkan_20memory_20allocator',['Vulkan Memory Allocator',['../index.html',1,'']]]
+  ['vulkan_20memory_20allocator',['Vulkan Memory Allocator',['../index.html',1,'']]],
+  ['vk_5fkhr_5fdedicated_5fallocation',['VK_KHR_dedicated_allocation',['../_v_k__k_h_r_dedicated_allocation.html',1,'index']]]
 ];
diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html
index 8422c7e..d06c214 100644
--- a/docs/html/vk__mem__alloc_8h_source.html
+++ b/docs/html/vk__mem__alloc_8h_source.html
@@ -62,156 +62,156 @@ $(function() {
    vk_mem_alloc.h
    Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    410 #include <vulkan/vulkan.h>
    411 
    412 VK_DEFINE_HANDLE(VmaAllocator)
    413 
    414 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    416  VmaAllocator allocator,
    417  uint32_t memoryType,
    418  VkDeviceMemory memory,
    419  VkDeviceSize size);
    421 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    422  VmaAllocator allocator,
    423  uint32_t memoryType,
    424  VkDeviceMemory memory,
    425  VkDeviceSize size);
    426 
    434 typedef struct VmaDeviceMemoryCallbacks {
    440 
    476 
    479 typedef VkFlags VmaAllocatorCreateFlags;
    480 
    485 typedef struct VmaVulkanFunctions {
    486  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    487  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    488  PFN_vkAllocateMemory vkAllocateMemory;
    489  PFN_vkFreeMemory vkFreeMemory;
    490  PFN_vkMapMemory vkMapMemory;
    491  PFN_vkUnmapMemory vkUnmapMemory;
    492  PFN_vkBindBufferMemory vkBindBufferMemory;
    493  PFN_vkBindImageMemory vkBindImageMemory;
    494  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    495  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    496  PFN_vkCreateBuffer vkCreateBuffer;
    497  PFN_vkDestroyBuffer vkDestroyBuffer;
    498  PFN_vkCreateImage vkCreateImage;
    499  PFN_vkDestroyImage vkDestroyImage;
    500  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    501  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    503 
    506 {
    508  VmaAllocatorCreateFlags flags;
    510 
    511  VkPhysicalDevice physicalDevice;
    513 
    514  VkDevice device;
    516 
    519 
    522 
    523  const VkAllocationCallbacks* pAllocationCallbacks;
    525 
    540  uint32_t frameInUseCount;
    558  const VkDeviceSize* pHeapSizeLimit;
    572 
    574 VkResult vmaCreateAllocator(
    575  const VmaAllocatorCreateInfo* pCreateInfo,
    576  VmaAllocator* pAllocator);
    577 
    580  VmaAllocator allocator);
    581 
    587  VmaAllocator allocator,
    588  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    589 
    595  VmaAllocator allocator,
    596  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    597 
    605  VmaAllocator allocator,
    606  uint32_t memoryTypeIndex,
    607  VkMemoryPropertyFlags* pFlags);
    608 
    618  VmaAllocator allocator,
    619  uint32_t frameIndex);
    620 
    623 typedef struct VmaStatInfo
    624 {
    626  uint32_t blockCount;
    628  uint32_t allocationCount;
    632  VkDeviceSize usedBytes;
    634  VkDeviceSize unusedBytes;
    635  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    636  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
    637 } VmaStatInfo;
    638 
    640 typedef struct VmaStats
    641 {
    642  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    643  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    645 } VmaStats;
    646 
    648 void vmaCalculateStats(
    649  VmaAllocator allocator,
    650  VmaStats* pStats);
    651 
    652 #define VMA_STATS_STRING_ENABLED 1
    653 
    654 #if VMA_STATS_STRING_ENABLED
    655 
    657 
    660  VmaAllocator allocator,
    661  char** ppStatsString,
    662  VkBool32 detailedMap);
    663 
    664 void vmaFreeStatsString(
    665  VmaAllocator allocator,
    666  char* pStatsString);
    667 
    668 #endif // #if VMA_STATS_STRING_ENABLED
    669 
    670 VK_DEFINE_HANDLE(VmaPool)
    671 
    672 typedef enum VmaMemoryUsage
    673 {
    679 
    682 
    685 
    689 
    704 
    749 
    752 typedef VkFlags VmaAllocationCreateFlags;
    753 
    755 {
    757  VmaAllocationCreateFlags flags;
    768  VkMemoryPropertyFlags requiredFlags;
    774  VkMemoryPropertyFlags preferredFlags;
    776  void* pUserData;
    781  VmaPool pool;
    783 
    798 VkResult vmaFindMemoryTypeIndex(
    799  VmaAllocator allocator,
    800  uint32_t memoryTypeBits,
    801  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    802  uint32_t* pMemoryTypeIndex);
    803 
    805 typedef enum VmaPoolCreateFlagBits {
    833 
    836 typedef VkFlags VmaPoolCreateFlags;
    837 
    840 typedef struct VmaPoolCreateInfo {
    843  uint32_t memoryTypeIndex;
    846  VmaPoolCreateFlags flags;
    851  VkDeviceSize blockSize;
    878  uint32_t frameInUseCount;
    880 
    883 typedef struct VmaPoolStats {
    886  VkDeviceSize size;
    889  VkDeviceSize unusedSize;
    902  VkDeviceSize unusedRangeSizeMax;
    903 } VmaPoolStats;
    904 
    911 VkResult vmaCreatePool(
    912  VmaAllocator allocator,
    913  const VmaPoolCreateInfo* pCreateInfo,
    914  VmaPool* pPool);
    915 
    918 void vmaDestroyPool(
    919  VmaAllocator allocator,
    920  VmaPool pool);
    921 
    928 void vmaGetPoolStats(
    929  VmaAllocator allocator,
    930  VmaPool pool,
    931  VmaPoolStats* pPoolStats);
    932 
    940  VmaAllocator allocator,
    941  VmaPool pool,
    942  size_t* pLostAllocationCount);
    943 
    944 VK_DEFINE_HANDLE(VmaAllocation)
    945 
    946 
    948 typedef struct VmaAllocationInfo {
    953  uint32_t memoryType;
    962  VkDeviceMemory deviceMemory;
    967  VkDeviceSize offset;
    972  VkDeviceSize size;
    978  void* pMappedData;
    983  void* pUserData;
    985 
    996 VkResult vmaAllocateMemory(
    997  VmaAllocator allocator,
    998  const VkMemoryRequirements* pVkMemoryRequirements,
    999  const VmaAllocationCreateInfo* pCreateInfo,
    1000  VmaAllocation* pAllocation,
    1001  VmaAllocationInfo* pAllocationInfo);
    1002 
    1010  VmaAllocator allocator,
    1011  VkBuffer buffer,
    1012  const VmaAllocationCreateInfo* pCreateInfo,
    1013  VmaAllocation* pAllocation,
    1014  VmaAllocationInfo* pAllocationInfo);
    1015 
    1017 VkResult vmaAllocateMemoryForImage(
    1018  VmaAllocator allocator,
    1019  VkImage image,
    1020  const VmaAllocationCreateInfo* pCreateInfo,
    1021  VmaAllocation* pAllocation,
    1022  VmaAllocationInfo* pAllocationInfo);
    1023 
    1025 void vmaFreeMemory(
    1026  VmaAllocator allocator,
    1027  VmaAllocation allocation);
    1028 
    1031  VmaAllocator allocator,
    1032  VmaAllocation allocation,
    1033  VmaAllocationInfo* pAllocationInfo);
    1034 
    1037  VmaAllocator allocator,
    1038  VmaAllocation allocation,
    1039  void* pUserData);
    1040 
    1052  VmaAllocator allocator,
    1053  VmaAllocation* pAllocation);
    1054 
    1063 VkResult vmaMapMemory(
    1064  VmaAllocator allocator,
    1065  VmaAllocation allocation,
    1066  void** ppData);
    1067 
    1068 void vmaUnmapMemory(
    1069  VmaAllocator allocator,
    1070  VmaAllocation allocation);
    1071 
    1093 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);
    1094 
    1102 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);
    1103 
    1105 typedef struct VmaDefragmentationInfo {
    1110  VkDeviceSize maxBytesToMove;
    1117 
    1119 typedef struct VmaDefragmentationStats {
    1121  VkDeviceSize bytesMoved;
    1123  VkDeviceSize bytesFreed;
    1129 
    1200 VkResult vmaDefragment(
    1201  VmaAllocator allocator,
    1202  VmaAllocation* pAllocations,
    1203  size_t allocationCount,
    1204  VkBool32* pAllocationsChanged,
    1205  const VmaDefragmentationInfo *pDefragmentationInfo,
    1206  VmaDefragmentationStats* pDefragmentationStats);
    1207 
    1226 VkResult vmaCreateBuffer(
    1227  VmaAllocator allocator,
    1228  const VkBufferCreateInfo* pBufferCreateInfo,
    1229  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1230  VkBuffer* pBuffer,
    1231  VmaAllocation* pAllocation,
    1232  VmaAllocationInfo* pAllocationInfo);
    1233 
    1242 void vmaDestroyBuffer(
    1243  VmaAllocator allocator,
    1244  VkBuffer buffer,
    1245  VmaAllocation allocation);
    1246 
    1248 VkResult vmaCreateImage(
    1249  VmaAllocator allocator,
    1250  const VkImageCreateInfo* pImageCreateInfo,
    1251  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1252  VkImage* pImage,
    1253  VmaAllocation* pAllocation,
    1254  VmaAllocationInfo* pAllocationInfo);
    1255 
    1264 void vmaDestroyImage(
    1265  VmaAllocator allocator,
    1266  VkImage image,
    1267  VmaAllocation allocation);
    1268 
    1269 #ifdef __cplusplus
    1270 }
    1271 #endif
    1272 
    1273 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    1274 
    1275 // For Visual Studio IntelliSense.
    1276 #ifdef __INTELLISENSE__
    1277 #define VMA_IMPLEMENTATION
    1278 #endif
    1279 
    1280 #ifdef VMA_IMPLEMENTATION
    1281 #undef VMA_IMPLEMENTATION
    1282 
    1283 #include <cstdint>
    1284 #include <cstdlib>
    1285 #include <cstring>
    1286 
    1287 /*******************************************************************************
    1288 CONFIGURATION SECTION
    1289 
    1290 Define some of these macros before each #include of this header or change them
    1291 here if you need other than the default behavior, depending on your environment.
    1292 */
    1293 
    1294 /*
    1295 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    1296 internally, like:
    1297 
    1298  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    1299 
    1300 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    1301 VmaAllocatorCreateInfo::pVulkanFunctions.
    1302 */
    1303 #ifndef VMA_STATIC_VULKAN_FUNCTIONS
    1304 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    1305 #endif
    1306 
    1307 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    1308 //#define VMA_USE_STL_CONTAINERS 1
    1309 
    1310 /* Set this macro to 1 to make the library include and use STL containers:
    1311 std::pair, std::vector, std::list, std::unordered_map.
    1312 
    1313 Set it to 0 or leave it undefined to make the library use its own implementation of
    1314 the containers.
    1315 */
    1316 #if VMA_USE_STL_CONTAINERS
    1317  #define VMA_USE_STL_VECTOR 1
    1318  #define VMA_USE_STL_UNORDERED_MAP 1
    1319  #define VMA_USE_STL_LIST 1
    1320 #endif
    1321 
    1322 #if VMA_USE_STL_VECTOR
    1323  #include <vector>
    1324 #endif
    1325 
    1326 #if VMA_USE_STL_UNORDERED_MAP
    1327  #include <unordered_map>
    1328 #endif
    1329 
    1330 #if VMA_USE_STL_LIST
    1331  #include <list>
    1332 #endif
    1333 
    1334 /*
    1335 The following headers are used in this CONFIGURATION section only, so feel free to
    1336 remove them if not needed.
    1337 */
    1338 #include <cassert> // for assert
    1339 #include <algorithm> // for min, max
    1340 #include <mutex> // for std::mutex
    1341 #include <atomic> // for std::atomic
    1342 
    1343 #if !defined(_WIN32)
    1344  #include <malloc.h> // for aligned_alloc()
    1345 #endif
    1346 
    1347 // Normal assert to check for programmer's errors, especially in Debug configuration.
    1348 #ifndef VMA_ASSERT
    1349  #ifdef _DEBUG
    1350  #define VMA_ASSERT(expr) assert(expr)
    1351  #else
    1352  #define VMA_ASSERT(expr)
    1353  #endif
    1354 #endif
    1355 
    1356 // Assert that will be called very often, like inside data structures e.g. operator[].
    1357 // Making it non-empty can make the program slow.
    1358 #ifndef VMA_HEAVY_ASSERT
    1359  #ifdef _DEBUG
    1360  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    1361  #else
    1362  #define VMA_HEAVY_ASSERT(expr)
    1363  #endif
    1364 #endif
    1365 
    1366 #ifndef VMA_NULL
    1367  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    1368  #define VMA_NULL nullptr
    1369 #endif
    1370 
    1371 #ifndef VMA_ALIGN_OF
    1372  #define VMA_ALIGN_OF(type) (__alignof(type))
    1373 #endif
    1374 
    1375 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    1376  #if defined(_WIN32)
    1377  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    1378  #else
    1379  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    1380  #endif
    1381 #endif
    1382 
    1383 #ifndef VMA_SYSTEM_FREE
    1384  #if defined(_WIN32)
    1385  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    1386  #else
    1387  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    1388  #endif
    1389 #endif
    1390 
    1391 #ifndef VMA_MIN
    1392  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    1393 #endif
    1394 
    1395 #ifndef VMA_MAX
    1396  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    1397 #endif
    1398 
    1399 #ifndef VMA_SWAP
    1400  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    1401 #endif
    1402 
    1403 #ifndef VMA_SORT
    1404  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    1405 #endif
    1406 
    1407 #ifndef VMA_DEBUG_LOG
    1408  #define VMA_DEBUG_LOG(format, ...)
    1409  /*
    1410  #define VMA_DEBUG_LOG(format, ...) do { \
    1411  printf(format, __VA_ARGS__); \
    1412  printf("\n"); \
    1413  } while(false)
    1414  */
    1415 #endif
    1416 
    1417 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    1418 #if VMA_STATS_STRING_ENABLED
    1419  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    1420  {
    1421  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    1422  }
    1423  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    1424  {
    1425  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    1426  }
    1427  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    1428  {
    1429  snprintf(outStr, strLen, "%p", ptr);
    1430  }
    1431 #endif
    1432 
    1433 #ifndef VMA_MUTEX
    1434  class VmaMutex
    1435  {
    1436  public:
    1437  VmaMutex() { }
    1438  ~VmaMutex() { }
    1439  void Lock() { m_Mutex.lock(); }
    1440  void Unlock() { m_Mutex.unlock(); }
    1441  private:
    1442  std::mutex m_Mutex;
    1443  };
    1444  #define VMA_MUTEX VmaMutex
    1445 #endif
    1446 
    1447 /*
    1448 If providing your own implementation, you need to implement a subset of std::atomic:
    1449 
    1450 - Constructor(uint32_t desired)
    1451 - uint32_t load() const
    1452 - void store(uint32_t desired)
    1453 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    1454 */
    1455 #ifndef VMA_ATOMIC_UINT32
    1456  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    1457 #endif
    1458 
    1459 #ifndef VMA_BEST_FIT
    1460 
    1472  #define VMA_BEST_FIT (1)
    1473 #endif
    1474 
    1475 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    1476 
    1480  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    1481 #endif
    1482 
    1483 #ifndef VMA_DEBUG_ALIGNMENT
    1484 
    1488  #define VMA_DEBUG_ALIGNMENT (1)
    1489 #endif
    1490 
    1491 #ifndef VMA_DEBUG_MARGIN
    1492 
    1496  #define VMA_DEBUG_MARGIN (0)
    1497 #endif
    1498 
    1499 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    1500 
    1504  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    1505 #endif
    1506 
    1507 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    1508 
    1512  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    1513 #endif
    1514 
    1515 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    1516  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
    1518 #endif
    1519 
    1520 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    1521  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
    1523 #endif
    1524 
    1525 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    1526  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
    1528 #endif
    1529 
    1530 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    1531 
    1532 /*******************************************************************************
    1533 END OF CONFIGURATION
    1534 */
    1535 
    1536 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    1537  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    1538 
    1539 // Returns number of bits set to 1 in (v).
    1540 static inline uint32_t CountBitsSet(uint32_t v)
    1541 {
    1542  uint32_t c = v - ((v >> 1) & 0x55555555);
    1543  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    1544  c = ((c >> 4) + c) & 0x0F0F0F0F;
    1545  c = ((c >> 8) + c) & 0x00FF00FF;
    1546  c = ((c >> 16) + c) & 0x0000FFFF;
    1547  return c;
    1548 }
    1549 
    1550 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
    1551 // Use types like uint32_t, uint64_t as T.
    1552 template <typename T>
    1553 static inline T VmaAlignUp(T val, T align)
    1554 {
    1555  return (val + align - 1) / align * align;
    1556 }
    1557 
    1558 // Division with mathematical rounding to nearest number.
    1559 template <typename T>
    1560 inline T VmaRoundDiv(T x, T y)
    1561 {
    1562  return (x + (y / (T)2)) / y;
    1563 }
    1564 
    1565 #ifndef VMA_SORT
    1566 
    1567 template<typename Iterator, typename Compare>
    1568 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    1569 {
    1570  Iterator centerValue = end; --centerValue;
    1571  Iterator insertIndex = beg;
    1572  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    1573  {
    1574  if(cmp(*memTypeIndex, *centerValue))
    1575  {
    1576  if(insertIndex != memTypeIndex)
    1577  {
    1578  VMA_SWAP(*memTypeIndex, *insertIndex);
    1579  }
    1580  ++insertIndex;
    1581  }
    1582  }
    1583  if(insertIndex != centerValue)
    1584  {
    1585  VMA_SWAP(*insertIndex, *centerValue);
    1586  }
    1587  return insertIndex;
    1588 }
    1589 
    1590 template<typename Iterator, typename Compare>
    1591 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    1592 {
    1593  if(beg < end)
    1594  {
    1595  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    1596  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    1597  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    1598  }
    1599 }
    1600 
    1601 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    1602 
    1603 #endif // #ifndef VMA_SORT
    1604 
    1605 /*
    1606 Returns true if two memory blocks occupy overlapping pages.
    1607 ResourceA must be at a lower memory offset than ResourceB.
    1608 
    1609 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    1610 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    1611 */
    1612 static inline bool VmaBlocksOnSamePage(
    1613  VkDeviceSize resourceAOffset,
    1614  VkDeviceSize resourceASize,
    1615  VkDeviceSize resourceBOffset,
    1616  VkDeviceSize pageSize)
    1617 {
    1618  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    1619  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    1620  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    1621  VkDeviceSize resourceBStart = resourceBOffset;
    1622  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    1623  return resourceAEndPage == resourceBStartPage;
    1624 }
    1625 
    1626 enum VmaSuballocationType
    1627 {
    1628  VMA_SUBALLOCATION_TYPE_FREE = 0,
    1629  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    1630  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    1631  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    1632  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    1633  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    1634  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    1635 };
    1636 
    1637 /*
    1638 Returns true if given suballocation types could conflict and must respect
    1639 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    1640 or linear image and another one is optimal image. If type is unknown, behave
    1641 conservatively.
    1642 */
    1643 static inline bool VmaIsBufferImageGranularityConflict(
    1644  VmaSuballocationType suballocType1,
    1645  VmaSuballocationType suballocType2)
    1646 {
    1647  if(suballocType1 > suballocType2)
    1648  {
    1649  VMA_SWAP(suballocType1, suballocType2);
    1650  }
    1651 
    1652  switch(suballocType1)
    1653  {
    1654  case VMA_SUBALLOCATION_TYPE_FREE:
    1655  return false;
    1656  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    1657  return true;
    1658  case VMA_SUBALLOCATION_TYPE_BUFFER:
    1659  return
    1660  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    1661  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1662  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    1663  return
    1664  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    1665  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    1666  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1667  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    1668  return
    1669  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1670  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    1671  return false;
    1672  default:
    1673  VMA_ASSERT(0);
    1674  return true;
    1675  }
    1676 }
    1677 
    1678 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    1679 struct VmaMutexLock
    1680 {
    1681 public:
    1682  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    1683  m_pMutex(useMutex ? &mutex : VMA_NULL)
    1684  {
    1685  if(m_pMutex)
    1686  {
    1687  m_pMutex->Lock();
    1688  }
    1689  }
    1690 
    1691  ~VmaMutexLock()
    1692  {
    1693  if(m_pMutex)
    1694  {
    1695  m_pMutex->Unlock();
    1696  }
    1697  }
    1698 
    1699 private:
    1700  VMA_MUTEX* m_pMutex;
    1701 };
    1702 
    1703 #if VMA_DEBUG_GLOBAL_MUTEX
    1704  static VMA_MUTEX gDebugGlobalMutex;
    1705  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    1706 #else
    1707  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    1708 #endif
    1709 
    1710 // Minimum size of a free suballocation to register it in the free suballocation collection.
    1711 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    1712 
    1713 /*
    1714 Performs binary search and returns an iterator to the first element that is
    1715 greater than or equal to (key), according to the comparison (cmp).
    1716 
    1717 Cmp should return true if its first argument is less than its second argument.
    1718 
    1719 The returned value is the found element, if present in the collection, or the
    1720 place where a new element with value (key) should be inserted.
    1721 */
    1722 template <typename IterT, typename KeyT, typename CmpT>
    1723 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
    1724 {
    1725  size_t down = 0, up = (end - beg);
    1726  while(down < up)
    1727  {
    1728  const size_t mid = (down + up) / 2;
    1729  if(cmp(*(beg+mid), key))
    1730  {
    1731  down = mid + 1;
    1732  }
    1733  else
    1734  {
    1735  up = mid;
    1736  }
    1737  }
    1738  return beg + down;
    1739 }
    1740 
    1742 // Memory allocation
    1743 
    1744 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    1745 {
    1746  if((pAllocationCallbacks != VMA_NULL) &&
    1747  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    1748  {
    1749  return (*pAllocationCallbacks->pfnAllocation)(
    1750  pAllocationCallbacks->pUserData,
    1751  size,
    1752  alignment,
    1753  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    1754  }
    1755  else
    1756  {
    1757  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    1758  }
    1759 }
    1760 
    1761 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    1762 {
    1763  if((pAllocationCallbacks != VMA_NULL) &&
    1764  (pAllocationCallbacks->pfnFree != VMA_NULL))
    1765  {
    1766  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    1767  }
    1768  else
    1769  {
    1770  VMA_SYSTEM_FREE(ptr);
    1771  }
    1772 }
    1773 
    1774 template<typename T>
    1775 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    1776 {
    1777  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    1778 }
    1779 
    1780 template<typename T>
    1781 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    1782 {
    1783  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    1784 }
    1785 
    1786 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    1787 
    1788 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    1789 
    1790 template<typename T>
    1791 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    1792 {
    1793  ptr->~T();
    1794  VmaFree(pAllocationCallbacks, ptr);
    1795 }
    1796 
    1797 template<typename T>
    1798 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    1799 {
    1800  if(ptr != VMA_NULL)
    1801  {
    1802  for(size_t i = count; i--; )
    1803  {
    1804  ptr[i].~T();
    1805  }
    1806  VmaFree(pAllocationCallbacks, ptr);
    1807  }
    1808 }
    1809 
    1810 // STL-compatible allocator.
    1811 template<typename T>
    1812 class VmaStlAllocator
    1813 {
    1814 public:
    1815  const VkAllocationCallbacks* const m_pCallbacks;
    1816  typedef T value_type;
    1817 
    1818  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    1819  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    1820 
    1821  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    1822  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    1823 
    1824  template<typename U>
    1825  bool operator==(const VmaStlAllocator<U>& rhs) const
    1826  {
    1827  return m_pCallbacks == rhs.m_pCallbacks;
    1828  }
    1829  template<typename U>
    1830  bool operator!=(const VmaStlAllocator<U>& rhs) const
    1831  {
    1832  return m_pCallbacks != rhs.m_pCallbacks;
    1833  }
    1834 
    1835  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    1836 };
    1837 
    1838 #if VMA_USE_STL_VECTOR
    1839 
    1840 #define VmaVector std::vector
    1841 
    1842 template<typename T, typename allocatorT>
    1843 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    1844 {
    1845  vec.insert(vec.begin() + index, item);
    1846 }
    1847 
    1848 template<typename T, typename allocatorT>
    1849 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    1850 {
    1851  vec.erase(vec.begin() + index);
    1852 }
    1853 
    1854 #else // #if VMA_USE_STL_VECTOR
    1855 
    1856 /* Class with an interface compatible with a subset of std::vector.
    1857 T must be POD because constructors and destructors are not called and memcpy is
    1858 used for these objects. */
    1859 template<typename T, typename AllocatorT>
    1860 class VmaVector
    1861 {
    1862 public:
    1863  typedef T value_type;
    1864 
    1865  VmaVector(const AllocatorT& allocator) :
    1866  m_Allocator(allocator),
    1867  m_pArray(VMA_NULL),
    1868  m_Count(0),
    1869  m_Capacity(0)
    1870  {
    1871  }
    1872 
    1873  VmaVector(size_t count, const AllocatorT& allocator) :
    1874  m_Allocator(allocator),
    1875  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    1876  m_Count(count),
    1877  m_Capacity(count)
    1878  {
    1879  }
    1880 
    1881  VmaVector(const VmaVector<T, AllocatorT>& src) :
    1882  m_Allocator(src.m_Allocator),
    1883  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    1884  m_Count(src.m_Count),
    1885  m_Capacity(src.m_Count)
    1886  {
    1887  if(m_Count != 0)
    1888  {
    1889  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    1890  }
    1891  }
    1892 
    1893  ~VmaVector()
    1894  {
    1895  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    1896  }
    1897 
    1898  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    1899  {
    1900  if(&rhs != this)
    1901  {
    1902  resize(rhs.m_Count);
    1903  if(m_Count != 0)
    1904  {
    1905  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    1906  }
    1907  }
    1908  return *this;
    1909  }
    1910 
    1911  bool empty() const { return m_Count == 0; }
    1912  size_t size() const { return m_Count; }
    1913  T* data() { return m_pArray; }
    1914  const T* data() const { return m_pArray; }
    1915 
    1916  T& operator[](size_t index)
    1917  {
    1918  VMA_HEAVY_ASSERT(index < m_Count);
    1919  return m_pArray[index];
    1920  }
    1921  const T& operator[](size_t index) const
    1922  {
    1923  VMA_HEAVY_ASSERT(index < m_Count);
    1924  return m_pArray[index];
    1925  }
    1926 
    1927  T& front()
    1928  {
    1929  VMA_HEAVY_ASSERT(m_Count > 0);
    1930  return m_pArray[0];
    1931  }
    1932  const T& front() const
    1933  {
    1934  VMA_HEAVY_ASSERT(m_Count > 0);
    1935  return m_pArray[0];
    1936  }
    1937  T& back()
    1938  {
    1939  VMA_HEAVY_ASSERT(m_Count > 0);
    1940  return m_pArray[m_Count - 1];
    1941  }
    1942  const T& back() const
    1943  {
    1944  VMA_HEAVY_ASSERT(m_Count > 0);
    1945  return m_pArray[m_Count - 1];
    1946  }
    1947 
    1948  void reserve(size_t newCapacity, bool freeMemory = false)
    1949  {
    1950  newCapacity = VMA_MAX(newCapacity, m_Count);
    1951 
    1952  if((newCapacity < m_Capacity) && !freeMemory)
    1953  {
    1954  newCapacity = m_Capacity;
    1955  }
    1956 
    1957  if(newCapacity != m_Capacity)
    1958  {
    1959  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    1960  if(m_Count != 0)
    1961  {
    1962  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    1963  }
    1964  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    1965  m_Capacity = newCapacity;
    1966  m_pArray = newArray;
    1967  }
    1968  }
    1969 
    1970  void resize(size_t newCount, bool freeMemory = false)
    1971  {
    1972  size_t newCapacity = m_Capacity;
    1973  if(newCount > m_Capacity)
    1974  {
    1975  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    1976  }
    1977  else if(freeMemory)
    1978  {
    1979  newCapacity = newCount;
    1980  }
    1981 
    1982  if(newCapacity != m_Capacity)
    1983  {
    1984  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    1985  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    1986  if(elementsToCopy != 0)
    1987  {
    1988  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    1989  }
    1990  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    1991  m_Capacity = newCapacity;
    1992  m_pArray = newArray;
    1993  }
    1994 
    1995  m_Count = newCount;
    1996  }
    1997 
    1998  void clear(bool freeMemory = false)
    1999  {
    2000  resize(0, freeMemory);
    2001  }
    2002 
    2003  void insert(size_t index, const T& src)
    2004  {
    2005  VMA_HEAVY_ASSERT(index <= m_Count);
    2006  const size_t oldCount = size();
    2007  resize(oldCount + 1);
    2008  if(index < oldCount)
    2009  {
    2010  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    2011  }
    2012  m_pArray[index] = src;
    2013  }
    2014 
    2015  void remove(size_t index)
    2016  {
    2017  VMA_HEAVY_ASSERT(index < m_Count);
    2018  const size_t oldCount = size();
    2019  if(index < oldCount - 1)
    2020  {
    2021  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    2022  }
    2023  resize(oldCount - 1);
    2024  }
    2025 
    2026  void push_back(const T& src)
    2027  {
    2028  const size_t newIndex = size();
    2029  resize(newIndex + 1);
    2030  m_pArray[newIndex] = src;
    2031  }
    2032 
    2033  void pop_back()
    2034  {
    2035  VMA_HEAVY_ASSERT(m_Count > 0);
    2036  resize(size() - 1);
    2037  }
    2038 
    2039  void push_front(const T& src)
    2040  {
    2041  insert(0, src);
    2042  }
    2043 
    2044  void pop_front()
    2045  {
    2046  VMA_HEAVY_ASSERT(m_Count > 0);
    2047  remove(0);
    2048  }
    2049 
    2050  typedef T* iterator;
    2051 
    2052  iterator begin() { return m_pArray; }
    2053  iterator end() { return m_pArray + m_Count; }
    2054 
    2055 private:
    2056  AllocatorT m_Allocator;
    2057  T* m_pArray;
    2058  size_t m_Count;
    2059  size_t m_Capacity;
    2060 };
    2061 
    2062 template<typename T, typename allocatorT>
    2063 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    2064 {
    2065  vec.insert(index, item);
    2066 }
    2067 
    2068 template<typename T, typename allocatorT>
    2069 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    2070 {
    2071  vec.remove(index);
    2072 }
    2073 
    2074 #endif // #if VMA_USE_STL_VECTOR
    2075 
    2076 template<typename CmpLess, typename VectorT>
    2077 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    2078 {
    2079  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    2080  vector.data(),
    2081  vector.data() + vector.size(),
    2082  value,
    2083  CmpLess()) - vector.data();
    2084  VmaVectorInsert(vector, indexToInsert, value);
    2085  return indexToInsert;
    2086 }
    2087 
    2088 template<typename CmpLess, typename VectorT>
    2089 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    2090 {
    2091  CmpLess comparator;
    2092  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    2093  vector.begin(),
    2094  vector.end(),
    2095  value,
    2096  comparator);
    2097  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    2098  {
    2099  size_t indexToRemove = it - vector.begin();
    2100  VmaVectorRemove(vector, indexToRemove);
    2101  return true;
    2102  }
    2103  return false;
    2104 }
    2105 
    2106 template<typename CmpLess, typename VectorT>
    2107 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
    2108 {
    2109  CmpLess comparator;
    2110  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
    2111  vector.data(),
    2112  vector.data() + vector.size(),
    2113  value,
    2114  comparator);
    2115  if(it != vector.data() + vector.size() && !comparator(*it, value) && !comparator(value, *it))
    2116  {
    2117  return it - vector.data();
    2118  }
    2119  else
    2120  {
    2121  return vector.size();
    2122  }
    2123 }
    2124 
    2126 // class VmaPoolAllocator
    2127 
    2128 /*
    2129 Allocator for objects of type T using a list of arrays (pools) to speed up
    2130 allocation. The number of elements that can be allocated is not bounded,
    2131 because the allocator can create multiple blocks.
    2132 */
    2133 template<typename T>
    2134 class VmaPoolAllocator
    2135 {
    2136 public:
    2137  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    2138  ~VmaPoolAllocator();
    2139  void Clear();
    2140  T* Alloc();
    2141  void Free(T* ptr);
    2142 
    2143 private:
    2144  union Item
    2145  {
    2146  uint32_t NextFreeIndex;
    2147  T Value;
    2148  };
    2149 
    2150  struct ItemBlock
    2151  {
    2152  Item* pItems;
    2153  uint32_t FirstFreeIndex;
    2154  };
    2155 
    2156  const VkAllocationCallbacks* m_pAllocationCallbacks;
    2157  size_t m_ItemsPerBlock;
    2158  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    2159 
    2160  ItemBlock& CreateNewBlock();
    2161 };
    2162 
    2163 template<typename T>
    2164 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    2165  m_pAllocationCallbacks(pAllocationCallbacks),
    2166  m_ItemsPerBlock(itemsPerBlock),
    2167  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    2168 {
    2169  VMA_ASSERT(itemsPerBlock > 0);
    2170 }
    2171 
    2172 template<typename T>
    2173 VmaPoolAllocator<T>::~VmaPoolAllocator()
    2174 {
    2175  Clear();
    2176 }
    2177 
    2178 template<typename T>
    2179 void VmaPoolAllocator<T>::Clear()
    2180 {
    2181  for(size_t i = m_ItemBlocks.size(); i--; )
    2182  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    2183  m_ItemBlocks.clear();
    2184 }
    2185 
    2186 template<typename T>
    2187 T* VmaPoolAllocator<T>::Alloc()
    2188 {
    2189  for(size_t i = m_ItemBlocks.size(); i--; )
    2190  {
    2191  ItemBlock& block = m_ItemBlocks[i];
    2192  // This block has some free items: Use first one.
    2193  if(block.FirstFreeIndex != UINT32_MAX)
    2194  {
    2195  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    2196  block.FirstFreeIndex = pItem->NextFreeIndex;
    2197  return &pItem->Value;
    2198  }
    2199  }
    2200 
    2201  // No block has free item: Create new one and use it.
    2202  ItemBlock& newBlock = CreateNewBlock();
    2203  Item* const pItem = &newBlock.pItems[0];
    2204  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    2205  return &pItem->Value;
    2206 }
    2207 
    2208 template<typename T>
    2209 void VmaPoolAllocator<T>::Free(T* ptr)
    2210 {
    2211  // Search all memory blocks to find ptr.
    2212  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    2213  {
    2214  ItemBlock& block = m_ItemBlocks[i];
    2215 
    2216  // Casting to union.
    2217  Item* pItemPtr;
    2218  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    2219 
    2220  // Check if pItemPtr is in address range of this block.
    2221  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    2222  {
    2223  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    2224  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    2225  block.FirstFreeIndex = index;
    2226  return;
    2227  }
    2228  }
    2229  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    2230 }
    2231 
    2232 template<typename T>
    2233 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    2234 {
    2235  ItemBlock newBlock = {
    2236  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    2237 
    2238  m_ItemBlocks.push_back(newBlock);
    2239 
    2240  // Setup singly-linked list of all free items in this block.
    2241  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    2242  newBlock.pItems[i].NextFreeIndex = i + 1;
    2243  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    2244  return m_ItemBlocks.back();
    2245 }
    2246 
    2248 // class VmaRawList, VmaList
    2249 
    2250 #if VMA_USE_STL_LIST
    2251 
    2252 #define VmaList std::list
    2253 
    2254 #else // #if VMA_USE_STL_LIST
    2255 
    2256 template<typename T>
    2257 struct VmaListItem
    2258 {
    2259  VmaListItem* pPrev;
    2260  VmaListItem* pNext;
    2261  T Value;
    2262 };
    2263 
    2264 // Doubly linked list.
    2265 template<typename T>
    2266 class VmaRawList
    2267 {
    2268 public:
    2269  typedef VmaListItem<T> ItemType;
    2270 
    2271  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    2272  ~VmaRawList();
    2273  void Clear();
    2274 
    2275  size_t GetCount() const { return m_Count; }
    2276  bool IsEmpty() const { return m_Count == 0; }
    2277 
    2278  ItemType* Front() { return m_pFront; }
    2279  const ItemType* Front() const { return m_pFront; }
    2280  ItemType* Back() { return m_pBack; }
    2281  const ItemType* Back() const { return m_pBack; }
    2282 
    2283  ItemType* PushBack();
    2284  ItemType* PushFront();
    2285  ItemType* PushBack(const T& value);
    2286  ItemType* PushFront(const T& value);
    2287  void PopBack();
    2288  void PopFront();
    2289 
    2290  // Item can be null - it means PushBack.
    2291  ItemType* InsertBefore(ItemType* pItem);
    2292  // Item can be null - it means PushFront.
    2293  ItemType* InsertAfter(ItemType* pItem);
    2294 
    2295  ItemType* InsertBefore(ItemType* pItem, const T& value);
    2296  ItemType* InsertAfter(ItemType* pItem, const T& value);
    2297 
    2298  void Remove(ItemType* pItem);
    2299 
    2300 private:
    2301  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    2302  VmaPoolAllocator<ItemType> m_ItemAllocator;
    2303  ItemType* m_pFront;
    2304  ItemType* m_pBack;
    2305  size_t m_Count;
    2306 
    2307  // Declared not defined, to block copy constructor and assignment operator.
    2308  VmaRawList(const VmaRawList<T>& src);
    2309  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
    2310 };
    2311 
    2312 template<typename T>
    2313 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    2314  m_pAllocationCallbacks(pAllocationCallbacks),
    2315  m_ItemAllocator(pAllocationCallbacks, 128),
    2316  m_pFront(VMA_NULL),
    2317  m_pBack(VMA_NULL),
    2318  m_Count(0)
    2319 {
    2320 }
    2321 
    2322 template<typename T>
    2323 VmaRawList<T>::~VmaRawList()
    2324 {
    2325  // Intentionally not calling Clear, because that would do unnecessary
    2326  // work to return all items to m_ItemAllocator as free.
    2327 }
    2328 
    2329 template<typename T>
    2330 void VmaRawList<T>::Clear()
    2331 {
    2332  if(IsEmpty() == false)
    2333  {
    2334  ItemType* pItem = m_pBack;
    2335  while(pItem != VMA_NULL)
    2336  {
    2337  ItemType* const pPrevItem = pItem->pPrev;
    2338  m_ItemAllocator.Free(pItem);
    2339  pItem = pPrevItem;
    2340  }
    2341  m_pFront = VMA_NULL;
    2342  m_pBack = VMA_NULL;
    2343  m_Count = 0;
    2344  }
    2345 }
    2346 
    2347 template<typename T>
    2348 VmaListItem<T>* VmaRawList<T>::PushBack()
    2349 {
    2350  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2351  pNewItem->pNext = VMA_NULL;
    2352  if(IsEmpty())
    2353  {
    2354  pNewItem->pPrev = VMA_NULL;
    2355  m_pFront = pNewItem;
    2356  m_pBack = pNewItem;
    2357  m_Count = 1;
    2358  }
    2359  else
    2360  {
    2361  pNewItem->pPrev = m_pBack;
    2362  m_pBack->pNext = pNewItem;
    2363  m_pBack = pNewItem;
    2364  ++m_Count;
    2365  }
    2366  return pNewItem;
    2367 }
    2368 
    2369 template<typename T>
    2370 VmaListItem<T>* VmaRawList<T>::PushFront()
    2371 {
    2372  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2373  pNewItem->pPrev = VMA_NULL;
    2374  if(IsEmpty())
    2375  {
    2376  pNewItem->pNext = VMA_NULL;
    2377  m_pFront = pNewItem;
    2378  m_pBack = pNewItem;
    2379  m_Count = 1;
    2380  }
    2381  else
    2382  {
    2383  pNewItem->pNext = m_pFront;
    2384  m_pFront->pPrev = pNewItem;
    2385  m_pFront = pNewItem;
    2386  ++m_Count;
    2387  }
    2388  return pNewItem;
    2389 }
    2390 
    2391 template<typename T>
    2392 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    2393 {
    2394  ItemType* const pNewItem = PushBack();
    2395  pNewItem->Value = value;
    2396  return pNewItem;
    2397 }
    2398 
    2399 template<typename T>
    2400 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    2401 {
    2402  ItemType* const pNewItem = PushFront();
    2403  pNewItem->Value = value;
    2404  return pNewItem;
    2405 }
    2406 
    2407 template<typename T>
    2408 void VmaRawList<T>::PopBack()
    2409 {
    2410  VMA_HEAVY_ASSERT(m_Count > 0);
    2411  ItemType* const pBackItem = m_pBack;
    2412  ItemType* const pPrevItem = pBackItem->pPrev;
    2413  if(pPrevItem != VMA_NULL)
    2414  {
    2415  pPrevItem->pNext = VMA_NULL;
    2416  }
    2417  m_pBack = pPrevItem;
    2418  m_ItemAllocator.Free(pBackItem);
    2419  --m_Count;
    2420 }
    2421 
    2422 template<typename T>
    2423 void VmaRawList<T>::PopFront()
    2424 {
    2425  VMA_HEAVY_ASSERT(m_Count > 0);
    2426  ItemType* const pFrontItem = m_pFront;
    2427  ItemType* const pNextItem = pFrontItem->pNext;
    2428  if(pNextItem != VMA_NULL)
    2429  {
    2430  pNextItem->pPrev = VMA_NULL;
    2431  }
    2432  m_pFront = pNextItem;
    2433  m_ItemAllocator.Free(pFrontItem);
    2434  --m_Count;
    2435 }
    2436 
    2437 template<typename T>
    2438 void VmaRawList<T>::Remove(ItemType* pItem)
    2439 {
    2440  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    2441  VMA_HEAVY_ASSERT(m_Count > 0);
    2442 
    2443  if(pItem->pPrev != VMA_NULL)
    2444  {
    2445  pItem->pPrev->pNext = pItem->pNext;
    2446  }
    2447  else
    2448  {
    2449  VMA_HEAVY_ASSERT(m_pFront == pItem);
    2450  m_pFront = pItem->pNext;
    2451  }
    2452 
    2453  if(pItem->pNext != VMA_NULL)
    2454  {
    2455  pItem->pNext->pPrev = pItem->pPrev;
    2456  }
    2457  else
    2458  {
    2459  VMA_HEAVY_ASSERT(m_pBack == pItem);
    2460  m_pBack = pItem->pPrev;
    2461  }
    2462 
    2463  m_ItemAllocator.Free(pItem);
    2464  --m_Count;
    2465 }
    2466 
    2467 template<typename T>
    2468 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    2469 {
    2470  if(pItem != VMA_NULL)
    2471  {
    2472  ItemType* const prevItem = pItem->pPrev;
    2473  ItemType* const newItem = m_ItemAllocator.Alloc();
    2474  newItem->pPrev = prevItem;
    2475  newItem->pNext = pItem;
    2476  pItem->pPrev = newItem;
    2477  if(prevItem != VMA_NULL)
    2478  {
    2479  prevItem->pNext = newItem;
    2480  }
    2481  else
    2482  {
    2483  VMA_HEAVY_ASSERT(m_pFront == pItem);
    2484  m_pFront = newItem;
    2485  }
    2486  ++m_Count;
    2487  return newItem;
    2488  }
    2489  else
    2490  return PushBack();
    2491 }
    2492 
    2493 template<typename T>
    2494 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    2495 {
    2496  if(pItem != VMA_NULL)
    2497  {
    2498  ItemType* const nextItem = pItem->pNext;
    2499  ItemType* const newItem = m_ItemAllocator.Alloc();
    2500  newItem->pNext = nextItem;
    2501  newItem->pPrev = pItem;
    2502  pItem->pNext = newItem;
    2503  if(nextItem != VMA_NULL)
    2504  {
    2505  nextItem->pPrev = newItem;
    2506  }
    2507  else
    2508  {
    2509  VMA_HEAVY_ASSERT(m_pBack == pItem);
    2510  m_pBack = newItem;
    2511  }
    2512  ++m_Count;
    2513  return newItem;
    2514  }
    2515  else
    2516  return PushFront();
    2517 }
    2518 
    2519 template<typename T>
    2520 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    2521 {
    2522  ItemType* const newItem = InsertBefore(pItem);
    2523  newItem->Value = value;
    2524  return newItem;
    2525 }
    2526 
    2527 template<typename T>
    2528 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    2529 {
    2530  ItemType* const newItem = InsertAfter(pItem);
    2531  newItem->Value = value;
    2532  return newItem;
    2533 }
    2534 
    2535 template<typename T, typename AllocatorT>
    2536 class VmaList
    2537 {
    2538 public:
    2539  class iterator
    2540  {
    2541  public:
    2542  iterator() :
    2543  m_pList(VMA_NULL),
    2544  m_pItem(VMA_NULL)
    2545  {
    2546  }
    2547 
    2548  T& operator*() const
    2549  {
    2550  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2551  return m_pItem->Value;
    2552  }
    2553  T* operator->() const
    2554  {
    2555  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2556  return &m_pItem->Value;
    2557  }
    2558 
    2559  iterator& operator++()
    2560  {
    2561  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2562  m_pItem = m_pItem->pNext;
    2563  return *this;
    2564  }
    2565  iterator& operator--()
    2566  {
    2567  if(m_pItem != VMA_NULL)
    2568  {
    2569  m_pItem = m_pItem->pPrev;
    2570  }
    2571  else
    2572  {
    2573  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    2574  m_pItem = m_pList->Back();
    2575  }
    2576  return *this;
    2577  }
    2578 
    2579  iterator operator++(int)
    2580  {
    2581  iterator result = *this;
    2582  ++*this;
    2583  return result;
    2584  }
    2585  iterator operator--(int)
    2586  {
    2587  iterator result = *this;
    2588  --*this;
    2589  return result;
    2590  }
    2591 
    2592  bool operator==(const iterator& rhs) const
    2593  {
    2594  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2595  return m_pItem == rhs.m_pItem;
    2596  }
    2597  bool operator!=(const iterator& rhs) const
    2598  {
    2599  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2600  return m_pItem != rhs.m_pItem;
    2601  }
    2602 
    2603  private:
    2604  VmaRawList<T>* m_pList;
    2605  VmaListItem<T>* m_pItem;
    2606 
    2607  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    2608  m_pList(pList),
    2609  m_pItem(pItem)
    2610  {
    2611  }
    2612 
    2613  friend class VmaList<T, AllocatorT>;
    2614  };
    2615 
    2616  class const_iterator
    2617  {
    2618  public:
    2619  const_iterator() :
    2620  m_pList(VMA_NULL),
    2621  m_pItem(VMA_NULL)
    2622  {
    2623  }
    2624 
    2625  const_iterator(const iterator& src) :
    2626  m_pList(src.m_pList),
    2627  m_pItem(src.m_pItem)
    2628  {
    2629  }
    2630 
    2631  const T& operator*() const
    2632  {
    2633  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2634  return m_pItem->Value;
    2635  }
    2636  const T* operator->() const
    2637  {
    2638  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2639  return &m_pItem->Value;
    2640  }
    2641 
    2642  const_iterator& operator++()
    2643  {
    2644  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2645  m_pItem = m_pItem->pNext;
    2646  return *this;
    2647  }
    2648  const_iterator& operator--()
    2649  {
    2650  if(m_pItem != VMA_NULL)
    2651  {
    2652  m_pItem = m_pItem->pPrev;
    2653  }
    2654  else
    2655  {
    2656  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    2657  m_pItem = m_pList->Back();
    2658  }
    2659  return *this;
    2660  }
    2661 
    2662  const_iterator operator++(int)
    2663  {
    2664  const_iterator result = *this;
    2665  ++*this;
    2666  return result;
    2667  }
    2668  const_iterator operator--(int)
    2669  {
    2670  const_iterator result = *this;
    2671  --*this;
    2672  return result;
    2673  }
    2674 
    2675  bool operator==(const const_iterator& rhs) const
    2676  {
    2677  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2678  return m_pItem == rhs.m_pItem;
    2679  }
    2680  bool operator!=(const const_iterator& rhs) const
    2681  {
    2682  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2683  return m_pItem != rhs.m_pItem;
    2684  }
    2685 
    2686  private:
    2687  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    2688  m_pList(pList),
    2689  m_pItem(pItem)
    2690  {
    2691  }
    2692 
    2693  const VmaRawList<T>* m_pList;
    2694  const VmaListItem<T>* m_pItem;
    2695 
    2696  friend class VmaList<T, AllocatorT>;
    2697  };
    2698 
    2699  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    2700 
    2701  bool empty() const { return m_RawList.IsEmpty(); }
    2702  size_t size() const { return m_RawList.GetCount(); }
    2703 
    2704  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    2705  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    2706 
    2707  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    2708  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    2709 
    2710  void clear() { m_RawList.Clear(); }
    2711  void push_back(const T& value) { m_RawList.PushBack(value); }
    2712  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    2713  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    2714 
    2715 private:
    2716  VmaRawList<T> m_RawList;
    2717 };
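// Illustrative sketch (not part of the original source): VmaList mirrors the
// std::list surface this file relies on; the callbacks pointer is assumed to
// come from the caller.
static void VmaListUsageExample(const VkAllocationCallbacks* pCallbacks)
{
    typedef VmaList<int, VmaStlAllocator<int> > IntList;
    IntList list((VmaStlAllocator<int>(pCallbacks)));
    list.push_back(7);
    IntList::iterator it = list.insert(list.begin(), 5); // Insert 5 before 7.
    list.erase(it);                                      // Back to just {7}.
    size_t count = 0;
    for(it = list.begin(); it != list.end(); ++it)
    {
        ++count; // Traverses the underlying doubly-linked VmaRawList.
    }
    VMA_ASSERT(count == 1);
    list.clear();
}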
    2718 
    2719 #endif // #if VMA_USE_STL_LIST
    2720 
    2722 // class VmaMap
    2723 
    2724 // Unused in this version.
    2725 #if 0
    2726 
    2727 #if VMA_USE_STL_UNORDERED_MAP
    2728 
    2729 #define VmaPair std::pair
    2730 
    2731 #define VMA_MAP_TYPE(KeyT, ValueT) \
    2732  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    2733 
    2734 #else // #if VMA_USE_STL_UNORDERED_MAP
    2735 
    2736 template<typename T1, typename T2>
    2737 struct VmaPair
    2738 {
    2739  T1 first;
    2740  T2 second;
    2741 
    2742  VmaPair() : first(), second() { }
    2743  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    2744 };
    2745 
2746 /* Class compatible with a subset of the interface of std::unordered_map.
2747 KeyT and ValueT must be POD because they are stored in a VmaVector.
2748 */
    2749 template<typename KeyT, typename ValueT>
    2750 class VmaMap
    2751 {
    2752 public:
    2753  typedef VmaPair<KeyT, ValueT> PairType;
    2754  typedef PairType* iterator;
    2755 
    2756  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    2757 
    2758  iterator begin() { return m_Vector.begin(); }
    2759  iterator end() { return m_Vector.end(); }
    2760 
    2761  void insert(const PairType& pair);
    2762  iterator find(const KeyT& key);
    2763  void erase(iterator it);
    2764 
    2765 private:
    2766  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    2767 };
    2768 
    2769 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    2770 
    2771 template<typename FirstT, typename SecondT>
    2772 struct VmaPairFirstLess
    2773 {
    2774  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    2775  {
    2776  return lhs.first < rhs.first;
    2777  }
    2778  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    2779  {
    2780  return lhs.first < rhsFirst;
    2781  }
    2782 };
    2783 
    2784 template<typename KeyT, typename ValueT>
    2785 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    2786 {
    2787  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    2788  m_Vector.data(),
    2789  m_Vector.data() + m_Vector.size(),
    2790  pair,
    2791  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    2792  VmaVectorInsert(m_Vector, indexToInsert, pair);
    2793 }
    2794 
    2795 template<typename KeyT, typename ValueT>
    2796 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    2797 {
    2798  PairType* it = VmaBinaryFindFirstNotLess(
    2799  m_Vector.data(),
    2800  m_Vector.data() + m_Vector.size(),
    2801  key,
    2802  VmaPairFirstLess<KeyT, ValueT>());
    2803  if((it != m_Vector.end()) && (it->first == key))
    2804  {
    2805  return it;
    2806  }
    2807  else
    2808  {
    2809  return m_Vector.end();
    2810  }
    2811 }
    2812 
    2813 template<typename KeyT, typename ValueT>
    2814 void VmaMap<KeyT, ValueT>::erase(iterator it)
    2815 {
    2816  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    2817 }
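// Illustrative sketch (not part of the original source): how this sorted-vector
// map would be used, were it enabled. find() is a binary search because
// insert() keeps the vector sorted by key.
static void VmaMapUsageExample(const VkAllocationCallbacks* pCallbacks)
{
    VmaMap<uint32_t, float> map((VmaStlAllocator<VmaPair<uint32_t, float> >(pCallbacks)));
    map.insert(VmaPair<uint32_t, float>(7, 1.5f));
    VmaMap<uint32_t, float>::iterator it = map.find(7);
    if(it != map.end())
    {
        it->second = 2.0f; // Pairs are mutable in place, like std::unordered_map.
        map.erase(it);
    }
}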
    2818 
    2819 #endif // #if VMA_USE_STL_UNORDERED_MAP
    2820 
    2821 #endif // #if 0
    2822 
    2824 
    2825 class VmaDeviceMemoryBlock;
    2826 
    2827 enum VMA_BLOCK_VECTOR_TYPE
    2828 {
    2829  VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    2830  VMA_BLOCK_VECTOR_TYPE_MAPPED,
    2831  VMA_BLOCK_VECTOR_TYPE_COUNT
    2832 };
    2833 
    2834 static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
    2835 {
    2836  return (flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 ?
    2837  VMA_BLOCK_VECTOR_TYPE_MAPPED :
    2838  VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
    2839 }
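// Illustrative self-check (not part of the original source): the mapping above
// is a pure function of the PERSISTENT_MAP bit - persistently mapped
// allocations go to the MAPPED vector, everything else to UNMAPPED.
static inline void VmaBlockVectorTypeExample()
{
    VMA_ASSERT(VmaAllocationCreateFlagsToBlockVectorType(0) ==
        VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
    VMA_ASSERT(VmaAllocationCreateFlagsToBlockVectorType(
        VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) == VMA_BLOCK_VECTOR_TYPE_MAPPED);
}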
    2840 
    2841 struct VmaAllocation_T
    2842 {
    2843 public:
    2844  enum ALLOCATION_TYPE
    2845  {
    2846  ALLOCATION_TYPE_NONE,
    2847  ALLOCATION_TYPE_BLOCK,
    2848  ALLOCATION_TYPE_DEDICATED,
    2849  };
    2850 
    2851  VmaAllocation_T(uint32_t currentFrameIndex) :
    2852  m_Alignment(1),
    2853  m_Size(0),
    2854  m_pUserData(VMA_NULL),
    2855  m_Type(ALLOCATION_TYPE_NONE),
    2856  m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
    2857  m_LastUseFrameIndex(currentFrameIndex)
    2858  {
    2859  }
    2860 
    2861  void InitBlockAllocation(
    2862  VmaPool hPool,
    2863  VmaDeviceMemoryBlock* block,
    2864  VkDeviceSize offset,
    2865  VkDeviceSize alignment,
    2866  VkDeviceSize size,
    2867  VmaSuballocationType suballocationType,
    2868  void* pUserData,
    2869  bool canBecomeLost)
    2870  {
    2871  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    2872  VMA_ASSERT(block != VMA_NULL);
    2873  m_Type = ALLOCATION_TYPE_BLOCK;
    2874  m_Alignment = alignment;
    2875  m_Size = size;
    2876  m_pUserData = pUserData;
    2877  m_SuballocationType = suballocationType;
    2878  m_BlockAllocation.m_hPool = hPool;
    2879  m_BlockAllocation.m_Block = block;
    2880  m_BlockAllocation.m_Offset = offset;
    2881  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    2882  }
    2883 
    2884  void InitLost()
    2885  {
    2886  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    2887  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    2888  m_Type = ALLOCATION_TYPE_BLOCK;
    2889  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
    2890  m_BlockAllocation.m_Block = VMA_NULL;
    2891  m_BlockAllocation.m_Offset = 0;
    2892  m_BlockAllocation.m_CanBecomeLost = true;
    2893  }
    2894 
    2895  void ChangeBlockAllocation(
    2896  VmaDeviceMemoryBlock* block,
    2897  VkDeviceSize offset)
    2898  {
    2899  VMA_ASSERT(block != VMA_NULL);
    2900  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    2901  m_BlockAllocation.m_Block = block;
    2902  m_BlockAllocation.m_Offset = offset;
    2903  }
    2904 
    2905  void InitDedicatedAllocation(
    2906  uint32_t memoryTypeIndex,
    2907  VkDeviceMemory hMemory,
    2908  VmaSuballocationType suballocationType,
    2909  bool persistentMap,
    2910  void* pMappedData,
    2911  VkDeviceSize size,
    2912  void* pUserData)
    2913  {
    2914  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    2915  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    2916  m_Type = ALLOCATION_TYPE_DEDICATED;
    2917  m_Alignment = 0;
    2918  m_Size = size;
    2919  m_pUserData = pUserData;
    2920  m_SuballocationType = suballocationType;
    2921  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    2922  m_DedicatedAllocation.m_hMemory = hMemory;
    2923  m_DedicatedAllocation.m_PersistentMap = persistentMap;
    2924  m_DedicatedAllocation.m_pMappedData = pMappedData;
    2925  }
    2926 
    2927  ALLOCATION_TYPE GetType() const { return m_Type; }
    2928  VkDeviceSize GetAlignment() const { return m_Alignment; }
    2929  VkDeviceSize GetSize() const { return m_Size; }
    2930  void* GetUserData() const { return m_pUserData; }
    2931  void SetUserData(void* pUserData) { m_pUserData = pUserData; }
    2932  VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }
    2933 
    2934  VmaDeviceMemoryBlock* GetBlock() const
    2935  {
    2936  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    2937  return m_BlockAllocation.m_Block;
    2938  }
    2939  VkDeviceSize GetOffset() const;
    2940  VkDeviceMemory GetMemory() const;
    2941  uint32_t GetMemoryTypeIndex() const;
    2942  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
    2943  void* GetMappedData() const;
    2944  bool CanBecomeLost() const;
    2945  VmaPool GetPool() const;
    2946 
    2947  VkResult DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
    2948  void DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);
    2949 
    2950  uint32_t GetLastUseFrameIndex() const
    2951  {
    2952  return m_LastUseFrameIndex.load();
    2953  }
    2954  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    2955  {
    2956  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    2957  }
    2958  /*
    2959  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    2960  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    2961  - Else, returns false.
    2962 
    2963  If hAllocation is already lost, assert - you should not call it then.
    2964  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    2965  */
    2966  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    2967 
    2968  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    2969  {
    2970  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    2971  outInfo.blockCount = 1;
    2972  outInfo.allocationCount = 1;
    2973  outInfo.unusedRangeCount = 0;
    2974  outInfo.usedBytes = m_Size;
    2975  outInfo.unusedBytes = 0;
    2976  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    2977  outInfo.unusedRangeSizeMin = UINT64_MAX;
    2978  outInfo.unusedRangeSizeMax = 0;
    2979  }
    2980 
    2981 private:
    2982  VkDeviceSize m_Alignment;
    2983  VkDeviceSize m_Size;
    2984  void* m_pUserData;
    2985  ALLOCATION_TYPE m_Type;
    2986  VmaSuballocationType m_SuballocationType;
    2987  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    2988 
    2989  // Allocation out of VmaDeviceMemoryBlock.
    2990  struct BlockAllocation
    2991  {
    2992  VmaPool m_hPool; // Null if belongs to general memory.
    2993  VmaDeviceMemoryBlock* m_Block;
    2994  VkDeviceSize m_Offset;
    2995  bool m_CanBecomeLost;
    2996  };
    2997 
    2998  // Allocation for an object that has its own private VkDeviceMemory.
    2999  struct DedicatedAllocation
    3000  {
    3001  uint32_t m_MemoryTypeIndex;
    3002  VkDeviceMemory m_hMemory;
    3003  bool m_PersistentMap;
    3004  void* m_pMappedData;
    3005  };
    3006 
    3007  union
    3008  {
    3009  // Allocation out of VmaDeviceMemoryBlock.
    3010  BlockAllocation m_BlockAllocation;
    3011  // Allocation for an object that has its own private VkDeviceMemory.
    3012  DedicatedAllocation m_DedicatedAllocation;
    3013  };
    3014 };
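// Illustrative sketch (not part of the original source): m_Type discriminates
// the union above, so callers branch on GetType() before interpreting
// type-specific state. An offset is only meaningful for block allocations;
// dedicated memory starts at offset 0 by definition.
static VkDeviceSize VmaAllocationOffsetOrZeroExample(const VmaAllocation_T& alloc)
{
    return (alloc.GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) ?
        alloc.GetOffset() : // Offset inside the owning VkDeviceMemory block.
        0;
}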
    3015 
3016 /*
3017 Represents a region of a VmaDeviceMemoryBlock that is either assigned to an
3018 allocation and returned as allocated memory, or free.
3019 */
    3020 struct VmaSuballocation
    3021 {
    3022  VkDeviceSize offset;
    3023  VkDeviceSize size;
    3024  VmaAllocation hAllocation;
    3025  VmaSuballocationType type;
    3026 };
    3027 
    3028 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    3029 
3030 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
    3031 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    3032 
    3033 /*
    3034 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    3035 
    3036 If canMakeOtherLost was false:
    3037 - item points to a FREE suballocation.
    3038 - itemsToMakeLostCount is 0.
    3039 
    3040 If canMakeOtherLost was true:
3041 - item points to the first of a sequence of suballocations, which are either FREE,
    3042  or point to VmaAllocations that can become lost.
    3043 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    3044  the requested allocation to succeed.
    3045 */
    3046 struct VmaAllocationRequest
    3047 {
    3048  VkDeviceSize offset;
    3049  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    3050  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    3051  VmaSuballocationList::iterator item;
    3052  size_t itemsToMakeLostCount;
    3053 
    3054  VkDeviceSize CalcCost() const
    3055  {
    3056  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    3057  }
    3058 };
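// Worked example (a sketch, not part of the original source): with
// VMA_LOST_ALLOCATION_COST = 1 MiB, a request that overlaps 3 MiB of live
// allocations and needs 2 of them to become lost costs
// 3 MiB + 2 * 1 MiB = 5 MiB, so a request overlapping only free space
// (cost 0) always wins the comparison.
static VkDeviceSize VmaAllocationRequestCostExample()
{
    VmaAllocationRequest request = {};
    request.sumItemSize = 3ull * 1024 * 1024; // Live bytes that would be lost.
    request.itemsToMakeLostCount = 2;         // Allocations that must become lost.
    return request.CalcCost();                // 5 MiB in this scenario.
}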
    3059 
    3060 /*
    3061 Data structure used for bookkeeping of allocations and unused ranges of memory
    3062 in a single VkDeviceMemory block.
    3063 */
    3064 class VmaBlockMetadata
    3065 {
    3066 public:
    3067  VmaBlockMetadata(VmaAllocator hAllocator);
    3068  ~VmaBlockMetadata();
    3069  void Init(VkDeviceSize size);
    3070 
    3071  // Validates all data structures inside this object. If not valid, returns false.
    3072  bool Validate() const;
    3073  VkDeviceSize GetSize() const { return m_Size; }
    3074  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    3075  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    3076  VkDeviceSize GetUnusedRangeSizeMax() const;
3077  // Returns true if this block is empty - contains only a single free suballocation.
    3078  bool IsEmpty() const;
    3079 
    3080  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    3081  void AddPoolStats(VmaPoolStats& inoutStats) const;
    3082 
    3083 #if VMA_STATS_STRING_ENABLED
    3084  void PrintDetailedMap(class VmaJsonWriter& json) const;
    3085 #endif
    3086 
3087  // Creates a trivial request for the case when the block is empty.
    3088  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
    3089 
    3090  // Tries to find a place for suballocation with given parameters inside this block.
    3091  // If succeeded, fills pAllocationRequest and returns true.
    3092  // If failed, returns false.
    3093  bool CreateAllocationRequest(
    3094  uint32_t currentFrameIndex,
    3095  uint32_t frameInUseCount,
    3096  VkDeviceSize bufferImageGranularity,
    3097  VkDeviceSize allocSize,
    3098  VkDeviceSize allocAlignment,
    3099  VmaSuballocationType allocType,
    3100  bool canMakeOtherLost,
    3101  VmaAllocationRequest* pAllocationRequest);
    3102 
    3103  bool MakeRequestedAllocationsLost(
    3104  uint32_t currentFrameIndex,
    3105  uint32_t frameInUseCount,
    3106  VmaAllocationRequest* pAllocationRequest);
    3107 
    3108  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    3109 
    3110  // Makes actual allocation based on request. Request must already be checked and valid.
    3111  void Alloc(
    3112  const VmaAllocationRequest& request,
    3113  VmaSuballocationType type,
    3114  VkDeviceSize allocSize,
    3115  VmaAllocation hAllocation);
    3116 
    3117  // Frees suballocation assigned to given memory region.
    3118  void Free(const VmaAllocation allocation);
    3119 
    3120 private:
    3121  VkDeviceSize m_Size;
    3122  uint32_t m_FreeCount;
    3123  VkDeviceSize m_SumFreeSize;
    3124  VmaSuballocationList m_Suballocations;
3125  // Suballocations that are free and have size greater than a certain threshold.
    3126  // Sorted by size, ascending.
    3127  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    3128 
    3129  bool ValidateFreeSuballocationList() const;
    3130 
3131  // Checks if a requested suballocation with the given parameters can be placed at the given suballocItem.
    3132  // If yes, fills pOffset and returns true. If no, returns false.
    3133  bool CheckAllocation(
    3134  uint32_t currentFrameIndex,
    3135  uint32_t frameInUseCount,
    3136  VkDeviceSize bufferImageGranularity,
    3137  VkDeviceSize allocSize,
    3138  VkDeviceSize allocAlignment,
    3139  VmaSuballocationType allocType,
    3140  VmaSuballocationList::const_iterator suballocItem,
    3141  bool canMakeOtherLost,
    3142  VkDeviceSize* pOffset,
    3143  size_t* itemsToMakeLostCount,
    3144  VkDeviceSize* pSumFreeSize,
    3145  VkDeviceSize* pSumItemSize) const;
3146  // Given a free suballocation, merges it with the following one, which must also be free.
    3147  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    3148  // Releases given suballocation, making it free.
    3149  // Merges it with adjacent free suballocations if applicable.
    3150  // Returns iterator to new free suballocation at this place.
    3151  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3152  // Given a free suballocation, inserts it into the sorted list
3153  // m_FreeSuballocationsBySize if it is suitable.
    3154  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3155  // Given a free suballocation, removes it from the sorted list
3156  // m_FreeSuballocationsBySize if it is suitable.
    3157  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    3158 };
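// Illustrative sketch (not part of the original source) of the intended call
// sequence on a single block's metadata; all parameters are assumed to come
// from the caller, and canMakeOtherLost is fixed to true for the example.
static bool VmaBlockMetadataAllocExample(
    VmaBlockMetadata& metadata,
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaAllocation hAllocation)
{
    VmaAllocationRequest request;
    // Step 1: search the block for a spot, possibly planning to evict
    // allocations that can become lost.
    if(!metadata.CreateAllocationRequest(currentFrameIndex, frameInUseCount,
        bufferImageGranularity, allocSize, allocAlignment, allocType,
        true /* canMakeOtherLost */, &request))
    {
        return false;
    }
    // Step 2: actually mark the planned victims as lost.
    if(!metadata.MakeRequestedAllocationsLost(currentFrameIndex, frameInUseCount, &request))
    {
        return false;
    }
    // Step 3: commit the request, splitting the chosen free suballocation.
    metadata.Alloc(request, allocType, allocSize, hAllocation);
    return true;
}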
    3159 
    3160 /*
    3161 Represents a single block of device memory (`VkDeviceMemory`) with all the
    3162 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
    3163 
    3164 Thread-safety: This class must be externally synchronized.
    3165 */
    3166 class VmaDeviceMemoryBlock
    3167 {
    3168 public:
    3169  uint32_t m_MemoryTypeIndex;
    3170  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    3171  VkDeviceMemory m_hMemory;
    3172  bool m_PersistentMap;
    3173  void* m_pMappedData;
    3174  VmaBlockMetadata m_Metadata;
    3175 
    3176  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    3177 
    3178  ~VmaDeviceMemoryBlock()
    3179  {
    3180  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    3181  }
    3182 
    3183  // Always call after construction.
    3184  void Init(
    3185  uint32_t newMemoryTypeIndex,
    3186  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
    3187  VkDeviceMemory newMemory,
    3188  VkDeviceSize newSize,
    3189  bool persistentMap,
    3190  void* pMappedData);
    3191  // Always call before destruction.
    3192  void Destroy(VmaAllocator allocator);
    3193 
    3194  // Validates all data structures inside this object. If not valid, returns false.
    3195  bool Validate() const;
    3196 };
    3197 
    3198 struct VmaPointerLess
    3199 {
    3200  bool operator()(const void* lhs, const void* rhs) const
    3201  {
    3202  return lhs < rhs;
    3203  }
    3204 };
    3205 
    3206 class VmaDefragmentator;
    3207 
    3208 /*
    3209 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    3210 Vulkan memory type.
    3211 
    3212 Synchronized internally with a mutex.
    3213 */
    3214 struct VmaBlockVector
    3215 {
    3216  VmaBlockVector(
    3217  VmaAllocator hAllocator,
    3218  uint32_t memoryTypeIndex,
    3219  VMA_BLOCK_VECTOR_TYPE blockVectorType,
    3220  VkDeviceSize preferredBlockSize,
    3221  size_t minBlockCount,
    3222  size_t maxBlockCount,
    3223  VkDeviceSize bufferImageGranularity,
    3224  uint32_t frameInUseCount,
    3225  bool isCustomPool);
    3226  ~VmaBlockVector();
    3227 
    3228  VkResult CreateMinBlocks();
    3229 
    3230  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    3231  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    3232  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    3233  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    3234  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }
    3235 
    3236  void GetPoolStats(VmaPoolStats* pStats);
    3237 
    3238  bool IsEmpty() const { return m_Blocks.empty(); }
    3239 
    3240  VkResult Allocate(
    3241  VmaPool hCurrentPool,
    3242  uint32_t currentFrameIndex,
    3243  const VkMemoryRequirements& vkMemReq,
    3244  const VmaAllocationCreateInfo& createInfo,
    3245  VmaSuballocationType suballocType,
    3246  VmaAllocation* pAllocation);
    3247 
    3248  void Free(
    3249  VmaAllocation hAllocation);
    3250 
    3251  // Adds statistics of this BlockVector to pStats.
    3252  void AddStats(VmaStats* pStats);
    3253 
    3254 #if VMA_STATS_STRING_ENABLED
    3255  void PrintDetailedMap(class VmaJsonWriter& json);
    3256 #endif
    3257 
    3258  void UnmapPersistentlyMappedMemory();
    3259  VkResult MapPersistentlyMappedMemory();
    3260 
    3261  void MakePoolAllocationsLost(
    3262  uint32_t currentFrameIndex,
    3263  size_t* pLostAllocationCount);
    3264 
    3265  VmaDefragmentator* EnsureDefragmentator(
    3266  VmaAllocator hAllocator,
    3267  uint32_t currentFrameIndex);
    3268 
    3269  VkResult Defragment(
    3270  VmaDefragmentationStats* pDefragmentationStats,
    3271  VkDeviceSize& maxBytesToMove,
    3272  uint32_t& maxAllocationsToMove);
    3273 
    3274  void DestroyDefragmentator();
    3275 
    3276 private:
    3277  friend class VmaDefragmentator;
    3278 
    3279  const VmaAllocator m_hAllocator;
    3280  const uint32_t m_MemoryTypeIndex;
    3281  const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    3282  const VkDeviceSize m_PreferredBlockSize;
    3283  const size_t m_MinBlockCount;
    3284  const size_t m_MaxBlockCount;
    3285  const VkDeviceSize m_BufferImageGranularity;
    3286  const uint32_t m_FrameInUseCount;
    3287  const bool m_IsCustomPool;
    3288  VMA_MUTEX m_Mutex;
    3289  // Incrementally sorted by sumFreeSize, ascending.
    3290  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3291  /* There can be at most one block that is completely empty - a
3292  hysteresis to avoid the pessimistic case of alternately creating and destroying
3293  a VkDeviceMemory. */
    3294  bool m_HasEmptyBlock;
    3295  VmaDefragmentator* m_pDefragmentator;
    3296 
    3297  // Finds and removes given block from vector.
    3298  void Remove(VmaDeviceMemoryBlock* pBlock);
    3299 
3300  // Performs a single step in sorting m_Blocks. They may not be fully sorted
3301  // after this call.
    3302  void IncrementallySortBlocks();
    3303 
    3304  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    3305 };
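// Illustrative sketch (not part of the original source): VmaBlockVector is
// internally synchronized, so a caller only supplies per-request data. The
// parameter values chosen here are assumptions of the example.
static VkResult VmaBlockVectorAllocateExample(
    VmaBlockVector& blockVector,
    uint32_t currentFrameIndex,
    const VkMemoryRequirements& memReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation* pAllocation)
{
    return blockVector.Allocate(
        VK_NULL_HANDLE, // hCurrentPool: null when allocating from a default pool.
        currentFrameIndex,
        memReq,
        createInfo,
        VMA_SUBALLOCATION_TYPE_BUFFER, // Assumed: the request is for a buffer.
        pAllocation);
}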
    3306 
    3307 struct VmaPool_T
    3308 {
    3309 public:
    3310  VmaBlockVector m_BlockVector;
    3311 
    3312  // Takes ownership.
    3313  VmaPool_T(
    3314  VmaAllocator hAllocator,
    3315  const VmaPoolCreateInfo& createInfo);
    3316  ~VmaPool_T();
    3317 
    3318  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
    3319 
    3320 #if VMA_STATS_STRING_ENABLED
    3321  //void PrintDetailedMap(class VmaStringBuilder& sb);
    3322 #endif
    3323 };
    3324 
    3325 class VmaDefragmentator
    3326 {
    3327  const VmaAllocator m_hAllocator;
    3328  VmaBlockVector* const m_pBlockVector;
    3329  uint32_t m_CurrentFrameIndex;
    3330  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    3331  VkDeviceSize m_BytesMoved;
    3332  uint32_t m_AllocationsMoved;
    3333 
    3334  struct AllocationInfo
    3335  {
    3336  VmaAllocation m_hAllocation;
    3337  VkBool32* m_pChanged;
    3338 
    3339  AllocationInfo() :
    3340  m_hAllocation(VK_NULL_HANDLE),
    3341  m_pChanged(VMA_NULL)
    3342  {
    3343  }
    3344  };
    3345 
    3346  struct AllocationInfoSizeGreater
    3347  {
    3348  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    3349  {
    3350  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    3351  }
    3352  };
    3353 
    3354  // Used between AddAllocation and Defragment.
    3355  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    3356 
    3357  struct BlockInfo
    3358  {
    3359  VmaDeviceMemoryBlock* m_pBlock;
    3360  bool m_HasNonMovableAllocations;
    3361  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    3362 
    3363  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    3364  m_pBlock(VMA_NULL),
    3365  m_HasNonMovableAllocations(true),
    3366  m_Allocations(pAllocationCallbacks),
    3367  m_pMappedDataForDefragmentation(VMA_NULL)
    3368  {
    3369  }
    3370 
    3371  void CalcHasNonMovableAllocations()
    3372  {
    3373  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
    3374  const size_t defragmentAllocCount = m_Allocations.size();
    3375  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    3376  }
    3377 
    3378  void SortAllocationsBySizeDescecnding()
    3379  {
    3380  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    3381  }
    3382 
    3383  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
    3384  void Unmap(VmaAllocator hAllocator);
    3385 
    3386  private:
    3387  // Not null if mapped for defragmentation only, not persistently mapped.
    3388  void* m_pMappedDataForDefragmentation;
    3389  };
    3390 
    3391  struct BlockPointerLess
    3392  {
    3393  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    3394  {
    3395  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    3396  }
    3397  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    3398  {
    3399  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    3400  }
    3401  };
    3402 
    3403  // 1. Blocks with some non-movable allocations go first.
    3404  // 2. Blocks with smaller sumFreeSize go first.
    3405  struct BlockInfoCompareMoveDestination
    3406  {
    3407  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    3408  {
    3409  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    3410  {
    3411  return true;
    3412  }
    3413  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    3414  {
    3415  return false;
    3416  }
    3417  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
    3418  {
    3419  return true;
    3420  }
    3421  return false;
    3422  }
    3423  };
    3424 
    3425  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    3426  BlockInfoVector m_Blocks;
    3427 
    3428  VkResult DefragmentRound(
    3429  VkDeviceSize maxBytesToMove,
    3430  uint32_t maxAllocationsToMove);
    3431 
    3432  static bool MoveMakesSense(
    3433  size_t dstBlockIndex, VkDeviceSize dstOffset,
    3434  size_t srcBlockIndex, VkDeviceSize srcOffset);
    3435 
    3436 public:
    3437  VmaDefragmentator(
    3438  VmaAllocator hAllocator,
    3439  VmaBlockVector* pBlockVector,
    3440  uint32_t currentFrameIndex);
    3441 
    3442  ~VmaDefragmentator();
    3443 
    3444  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    3445  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    3446 
    3447  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    3448 
    3449  VkResult Defragment(
    3450  VkDeviceSize maxBytesToMove,
    3451  uint32_t maxAllocationsToMove);
    3452 };
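// Illustrative sketch (not part of the original source): candidate allocations
// are registered first, then a single call moves as much as the given budgets
// allow; unlimited budgets are an assumption of this example.
static VkResult VmaDefragmentatorExample(
    VmaDefragmentator& defragmentator,
    VmaAllocation hAlloc,
    VkBool32* pChanged)
{
    defragmentator.AddAllocation(hAlloc, pChanged); // Mark as movable.
    // GetBytesMoved()/GetAllocationsMoved() report the results afterwards.
    return defragmentator.Defragment(VK_WHOLE_SIZE, UINT32_MAX);
}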
    3453 
    3454 // Main allocator object.
    3455 struct VmaAllocator_T
    3456 {
    3457  bool m_UseMutex;
    3458  bool m_UseKhrDedicatedAllocation;
    3459  VkDevice m_hDevice;
    3460  bool m_AllocationCallbacksSpecified;
    3461  VkAllocationCallbacks m_AllocationCallbacks;
    3462  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    3463  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
    3464  // Counter to allow nested calls to these functions.
    3465  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
    3466 
3467  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
    3468  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    3469  VMA_MUTEX m_HeapSizeLimitMutex;
    3470 
    3471  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    3472  VkPhysicalDeviceMemoryProperties m_MemProps;
    3473 
    3474  // Default pools.
    3475  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
    3476 
    3477  // Each vector is sorted by memory (handle value).
    3478  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    3479  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
    3480  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    3481 
    3482  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    3483  ~VmaAllocator_T();
    3484 
    3485  const VkAllocationCallbacks* GetAllocationCallbacks() const
    3486  {
    3487  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    3488  }
    3489  const VmaVulkanFunctions& GetVulkanFunctions() const
    3490  {
    3491  return m_VulkanFunctions;
    3492  }
    3493 
    3494  VkDeviceSize GetBufferImageGranularity() const
    3495  {
    3496  return VMA_MAX(
    3497  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    3498  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    3499  }
    3500 
    3501  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    3502  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    3503 
    3504  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    3505  {
    3506  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    3507  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    3508  }
    3509 
    3510  void GetBufferMemoryRequirements(
    3511  VkBuffer hBuffer,
    3512  VkMemoryRequirements& memReq,
    3513  bool& requiresDedicatedAllocation,
    3514  bool& prefersDedicatedAllocation) const;
    3515  void GetImageMemoryRequirements(
    3516  VkImage hImage,
    3517  VkMemoryRequirements& memReq,
    3518  bool& requiresDedicatedAllocation,
    3519  bool& prefersDedicatedAllocation) const;
    3520 
    3521  // Main allocation function.
    3522  VkResult AllocateMemory(
    3523  const VkMemoryRequirements& vkMemReq,
    3524  bool requiresDedicatedAllocation,
    3525  bool prefersDedicatedAllocation,
    3526  VkBuffer dedicatedBuffer,
    3527  VkImage dedicatedImage,
    3528  const VmaAllocationCreateInfo& createInfo,
    3529  VmaSuballocationType suballocType,
    3530  VmaAllocation* pAllocation);
    3531 
    3532  // Main deallocation function.
    3533  void FreeMemory(const VmaAllocation allocation);
    3534 
    3535  void CalculateStats(VmaStats* pStats);
    3536 
    3537 #if VMA_STATS_STRING_ENABLED
    3538  void PrintDetailedMap(class VmaJsonWriter& json);
    3539 #endif
    3540 
    3541  void UnmapPersistentlyMappedMemory();
    3542  VkResult MapPersistentlyMappedMemory();
    3543 
    3544  VkResult Defragment(
    3545  VmaAllocation* pAllocations,
    3546  size_t allocationCount,
    3547  VkBool32* pAllocationsChanged,
    3548  const VmaDefragmentationInfo* pDefragmentationInfo,
    3549  VmaDefragmentationStats* pDefragmentationStats);
    3550 
    3551  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    3552 
    3553  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    3554  void DestroyPool(VmaPool pool);
    3555  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    3556 
    3557  void SetCurrentFrameIndex(uint32_t frameIndex);
    3558 
    3559  void MakePoolAllocationsLost(
    3560  VmaPool hPool,
    3561  size_t* pLostAllocationCount);
    3562 
    3563  void CreateLostAllocation(VmaAllocation* pAllocation);
    3564 
    3565  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    3566  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    3567 
    3568 private:
    3569  VkDeviceSize m_PreferredLargeHeapBlockSize;
    3570  VkDeviceSize m_PreferredSmallHeapBlockSize;
    3571 
    3572  VkPhysicalDevice m_PhysicalDevice;
    3573  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    3574 
    3575  VMA_MUTEX m_PoolsMutex;
    3576  // Protected by m_PoolsMutex. Sorted by pointer value.
    3577  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    3578 
    3579  VmaVulkanFunctions m_VulkanFunctions;
    3580 
    3581  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    3582 
    3583  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    3584 
    3585  VkResult AllocateMemoryOfType(
    3586  const VkMemoryRequirements& vkMemReq,
    3587  bool dedicatedAllocation,
    3588  VkBuffer dedicatedBuffer,
    3589  VkImage dedicatedImage,
    3590  const VmaAllocationCreateInfo& createInfo,
    3591  uint32_t memTypeIndex,
    3592  VmaSuballocationType suballocType,
    3593  VmaAllocation* pAllocation);
    3594 
3595  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
    3596  VkResult AllocateDedicatedMemory(
    3597  VkDeviceSize size,
    3598  VmaSuballocationType suballocType,
    3599  uint32_t memTypeIndex,
    3600  bool map,
    3601  void* pUserData,
    3602  VkBuffer dedicatedBuffer,
    3603  VkImage dedicatedImage,
    3604  VmaAllocation* pAllocation);
    3605 
3606  // Frees the given allocation, which must have been created as dedicated memory: destroys its VkDeviceMemory and unregisters it.
    3607  void FreeDedicatedMemory(VmaAllocation allocation);
    3608 };
    3609 
    3611 // Memory allocation #2 after VmaAllocator_T definition
    3612 
    3613 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    3614 {
    3615  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    3616 }
    3617 
    3618 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    3619 {
    3620  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    3621 }
    3622 
    3623 template<typename T>
    3624 static T* VmaAllocate(VmaAllocator hAllocator)
    3625 {
    3626  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    3627 }
    3628 
    3629 template<typename T>
    3630 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    3631 {
    3632  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    3633 }
    3634 
    3635 template<typename T>
    3636 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    3637 {
    3638  if(ptr != VMA_NULL)
    3639  {
    3640  ptr->~T();
    3641  VmaFree(hAllocator, ptr);
    3642  }
    3643 }
    3644 
    3645 template<typename T>
    3646 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    3647 {
    3648  if(ptr != VMA_NULL)
    3649  {
    3650  for(size_t i = count; i--; )
    3651  ptr[i].~T();
    3652  VmaFree(hAllocator, ptr);
    3653  }
    3654 }
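// Illustrative sketch (not part of the original source): VmaAllocateArray and
// vma_delete_array pair like new[]/delete[], but route through the allocator's
// VkAllocationCallbacks and run destructors explicitly.
static void VmaAllocateDeleteExample(VmaAllocator hAllocator)
{
    uint32_t* const pArray = VmaAllocateArray<uint32_t>(hAllocator, 16);
    pArray[0] = 42;
    vma_delete_array(hAllocator, pArray, 16); // Destroys elements, then frees.
}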
    3655 
    3657 // VmaStringBuilder
    3658 
    3659 #if VMA_STATS_STRING_ENABLED
    3660 
    3661 class VmaStringBuilder
    3662 {
    3663 public:
    3664  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    3665  size_t GetLength() const { return m_Data.size(); }
    3666  const char* GetData() const { return m_Data.data(); }
    3667 
    3668  void Add(char ch) { m_Data.push_back(ch); }
    3669  void Add(const char* pStr);
    3670  void AddNewLine() { Add('\n'); }
    3671  void AddNumber(uint32_t num);
    3672  void AddNumber(uint64_t num);
    3673  void AddPointer(const void* ptr);
    3674 
    3675 private:
    3676  VmaVector< char, VmaStlAllocator<char> > m_Data;
    3677 };
    3678 
    3679 void VmaStringBuilder::Add(const char* pStr)
    3680 {
    3681  const size_t strLen = strlen(pStr);
    3682  if(strLen > 0)
    3683  {
    3684  const size_t oldCount = m_Data.size();
    3685  m_Data.resize(oldCount + strLen);
    3686  memcpy(m_Data.data() + oldCount, pStr, strLen);
    3687  }
    3688 }
    3689 
    3690 void VmaStringBuilder::AddNumber(uint32_t num)
    3691 {
    3692  char buf[11];
    3693  VmaUint32ToStr(buf, sizeof(buf), num);
    3694  Add(buf);
    3695 }
    3696 
    3697 void VmaStringBuilder::AddNumber(uint64_t num)
    3698 {
    3699  char buf[21];
    3700  VmaUint64ToStr(buf, sizeof(buf), num);
    3701  Add(buf);
    3702 }
    3703 
    3704 void VmaStringBuilder::AddPointer(const void* ptr)
    3705 {
    3706  char buf[21];
    3707  VmaPtrToStr(buf, sizeof(buf), ptr);
    3708  Add(buf);
    3709 }
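// Illustrative sketch (not part of the original source): typical use of the
// builder when assembling a stats string.
static void VmaStringBuilderExample(VmaAllocator hAllocator)
{
    VmaStringBuilder sb(hAllocator);
    sb.Add("Allocations moved: ");
    sb.AddNumber(42u); // Selects the uint32_t overload.
    sb.AddNewLine();
    // GetData() is not null-terminated; GetLength() gives the size.
    const char* const pText = sb.GetData();
    (void)pText;
}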
    3710 
    3711 #endif // #if VMA_STATS_STRING_ENABLED
    3712 
    3714 // VmaJsonWriter
    3715 
    3716 #if VMA_STATS_STRING_ENABLED
    3717 
    3718 class VmaJsonWriter
    3719 {
    3720 public:
    3721  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    3722  ~VmaJsonWriter();
    3723 
    3724  void BeginObject(bool singleLine = false);
    3725  void EndObject();
    3726 
    3727  void BeginArray(bool singleLine = false);
    3728  void EndArray();
    3729 
    3730  void WriteString(const char* pStr);
    3731  void BeginString(const char* pStr = VMA_NULL);
    3732  void ContinueString(const char* pStr);
    3733  void ContinueString(uint32_t n);
    3734  void ContinueString(uint64_t n);
    3735  void EndString(const char* pStr = VMA_NULL);
    3736 
    3737  void WriteNumber(uint32_t n);
    3738  void WriteNumber(uint64_t n);
    3739  void WriteBool(bool b);
    3740  void WriteNull();
    3741 
    3742 private:
    3743  static const char* const INDENT;
    3744 
    3745  enum COLLECTION_TYPE
    3746  {
    3747  COLLECTION_TYPE_OBJECT,
    3748  COLLECTION_TYPE_ARRAY,
    3749  };
    3750  struct StackItem
    3751  {
    3752  COLLECTION_TYPE type;
    3753  uint32_t valueCount;
    3754  bool singleLineMode;
    3755  };
    3756 
    3757  VmaStringBuilder& m_SB;
    3758  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    3759  bool m_InsideString;
    3760 
    3761  void BeginValue(bool isString);
    3762  void WriteIndent(bool oneLess = false);
    3763 };
    3764 
    3765 const char* const VmaJsonWriter::INDENT = " ";
    3766 
    3767 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    3768  m_SB(sb),
    3769  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    3770  m_InsideString(false)
    3771 {
    3772 }
    3773 
    3774 VmaJsonWriter::~VmaJsonWriter()
    3775 {
    3776  VMA_ASSERT(!m_InsideString);
    3777  VMA_ASSERT(m_Stack.empty());
    3778 }
    3779 
    3780 void VmaJsonWriter::BeginObject(bool singleLine)
    3781 {
    3782  VMA_ASSERT(!m_InsideString);
    3783 
    3784  BeginValue(false);
    3785  m_SB.Add('{');
    3786 
    3787  StackItem item;
    3788  item.type = COLLECTION_TYPE_OBJECT;
    3789  item.valueCount = 0;
    3790  item.singleLineMode = singleLine;
    3791  m_Stack.push_back(item);
    3792 }
    3793 
    3794 void VmaJsonWriter::EndObject()
    3795 {
    3796  VMA_ASSERT(!m_InsideString);
    3797 
    3798  WriteIndent(true);
    3799  m_SB.Add('}');
    3800 
    3801  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    3802  m_Stack.pop_back();
    3803 }
    3804 
    3805 void VmaJsonWriter::BeginArray(bool singleLine)
    3806 {
    3807  VMA_ASSERT(!m_InsideString);
    3808 
    3809  BeginValue(false);
    3810  m_SB.Add('[');
    3811 
    3812  StackItem item;
    3813  item.type = COLLECTION_TYPE_ARRAY;
    3814  item.valueCount = 0;
    3815  item.singleLineMode = singleLine;
    3816  m_Stack.push_back(item);
    3817 }
    3818 
    3819 void VmaJsonWriter::EndArray()
    3820 {
    3821  VMA_ASSERT(!m_InsideString);
    3822 
    3823  WriteIndent(true);
    3824  m_SB.Add(']');
    3825 
    3826  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    3827  m_Stack.pop_back();
    3828 }
    3829 
    3830 void VmaJsonWriter::WriteString(const char* pStr)
    3831 {
    3832  BeginString(pStr);
    3833  EndString();
    3834 }
    3835 
    3836 void VmaJsonWriter::BeginString(const char* pStr)
    3837 {
    3838  VMA_ASSERT(!m_InsideString);
    3839 
    3840  BeginValue(true);
    3841  m_SB.Add('"');
    3842  m_InsideString = true;
    3843  if(pStr != VMA_NULL && pStr[0] != '\0')
    3844  {
    3845  ContinueString(pStr);
    3846  }
    3847 }
    3848 
    3849 void VmaJsonWriter::ContinueString(const char* pStr)
    3850 {
    3851  VMA_ASSERT(m_InsideString);
    3852 
    3853  const size_t strLen = strlen(pStr);
    3854  for(size_t i = 0; i < strLen; ++i)
    3855  {
    3856  char ch = pStr[i];
3857  if(ch == '\\') // Backslash must be escaped in JSON.
    3858  {
    3859  m_SB.Add("\\\\");
    3860  }
    3861  else if(ch == '"')
    3862  {
    3863  m_SB.Add("\\\"");
    3864  }
    3865  else if(ch >= 32)
    3866  {
    3867  m_SB.Add(ch);
    3868  }
    3869  else switch(ch)
    3870  {
    3871  case '\n':
    3872  m_SB.Add("\\n");
    3873  break;
    3874  case '\r':
    3875  m_SB.Add("\\r");
    3876  break;
    3877  case '\t':
    3878  m_SB.Add("\\t");
    3879  break;
    3880  default:
    3881  VMA_ASSERT(0 && "Character not currently supported.");
    3882  break;
    3883  }
    3884  }
    3885 }
    3886 
    3887 void VmaJsonWriter::ContinueString(uint32_t n)
    3888 {
    3889  VMA_ASSERT(m_InsideString);
    3890  m_SB.AddNumber(n);
    3891 }
    3892 
    3893 void VmaJsonWriter::ContinueString(uint64_t n)
    3894 {
    3895  VMA_ASSERT(m_InsideString);
    3896  m_SB.AddNumber(n);
    3897 }
    3898 
    3899 void VmaJsonWriter::EndString(const char* pStr)
    3900 {
    3901  VMA_ASSERT(m_InsideString);
    3902  if(pStr != VMA_NULL && pStr[0] != '\0')
    3903  {
    3904  ContinueString(pStr);
    3905  }
    3906  m_SB.Add('"');
    3907  m_InsideString = false;
    3908 }
    3909 
    3910 void VmaJsonWriter::WriteNumber(uint32_t n)
    3911 {
    3912  VMA_ASSERT(!m_InsideString);
    3913  BeginValue(false);
    3914  m_SB.AddNumber(n);
    3915 }
    3916 
    3917 void VmaJsonWriter::WriteNumber(uint64_t n)
    3918 {
    3919  VMA_ASSERT(!m_InsideString);
    3920  BeginValue(false);
    3921  m_SB.AddNumber(n);
    3922 }
    3923 
    3924 void VmaJsonWriter::WriteBool(bool b)
    3925 {
    3926  VMA_ASSERT(!m_InsideString);
    3927  BeginValue(false);
    3928  m_SB.Add(b ? "true" : "false");
    3929 }
    3930 
    3931 void VmaJsonWriter::WriteNull()
    3932 {
    3933  VMA_ASSERT(!m_InsideString);
    3934  BeginValue(false);
    3935  m_SB.Add("null");
    3936 }
    3937 
    3938 void VmaJsonWriter::BeginValue(bool isString)
    3939 {
    3940  if(!m_Stack.empty())
    3941  {
    3942  StackItem& currItem = m_Stack.back();
    3943  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    3944  currItem.valueCount % 2 == 0)
    3945  {
    3946  VMA_ASSERT(isString);
    3947  }
    3948 
    3949  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    3950  currItem.valueCount % 2 != 0)
    3951  {
    3952  m_SB.Add(": ");
    3953  }
    3954  else if(currItem.valueCount > 0)
    3955  {
    3956  m_SB.Add(", ");
    3957  WriteIndent();
    3958  }
    3959  else
    3960  {
    3961  WriteIndent();
    3962  }
    3963  ++currItem.valueCount;
    3964  }
    3965 }
    3966 
    3967 void VmaJsonWriter::WriteIndent(bool oneLess)
    3968 {
    3969  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    3970  {
    3971  m_SB.AddNewLine();
    3972 
    3973  size_t count = m_Stack.size();
    3974  if(count > 0 && oneLess)
    3975  {
    3976  --count;
    3977  }
    3978  for(size_t i = 0; i < count; ++i)
    3979  {
    3980  m_SB.Add(INDENT);
    3981  }
    3982  }
    3983 }
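// Illustrative sketch (not part of the original source): the writer enforces
// the name/value alternation asserted in BeginValue(), so object members are
// emitted as alternating WriteString()/value calls.
static void VmaJsonWriterExample(VmaAllocator hAllocator)
{
    VmaStringBuilder sb(hAllocator);
    VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
    json.BeginObject();
    json.WriteString("UsedBytes"); // Name...
    json.WriteNumber(1048576u);    // ...then value: {"UsedBytes": 1048576}
    json.EndObject();              // Destructor asserts the stack is empty.
}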
    3984 
    3985 #endif // #if VMA_STATS_STRING_ENABLED
    3986 
    3988 
    3989 VkDeviceSize VmaAllocation_T::GetOffset() const
    3990 {
    3991  switch(m_Type)
    3992  {
    3993  case ALLOCATION_TYPE_BLOCK:
    3994  return m_BlockAllocation.m_Offset;
    3995  case ALLOCATION_TYPE_DEDICATED:
    3996  return 0;
    3997  default:
    3998  VMA_ASSERT(0);
    3999  return 0;
    4000  }
    4001 }
    4002 
    4003 VkDeviceMemory VmaAllocation_T::GetMemory() const
    4004 {
    4005  switch(m_Type)
    4006  {
    4007  case ALLOCATION_TYPE_BLOCK:
    4008  return m_BlockAllocation.m_Block->m_hMemory;
    4009  case ALLOCATION_TYPE_DEDICATED:
    4010  return m_DedicatedAllocation.m_hMemory;
    4011  default:
    4012  VMA_ASSERT(0);
    4013  return VK_NULL_HANDLE;
    4014  }
    4015 }
    4016 
    4017 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    4018 {
    4019  switch(m_Type)
    4020  {
    4021  case ALLOCATION_TYPE_BLOCK:
    4022  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
    4023  case ALLOCATION_TYPE_DEDICATED:
    4024  return m_DedicatedAllocation.m_MemoryTypeIndex;
    4025  default:
    4026  VMA_ASSERT(0);
    4027  return UINT32_MAX;
    4028  }
    4029 }
    4030 
    4031 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
    4032 {
    4033  switch(m_Type)
    4034  {
    4035  case ALLOCATION_TYPE_BLOCK:
    4036  return m_BlockAllocation.m_Block->m_BlockVectorType;
    4037  case ALLOCATION_TYPE_DEDICATED:
    4038  return (m_DedicatedAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
    4039  default:
    4040  VMA_ASSERT(0);
    4041  return VMA_BLOCK_VECTOR_TYPE_COUNT;
    4042  }
    4043 }
    4044 
    4045 void* VmaAllocation_T::GetMappedData() const
    4046 {
    4047  switch(m_Type)
    4048  {
    4049  case ALLOCATION_TYPE_BLOCK:
    4050  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
    4051  {
    4052  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
    4053  }
    4054  else
    4055  {
    4056  return VMA_NULL;
    4057  }
    4058  break;
    4059  case ALLOCATION_TYPE_DEDICATED:
    4060  return m_DedicatedAllocation.m_pMappedData;
    4061  default:
    4062  VMA_ASSERT(0);
    4063  return VMA_NULL;
    4064  }
    4065 }
    4066 
    4067 bool VmaAllocation_T::CanBecomeLost() const
    4068 {
    4069  switch(m_Type)
    4070  {
    4071  case ALLOCATION_TYPE_BLOCK:
    4072  return m_BlockAllocation.m_CanBecomeLost;
    4073  case ALLOCATION_TYPE_DEDICATED:
    4074  return false;
    4075  default:
    4076  VMA_ASSERT(0);
    4077  return false;
    4078  }
    4079 }
    4080 
    4081 VmaPool VmaAllocation_T::GetPool() const
    4082 {
    4083  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    4084  return m_BlockAllocation.m_hPool;
    4085 }
    4086 
    4087 VkResult VmaAllocation_T::DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
    4088 {
    4089  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    4090  if(m_DedicatedAllocation.m_PersistentMap)
    4091  {
    4092  return (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    4093  hAllocator->m_hDevice,
    4094  m_DedicatedAllocation.m_hMemory,
    4095  0,
    4096  VK_WHOLE_SIZE,
    4097  0,
    4098  &m_DedicatedAllocation.m_pMappedData);
    4099  }
    4100  return VK_SUCCESS;
    4101 }
    4102 void VmaAllocation_T::DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
    4103 {
    4104  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    4105  if(m_DedicatedAllocation.m_pMappedData)
    4106  {
    4107  VMA_ASSERT(m_DedicatedAllocation.m_PersistentMap);
    4108  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_DedicatedAllocation.m_hMemory);
    4109  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    4110  }
    4111 }
    4112 
    4113 
    4114 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    4115 {
    4116  VMA_ASSERT(CanBecomeLost());
    4117 
    4118  /*
    4119  Warning: This is a carefully designed algorithm.
    4120  Do not modify unless you really know what you're doing :)
    4121  */
    4122  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    4123  for(;;)
    4124  {
    4125  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    4126  {
    4127  VMA_ASSERT(0);
    4128  return false;
    4129  }
    4130  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    4131  {
    4132  return false;
    4133  }
    4134  else // Last use time earlier than current time.
    4135  {
    4136  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    4137  {
    4138  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    4139  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    4140  return true;
    4141  }
    4142  }
    4143  }
    4144 }
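// Worked example (a sketch, not part of the original source): with
// frameInUseCount = 2, an allocation last used in frame 10 is still protected
// in frame 12 (10 + 2 >= 12) and can first become lost in frame 13, where the
// compare-exchange atomically replaces 10 with VMA_FRAME_INDEX_LOST.
static bool VmaMakeLostExample(VmaAllocation_T& alloc)
{
    return alloc.MakeLost(13, 2); // currentFrameIndex = 13, frameInUseCount = 2.
}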
    4145 
    4146 #if VMA_STATS_STRING_ENABLED
    4147 
4148 // These names correspond to the values of enum VmaSuballocationType.
    4149 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    4150  "FREE",
    4151  "UNKNOWN",
    4152  "BUFFER",
    4153  "IMAGE_UNKNOWN",
    4154  "IMAGE_LINEAR",
    4155  "IMAGE_OPTIMAL",
    4156 };
    4157 
    4158 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    4159 {
    4160  json.BeginObject();
    4161 
    4162  json.WriteString("Blocks");
    4163  json.WriteNumber(stat.blockCount);
    4164 
    4165  json.WriteString("Allocations");
    4166  json.WriteNumber(stat.allocationCount);
    4167 
    4168  json.WriteString("UnusedRanges");
    4169  json.WriteNumber(stat.unusedRangeCount);
    4170 
    4171  json.WriteString("UsedBytes");
    4172  json.WriteNumber(stat.usedBytes);
    4173 
    4174  json.WriteString("UnusedBytes");
    4175  json.WriteNumber(stat.unusedBytes);
    4176 
    4177  if(stat.allocationCount > 1)
    4178  {
    4179  json.WriteString("AllocationSize");
    4180  json.BeginObject(true);
    4181  json.WriteString("Min");
    4182  json.WriteNumber(stat.allocationSizeMin);
    4183  json.WriteString("Avg");
    4184  json.WriteNumber(stat.allocationSizeAvg);
    4185  json.WriteString("Max");
    4186  json.WriteNumber(stat.allocationSizeMax);
    4187  json.EndObject();
    4188  }
    4189 
    4190  if(stat.unusedRangeCount > 1)
    4191  {
    4192  json.WriteString("UnusedRangeSize");
    4193  json.BeginObject(true);
    4194  json.WriteString("Min");
    4195  json.WriteNumber(stat.unusedRangeSizeMin);
    4196  json.WriteString("Avg");
    4197  json.WriteNumber(stat.unusedRangeSizeAvg);
    4198  json.WriteString("Max");
    4199  json.WriteNumber(stat.unusedRangeSizeMax);
    4200  json.EndObject();
    4201  }
    4202 
    4203  json.EndObject();
    4204 }
    4205 
    4206 #endif // #if VMA_STATS_STRING_ENABLED
    4207 
    4208 struct VmaSuballocationItemSizeLess
    4209 {
    4210  bool operator()(
    4211  const VmaSuballocationList::iterator lhs,
    4212  const VmaSuballocationList::iterator rhs) const
    4213  {
    4214  return lhs->size < rhs->size;
    4215  }
    4216  bool operator()(
    4217  const VmaSuballocationList::iterator lhs,
    4218  VkDeviceSize rhsSize) const
    4219  {
    4220  return lhs->size < rhsSize;
    4221  }
    4222 };
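// Illustrative sketch (not part of the original source): the second overload
// above makes the comparator usable heterogeneously, searching a size-sorted
// iterator vector for the first free range big enough for a request.
static size_t VmaFindFreeSuballocationExample(
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator<VmaSuballocationList::iterator> >& freeBySize,
    VkDeviceSize requiredSize)
{
    VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
        freeBySize.data(),
        freeBySize.data() + freeBySize.size(),
        requiredSize,                    // Compared against each item's size.
        VmaSuballocationItemSizeLess());
    return it - freeBySize.data();       // == size() when nothing is big enough.
}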
    4223 
    4225 // class VmaBlockMetadata
    4226 
    4227 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    4228  m_Size(0),
    4229  m_FreeCount(0),
    4230  m_SumFreeSize(0),
    4231  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    4232  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    4233 {
    4234 }
    4235 
    4236 VmaBlockMetadata::~VmaBlockMetadata()
    4237 {
    4238 }
    4239 
    4240 void VmaBlockMetadata::Init(VkDeviceSize size)
    4241 {
    4242  m_Size = size;
    4243  m_FreeCount = 1;
    4244  m_SumFreeSize = size;
    4245 
    4246  VmaSuballocation suballoc = {};
    4247  suballoc.offset = 0;
    4248  suballoc.size = size;
    4249  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4250  suballoc.hAllocation = VK_NULL_HANDLE;
    4251 
    4252  m_Suballocations.push_back(suballoc);
    4253  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    4254  --suballocItem;
    4255  m_FreeSuballocationsBySize.push_back(suballocItem);
    4256 }
    4257 
    4258 bool VmaBlockMetadata::Validate() const
    4259 {
    4260  if(m_Suballocations.empty())
    4261  {
    4262  return false;
    4263  }
    4264 
4265  // Expected offset of the next suballocation, as calculated from the previous ones.
    4266  VkDeviceSize calculatedOffset = 0;
    4267  // Expected number of free suballocations as calculated from traversing their list.
    4268  uint32_t calculatedFreeCount = 0;
    4269  // Expected sum size of free suballocations as calculated from traversing their list.
    4270  VkDeviceSize calculatedSumFreeSize = 0;
    4271  // Expected number of free suballocations that should be registered in
    4272  // m_FreeSuballocationsBySize calculated from traversing their list.
    4273  size_t freeSuballocationsToRegister = 0;
4274  // True if the previously visited suballocation was free.
    4275  bool prevFree = false;
    4276 
    4277  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4278  suballocItem != m_Suballocations.cend();
    4279  ++suballocItem)
    4280  {
    4281  const VmaSuballocation& subAlloc = *suballocItem;
    4282 
4283  // Actual offset of this suballocation doesn't match the expected one.
    4284  if(subAlloc.offset != calculatedOffset)
    4285  {
    4286  return false;
    4287  }
    4288 
    4289  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4290  // Two adjacent free suballocations are invalid. They should be merged.
    4291  if(prevFree && currFree)
    4292  {
    4293  return false;
    4294  }
    4295  prevFree = currFree;
    4296 
    4297  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
    4298  {
    4299  return false;
    4300  }
    4301 
    4302  if(currFree)
    4303  {
    4304  calculatedSumFreeSize += subAlloc.size;
    4305  ++calculatedFreeCount;
    4306  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    4307  {
    4308  ++freeSuballocationsToRegister;
    4309  }
    4310  }
    4311 
    4312  calculatedOffset += subAlloc.size;
    4313  }
    4314 
    4315  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    4316  // match expected one.
    4317  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    4318  {
    4319  return false;
    4320  }
    4321 
    4322  VkDeviceSize lastSize = 0;
    4323  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    4324  {
    4325  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    4326 
    4327  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    4328  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
    4329  {
    4330  return false;
    4331  }
    4332  // They must be sorted by size ascending.
    4333  if(suballocItem->size < lastSize)
    4334  {
    4335  return false;
    4336  }
    4337 
    4338  lastSize = suballocItem->size;
    4339  }
    4340 
    4341  // Check if totals match the calculated values.
    4342  return
    4343  ValidateFreeSuballocationList() &&
    4344  (calculatedOffset == m_Size) &&
    4345  (calculatedSumFreeSize == m_SumFreeSize) &&
    4346  (calculatedFreeCount == m_FreeCount);
    4347 }
    4348 
    4349 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
    4350 {
    4351  if(!m_FreeSuballocationsBySize.empty())
    4352  {
    4353  return m_FreeSuballocationsBySize.back()->size;
    4354  }
    4355  else
    4356  {
    4357  return 0;
    4358  }
    4359 }
    4360 
    4361 bool VmaBlockMetadata::IsEmpty() const
    4362 {
    4363  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    4364 }
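          // A block is empty exactly when it has degenerated back to the state
          // created by Init(): a single free suballocation spanning the whole block.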
    4365 
    4366 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    4367 {
    4368  outInfo.blockCount = 1;
    4369 
    4370  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    4371  outInfo.allocationCount = rangeCount - m_FreeCount;
    4372  outInfo.unusedRangeCount = m_FreeCount;
    4373 
    4374  outInfo.unusedBytes = m_SumFreeSize;
    4375  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
    4376 
    4377  outInfo.allocationSizeMin = UINT64_MAX;
    4378  outInfo.allocationSizeMax = 0;
    4379  outInfo.unusedRangeSizeMin = UINT64_MAX;
    4380  outInfo.unusedRangeSizeMax = 0;
    4381 
    4382  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4383  suballocItem != m_Suballocations.cend();
    4384  ++suballocItem)
    4385  {
    4386  const VmaSuballocation& suballoc = *suballocItem;
    4387  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    4388  {
    4389  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    4390  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    4391  }
    4392  else
    4393  {
    4394  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    4395  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    4396  }
    4397  }
    4398 }
    4399 
    4400 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
    4401 {
    4402  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    4403 
    4404  inoutStats.size += m_Size;
    4405  inoutStats.unusedSize += m_SumFreeSize;
    4406  inoutStats.allocationCount += rangeCount - m_FreeCount;
    4407  inoutStats.unusedRangeCount += m_FreeCount;
    4408  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    4409 }
    4410 
    4411 #if VMA_STATS_STRING_ENABLED
    4412 
    4413 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
    4414 {
    4415  json.BeginObject();
    4416 
    4417  json.WriteString("TotalBytes");
    4418  json.WriteNumber(m_Size);
    4419 
    4420  json.WriteString("UnusedBytes");
    4421  json.WriteNumber(m_SumFreeSize);
    4422 
    4423  json.WriteString("Allocations");
    4424  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
    4425 
    4426  json.WriteString("UnusedRanges");
    4427  json.WriteNumber(m_FreeCount);
    4428 
    4429  json.WriteString("Suballocations");
    4430  json.BeginArray();
    4431  size_t i = 0;
    4432  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4433  suballocItem != m_Suballocations.cend();
    4434  ++suballocItem, ++i)
    4435  {
    4436  json.BeginObject(true);
    4437 
    4438  json.WriteString("Type");
    4439  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
    4440 
    4441  json.WriteString("Size");
    4442  json.WriteNumber(suballocItem->size);
    4443 
    4444  json.WriteString("Offset");
    4445  json.WriteNumber(suballocItem->offset);
    4446 
    4447  json.EndObject();
    4448  }
    4449  json.EndArray();
    4450 
    4451  json.EndObject();
    4452 }
    4453 
    4454 #endif // #if VMA_STATS_STRING_ENABLED
    4455 
    4456 /*
    4457 How many suitable free suballocations to analyze before choosing the best one.
    4458 - Set to 1 to use the First-Fit algorithm - the first suitable free
    4459  suballocation will be chosen.
    4460 - Set to UINT32_MAX to use the Best-Fit/Worst-Fit algorithm - all suitable
    4461  free suballocations will be analyzed and the best one will be chosen.
    4462 - Any other value is also acceptable.
    4463 */
    4464 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
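          // Illustrative example: with registered free sizes sorted ascending as
          // {16, 64, 256} and allocSize == 50, the Best-Fit binary search in
          // CreateAllocationRequest() lands on the 64-byte entry - the smallest
          // free range that can hold the request - while the Worst-Fit branch
          // starts its scan from the 256-byte entry instead.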
    4465 
    4466 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
    4467 {
    4468  VMA_ASSERT(IsEmpty());
    4469  pAllocationRequest->offset = 0;
    4470  pAllocationRequest->sumFreeSize = m_SumFreeSize;
    4471  pAllocationRequest->sumItemSize = 0;
    4472  pAllocationRequest->item = m_Suballocations.begin();
    4473  pAllocationRequest->itemsToMakeLostCount = 0;
    4474 }
    4475 
    4476 bool VmaBlockMetadata::CreateAllocationRequest(
    4477  uint32_t currentFrameIndex,
    4478  uint32_t frameInUseCount,
    4479  VkDeviceSize bufferImageGranularity,
    4480  VkDeviceSize allocSize,
    4481  VkDeviceSize allocAlignment,
    4482  VmaSuballocationType allocType,
    4483  bool canMakeOtherLost,
    4484  VmaAllocationRequest* pAllocationRequest)
    4485 {
    4486  VMA_ASSERT(allocSize > 0);
    4487  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    4488  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    4489  VMA_HEAVY_ASSERT(Validate());
    4490 
    4491  // There is not enough total free space in this block to fulfill the request: Early return.
    4492  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
    4493  {
    4494  return false;
    4495  }
    4496 
    4497  // Efficiently search m_FreeSuballocationsBySize, which is sorted by size ascending.
    4498  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    4499  if(freeSuballocCount > 0)
    4500  {
    4501  if(VMA_BEST_FIT)
    4502  {
    4503  // Find first free suballocation with size not less than allocSize.
    4504  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    4505  m_FreeSuballocationsBySize.data(),
    4506  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    4507  allocSize,
    4508  VmaSuballocationItemSizeLess());
    4509  size_t index = it - m_FreeSuballocationsBySize.data();
    4510  for(; index < freeSuballocCount; ++index)
    4511  {
    4512  if(CheckAllocation(
    4513  currentFrameIndex,
    4514  frameInUseCount,
    4515  bufferImageGranularity,
    4516  allocSize,
    4517  allocAlignment,
    4518  allocType,
    4519  m_FreeSuballocationsBySize[index],
    4520  false, // canMakeOtherLost
    4521  &pAllocationRequest->offset,
    4522  &pAllocationRequest->itemsToMakeLostCount,
    4523  &pAllocationRequest->sumFreeSize,
    4524  &pAllocationRequest->sumItemSize))
    4525  {
    4526  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    4527  return true;
    4528  }
    4529  }
    4530  }
    4531  else
    4532  {
    4533  // Search starting from the biggest suballocations (Worst-Fit).
    4534  for(size_t index = freeSuballocCount; index--; )
    4535  {
    4536  if(CheckAllocation(
    4537  currentFrameIndex,
    4538  frameInUseCount,
    4539  bufferImageGranularity,
    4540  allocSize,
    4541  allocAlignment,
    4542  allocType,
    4543  m_FreeSuballocationsBySize[index],
    4544  false, // canMakeOtherLost
    4545  &pAllocationRequest->offset,
    4546  &pAllocationRequest->itemsToMakeLostCount,
    4547  &pAllocationRequest->sumFreeSize,
    4548  &pAllocationRequest->sumItemSize))
    4549  {
    4550  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    4551  return true;
    4552  }
    4553  }
    4554  }
    4555  }
    4556 
    4557  if(canMakeOtherLost)
    4558  {
    4559  // Brute-force algorithm. TODO: Come up with something better.
    4560 
    4561  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    4562  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    4563 
    4564  VmaAllocationRequest tmpAllocRequest = {};
    4565  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    4566  suballocIt != m_Suballocations.end();
    4567  ++suballocIt)
    4568  {
    4569  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    4570  suballocIt->hAllocation->CanBecomeLost())
    4571  {
    4572  if(CheckAllocation(
    4573  currentFrameIndex,
    4574  frameInUseCount,
    4575  bufferImageGranularity,
    4576  allocSize,
    4577  allocAlignment,
    4578  allocType,
    4579  suballocIt,
    4580  canMakeOtherLost,
    4581  &tmpAllocRequest.offset,
    4582  &tmpAllocRequest.itemsToMakeLostCount,
    4583  &tmpAllocRequest.sumFreeSize,
    4584  &tmpAllocRequest.sumItemSize))
    4585  {
    4586  tmpAllocRequest.item = suballocIt;
    4587 
    4588  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    4589  {
    4590  *pAllocationRequest = tmpAllocRequest;
    4591  }
    4592  }
    4593  }
    4594  }
    4595 
    4596  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    4597  {
    4598  return true;
    4599  }
    4600  }
    4601 
    4602  return false;
    4603 }
    4604 
    4605 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
    4606  uint32_t currentFrameIndex,
    4607  uint32_t frameInUseCount,
    4608  VmaAllocationRequest* pAllocationRequest)
    4609 {
    4610  while(pAllocationRequest->itemsToMakeLostCount > 0)
    4611  {
    4612  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    4613  {
    4614  ++pAllocationRequest->item;
    4615  }
    4616  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    4617  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    4618  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    4619  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    4620  {
    4621  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    4622  --pAllocationRequest->itemsToMakeLostCount;
    4623  }
    4624  else
    4625  {
    4626  return false;
    4627  }
    4628  }
    4629 
    4630  VMA_HEAVY_ASSERT(Validate());
    4631  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    4632  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    4633 
    4634  return true;
    4635 }
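          // Each iteration above either skips an item that is already free or turns
          // a lost-capable allocation into a free suballocation via FreeSuballocation(),
          // which may merge it with free neighbors. On success, the request item
          // refers to a free range and the caller can proceed with Alloc().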
    4636 
    4637 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    4638 {
    4639  uint32_t lostAllocationCount = 0;
    4640  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    4641  it != m_Suballocations.end();
    4642  ++it)
    4643  {
    4644  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    4645  it->hAllocation->CanBecomeLost() &&
    4646  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    4647  {
    4648  it = FreeSuballocation(it);
    4649  ++lostAllocationCount;
    4650  }
    4651  }
    4652  return lostAllocationCount;
    4653 }
    4654 
    4655 void VmaBlockMetadata::Alloc(
    4656  const VmaAllocationRequest& request,
    4657  VmaSuballocationType type,
    4658  VkDeviceSize allocSize,
    4659  VmaAllocation hAllocation)
    4660 {
    4661  VMA_ASSERT(request.item != m_Suballocations.end());
    4662  VmaSuballocation& suballoc = *request.item;
    4663  // Given suballocation is a free block.
    4664  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4665  // Given offset is inside this suballocation.
    4666  VMA_ASSERT(request.offset >= suballoc.offset);
    4667  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    4668  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    4669  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    4670 
    4671  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    4672  // it to become used.
    4673  UnregisterFreeSuballocation(request.item);
    4674 
    4675  suballoc.offset = request.offset;
    4676  suballoc.size = allocSize;
    4677  suballoc.type = type;
    4678  suballoc.hAllocation = hAllocation;
    4679 
    4680  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    4681  if(paddingEnd)
    4682  {
    4683  VmaSuballocation paddingSuballoc = {};
    4684  paddingSuballoc.offset = request.offset + allocSize;
    4685  paddingSuballoc.size = paddingEnd;
    4686  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4687  VmaSuballocationList::iterator next = request.item;
    4688  ++next;
    4689  const VmaSuballocationList::iterator paddingEndItem =
    4690  m_Suballocations.insert(next, paddingSuballoc);
    4691  RegisterFreeSuballocation(paddingEndItem);
    4692  }
    4693 
    4694  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    4695  if(paddingBegin)
    4696  {
    4697  VmaSuballocation paddingSuballoc = {};
    4698  paddingSuballoc.offset = request.offset - paddingBegin;
    4699  paddingSuballoc.size = paddingBegin;
    4700  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4701  const VmaSuballocationList::iterator paddingBeginItem =
    4702  m_Suballocations.insert(request.item, paddingSuballoc);
    4703  RegisterFreeSuballocation(paddingBeginItem);
    4704  }
    4705 
    4706  // Update totals.
    4707  m_FreeCount = m_FreeCount - 1;
    4708  if(paddingBegin > 0)
    4709  {
    4710  ++m_FreeCount;
    4711  }
    4712  if(paddingEnd > 0)
    4713  {
    4714  ++m_FreeCount;
    4715  }
    4716  m_SumFreeSize -= allocSize;
    4717 }
    4718 
    4719 void VmaBlockMetadata::Free(const VmaAllocation allocation)
    4720 {
    4721  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    4722  suballocItem != m_Suballocations.end();
    4723  ++suballocItem)
    4724  {
    4725  VmaSuballocation& suballoc = *suballocItem;
    4726  if(suballoc.hAllocation == allocation)
    4727  {
    4728  FreeSuballocation(suballocItem);
    4729  VMA_HEAVY_ASSERT(Validate());
    4730  return;
    4731  }
    4732  }
    4733  VMA_ASSERT(0 && "Not found!");
    4734 }
    4735 
    4736 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
    4737 {
    4738  VkDeviceSize lastSize = 0;
    4739  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    4740  {
    4741  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    4742 
    4743  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    4744  {
    4745  VMA_ASSERT(0);
    4746  return false;
    4747  }
    4748  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    4749  {
    4750  VMA_ASSERT(0);
    4751  return false;
    4752  }
    4753  if(it->size < lastSize)
    4754  {
    4755  VMA_ASSERT(0);
    4756  return false;
    4757  }
    4758 
    4759  lastSize = it->size;
    4760  }
    4761  return true;
    4762 }
    4763 
    4764 bool VmaBlockMetadata::CheckAllocation(
    4765  uint32_t currentFrameIndex,
    4766  uint32_t frameInUseCount,
    4767  VkDeviceSize bufferImageGranularity,
    4768  VkDeviceSize allocSize,
    4769  VkDeviceSize allocAlignment,
    4770  VmaSuballocationType allocType,
    4771  VmaSuballocationList::const_iterator suballocItem,
    4772  bool canMakeOtherLost,
    4773  VkDeviceSize* pOffset,
    4774  size_t* itemsToMakeLostCount,
    4775  VkDeviceSize* pSumFreeSize,
    4776  VkDeviceSize* pSumItemSize) const
    4777 {
    4778  VMA_ASSERT(allocSize > 0);
    4779  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    4780  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    4781  VMA_ASSERT(pOffset != VMA_NULL);
    4782 
    4783  *itemsToMakeLostCount = 0;
    4784  *pSumFreeSize = 0;
    4785  *pSumItemSize = 0;
    4786 
    4787  if(canMakeOtherLost)
    4788  {
    4789  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    4790  {
    4791  *pSumFreeSize = suballocItem->size;
    4792  }
    4793  else
    4794  {
    4795  if(suballocItem->hAllocation->CanBecomeLost() &&
    4796  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    4797  {
    4798  ++*itemsToMakeLostCount;
    4799  *pSumItemSize = suballocItem->size;
    4800  }
    4801  else
    4802  {
    4803  return false;
    4804  }
    4805  }
    4806 
    4807  // The remaining size of the block from this offset is too small for this request: Early return.
    4808  if(m_Size - suballocItem->offset < allocSize)
    4809  {
    4810  return false;
    4811  }
    4812 
    4813  // Start from offset equal to beginning of this suballocation.
    4814  *pOffset = suballocItem->offset;
    4815 
    4816  // Apply VMA_DEBUG_MARGIN at the beginning.
    4817  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    4818  {
    4819  *pOffset += VMA_DEBUG_MARGIN;
    4820  }
    4821 
    4822  // Apply alignment.
    4823  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    4824  *pOffset = VmaAlignUp(*pOffset, alignment);
    4825 
    4826  // Check previous suballocations for BufferImageGranularity conflicts.
    4827  // Make bigger alignment if necessary.
    4828  if(bufferImageGranularity > 1)
    4829  {
    4830  bool bufferImageGranularityConflict = false;
    4831  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    4832  while(prevSuballocItem != m_Suballocations.cbegin())
    4833  {
    4834  --prevSuballocItem;
    4835  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    4836  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    4837  {
    4838  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    4839  {
    4840  bufferImageGranularityConflict = true;
    4841  break;
    4842  }
    4843  }
    4844  else
    4845  // Already on previous page.
    4846  break;
    4847  }
    4848  if(bufferImageGranularityConflict)
    4849  {
    4850  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    4851  }
    4852  }
    4853 
    4854  // Now that we have final *pOffset, check if we are past suballocItem.
    4855  // If yes, return false - this function should be called again with another suballocItem as the starting point.
    4856  if(*pOffset >= suballocItem->offset + suballocItem->size)
    4857  {
    4858  return false;
    4859  }
    4860 
    4861  // Calculate padding at the beginning based on current offset.
    4862  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    4863 
    4864  // Calculate required margin at the end if this is not last suballocation.
    4865  VmaSuballocationList::const_iterator next = suballocItem;
    4866  ++next;
    4867  const VkDeviceSize requiredEndMargin =
    4868  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    4869 
    4870  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    4871  // Another early return check.
    4872  if(suballocItem->offset + totalSize > m_Size)
    4873  {
    4874  return false;
    4875  }
    4876 
    4877  // Advance lastSuballocItem until desired size is reached.
    4878  // Update itemsToMakeLostCount.
    4879  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    4880  if(totalSize > suballocItem->size)
    4881  {
    4882  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    4883  while(remainingSize > 0)
    4884  {
    4885  ++lastSuballocItem;
    4886  if(lastSuballocItem == m_Suballocations.cend())
    4887  {
    4888  return false;
    4889  }
    4890  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    4891  {
    4892  *pSumFreeSize += lastSuballocItem->size;
    4893  }
    4894  else
    4895  {
    4896  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    4897  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    4898  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    4899  {
    4900  ++*itemsToMakeLostCount;
    4901  *pSumItemSize += lastSuballocItem->size;
    4902  }
    4903  else
    4904  {
    4905  return false;
    4906  }
    4907  }
    4908  remainingSize = (lastSuballocItem->size < remainingSize) ?
    4909  remainingSize - lastSuballocItem->size : 0;
    4910  }
    4911  }
    4912 
    4913  // Check next suballocations for BufferImageGranularity conflicts.
    4914  // If conflict exists, we must mark more allocations lost or fail.
    4915  if(bufferImageGranularity > 1)
    4916  {
    4917  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    4918  ++nextSuballocItem;
    4919  while(nextSuballocItem != m_Suballocations.cend())
    4920  {
    4921  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    4922  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    4923  {
    4924  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    4925  {
    4926  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    4927  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    4928  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    4929  {
    4930  ++*itemsToMakeLostCount;
    4931  }
    4932  else
    4933  {
    4934  return false;
    4935  }
    4936  }
    4937  }
    4938  else
    4939  {
    4940  // Already on next page.
    4941  break;
    4942  }
    4943  ++nextSuballocItem;
    4944  }
    4945  }
    4946  }
    4947  else
    4948  {
    4949  const VmaSuballocation& suballoc = *suballocItem;
    4950  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4951 
    4952  *pSumFreeSize = suballoc.size;
    4953 
    4954  // Size of this suballocation is too small for this request: Early return.
    4955  if(suballoc.size < allocSize)
    4956  {
    4957  return false;
    4958  }
    4959 
    4960  // Start from offset equal to beginning of this suballocation.
    4961  *pOffset = suballoc.offset;
    4962 
    4963  // Apply VMA_DEBUG_MARGIN at the beginning.
    4964  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    4965  {
    4966  *pOffset += VMA_DEBUG_MARGIN;
    4967  }
    4968 
    4969  // Apply alignment.
    4970  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    4971  *pOffset = VmaAlignUp(*pOffset, alignment);
    4972 
    4973  // Check previous suballocations for BufferImageGranularity conflicts.
    4974  // Make bigger alignment if necessary.
    4975  if(bufferImageGranularity > 1)
    4976  {
    4977  bool bufferImageGranularityConflict = false;
    4978  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    4979  while(prevSuballocItem != m_Suballocations.cbegin())
    4980  {
    4981  --prevSuballocItem;
    4982  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    4983  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    4984  {
    4985  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    4986  {
    4987  bufferImageGranularityConflict = true;
    4988  break;
    4989  }
    4990  }
    4991  else
    4992  // Already on previous page.
    4993  break;
    4994  }
    4995  if(bufferImageGranularityConflict)
    4996  {
    4997  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    4998  }
    4999  }
    5000 
    5001  // Calculate padding at the beginning based on current offset.
    5002  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    5003 
    5004  // Calculate required margin at the end if this is not last suballocation.
    5005  VmaSuballocationList::const_iterator next = suballocItem;
    5006  ++next;
    5007  const VkDeviceSize requiredEndMargin =
    5008  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    5009 
    5010  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    5011  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    5012  {
    5013  return false;
    5014  }
    5015 
    5016  // Check next suballocations for BufferImageGranularity conflicts.
    5017  // If conflict exists, allocation cannot be made here.
    5018  if(bufferImageGranularity > 1)
    5019  {
    5020  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    5021  ++nextSuballocItem;
    5022  while(nextSuballocItem != m_Suballocations.cend())
    5023  {
    5024  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    5025  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    5026  {
    5027  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    5028  {
    5029  return false;
    5030  }
    5031  }
    5032  else
    5033  {
    5034  // Already on next page.
    5035  break;
    5036  }
    5037  ++nextSuballocItem;
    5038  }
    5039  }
    5040  }
    5041 
    5042  // All tests passed: Success. pOffset is already filled.
    5043  return true;
    5044 }
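          // Worked example for the alignment logic above: for a free suballocation
          // at offset 70 with size 200, allocAlignment 64 and VMA_DEBUG_MARGIN 0,
          // *pOffset becomes VmaAlignUp(70, 64) == 128, so paddingBegin == 58;
          // an allocSize of 100 then fits, because 58 + 100 + 0 <= 200.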
    5045 
    5046 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
    5047 {
    5048  VMA_ASSERT(item != m_Suballocations.end());
    5049  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5050 
    5051  VmaSuballocationList::iterator nextItem = item;
    5052  ++nextItem;
    5053  VMA_ASSERT(nextItem != m_Suballocations.end());
    5054  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    5055 
    5056  item->size += nextItem->size;
    5057  --m_FreeCount;
    5058  m_Suballocations.erase(nextItem);
    5059 }
    5060 
    5061 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    5062 {
    5063  // Change this suballocation to be marked as free.
    5064  VmaSuballocation& suballoc = *suballocItem;
    5065  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5066  suballoc.hAllocation = VK_NULL_HANDLE;
    5067 
    5068  // Update totals.
    5069  ++m_FreeCount;
    5070  m_SumFreeSize += suballoc.size;
    5071 
    5072  // Merge with previous and/or next suballocation if it's also free.
    5073  bool mergeWithNext = false;
    5074  bool mergeWithPrev = false;
    5075 
    5076  VmaSuballocationList::iterator nextItem = suballocItem;
    5077  ++nextItem;
    5078  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    5079  {
    5080  mergeWithNext = true;
    5081  }
    5082 
    5083  VmaSuballocationList::iterator prevItem = suballocItem;
    5084  if(suballocItem != m_Suballocations.begin())
    5085  {
    5086  --prevItem;
    5087  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    5088  {
    5089  mergeWithPrev = true;
    5090  }
    5091  }
    5092 
    5093  if(mergeWithNext)
    5094  {
    5095  UnregisterFreeSuballocation(nextItem);
    5096  MergeFreeWithNext(suballocItem);
    5097  }
    5098 
    5099  if(mergeWithPrev)
    5100  {
    5101  UnregisterFreeSuballocation(prevItem);
    5102  MergeFreeWithNext(prevItem);
    5103  RegisterFreeSuballocation(prevItem);
    5104  return prevItem;
    5105  }
    5106  else
    5107  {
    5108  RegisterFreeSuballocation(suballocItem);
    5109  return suballocItem;
    5110  }
    5111 }
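          // The merge logic above preserves the invariant checked by Validate():
          // two adjacent free suballocations never coexist in the list. The returned
          // iterator points at the final merged free range, re-registered in
          // m_FreeSuballocationsBySize when large enough to qualify.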
    5112 
    5113 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    5114 {
    5115  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5116  VMA_ASSERT(item->size > 0);
    5117 
    5118  // You may want to enable this validation at the beginning or at the end of
    5119  // this function, depending on what you want to check.
    5120  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5121 
    5122  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5123  {
    5124  if(m_FreeSuballocationsBySize.empty())
    5125  {
    5126  m_FreeSuballocationsBySize.push_back(item);
    5127  }
    5128  else
    5129  {
    5130  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    5131  }
    5132  }
    5133 
    5134  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5135 }
    5136 
    5137 
    5138 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    5139 {
    5140  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5141  VMA_ASSERT(item->size > 0);
    5142 
    5143  // You may want to enable this validation at the beginning or at the end of
    5144  // this function, depending on what you want to check.
    5145  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5146 
    5147  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5148  {
    5149  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    5150  m_FreeSuballocationsBySize.data(),
    5151  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    5152  item,
    5153  VmaSuballocationItemSizeLess());
    5154  for(size_t index = it - m_FreeSuballocationsBySize.data();
    5155  index < m_FreeSuballocationsBySize.size();
    5156  ++index)
    5157  {
    5158  if(m_FreeSuballocationsBySize[index] == item)
    5159  {
    5160  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    5161  return;
    5162  }
    5163  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    5164  }
    5165  VMA_ASSERT(0 && "Not found.");
    5166  }
    5167 
    5168  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5169 }
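          // The binary search only locates the first entry of matching size; several
          // free suballocations may share that size, so the loop above then scans
          // forward through the run of equal-sized entries to find the exact iterator.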
    5170 
    5172 // class VmaDeviceMemoryBlock
    5173 
    5174 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    5175  m_MemoryTypeIndex(UINT32_MAX),
    5176  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
    5177  m_hMemory(VK_NULL_HANDLE),
    5178  m_PersistentMap(false),
    5179  m_pMappedData(VMA_NULL),
    5180  m_Metadata(hAllocator)
    5181 {
    5182 }
    5183 
    5184 void VmaDeviceMemoryBlock::Init(
    5185  uint32_t newMemoryTypeIndex,
    5186  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
    5187  VkDeviceMemory newMemory,
    5188  VkDeviceSize newSize,
    5189  bool persistentMap,
    5190  void* pMappedData)
    5191 {
    5192  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    5193 
    5194  m_MemoryTypeIndex = newMemoryTypeIndex;
    5195  m_BlockVectorType = newBlockVectorType;
    5196  m_hMemory = newMemory;
    5197  m_PersistentMap = persistentMap;
    5198  m_pMappedData = pMappedData;
    5199 
    5200  m_Metadata.Init(newSize);
    5201 }
    5202 
    5203 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    5204 {
    5205  // This is the most important assert in the entire library.
    5206  // Hitting it means you have a memory leak - unreleased VmaAllocation objects.
    5207  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    5208 
    5209  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    5210  if(m_pMappedData != VMA_NULL)
    5211  {
    5212  (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
    5213  m_pMappedData = VMA_NULL;
    5214  }
    5215 
    5216  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
    5217  m_hMemory = VK_NULL_HANDLE;
    5218 }
    5219 
    5220 bool VmaDeviceMemoryBlock::Validate() const
    5221 {
    5222  if((m_hMemory == VK_NULL_HANDLE) ||
    5223  (m_Metadata.GetSize() == 0))
    5224  {
    5225  return false;
    5226  }
    5227 
    5228  return m_Metadata.Validate();
    5229 }
    5230 
    5231 static void InitStatInfo(VmaStatInfo& outInfo)
    5232 {
    5233  memset(&outInfo, 0, sizeof(outInfo));
    5234  outInfo.allocationSizeMin = UINT64_MAX;
    5235  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5236 }
    5237 
    5238 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    5239 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    5240 {
    5241  inoutInfo.blockCount += srcInfo.blockCount;
    5242  inoutInfo.allocationCount += srcInfo.allocationCount;
    5243  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    5244  inoutInfo.usedBytes += srcInfo.usedBytes;
    5245  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    5246  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    5247  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    5248  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    5249  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    5250 }
    5251 
    5252 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    5253 {
    5254  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    5255  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    5256  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    5257  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    5258 }
    5259 
    5260 VmaPool_T::VmaPool_T(
    5261  VmaAllocator hAllocator,
    5262  const VmaPoolCreateInfo& createInfo) :
    5263  m_BlockVector(
    5264  hAllocator,
    5265  createInfo.memoryTypeIndex,
    5266  (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
    5267  VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    5268  createInfo.blockSize,
    5269  createInfo.minBlockCount,
    5270  createInfo.maxBlockCount,
    5271  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    5272  createInfo.frameInUseCount,
    5273  true) // isCustomPool
    5274 {
    5275 }
    5276 
    5277 VmaPool_T::~VmaPool_T()
    5278 {
    5279 }
    5280 
    5281 #if VMA_STATS_STRING_ENABLED
    5282 
    5283 #endif // #if VMA_STATS_STRING_ENABLED
    5284 
    5285 VmaBlockVector::VmaBlockVector(
    5286  VmaAllocator hAllocator,
    5287  uint32_t memoryTypeIndex,
    5288  VMA_BLOCK_VECTOR_TYPE blockVectorType,
    5289  VkDeviceSize preferredBlockSize,
    5290  size_t minBlockCount,
    5291  size_t maxBlockCount,
    5292  VkDeviceSize bufferImageGranularity,
    5293  uint32_t frameInUseCount,
    5294  bool isCustomPool) :
    5295  m_hAllocator(hAllocator),
    5296  m_MemoryTypeIndex(memoryTypeIndex),
    5297  m_BlockVectorType(blockVectorType),
    5298  m_PreferredBlockSize(preferredBlockSize),
    5299  m_MinBlockCount(minBlockCount),
    5300  m_MaxBlockCount(maxBlockCount),
    5301  m_BufferImageGranularity(bufferImageGranularity),
    5302  m_FrameInUseCount(frameInUseCount),
    5303  m_IsCustomPool(isCustomPool),
    5304  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    5305  m_HasEmptyBlock(false),
    5306  m_pDefragmentator(VMA_NULL)
    5307 {
    5308 }
    5309 
    5310 VmaBlockVector::~VmaBlockVector()
    5311 {
    5312  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    5313 
    5314  for(size_t i = m_Blocks.size(); i--; )
    5315  {
    5316  m_Blocks[i]->Destroy(m_hAllocator);
    5317  vma_delete(m_hAllocator, m_Blocks[i]);
    5318  }
    5319 }
    5320 
    5321 VkResult VmaBlockVector::CreateMinBlocks()
    5322 {
    5323  for(size_t i = 0; i < m_MinBlockCount; ++i)
    5324  {
    5325  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    5326  if(res != VK_SUCCESS)
    5327  {
    5328  return res;
    5329  }
    5330  }
    5331  return VK_SUCCESS;
    5332 }
    5333 
    5334 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    5335 {
    5336  pStats->size = 0;
    5337  pStats->unusedSize = 0;
    5338  pStats->allocationCount = 0;
    5339  pStats->unusedRangeCount = 0;
    5340  pStats->unusedRangeSizeMax = 0;
    5341 
    5342  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5343 
    5344  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5345  {
    5346  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5347  VMA_ASSERT(pBlock);
    5348  VMA_HEAVY_ASSERT(pBlock->Validate());
    5349  pBlock->m_Metadata.AddPoolStats(*pStats);
    5350  }
    5351 }
    5352 
    5353 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    5354 
    5355 VkResult VmaBlockVector::Allocate(
    5356  VmaPool hCurrentPool,
    5357  uint32_t currentFrameIndex,
    5358  const VkMemoryRequirements& vkMemReq,
    5359  const VmaAllocationCreateInfo& createInfo,
    5360  VmaSuballocationType suballocType,
    5361  VmaAllocation* pAllocation)
    5362 {
    5363  // Validate flags.
    5364  if(createInfo.pool != VK_NULL_HANDLE &&
    5365  ((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) != (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
    5366  {
    5367  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
    5368  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    5369  }
    5370 
    5371  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5372 
    5373  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    5374  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    5375  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    5376  {
    5377  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    5378  VMA_ASSERT(pCurrBlock);
    5379  VmaAllocationRequest currRequest = {};
    5380  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    5381  currentFrameIndex,
    5382  m_FrameInUseCount,
    5383  m_BufferImageGranularity,
    5384  vkMemReq.size,
    5385  vkMemReq.alignment,
    5386  suballocType,
    5387  false, // canMakeOtherLost
    5388  &currRequest))
    5389  {
    5390  // Allocate from pCurrBlock.
    5391  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    5392 
    5393  // We no longer have an empty block.
    5394  if(pCurrBlock->m_Metadata.IsEmpty())
    5395  {
    5396  m_HasEmptyBlock = false;
    5397  }
    5398 
    5399  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5400  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
    5401  (*pAllocation)->InitBlockAllocation(
    5402  hCurrentPool,
    5403  pCurrBlock,
    5404  currRequest.offset,
    5405  vkMemReq.alignment,
    5406  vkMemReq.size,
    5407  suballocType,
    5408  createInfo.pUserData,
    5409  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5410  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
    5411  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    5412  return VK_SUCCESS;
    5413  }
    5414  }
    5415 
    5416  const bool canCreateNewBlock =
    5417  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    5418  (m_Blocks.size() < m_MaxBlockCount);
    5419 
    5420  // 2. Try to create new block.
    5421  if(canCreateNewBlock)
    5422  {
    5423  // 2.1. Start with full preferredBlockSize.
    5424  VkDeviceSize blockSize = m_PreferredBlockSize;
    5425  size_t newBlockIndex = 0;
    5426  VkResult res = CreateBlock(blockSize, &newBlockIndex);
    5427  // Allocating blocks of other sizes is allowed only in default pools.
    5428  // In custom pools, the block size is fixed.
    5429  if(res < 0 && m_IsCustomPool == false)
    5430  {
    5431  // 2.2. Try half the size.
    5432  blockSize /= 2;
    5433  if(blockSize >= vkMemReq.size)
    5434  {
    5435  res = CreateBlock(blockSize, &newBlockIndex);
    5436  if(res < 0)
    5437  {
    5438  // 2.3. Try quarter the size.
    5439  blockSize /= 2;
    5440  if(blockSize >= vkMemReq.size)
    5441  {
    5442  res = CreateBlock(blockSize, &newBlockIndex);
    5443  }
    5444  }
    5445  }
    5446  }
    5447  if(res == VK_SUCCESS)
    5448  {
    5449  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    5450  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
    5451 
    5452  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
    5453  VmaAllocationRequest allocRequest;
    5454  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
    5455  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5456  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
    5457  (*pAllocation)->InitBlockAllocation(
    5458  hCurrentPool,
    5459  pBlock,
    5460  allocRequest.offset,
    5461  vkMemReq.alignment,
    5462  vkMemReq.size,
    5463  suballocType,
    5464  createInfo.pUserData,
    5465  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5466  VMA_HEAVY_ASSERT(pBlock->Validate());
    5467  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
    5468 
    5469  return VK_SUCCESS;
    5470  }
    5471  }
    5472 
    5473  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    5474 
    5475  // 3. Try to allocate from existing blocks, making other allocations lost.
    5476  if(canMakeOtherLost)
    5477  {
    5478  uint32_t tryIndex = 0;
    5479  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    5480  {
    5481  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    5482  VmaAllocationRequest bestRequest = {};
    5483  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    5484 
    5485  // 1. Search existing allocations.
    5486  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    5487  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    5488  {
    5489  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    5490  VMA_ASSERT(pCurrBlock);
    5491  VmaAllocationRequest currRequest = {};
    5492  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    5493  currentFrameIndex,
    5494  m_FrameInUseCount,
    5495  m_BufferImageGranularity,
    5496  vkMemReq.size,
    5497  vkMemReq.alignment,
    5498  suballocType,
    5499  canMakeOtherLost,
    5500  &currRequest))
    5501  {
    5502  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    5503  if(pBestRequestBlock == VMA_NULL ||
    5504  currRequestCost < bestRequestCost)
    5505  {
    5506  pBestRequestBlock = pCurrBlock;
    5507  bestRequest = currRequest;
    5508  bestRequestCost = currRequestCost;
    5509 
    5510  if(bestRequestCost == 0)
    5511  {
    5512  break;
    5513  }
    5514  }
    5515  }
    5516  }
    5517 
    5518  if(pBestRequestBlock != VMA_NULL)
    5519  {
    5520  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
    5521  currentFrameIndex,
    5522  m_FrameInUseCount,
    5523  &bestRequest))
    5524  {
    5525  // We no longer have an empty block.
    5526  if(pBestRequestBlock->m_Metadata.IsEmpty())
    5527  {
    5528  m_HasEmptyBlock = false;
    5529  }
    5530  // Allocate from this pBlock.
    5531  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5532  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
    5533  (*pAllocation)->InitBlockAllocation(
    5534  hCurrentPool,
    5535  pBestRequestBlock,
    5536  bestRequest.offset,
    5537  vkMemReq.alignment,
    5538  vkMemReq.size,
    5539  suballocType,
    5540  createInfo.pUserData,
    5541  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5542  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    5543  VMA_DEBUG_LOG(" Returned from existing block");
    5544  return VK_SUCCESS;
    5545  }
    5546  // else: Some allocations must have been touched while we are here. Next try.
    5547  }
    5548  else
    5549  {
    5550  // Could not find place in any of the blocks - break outer loop.
    5551  break;
    5552  }
    5553  }
    5554  /* Maximum number of tries exceeded - a very unlike event when many other
    5555  threads are simultaneously touching allocations making it impossible to make
    5556  lost at the same time as we try to allocate. */
    5557  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    5558  {
    5559  return VK_ERROR_TOO_MANY_OBJECTS;
    5560  }
    5561  }
    5562 
    5563  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    5564 }
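          // Apart from the flag-mismatch check at the top, allocation fails with
          // VK_ERROR_OUT_OF_DEVICE_MEMORY when no existing block can serve the
          // request and a new block cannot be created, and with
          // VK_ERROR_TOO_MANY_OBJECTS when the make-lost retry loop above is
          // exhausted by concurrent activity on the same blocks.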
    5565 
    5566 void VmaBlockVector::Free(
    5567  VmaAllocation hAllocation)
    5568 {
    5569  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    5570 
    5571  // Scope for lock.
    5572  {
    5573  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5574 
    5575  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    5576 
    5577  pBlock->m_Metadata.Free(hAllocation);
    5578  VMA_HEAVY_ASSERT(pBlock->Validate());
    5579 
    5580  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    5581 
    5582  // pBlock became empty after this deallocation.
    5583  if(pBlock->m_Metadata.IsEmpty())
    5584  {
    5585  // We already have an empty block - we don't want two, so delete this one.
    5586  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    5587  {
    5588  pBlockToDelete = pBlock;
    5589  Remove(pBlock);
    5590  }
    5591  // We now have our first empty block.
    5592  else
    5593  {
    5594  m_HasEmptyBlock = true;
    5595  }
    5596  }
    5597  // pBlock didn't become empty, but we have another empty block - find and free that one.
    5598  // (This is optional - a heuristic.)
    5599  else if(m_HasEmptyBlock)
    5600  {
    5601  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    5602  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    5603  {
    5604  pBlockToDelete = pLastBlock;
    5605  m_Blocks.pop_back();
    5606  m_HasEmptyBlock = false;
    5607  }
    5608  }
    5609 
    5610  IncrementallySortBlocks();
    5611  }
    5612 
    5613  // Destruction of an empty block. Deferred until this point, outside of the mutex
    5614  // lock, for performance reasons.
    5615  if(pBlockToDelete != VMA_NULL)
    5616  {
    5617  VMA_DEBUG_LOG(" Deleted empty allocation");
    5618  pBlockToDelete->Destroy(m_hAllocator);
    5619  vma_delete(m_hAllocator, pBlockToDelete);
    5620  }
    5621 }
    5622 
    5623 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    5624 {
    5625  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5626  {
    5627  if(m_Blocks[blockIndex] == pBlock)
    5628  {
    5629  VmaVectorRemove(m_Blocks, blockIndex);
    5630  return;
    5631  }
    5632  }
    5633  VMA_ASSERT(0);
    5634 }
    5635 
    5636 void VmaBlockVector::IncrementallySortBlocks()
    5637 {
    5638  // Bubble sort only until first swap.
    5639  for(size_t i = 1; i < m_Blocks.size(); ++i)
    5640  {
    5641  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
    5642  {
    5643  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    5644  return;
    5645  }
    5646  }
    5647 }
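          // Performing at most one swap per call keeps m_Blocks approximately sorted
          // by sum of free space, ascending, so the forward-order searches in
          // Allocate() tend to fill the most occupied blocks first - at a fraction
          // of the cost of a full sort on every free.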
    5648 
    5649 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    5650 {
    5651  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    5652  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    5653  allocInfo.allocationSize = blockSize;
    5654  VkDeviceMemory mem = VK_NULL_HANDLE;
    5655  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    5656  if(res < 0)
    5657  {
    5658  return res;
    5659  }
    5660 
    5661  // New VkDeviceMemory successfully created.
    5662 
    5663  // Map memory if needed.
    5664  void* pMappedData = VMA_NULL;
    5665  const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
    5666  if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
    5667  {
    5668  res = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
    5669  m_hAllocator->m_hDevice,
    5670  mem,
    5671  0,
    5672  VK_WHOLE_SIZE,
    5673  0,
    5674  &pMappedData);
    5675  if(res < 0)
    5676  {
    5677  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    5678  m_hAllocator->FreeVulkanMemory(m_MemoryTypeIndex, blockSize, mem);
    5679  return res;
    5680  }
    5681  }
    5682 
    5683  // Create a new block object for it.
    5684  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    5685  pBlock->Init(
    5686  m_MemoryTypeIndex,
    5687  (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
    5688  mem,
    5689  allocInfo.allocationSize,
    5690  persistentMap,
    5691  pMappedData);
    5692 
    5693  m_Blocks.push_back(pBlock);
    5694  if(pNewBlockIndex != VMA_NULL)
    5695  {
    5696  *pNewBlockIndex = m_Blocks.size() - 1;
    5697  }
    5698 
    5699  return VK_SUCCESS;
    5700 }
    5701 
    5702 #if VMA_STATS_STRING_ENABLED
    5703 
    5704 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    5705 {
    5706  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5707 
    5708  json.BeginObject();
    5709 
    5710  if(m_IsCustomPool)
    5711  {
    5712  json.WriteString("MemoryTypeIndex");
    5713  json.WriteNumber(m_MemoryTypeIndex);
    5714 
    5715  if(m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
    5716  {
    5717  json.WriteString("Mapped");
    5718  json.WriteBool(true);
    5719  }
    5720 
    5721  json.WriteString("BlockSize");
    5722  json.WriteNumber(m_PreferredBlockSize);
    5723 
    5724  json.WriteString("BlockCount");
    5725  json.BeginObject(true);
    5726  if(m_MinBlockCount > 0)
    5727  {
    5728  json.WriteString("Min");
    5729  json.WriteNumber(m_MinBlockCount);
    5730  }
    5731  if(m_MaxBlockCount < SIZE_MAX)
    5732  {
    5733  json.WriteString("Max");
    5734  json.WriteNumber(m_MaxBlockCount);
    5735  }
    5736  json.WriteString("Cur");
    5737  json.WriteNumber(m_Blocks.size());
    5738  json.EndObject();
    5739 
    5740  if(m_FrameInUseCount > 0)
    5741  {
    5742  json.WriteString("FrameInUseCount");
    5743  json.WriteNumber(m_FrameInUseCount);
    5744  }
    5745  }
    5746  else
    5747  {
    5748  json.WriteString("PreferredBlockSize");
    5749  json.WriteNumber(m_PreferredBlockSize);
    5750  }
    5751 
    5752  json.WriteString("Blocks");
    5753  json.BeginArray();
    5754  for(size_t i = 0; i < m_Blocks.size(); ++i)
    5755  {
    5756  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
    5757  }
    5758  json.EndArray();
    5759 
    5760  json.EndObject();
    5761 }
    5762 
    5763 #endif // #if VMA_STATS_STRING_ENABLED
    5764 
    5765 void VmaBlockVector::UnmapPersistentlyMappedMemory()
    5766 {
    5767  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5768 
    5769  for(size_t i = m_Blocks.size(); i--; )
    5770  {
    5771  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
    5772  if(pBlock->m_pMappedData != VMA_NULL)
    5773  {
    5774  VMA_ASSERT(pBlock->m_PersistentMap != false);
    5775  (m_hAllocator->GetVulkanFunctions().vkUnmapMemory)(m_hAllocator->m_hDevice, pBlock->m_hMemory);
    5776  pBlock->m_pMappedData = VMA_NULL;
    5777  }
    5778  }
    5779 }
    5780 
    5781 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
    5782 {
    5783  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5784 
    5785  VkResult finalResult = VK_SUCCESS;
    5786  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    5787  {
    5788  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
    5789  if(pBlock->m_PersistentMap)
    5790  {
    5791  VMA_ASSERT(pBlock->m_pMappedData == VMA_NULL);
    5792  VkResult localResult = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
    5793  m_hAllocator->m_hDevice,
    5794  pBlock->m_hMemory,
    5795  0,
    5796  VK_WHOLE_SIZE,
    5797  0,
    5798  &pBlock->m_pMappedData);
    5799  if(localResult != VK_SUCCESS)
    5800  {
    5801  finalResult = localResult;
    5802  }
    5803  }
    5804  }
    5805  return finalResult;
    5806 }
    5807 
    5808 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    5809  VmaAllocator hAllocator,
    5810  uint32_t currentFrameIndex)
    5811 {
    5812  if(m_pDefragmentator == VMA_NULL)
    5813  {
    5814  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    5815  hAllocator,
    5816  this,
    5817  currentFrameIndex);
    5818  }
    5819 
    5820  return m_pDefragmentator;
    5821 }
    5822 
    5823 VkResult VmaBlockVector::Defragment(
    5824  VmaDefragmentationStats* pDefragmentationStats,
    5825  VkDeviceSize& maxBytesToMove,
    5826  uint32_t& maxAllocationsToMove)
    5827 {
    5828  if(m_pDefragmentator == VMA_NULL)
    5829  {
    5830  return VK_SUCCESS;
    5831  }
    5832 
    5833  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5834 
    5835  // Defragment.
    5836  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
    5837 
    5838  // Accumulate statistics.
    5839  if(pDefragmentationStats != VMA_NULL)
    5840  {
    5841  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
    5842  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
    5843  pDefragmentationStats->bytesMoved += bytesMoved;
    5844  pDefragmentationStats->allocationsMoved += allocationsMoved;
    5845  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    5846  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    5847  maxBytesToMove -= bytesMoved;
    5848  maxAllocationsToMove -= allocationsMoved;
    5849  }
    5850 
    5851  // Free empty blocks.
    5852  m_HasEmptyBlock = false;
    5853  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    5854  {
    5855  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    5856  if(pBlock->m_Metadata.IsEmpty())
    5857  {
    5858  if(m_Blocks.size() > m_MinBlockCount)
    5859  {
    5860  if(pDefragmentationStats != VMA_NULL)
    5861  {
    5862  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    5863  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
    5864  }
    5865 
    5866  VmaVectorRemove(m_Blocks, blockIndex);
    5867  pBlock->Destroy(m_hAllocator);
    5868  vma_delete(m_hAllocator, pBlock);
    5869  }
    5870  else
    5871  {
    5872  m_HasEmptyBlock = true;
    5873  }
    5874  }
    5875  }
    5876 
    5877  return result;
    5878 }
    5879 
    5880 void VmaBlockVector::DestroyDefragmentator()
    5881 {
    5882  if(m_pDefragmentator != VMA_NULL)
    5883  {
    5884  vma_delete(m_hAllocator, m_pDefragmentator);
    5885  m_pDefragmentator = VMA_NULL;
    5886  }
    5887 }
    5888 
    5889 void VmaBlockVector::MakePoolAllocationsLost(
    5890  uint32_t currentFrameIndex,
    5891  size_t* pLostAllocationCount)
    5892 {
    5893  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5894 
    5895  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5896  {
    5897  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5898  VMA_ASSERT(pBlock);
    5899  pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    5900  }
    5901 }
    5902 
    5903 void VmaBlockVector::AddStats(VmaStats* pStats)
    5904 {
    5905  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    5906  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    5907 
    5908  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5909 
    5910  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5911  {
    5912  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5913  VMA_ASSERT(pBlock);
    5914  VMA_HEAVY_ASSERT(pBlock->Validate());
    5915  VmaStatInfo allocationStatInfo;
    5916  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
    5917  VmaAddStatInfo(pStats->total, allocationStatInfo);
    5918  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    5919  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    5920  }
    5921 }
    5922 
5923 ////////////////////////////////////////////////////////////////////////////////
5924 // VmaDefragmentator members definition
    5925 
    5926 VmaDefragmentator::VmaDefragmentator(
    5927  VmaAllocator hAllocator,
    5928  VmaBlockVector* pBlockVector,
    5929  uint32_t currentFrameIndex) :
    5930  m_hAllocator(hAllocator),
    5931  m_pBlockVector(pBlockVector),
    5932  m_CurrentFrameIndex(currentFrameIndex),
    5933  m_BytesMoved(0),
    5934  m_AllocationsMoved(0),
    5935  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    5936  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    5937 {
    5938 }
    5939 
    5940 VmaDefragmentator::~VmaDefragmentator()
    5941 {
    5942  for(size_t i = m_Blocks.size(); i--; )
    5943  {
    5944  vma_delete(m_hAllocator, m_Blocks[i]);
    5945  }
    5946 }
    5947 
    5948 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    5949 {
    5950  AllocationInfo allocInfo;
    5951  allocInfo.m_hAllocation = hAlloc;
    5952  allocInfo.m_pChanged = pChanged;
    5953  m_Allocations.push_back(allocInfo);
    5954 }
    5955 
    5956 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    5957 {
    5958  // It has already been mapped for defragmentation.
    5959  if(m_pMappedDataForDefragmentation)
    5960  {
    5961  *ppMappedData = m_pMappedDataForDefragmentation;
    5962  return VK_SUCCESS;
    5963  }
    5964 
    5965  // It is persistently mapped.
    5966  if(m_pBlock->m_PersistentMap)
    5967  {
    5968  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
    5969  *ppMappedData = m_pBlock->m_pMappedData;
    5970  return VK_SUCCESS;
    5971  }
    5972 
    5973  // Map on first usage.
    5974  VkResult res = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    5975  hAllocator->m_hDevice,
    5976  m_pBlock->m_hMemory,
    5977  0,
    5978  VK_WHOLE_SIZE,
    5979  0,
    5980  &m_pMappedDataForDefragmentation);
    5981  *ppMappedData = m_pMappedDataForDefragmentation;
    5982  return res;
    5983 }
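// Mapping strategy above (editor's addition): reuse, in order of preference,
// (1) a mapping already created for this defragmentation, (2) the block's
// persistent mapping, and only as a last resort (3) a fresh vkMapMemory,
// which Unmap() below releases when the defragmentation finishes.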
    5984 
    5985 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    5986 {
    5987  if(m_pMappedDataForDefragmentation != VMA_NULL)
    5988  {
 5989  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_pBlock->m_hMemory);
    5990  }
    5991 }
    5992 
    5993 VkResult VmaDefragmentator::DefragmentRound(
    5994  VkDeviceSize maxBytesToMove,
    5995  uint32_t maxAllocationsToMove)
    5996 {
    5997  if(m_Blocks.empty())
    5998  {
    5999  return VK_SUCCESS;
    6000  }
    6001 
    6002  size_t srcBlockIndex = m_Blocks.size() - 1;
    6003  size_t srcAllocIndex = SIZE_MAX;
    6004  for(;;)
    6005  {
    6006  // 1. Find next allocation to move.
    6007  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    6008  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
    6009  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    6010  {
    6011  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    6012  {
    6013  // Finished: no more allocations to process.
    6014  if(srcBlockIndex == 0)
    6015  {
    6016  return VK_SUCCESS;
    6017  }
    6018  else
    6019  {
    6020  --srcBlockIndex;
    6021  srcAllocIndex = SIZE_MAX;
    6022  }
    6023  }
    6024  else
    6025  {
    6026  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    6027  }
    6028  }
    6029 
    6030  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    6031  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    6032 
    6033  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    6034  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    6035  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    6036  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    6037 
    6038  // 2. Try to find new place for this allocation in preceding or current block.
    6039  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    6040  {
    6041  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    6042  VmaAllocationRequest dstAllocRequest;
    6043  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
    6044  m_CurrentFrameIndex,
    6045  m_pBlockVector->GetFrameInUseCount(),
    6046  m_pBlockVector->GetBufferImageGranularity(),
    6047  size,
    6048  alignment,
    6049  suballocType,
    6050  false, // canMakeOtherLost
    6051  &dstAllocRequest) &&
    6052  MoveMakesSense(
    6053  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    6054  {
    6055  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    6056 
    6057  // Reached limit on number of allocations or bytes to move.
    6058  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    6059  (m_BytesMoved + size > maxBytesToMove))
    6060  {
    6061  return VK_INCOMPLETE;
    6062  }
    6063 
    6064  void* pDstMappedData = VMA_NULL;
    6065  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    6066  if(res != VK_SUCCESS)
    6067  {
    6068  return res;
    6069  }
    6070 
    6071  void* pSrcMappedData = VMA_NULL;
    6072  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    6073  if(res != VK_SUCCESS)
    6074  {
    6075  return res;
    6076  }
    6077 
    6078  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    6079  memcpy(
    6080  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    6081  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    6082  static_cast<size_t>(size));
    6083 
    6084  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
    6085  pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);
    6086 
    6087  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    6088 
    6089  if(allocInfo.m_pChanged != VMA_NULL)
    6090  {
    6091  *allocInfo.m_pChanged = VK_TRUE;
    6092  }
    6093 
    6094  ++m_AllocationsMoved;
    6095  m_BytesMoved += size;
    6096 
    6097  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    6098 
    6099  break;
    6100  }
    6101  }
    6102 
 6103  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
    6104 
    6105  if(srcAllocIndex > 0)
    6106  {
    6107  --srcAllocIndex;
    6108  }
    6109  else
    6110  {
    6111  if(srcBlockIndex > 0)
    6112  {
    6113  --srcBlockIndex;
    6114  srcAllocIndex = SIZE_MAX;
    6115  }
    6116  else
    6117  {
    6118  return VK_SUCCESS;
    6119  }
    6120  }
    6121  }
    6122 }
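// Worked trace of DefragmentRound (editor's addition, hypothetical layout):
// with blocks sorted B0 (most "destination") .. B2 (most "source"):
//   1. Take the largest remaining allocation of B2.
//   2. Try to place it in B0, then B1, then earlier in B2 itself; the first
//      fit that passes MoveMakesSense() wins.
//   3. memcpy the contents between the mapped blocks, Alloc() in the
//      destination metadata, Free() in the source, and rewrite the
//      allocation's block/offset via ChangeBlockAllocation().
// The round stops with VK_INCOMPLETE as soon as a move would exceed
// maxBytesToMove or maxAllocationsToMove.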
    6123 
    6124 VkResult VmaDefragmentator::Defragment(
    6125  VkDeviceSize maxBytesToMove,
    6126  uint32_t maxAllocationsToMove)
    6127 {
    6128  if(m_Allocations.empty())
    6129  {
    6130  return VK_SUCCESS;
    6131  }
    6132 
    6133  // Create block info for each block.
    6134  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    6135  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6136  {
    6137  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    6138  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    6139  m_Blocks.push_back(pBlockInfo);
    6140  }
    6141 
    6142  // Sort them by m_pBlock pointer value.
    6143  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    6144 
 6145  // Move allocation infos from m_Allocations to the m_Allocations vector of the appropriate BlockInfo in m_Blocks.
 6146  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
 6147  {
 6148  AllocationInfo& allocInfo = m_Allocations[allocIndex];
 6149  // Now that we are inside VmaBlockVector::m_Mutex, we can make the final check whether this allocation was not lost.
    6150  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    6151  {
    6152  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
    6153  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    6154  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    6155  {
    6156  (*it)->m_Allocations.push_back(allocInfo);
    6157  }
    6158  else
    6159  {
    6160  VMA_ASSERT(0);
    6161  }
    6162  }
    6163  }
    6164  m_Allocations.clear();
    6165 
    6166  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6167  {
    6168  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    6169  pBlockInfo->CalcHasNonMovableAllocations();
    6170  pBlockInfo->SortAllocationsBySizeDescecnding();
    6171  }
    6172 
 6173  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    6174  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    6175 
    6176  // Execute defragmentation rounds (the main part).
    6177  VkResult result = VK_SUCCESS;
    6178  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    6179  {
    6180  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    6181  }
    6182 
    6183  // Unmap blocks that were mapped for defragmentation.
    6184  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6185  {
    6186  m_Blocks[blockIndex]->Unmap(m_hAllocator);
    6187  }
    6188 
    6189  return result;
    6190 }
    6191 
    6192 bool VmaDefragmentator::MoveMakesSense(
    6193  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6194  size_t srcBlockIndex, VkDeviceSize srcOffset)
    6195 {
    6196  if(dstBlockIndex < srcBlockIndex)
    6197  {
    6198  return true;
    6199  }
    6200  if(dstBlockIndex > srcBlockIndex)
    6201  {
    6202  return false;
    6203  }
    6204  if(dstOffset < srcOffset)
    6205  {
    6206  return true;
    6207  }
    6208  return false;
    6209 }
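// The predicate above, tabulated (editor's addition):
//   dstBlockIndex < srcBlockIndex           -> true  (toward front blocks)
//   dstBlockIndex > srcBlockIndex           -> false
//   same block, dstOffset <  srcOffset      -> true  (toward block start)
//   same block, dstOffset >= srcOffset      -> false
// Every accepted move is therefore strictly "downhill", which guarantees that
// a defragmentation round terminates.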
    6210 
6211 ////////////////////////////////////////////////////////////////////////////////
6212 // VmaAllocator_T
    6213 
    6214 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    6215  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    6216  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    6217  m_PhysicalDevice(pCreateInfo->physicalDevice),
    6218  m_hDevice(pCreateInfo->device),
    6219  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    6220  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    6221  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    6222  m_UnmapPersistentlyMappedMemoryCounter(0),
    6223  m_PreferredLargeHeapBlockSize(0),
    6224  m_PreferredSmallHeapBlockSize(0),
    6225  m_CurrentFrameIndex(0),
    6226  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
    6227 {
    6228  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    6229 
 6230  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    6231  memset(&m_MemProps, 0, sizeof(m_MemProps));
    6232  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    6233 
    6234  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    6235  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    6236 
    6237  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    6238  {
    6239  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    6240  }
    6241 
    6242  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    6243  {
    6244  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    6245  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    6246  }
    6247 
    6248  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    6249 
    6250  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    6251  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    6252 
    6253  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    6254  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    6255  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
    6256  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
    6257 
    6258  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    6259  {
    6260  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    6261  {
    6262  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    6263  if(limit != VK_WHOLE_SIZE)
    6264  {
    6265  m_HeapSizeLimit[heapIndex] = limit;
    6266  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    6267  {
    6268  m_MemProps.memoryHeaps[heapIndex].size = limit;
    6269  }
    6270  }
    6271  }
    6272  }
    6273 
    6274  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6275  {
    6276  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    6277 
    6278  for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
    6279  {
    6280  m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
    6281  this,
    6282  memTypeIndex,
    6283  static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
    6284  preferredBlockSize,
    6285  0,
    6286  SIZE_MAX,
    6287  GetBufferImageGranularity(),
    6288  pCreateInfo->frameInUseCount,
    6289  false); // isCustomPool
    6290  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
 6291  // because minBlockCount is 0.
    6292  m_pDedicatedAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    6293  }
    6294  }
    6295 }
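// Illustrative sketch (editor's addition, not part of the library): capping
// heap 0 at 256 MiB through VmaAllocatorCreateInfo::pHeapSizeLimit, which the
// constructor above consumes. `physicalDevice` and `device` are assumed to be
// created by the application.
static VkResult ExampleCreateAllocatorWithHeapLimit(
    VkPhysicalDevice physicalDevice, VkDevice device, VmaAllocator* pAllocator)
{
    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapLimits[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE means "no limit".
    }
    heapLimits[0] = 256ull * 1024 * 1024;

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapLimits; // read only during creation
    return vmaCreateAllocator(&allocatorInfo, pAllocator);
}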
    6296 
    6297 VmaAllocator_T::~VmaAllocator_T()
    6298 {
    6299  VMA_ASSERT(m_Pools.empty());
    6300 
    6301  for(size_t i = GetMemoryTypeCount(); i--; )
    6302  {
    6303  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
    6304  {
    6305  vma_delete(this, m_pDedicatedAllocations[i][j]);
    6306  vma_delete(this, m_pBlockVectors[i][j]);
    6307  }
    6308  }
    6309 }
    6310 
    6311 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    6312 {
    6313 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    6314  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    6315  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    6316  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    6317  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    6318  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    6319  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    6320  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    6321  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    6322  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    6323  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    6324  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    6325  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    6326  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    6327  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    6328  // Ignoring vkGetBufferMemoryRequirements2KHR.
    6329  // Ignoring vkGetImageMemoryRequirements2KHR.
    6330 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    6331 
    6332 #define VMA_COPY_IF_NOT_NULL(funcName) \
    6333  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    6334 
    6335  if(pVulkanFunctions != VMA_NULL)
    6336  {
    6337  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    6338  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    6339  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    6340  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    6341  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    6342  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    6343  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    6344  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    6345  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    6346  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    6347  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    6348  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    6349  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    6350  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    6351  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    6352  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    6353  }
    6354 
    6355 #undef VMA_COPY_IF_NOT_NULL
    6356 
    6357  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    6358  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    6359  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    6360  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    6361  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    6362  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    6363  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    6364  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    6365  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    6366  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    6367  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    6368  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    6369  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    6370  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    6371  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    6372  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    6373  if(m_UseKhrDedicatedAllocation)
    6374  {
    6375  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    6376  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    6377  }
    6378 }
    6379 
    6380 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    6381 {
    6382  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6383  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    6384  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
    6385  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
    6386 }
    6387 
    6388 VkResult VmaAllocator_T::AllocateMemoryOfType(
    6389  const VkMemoryRequirements& vkMemReq,
    6390  bool dedicatedAllocation,
    6391  VkBuffer dedicatedBuffer,
    6392  VkImage dedicatedImage,
    6393  const VmaAllocationCreateInfo& createInfo,
    6394  uint32_t memTypeIndex,
    6395  VmaSuballocationType suballocType,
    6396  VmaAllocation* pAllocation)
    6397 {
    6398  VMA_ASSERT(pAllocation != VMA_NULL);
    6399  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    6400 
    6401  uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
    6402  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6403  VMA_ASSERT(blockVector);
    6404 
    6405  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    6406 
    6407  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    6408  bool preferDedicatedMemory =
    6409  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    6410  dedicatedAllocation ||
 6411  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
    6412  vkMemReq.size > preferredBlockSize / 2;
    6413 
    6414  if(preferDedicatedMemory &&
    6415  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    6416  finalCreateInfo.pool == VK_NULL_HANDLE)
 6417  {
 6418  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
 6419  }
    6420 
    6421  // If memory type is not HOST_VISIBLE, disable PERSISTENT_MAP.
    6422  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 &&
    6423  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    6424  {
    6425  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;
    6426  }
    6427 
    6428  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    6429  {
    6430  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6431  {
    6432  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6433  }
    6434  else
    6435  {
    6436  return AllocateDedicatedMemory(
    6437  vkMemReq.size,
    6438  suballocType,
    6439  memTypeIndex,
    6440  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
    6441  finalCreateInfo.pUserData,
    6442  dedicatedBuffer,
    6443  dedicatedImage,
    6444  pAllocation);
    6445  }
    6446  }
    6447  else
    6448  {
    6449  VkResult res = blockVector->Allocate(
    6450  VK_NULL_HANDLE, // hCurrentPool
    6451  m_CurrentFrameIndex.load(),
    6452  vkMemReq,
    6453  finalCreateInfo,
    6454  suballocType,
    6455  pAllocation);
    6456  if(res == VK_SUCCESS)
    6457  {
    6458  return res;
    6459  }
    6460 
 6461  // Try dedicated memory.
    6462  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6463  {
    6464  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6465  }
    6466  else
    6467  {
    6468  res = AllocateDedicatedMemory(
    6469  vkMemReq.size,
    6470  suballocType,
    6471  memTypeIndex,
    6472  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
    6473  finalCreateInfo.pUserData,
    6474  dedicatedBuffer,
    6475  dedicatedImage,
    6476  pAllocation);
    6477  if(res == VK_SUCCESS)
    6478  {
 6479  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
    6480  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    6481  return VK_SUCCESS;
    6482  }
    6483  else
    6484  {
    6485  // Everything failed: Return error code.
    6486  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    6487  return res;
    6488  }
    6489  }
    6490  }
    6491 }
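// Decision cascade implemented above (editor's addition):
//   1. Prefer dedicated memory when the caller or the driver asks for it, or
//      when the request exceeds half of the preferred block size.
//   2. Otherwise suballocate from this memory type's block vector.
//   3. If suballocation fails and NEVER_ALLOCATE is not set, fall back to a
//      dedicated VkDeviceMemory allocation as a last resort.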
    6492 
    6493 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    6494  VkDeviceSize size,
    6495  VmaSuballocationType suballocType,
    6496  uint32_t memTypeIndex,
    6497  bool map,
    6498  void* pUserData,
    6499  VkBuffer dedicatedBuffer,
    6500  VkImage dedicatedImage,
    6501  VmaAllocation* pAllocation)
    6502 {
    6503  VMA_ASSERT(pAllocation);
    6504 
    6505  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    6506  allocInfo.memoryTypeIndex = memTypeIndex;
    6507  allocInfo.allocationSize = size;
    6508 
    6509  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    6510  if(m_UseKhrDedicatedAllocation)
    6511  {
    6512  if(dedicatedBuffer != VK_NULL_HANDLE)
    6513  {
    6514  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    6515  dedicatedAllocInfo.buffer = dedicatedBuffer;
    6516  allocInfo.pNext = &dedicatedAllocInfo;
    6517  }
    6518  else if(dedicatedImage != VK_NULL_HANDLE)
    6519  {
    6520  dedicatedAllocInfo.image = dedicatedImage;
    6521  allocInfo.pNext = &dedicatedAllocInfo;
    6522  }
    6523  }
    6524 
    6525  // Allocate VkDeviceMemory.
    6526  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    6527  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    6528  if(res < 0)
    6529  {
    6530  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    6531  return res;
    6532  }
    6533 
    6534  void* pMappedData = nullptr;
    6535  if(map)
    6536  {
    6537  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
    6538  {
    6539  res = (*m_VulkanFunctions.vkMapMemory)(
    6540  m_hDevice,
    6541  hMemory,
    6542  0,
    6543  VK_WHOLE_SIZE,
    6544  0,
    6545  &pMappedData);
    6546  if(res < 0)
    6547  {
    6548  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    6549  FreeVulkanMemory(memTypeIndex, size, hMemory);
    6550  return res;
    6551  }
    6552  }
    6553  }
    6554 
    6555  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
    6556  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
    6557 
    6558  // Register it in m_pDedicatedAllocations.
    6559  {
    6560  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6561  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
    6562  VMA_ASSERT(pDedicatedAllocations);
    6563  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    6564  }
    6565 
    6566  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
    6567 
    6568  return VK_SUCCESS;
    6569 }
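// The pNext chaining above is the core of VK_KHR_dedicated_allocation. The
// raw-Vulkan equivalent for a buffer would be (editor's sketch; `device`,
// `buffer`, `memTypeIndex` and `size` assumed valid):
//
//     VkMemoryDedicatedAllocateInfoKHR dedicatedInfo =
//         { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
//     dedicatedInfo.buffer = buffer;
//     VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
//     allocInfo.pNext = &dedicatedInfo;
//     allocInfo.allocationSize = size;
//     allocInfo.memoryTypeIndex = memTypeIndex;
//     VkDeviceMemory memory = VK_NULL_HANDLE;
//     vkAllocateMemory(device, &allocInfo, VMA_NULL, &memory);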
    6570 
    6571 void VmaAllocator_T::GetBufferMemoryRequirements(
    6572  VkBuffer hBuffer,
    6573  VkMemoryRequirements& memReq,
    6574  bool& requiresDedicatedAllocation,
    6575  bool& prefersDedicatedAllocation) const
    6576 {
    6577  if(m_UseKhrDedicatedAllocation)
    6578  {
    6579  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    6580  memReqInfo.buffer = hBuffer;
    6581 
    6582  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    6583 
    6584  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    6585  memReq2.pNext = &memDedicatedReq;
    6586 
    6587  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    6588 
    6589  memReq = memReq2.memoryRequirements;
    6590  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    6591  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    6592  }
    6593  else
    6594  {
    6595  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    6596  requiresDedicatedAllocation = false;
    6597  prefersDedicatedAllocation = false;
    6598  }
    6599 }
    6600 
    6601 void VmaAllocator_T::GetImageMemoryRequirements(
    6602  VkImage hImage,
    6603  VkMemoryRequirements& memReq,
    6604  bool& requiresDedicatedAllocation,
    6605  bool& prefersDedicatedAllocation) const
    6606 {
    6607  if(m_UseKhrDedicatedAllocation)
    6608  {
    6609  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    6610  memReqInfo.image = hImage;
    6611 
    6612  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    6613 
    6614  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    6615  memReq2.pNext = &memDedicatedReq;
    6616 
    6617  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    6618 
    6619  memReq = memReq2.memoryRequirements;
    6620  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    6621  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    6622  }
    6623  else
    6624  {
    6625  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    6626  requiresDedicatedAllocation = false;
    6627  prefersDedicatedAllocation = false;
    6628  }
    6629 }
    6630 
    6631 VkResult VmaAllocator_T::AllocateMemory(
    6632  const VkMemoryRequirements& vkMemReq,
    6633  bool requiresDedicatedAllocation,
    6634  bool prefersDedicatedAllocation,
    6635  VkBuffer dedicatedBuffer,
    6636  VkImage dedicatedImage,
    6637  const VmaAllocationCreateInfo& createInfo,
    6638  VmaSuballocationType suballocType,
    6639  VmaAllocation* pAllocation)
    6640 {
    6641  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    6642  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6643  {
    6644  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    6645  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6646  }
    6647  if(requiresDedicatedAllocation)
    6648  {
    6649  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6650  {
    6651  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    6652  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6653  }
    6654  if(createInfo.pool != VK_NULL_HANDLE)
    6655  {
    6656  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    6657  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6658  }
    6659  }
    6660  if((createInfo.pool != VK_NULL_HANDLE) &&
    6661  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    6662  {
    6663  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    6664  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6665  }
    6666 
    6667  if(createInfo.pool != VK_NULL_HANDLE)
    6668  {
    6669  return createInfo.pool->m_BlockVector.Allocate(
    6670  createInfo.pool,
    6671  m_CurrentFrameIndex.load(),
    6672  vkMemReq,
    6673  createInfo,
    6674  suballocType,
    6675  pAllocation);
    6676  }
    6677  else
    6678  {
 6679  // Bit mask of Vulkan memory types acceptable for this allocation.
    6680  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    6681  uint32_t memTypeIndex = UINT32_MAX;
    6682  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    6683  if(res == VK_SUCCESS)
    6684  {
    6685  res = AllocateMemoryOfType(
    6686  vkMemReq,
    6687  requiresDedicatedAllocation || prefersDedicatedAllocation,
    6688  dedicatedBuffer,
    6689  dedicatedImage,
    6690  createInfo,
    6691  memTypeIndex,
    6692  suballocType,
    6693  pAllocation);
    6694  // Succeeded on first try.
    6695  if(res == VK_SUCCESS)
    6696  {
    6697  return res;
    6698  }
    6699  // Allocation from this memory type failed. Try other compatible memory types.
    6700  else
    6701  {
    6702  for(;;)
    6703  {
    6704  // Remove old memTypeIndex from list of possibilities.
    6705  memoryTypeBits &= ~(1u << memTypeIndex);
    6706  // Find alternative memTypeIndex.
    6707  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    6708  if(res == VK_SUCCESS)
    6709  {
    6710  res = AllocateMemoryOfType(
    6711  vkMemReq,
    6712  requiresDedicatedAllocation || prefersDedicatedAllocation,
    6713  dedicatedBuffer,
    6714  dedicatedImage,
    6715  createInfo,
    6716  memTypeIndex,
    6717  suballocType,
    6718  pAllocation);
    6719  // Allocation from this alternative memory type succeeded.
    6720  if(res == VK_SUCCESS)
    6721  {
    6722  return res;
    6723  }
    6724  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    6725  }
    6726  // No other matching memory type index could be found.
    6727  else
    6728  {
    6729  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    6730  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6731  }
    6732  }
    6733  }
    6734  }
 6735  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    6736  else
    6737  return res;
    6738  }
    6739 }
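// Worked example of the fallback loop above (editor's addition, hypothetical
// numbers): suppose vkMemReq.memoryTypeBits == 0b0110 and type 1 is chosen
// first but allocation from it fails. The loop clears that bit
// (memoryTypeBits &= ~(1u << 1) -> 0b0100), asks vmaFindMemoryTypeIndex()
// again, and retries with type 2. Only when no acceptable bit remains does
// the function give up with VK_ERROR_OUT_OF_DEVICE_MEMORY.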
    6740 
    6741 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    6742 {
    6743  VMA_ASSERT(allocation);
    6744 
    6745  if(allocation->CanBecomeLost() == false ||
    6746  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    6747  {
    6748  switch(allocation->GetType())
    6749  {
    6750  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    6751  {
    6752  VmaBlockVector* pBlockVector = VMA_NULL;
    6753  VmaPool hPool = allocation->GetPool();
    6754  if(hPool != VK_NULL_HANDLE)
    6755  {
    6756  pBlockVector = &hPool->m_BlockVector;
    6757  }
    6758  else
    6759  {
    6760  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    6761  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
    6762  pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6763  }
    6764  pBlockVector->Free(allocation);
    6765  }
    6766  break;
    6767  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    6768  FreeDedicatedMemory(allocation);
    6769  break;
    6770  default:
    6771  VMA_ASSERT(0);
    6772  }
    6773  }
    6774 
    6775  vma_delete(this, allocation);
    6776 }
    6777 
    6778 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    6779 {
    6780  // Initialize.
    6781  InitStatInfo(pStats->total);
    6782  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    6783  InitStatInfo(pStats->memoryType[i]);
    6784  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    6785  InitStatInfo(pStats->memoryHeap[i]);
    6786 
    6787  // Process default pools.
    6788  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6789  {
    6790  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6791  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    6792  {
    6793  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6794  VMA_ASSERT(pBlockVector);
    6795  pBlockVector->AddStats(pStats);
    6796  }
    6797  }
    6798 
    6799  // Process custom pools.
    6800  {
    6801  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6802  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6803  {
    6804  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
    6805  }
    6806  }
    6807 
    6808  // Process dedicated allocations.
    6809  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6810  {
    6811  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6812  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6813  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    6814  {
    6815  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
    6816  VMA_ASSERT(pDedicatedAllocVector);
    6817  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    6818  {
    6819  VmaStatInfo allocationStatInfo;
    6820  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    6821  VmaAddStatInfo(pStats->total, allocationStatInfo);
    6822  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    6823  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    6824  }
    6825  }
    6826  }
    6827 
    6828  // Postprocess.
    6829  VmaPostprocessCalcStatInfo(pStats->total);
    6830  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    6831  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    6832  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    6833  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    6834 }
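// Illustrative usage (editor's addition, not part of the library): gathering
// global statistics through the public API, which forwards to
// CalculateStats() above.
static void ExampleCalculateStats(VmaAllocator allocator)
{
    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // stats.total now aggregates default pools, custom pools and dedicated
    // allocations; stats.memoryType[] and stats.memoryHeap[] hold the same
    // data broken down per memory type and per heap.
}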
    6835 
    6836 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    6837 
    6838 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
    6839 {
    6840  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
    6841  {
    6842  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
    6843  {
    6844  for(uint32_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
    6845  {
    6846  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    6847  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
    6848  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    6849  {
    6850  // Process DedicatedAllocations.
    6851  {
    6852  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6853  AllocationVectorType* pDedicatedAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6854  for(size_t dedicatedAllocIndex = pDedicatedAllocationsVector->size(); dedicatedAllocIndex--; )
    6855  {
    6856  VmaAllocation hAlloc = (*pDedicatedAllocationsVector)[dedicatedAllocIndex];
    6857  hAlloc->DedicatedAllocUnmapPersistentlyMappedMemory(this);
    6858  }
    6859  }
    6860 
    6861  // Process normal Allocations.
    6862  {
    6863  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6864  pBlockVector->UnmapPersistentlyMappedMemory();
    6865  }
    6866  }
    6867  }
    6868 
    6869  // Process custom pools.
    6870  {
    6871  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6872  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6873  {
    6874  m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
    6875  }
    6876  }
    6877  }
    6878  }
    6879 }
    6880 
    6881 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
    6882 {
    6883  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
    6884  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
    6885  {
    6886  VkResult finalResult = VK_SUCCESS;
    6887  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
    6888  {
    6889  // Process custom pools.
    6890  {
    6891  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6892  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6893  {
    6894  m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
    6895  }
    6896  }
    6897 
    6898  for(uint32_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
    6899  {
    6900  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    6901  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
    6902  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    6903  {
    6904  // Process DedicatedAllocations.
    6905  {
    6906  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6907  AllocationVectorType* pAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6908  for(size_t dedicatedAllocIndex = 0, dedicatedAllocCount = pAllocationsVector->size(); dedicatedAllocIndex < dedicatedAllocCount; ++dedicatedAllocIndex)
    6909  {
    6910  VmaAllocation hAlloc = (*pAllocationsVector)[dedicatedAllocIndex];
    6911  hAlloc->DedicatedAllocMapPersistentlyMappedMemory(this);
    6912  }
    6913  }
    6914 
    6915  // Process normal Allocations.
    6916  {
    6917  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6918  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
    6919  if(localResult != VK_SUCCESS)
    6920  {
    6921  finalResult = localResult;
    6922  }
    6923  }
    6924  }
    6925  }
    6926  }
    6927  return finalResult;
    6928  }
    6929  else
    6930  return VK_SUCCESS;
    6931 }
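// Usage note (editor's addition): the Unmap/Map pair above is reference
// counted. Every call to UnmapPersistentlyMappedMemory() must be balanced by
// one MapPersistentlyMappedMemory(); mappings are only torn down on the first
// unmap and recreated when the counter returns to zero. The vendorID check
// restricts the work to AMD hardware (4098 == 0x1002), where DEVICE_LOCAL |
// HOST_VISIBLE memory should not stay persistently mapped, e.g. while a
// full-screen application loses focus.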
    6932 
    6933 VkResult VmaAllocator_T::Defragment(
    6934  VmaAllocation* pAllocations,
    6935  size_t allocationCount,
    6936  VkBool32* pAllocationsChanged,
    6937  const VmaDefragmentationInfo* pDefragmentationInfo,
    6938  VmaDefragmentationStats* pDefragmentationStats)
    6939 {
    6940  if(pAllocationsChanged != VMA_NULL)
    6941  {
 6942  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged)); // Zero the whole array, not just its first element.
    6943  }
    6944  if(pDefragmentationStats != VMA_NULL)
    6945  {
    6946  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    6947  }
    6948 
    6949  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
    6950  {
    6951  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
    6952  return VK_ERROR_MEMORY_MAP_FAILED;
    6953  }
    6954 
    6955  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    6956 
    6957  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    6958 
    6959  const size_t poolCount = m_Pools.size();
    6960 
    6961  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    6962  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    6963  {
    6964  VmaAllocation hAlloc = pAllocations[allocIndex];
    6965  VMA_ASSERT(hAlloc);
    6966  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    6967  // DedicatedAlloc cannot be defragmented.
    6968  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    6969  // Only HOST_VISIBLE memory types can be defragmented.
    6970  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
    6971  // Lost allocation cannot be defragmented.
    6972  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    6973  {
    6974  VmaBlockVector* pAllocBlockVector = nullptr;
    6975 
    6976  const VmaPool hAllocPool = hAlloc->GetPool();
    6977  // This allocation belongs to custom pool.
    6978  if(hAllocPool != VK_NULL_HANDLE)
    6979  {
    6980  pAllocBlockVector = &hAllocPool->GetBlockVector();
    6981  }
    6982  // This allocation belongs to general pool.
    6983  else
    6984  {
    6985  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
    6986  }
    6987 
    6988  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    6989 
    6990  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    6991  &pAllocationsChanged[allocIndex] : VMA_NULL;
    6992  pDefragmentator->AddAllocation(hAlloc, pChanged);
    6993  }
    6994  }
    6995 
    6996  VkResult result = VK_SUCCESS;
    6997 
    6998  // ======== Main processing.
    6999 
 7000  VkDeviceSize maxBytesToMove = VK_WHOLE_SIZE; // VkDeviceSize is 64-bit; SIZE_MAX would truncate this "no limit" sentinel on 32-bit builds.
    7001  uint32_t maxAllocationsToMove = UINT32_MAX;
    7002  if(pDefragmentationInfo != VMA_NULL)
    7003  {
    7004  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    7005  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    7006  }
    7007 
    7008  // Process standard memory.
    7009  for(uint32_t memTypeIndex = 0;
    7010  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    7011  ++memTypeIndex)
    7012  {
    7013  // Only HOST_VISIBLE memory types can be defragmented.
    7014  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7015  {
    7016  for(uint32_t blockVectorType = 0;
    7017  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
    7018  ++blockVectorType)
    7019  {
    7020  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
    7021  pDefragmentationStats,
    7022  maxBytesToMove,
    7023  maxAllocationsToMove);
    7024  }
    7025  }
    7026  }
    7027 
    7028  // Process custom pools.
    7029  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    7030  {
    7031  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
    7032  pDefragmentationStats,
    7033  maxBytesToMove,
    7034  maxAllocationsToMove);
    7035  }
    7036 
    7037  // ======== Destroy defragmentators.
    7038 
    7039  // Process custom pools.
    7040  for(size_t poolIndex = poolCount; poolIndex--; )
    7041  {
    7042  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
    7043  }
    7044 
    7045  // Process standard memory.
    7046  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    7047  {
    7048  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7049  {
    7050  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
    7051  {
    7052  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
    7053  }
    7054  }
    7055  }
    7056 
    7057  return result;
    7058 }
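// Illustrative usage (editor's addition, not part of the library): driving
// the defragmentation above through the public API. `allocs`, `allocCount`
// and `allocsChanged` are assumed to be arrays owned by the caller.
static VkResult ExampleDefragment(
    VmaAllocator allocator,
    VmaAllocation* allocs,
    size_t allocCount,
    VkBool32* allocsChanged)
{
    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = 64ull * 1024 * 1024; // budget: 64 MiB...
    defragInfo.maxAllocationsToMove = 256;           // ...and 256 moves
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(allocator, allocs, allocCount, allocsChanged, &defragInfo, &stats);
    // Every allocation with allocsChanged[i] == VK_TRUE has moved; buffers or
    // images bound to it must be recreated and rebound by the caller.
    return res;
}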
    7059 
    7060 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    7061 {
    7062  if(hAllocation->CanBecomeLost())
    7063  {
    7064  /*
    7065  Warning: This is a carefully designed algorithm.
    7066  Do not modify unless you really know what you're doing :)
    7067  */
    7068  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    7069  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    7070  for(;;)
    7071  {
    7072  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    7073  {
    7074  pAllocationInfo->memoryType = UINT32_MAX;
    7075  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    7076  pAllocationInfo->offset = 0;
    7077  pAllocationInfo->size = hAllocation->GetSize();
    7078  pAllocationInfo->pMappedData = VMA_NULL;
    7079  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7080  return;
    7081  }
    7082  else if(localLastUseFrameIndex == localCurrFrameIndex)
    7083  {
    7084  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    7085  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    7086  pAllocationInfo->offset = hAllocation->GetOffset();
    7087  pAllocationInfo->size = hAllocation->GetSize();
    7088  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    7089  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7090  return;
    7091  }
    7092  else // Last use time earlier than current time.
    7093  {
    7094  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    7095  {
    7096  localLastUseFrameIndex = localCurrFrameIndex;
    7097  }
    7098  }
    7099  }
    7100  }
 7101  // We could use the same code here, but for performance reasons we don't, as we don't need to touch the hAllocation.LastUseFrameIndex atomic.
    7102  else
    7103  {
    7104  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    7105  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    7106  pAllocationInfo->offset = hAllocation->GetOffset();
    7107  pAllocationInfo->size = hAllocation->GetSize();
    7108  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    7109  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7110  }
    7111 }
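// Illustrative usage (editor's addition, not part of the library): detecting
// a lost allocation through the public API, which forwards to
// GetAllocationInfo() above.
static bool ExampleIsAllocationLost(VmaAllocator allocator, VmaAllocation alloc)
{
    VmaAllocationInfo info;
    vmaGetAllocationInfo(allocator, alloc, &info);
    // As filled in above, a lost allocation reports deviceMemory ==
    // VK_NULL_HANDLE and memoryType == UINT32_MAX.
    return info.deviceMemory == VK_NULL_HANDLE;
}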
    7112 
    7113 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    7114 {
    7115  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
    7116 
    7117  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    7118 
    7119  if(newCreateInfo.maxBlockCount == 0)
    7120  {
    7121  newCreateInfo.maxBlockCount = SIZE_MAX;
    7122  }
    7123  if(newCreateInfo.blockSize == 0)
    7124  {
    7125  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    7126  }
    7127 
    7128  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
    7129 
    7130  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    7131  if(res != VK_SUCCESS)
    7132  {
    7133  vma_delete(this, *pPool);
    7134  *pPool = VMA_NULL;
    7135  return res;
    7136  }
    7137 
    7138  // Add to m_Pools.
    7139  {
    7140  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7141  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    7142  }
    7143 
    7144  return VK_SUCCESS;
    7145 }
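// Illustrative usage (editor's addition, not part of the library): creating a
// custom pool through the public API, which forwards to CreatePool() above.
// `memTypeIndex` would typically come from vmaFindMemoryTypeIndex().
static VkResult ExampleCreatePool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 0;     // 0 = use CalcPreferredBlockSize() default
    poolInfo.maxBlockCount = 0; // 0 = unlimited (patched to SIZE_MAX above)
    return vmaCreatePool(allocator, &poolInfo, pPool);
}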
    7146 
    7147 void VmaAllocator_T::DestroyPool(VmaPool pool)
    7148 {
    7149  // Remove from m_Pools.
    7150  {
    7151  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7152  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    7153  VMA_ASSERT(success && "Pool not found in Allocator.");
    7154  }
    7155 
    7156  vma_delete(this, pool);
    7157 }
    7158 
    7159 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    7160 {
    7161  pool->m_BlockVector.GetPoolStats(pPoolStats);
    7162 }
    7163 
    7164 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    7165 {
    7166  m_CurrentFrameIndex.store(frameIndex);
    7167 }
    7168 
    7169 void VmaAllocator_T::MakePoolAllocationsLost(
    7170  VmaPool hPool,
    7171  size_t* pLostAllocationCount)
    7172 {
    7173  hPool->m_BlockVector.MakePoolAllocationsLost(
    7174  m_CurrentFrameIndex.load(),
    7175  pLostAllocationCount);
    7176 }
    7177 
    7178 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    7179 {
    7180  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
    7181  (*pAllocation)->InitLost();
    7182 }
    7183 
    7184 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    7185 {
    7186  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    7187 
    7188  VkResult res;
    7189  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7190  {
    7191  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7192  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    7193  {
    7194  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7195  if(res == VK_SUCCESS)
    7196  {
    7197  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    7198  }
    7199  }
    7200  else
    7201  {
    7202  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7203  }
    7204  }
    7205  else
    7206  {
    7207  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7208  }
    7209 
    7210  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    7211  {
    7212  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    7213  }
    7214 
    7215  return res;
    7216 }
    7217 
    7218 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    7219 {
    7220  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    7221  {
    7222  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    7223  }
    7224 
    7225  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    7226 
    7227  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    7228  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7229  {
    7230  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7231  m_HeapSizeLimit[heapIndex] += size;
    7232  }
    7233 }
    7234 
    7235 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    7236 {
    7237  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    7238 
    7239  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    7240  {
    7241  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7242  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][allocation->GetBlockVectorType()];
    7243  VMA_ASSERT(pDedicatedAllocations);
    7244  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    7245  VMA_ASSERT(success);
    7246  }
    7247 
    7248  VkDeviceMemory hMemory = allocation->GetMemory();
    7249 
    7250  if(allocation->GetMappedData() != VMA_NULL)
    7251  {
    7252  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    7253  }
    7254 
    7255  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    7256 
    7257  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    7258 }
    7259 
    7260 #if VMA_STATS_STRING_ENABLED
    7261 
    7262 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    7263 {
    7264  bool dedicatedAllocationsStarted = false;
    7265  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7266  {
    7267  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7268  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    7269  {
    7270  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
    7271  VMA_ASSERT(pDedicatedAllocVector);
    7272  if(pDedicatedAllocVector->empty() == false)
    7273  {
    7274  if(dedicatedAllocationsStarted == false)
    7275  {
    7276  dedicatedAllocationsStarted = true;
    7277  json.WriteString("DedicatedAllocations");
    7278  json.BeginObject();
    7279  }
    7280 
    7281  json.BeginString("Type ");
    7282  json.ContinueString(memTypeIndex);
    7283  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
    7284  {
    7285  json.ContinueString(" Mapped");
    7286  }
    7287  json.EndString();
    7288 
    7289  json.BeginArray();
    7290 
    7291  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    7292  {
    7293  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    7294  json.BeginObject(true);
    7295 
    7296  json.WriteString("Size");
    7297  json.WriteNumber(hAlloc->GetSize());
    7298 
    7299  json.WriteString("Type");
    7300  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
    7301 
    7302  json.EndObject();
    7303  }
    7304 
    7305  json.EndArray();
    7306  }
    7307  }
    7308  }
    7309  if(dedicatedAllocationsStarted)
    7310  {
    7311  json.EndObject();
    7312  }
    7313 
    7314  {
    7315  bool allocationsStarted = false;
    7316  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7317  {
    7318  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    7319  {
    7320  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
    7321  {
    7322  if(allocationsStarted == false)
    7323  {
    7324  allocationsStarted = true;
    7325  json.WriteString("DefaultPools");
    7326  json.BeginObject();
    7327  }
    7328 
    7329  json.BeginString("Type ");
    7330  json.ContinueString(memTypeIndex);
    7331  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
    7332  {
    7333  json.ContinueString(" Mapped");
    7334  }
    7335  json.EndString();
    7336 
    7337  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
    7338  }
    7339  }
    7340  }
    7341  if(allocationsStarted)
    7342  {
    7343  json.EndObject();
    7344  }
    7345  }
    7346 
    7347  {
    7348  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7349  const size_t poolCount = m_Pools.size();
    7350  if(poolCount > 0)
    7351  {
    7352  json.WriteString("Pools");
    7353  json.BeginArray();
    7354  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    7355  {
    7356  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    7357  }
    7358  json.EndArray();
    7359  }
    7360  }
    7361 }
    7362 
    7363 #endif // #if VMA_STATS_STRING_ENABLED
    7364 
    7365 static VkResult AllocateMemoryForImage(
    7366  VmaAllocator allocator,
    7367  VkImage image,
    7368  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7369  VmaSuballocationType suballocType,
    7370  VmaAllocation* pAllocation)
    7371 {
    7372  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
    7373 
    7374  VkMemoryRequirements vkMemReq = {};
    7375  bool requiresDedicatedAllocation = false;
    7376  bool prefersDedicatedAllocation = false;
    7377  allocator->GetImageMemoryRequirements(image, vkMemReq,
    7378  requiresDedicatedAllocation, prefersDedicatedAllocation);
    7379 
    7380  return allocator->AllocateMemory(
    7381  vkMemReq,
    7382  requiresDedicatedAllocation,
    7383  prefersDedicatedAllocation,
    7384  VK_NULL_HANDLE, // dedicatedBuffer
    7385  image, // dedicatedImage
    7386  *pAllocationCreateInfo,
    7387  suballocType,
    7388  pAllocation);
    7389 }
    7390 
    7392 // Public interface
    7393 
    7394 VkResult vmaCreateAllocator(
    7395  const VmaAllocatorCreateInfo* pCreateInfo,
    7396  VmaAllocator* pAllocator)
    7397 {
    7398  VMA_ASSERT(pCreateInfo && pAllocator);
    7399  VMA_DEBUG_LOG("vmaCreateAllocator");
    7400  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    7401  return VK_SUCCESS;
    7402 }
    7403 
    7404 void vmaDestroyAllocator(
    7405  VmaAllocator allocator)
    7406 {
    7407  if(allocator != VK_NULL_HANDLE)
    7408  {
    7409  VMA_DEBUG_LOG("vmaDestroyAllocator");
    7410  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    7411  vma_delete(&allocationCallbacks, allocator);
    7412  }
    7413 }
    7414 
 7415 void vmaGetPhysicalDeviceProperties(
 7416  VmaAllocator allocator,
    7417  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    7418 {
    7419  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    7420  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    7421 }
    7422 
 7423 void vmaGetMemoryProperties(
 7424  VmaAllocator allocator,
    7425  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    7426 {
    7427  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    7428  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    7429 }
    7430 
 7431 void vmaGetMemoryTypeProperties(
 7432  VmaAllocator allocator,
    7433  uint32_t memoryTypeIndex,
    7434  VkMemoryPropertyFlags* pFlags)
    7435 {
    7436  VMA_ASSERT(allocator && pFlags);
    7437  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    7438  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    7439 }
    7440 
 7441 void vmaSetCurrentFrameIndex(
 7442  VmaAllocator allocator,
    7443  uint32_t frameIndex)
    7444 {
    7445  VMA_ASSERT(allocator);
    7446  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    7447 
    7448  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7449 
    7450  allocator->SetCurrentFrameIndex(frameIndex);
    7451 }
    7452 
    7453 void vmaCalculateStats(
    7454  VmaAllocator allocator,
    7455  VmaStats* pStats)
    7456 {
    7457  VMA_ASSERT(allocator && pStats);
    7458  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7459  allocator->CalculateStats(pStats);
    7460 }
    7461 
    7462 #if VMA_STATS_STRING_ENABLED
    7463 
    7464 void vmaBuildStatsString(
    7465  VmaAllocator allocator,
    7466  char** ppStatsString,
    7467  VkBool32 detailedMap)
    7468 {
    7469  VMA_ASSERT(allocator && ppStatsString);
    7470  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7471 
    7472  VmaStringBuilder sb(allocator);
    7473  {
    7474  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    7475  json.BeginObject();
    7476 
    7477  VmaStats stats;
    7478  allocator->CalculateStats(&stats);
    7479 
    7480  json.WriteString("Total");
    7481  VmaPrintStatInfo(json, stats.total);
    7482 
    7483  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    7484  {
    7485  json.BeginString("Heap ");
    7486  json.ContinueString(heapIndex);
    7487  json.EndString();
    7488  json.BeginObject();
    7489 
    7490  json.WriteString("Size");
    7491  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    7492 
    7493  json.WriteString("Flags");
    7494  json.BeginArray(true);
    7495  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    7496  {
    7497  json.WriteString("DEVICE_LOCAL");
    7498  }
    7499  json.EndArray();
    7500 
    7501  if(stats.memoryHeap[heapIndex].blockCount > 0)
    7502  {
    7503  json.WriteString("Stats");
    7504  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    7505  }
    7506 
    7507  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    7508  {
    7509  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    7510  {
    7511  json.BeginString("Type ");
    7512  json.ContinueString(typeIndex);
    7513  json.EndString();
    7514 
    7515  json.BeginObject();
    7516 
    7517  json.WriteString("Flags");
    7518  json.BeginArray(true);
    7519  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    7520  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    7521  {
    7522  json.WriteString("DEVICE_LOCAL");
    7523  }
    7524  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7525  {
    7526  json.WriteString("HOST_VISIBLE");
    7527  }
    7528  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    7529  {
    7530  json.WriteString("HOST_COHERENT");
    7531  }
    7532  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    7533  {
    7534  json.WriteString("HOST_CACHED");
    7535  }
    7536  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    7537  {
    7538  json.WriteString("LAZILY_ALLOCATED");
    7539  }
    7540  json.EndArray();
    7541 
    7542  if(stats.memoryType[typeIndex].blockCount > 0)
    7543  {
    7544  json.WriteString("Stats");
    7545  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    7546  }
    7547 
    7548  json.EndObject();
    7549  }
    7550  }
    7551 
    7552  json.EndObject();
    7553  }
    7554  if(detailedMap == VK_TRUE)
    7555  {
    7556  allocator->PrintDetailedMap(json);
    7557  }
    7558 
    7559  json.EndObject();
    7560  }
    7561 
    7562  const size_t len = sb.GetLength();
    7563  char* const pChars = vma_new_array(allocator, char, len + 1);
    7564  if(len > 0)
    7565  {
    7566  memcpy(pChars, sb.GetData(), len);
    7567  }
    7568  pChars[len] = '\0';
    7569  *ppStatsString = pChars;
    7570 }
    7571 
    7572 void vmaFreeStatsString(
    7573  VmaAllocator allocator,
    7574  char* pStatsString)
    7575 {
    7576  if(pStatsString != VMA_NULL)
    7577  {
    7578  VMA_ASSERT(allocator);
    7579  size_t len = strlen(pStatsString);
    7580  vma_delete_array(allocator, pStatsString, len + 1);
    7581  }
    7582 }
    7583 
    7584 #endif // #if VMA_STATS_STRING_ENABLED
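A minimal usage sketch of the two functions above, assuming a valid VmaAllocator named `allocator` and <cstdio> for printf(); the returned string is JSON and must be released with vmaFreeStatsString():

    char* statsJson = VMA_NULL;
    vmaBuildStatsString(allocator, &statsJson, VK_TRUE); // VK_TRUE additionally emits PrintDetailedMap() output
    printf("%s\n", statsJson);
    vmaFreeStatsString(allocator, statsJson);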
    7585 
    7588 VkResult vmaFindMemoryTypeIndex(
    7589  VmaAllocator allocator,
    7590  uint32_t memoryTypeBits,
    7591  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7592  uint32_t* pMemoryTypeIndex)
    7593 {
    7594  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    7595  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    7596  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    7597 
    7598  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    7599  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    7600  if(preferredFlags == 0)
    7601  {
    7602  preferredFlags = requiredFlags;
    7603  }
    7604  // preferredFlags, if not 0, must be a superset of requiredFlags.
    7605  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
    7606 
    7607  // Convert usage to requiredFlags and preferredFlags.
    7608  switch(pAllocationCreateInfo->usage)
    7609  {
 7610  case VMA_MEMORY_USAGE_UNKNOWN:
 7611  break;
 7612  case VMA_MEMORY_USAGE_GPU_ONLY:
 7613  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
 7614  break;
 7615  case VMA_MEMORY_USAGE_CPU_ONLY:
 7616  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
 7617  break;
 7618  case VMA_MEMORY_USAGE_CPU_TO_GPU:
 7619  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
 7620  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
 7621  break;
 7622  case VMA_MEMORY_USAGE_GPU_TO_CPU:
 7623  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
 7624  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
 7625  break;
    7626  default:
    7627  break;
    7628  }
    7629 
    7630  *pMemoryTypeIndex = UINT32_MAX;
    7631  uint32_t minCost = UINT32_MAX;
    7632  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    7633  memTypeIndex < allocator->GetMemoryTypeCount();
    7634  ++memTypeIndex, memTypeBit <<= 1)
    7635  {
    7636  // This memory type is acceptable according to memoryTypeBits bitmask.
    7637  if((memTypeBit & memoryTypeBits) != 0)
    7638  {
    7639  const VkMemoryPropertyFlags currFlags =
    7640  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    7641  // This memory type contains requiredFlags.
    7642  if((requiredFlags & ~currFlags) == 0)
    7643  {
    7644  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    7645  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
    7646  // Remember memory type with lowest cost.
    7647  if(currCost < minCost)
    7648  {
    7649  *pMemoryTypeIndex = memTypeIndex;
    7650  if(currCost == 0)
    7651  {
    7652  return VK_SUCCESS;
    7653  }
    7654  minCost = currCost;
    7655  }
    7656  }
    7657  }
    7658  }
    7659  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    7660 }
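To illustrate how the cost loop above is meant to be driven, here is a hedged sketch; `allocator` is assumed valid and `memoryTypeBits` would normally come from vkGetBufferMemoryRequirements():

    VmaAllocationCreateInfo createInfo = {};
    createInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    createInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
        VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; // must be a superset of requiredFlags
    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &createInfo, &memTypeIndex);
    // On VK_SUCCESS, memTypeIndex names the type that contains all requiredFlags
    // and misses the fewest preferredFlags bits (a cost of 0 returns immediately).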
    7661 
    7662 VkResult vmaCreatePool(
    7663  VmaAllocator allocator,
    7664  const VmaPoolCreateInfo* pCreateInfo,
    7665  VmaPool* pPool)
    7666 {
    7667  VMA_ASSERT(allocator && pCreateInfo && pPool);
    7668 
    7669  VMA_DEBUG_LOG("vmaCreatePool");
    7670 
    7671  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7672 
    7673  return allocator->CreatePool(pCreateInfo, pPool);
    7674 }
    7675 
    7676 void vmaDestroyPool(
    7677  VmaAllocator allocator,
    7678  VmaPool pool)
    7679 {
    7680  VMA_ASSERT(allocator && pool);
    7681 
    7682  VMA_DEBUG_LOG("vmaDestroyPool");
    7683 
    7684  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7685 
    7686  allocator->DestroyPool(pool);
    7687 }
    7688 
    7689 void vmaGetPoolStats(
    7690  VmaAllocator allocator,
    7691  VmaPool pool,
    7692  VmaPoolStats* pPoolStats)
    7693 {
    7694  VMA_ASSERT(allocator && pool && pPoolStats);
    7695 
    7696  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7697 
    7698  allocator->GetPoolStats(pool, pPoolStats);
    7699 }
    7700 
 7701 void vmaMakePoolAllocationsLost(
 7702  VmaAllocator allocator,
    7703  VmaPool pool,
    7704  size_t* pLostAllocationCount)
    7705 {
    7706  VMA_ASSERT(allocator && pool);
    7707 
    7708  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7709 
    7710  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    7711 }
    7712 
    7713 VkResult vmaAllocateMemory(
    7714  VmaAllocator allocator,
    7715  const VkMemoryRequirements* pVkMemoryRequirements,
    7716  const VmaAllocationCreateInfo* pCreateInfo,
    7717  VmaAllocation* pAllocation,
    7718  VmaAllocationInfo* pAllocationInfo)
    7719 {
    7720  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    7721 
    7722  VMA_DEBUG_LOG("vmaAllocateMemory");
    7723 
    7724  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7725 
    7726  VkResult result = allocator->AllocateMemory(
    7727  *pVkMemoryRequirements,
    7728  false, // requiresDedicatedAllocation
    7729  false, // prefersDedicatedAllocation
    7730  VK_NULL_HANDLE, // dedicatedBuffer
    7731  VK_NULL_HANDLE, // dedicatedImage
    7732  *pCreateInfo,
    7733  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    7734  pAllocation);
    7735 
    7736  if(pAllocationInfo && result == VK_SUCCESS)
    7737  {
    7738  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7739  }
    7740 
    7741  return result;
    7742 }
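A sketch of this raw path, where the caller queries the requirements and binds manually; valid `device`, `buffer` and `allocator` are assumed, with VMA_MEMORY_USAGE_GPU_ONLY as the usage hint:

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buffer, &memReq);
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VmaAllocationInfo allocInfo = {};
    if(vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, &allocInfo) == VK_SUCCESS)
    {
        // Unlike vmaCreateBuffer(), binding is the caller's responsibility here.
        vkBindBufferMemory(device, buffer, allocInfo.deviceMemory, allocInfo.offset);
    }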
    7743 
 7744 VkResult vmaAllocateMemoryForBuffer(
 7745  VmaAllocator allocator,
    7746  VkBuffer buffer,
    7747  const VmaAllocationCreateInfo* pCreateInfo,
    7748  VmaAllocation* pAllocation,
    7749  VmaAllocationInfo* pAllocationInfo)
    7750 {
    7751  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    7752 
    7753  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    7754 
    7755  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7756 
    7757  VkMemoryRequirements vkMemReq = {};
    7758  bool requiresDedicatedAllocation = false;
    7759  bool prefersDedicatedAllocation = false;
    7760  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    7761  requiresDedicatedAllocation,
    7762  prefersDedicatedAllocation);
    7763 
    7764  VkResult result = allocator->AllocateMemory(
    7765  vkMemReq,
    7766  requiresDedicatedAllocation,
    7767  prefersDedicatedAllocation,
    7768  buffer, // dedicatedBuffer
    7769  VK_NULL_HANDLE, // dedicatedImage
    7770  *pCreateInfo,
    7771  VMA_SUBALLOCATION_TYPE_BUFFER,
    7772  pAllocation);
    7773 
    7774  if(pAllocationInfo && result == VK_SUCCESS)
    7775  {
    7776  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7777  }
    7778 
    7779  return result;
    7780 }
    7781 
    7782 VkResult vmaAllocateMemoryForImage(
    7783  VmaAllocator allocator,
    7784  VkImage image,
    7785  const VmaAllocationCreateInfo* pCreateInfo,
    7786  VmaAllocation* pAllocation,
    7787  VmaAllocationInfo* pAllocationInfo)
    7788 {
    7789  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    7790 
    7791  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    7792 
    7793  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7794 
    7795  VkResult result = AllocateMemoryForImage(
    7796  allocator,
    7797  image,
    7798  pCreateInfo,
    7799  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    7800  pAllocation);
    7801 
    7802  if(pAllocationInfo && result == VK_SUCCESS)
    7803  {
    7804  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7805  }
    7806 
    7807  return result;
    7808 }
    7809 
    7810 void vmaFreeMemory(
    7811  VmaAllocator allocator,
    7812  VmaAllocation allocation)
    7813 {
    7814  VMA_ASSERT(allocator && allocation);
    7815 
    7816  VMA_DEBUG_LOG("vmaFreeMemory");
    7817 
    7818  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7819 
    7820  allocator->FreeMemory(allocation);
    7821 }
    7822 
 7823 void vmaGetAllocationInfo(
 7824  VmaAllocator allocator,
    7825  VmaAllocation allocation,
    7826  VmaAllocationInfo* pAllocationInfo)
    7827 {
    7828  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    7829 
    7830  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7831 
    7832  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    7833 }
    7834 
 7835 void vmaSetAllocationUserData(
 7836  VmaAllocator allocator,
    7837  VmaAllocation allocation,
    7838  void* pUserData)
    7839 {
    7840  VMA_ASSERT(allocator && allocation);
    7841 
    7842  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7843 
    7844  allocation->SetUserData(pUserData);
    7845 }
    7846 
 7847 void vmaCreateLostAllocation(
 7848  VmaAllocator allocator,
    7849  VmaAllocation* pAllocation)
    7850 {
    7851  VMA_ASSERT(allocator && pAllocation);
    7852 
    7853  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    7854 
    7855  allocator->CreateLostAllocation(pAllocation);
    7856 }
    7857 
    7858 VkResult vmaMapMemory(
    7859  VmaAllocator allocator,
    7860  VmaAllocation allocation,
    7861  void** ppData)
    7862 {
    7863  VMA_ASSERT(allocator && allocation && ppData);
    7864 
    7865  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7866 
    7867  return (*allocator->GetVulkanFunctions().vkMapMemory)(
    7868  allocator->m_hDevice,
    7869  allocation->GetMemory(),
    7870  allocation->GetOffset(),
    7871  allocation->GetSize(),
    7872  0,
    7873  ppData);
    7874 }
    7875 
    7876 void vmaUnmapMemory(
    7877  VmaAllocator allocator,
    7878  VmaAllocation allocation)
    7879 {
    7880  VMA_ASSERT(allocator && allocation);
    7881 
    7882  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7883 
    7884  (*allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, allocation->GetMemory());
    7885 }
    7886 
    7887 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
    7888 {
    7889  VMA_ASSERT(allocator);
    7890 
    7891  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7892 
    7893  allocator->UnmapPersistentlyMappedMemory();
    7894 }
    7895 
    7896 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
    7897 {
    7898  VMA_ASSERT(allocator);
    7899 
    7900  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7901 
    7902  return allocator->MapPersistentlyMappedMemory();
    7903 }
    7904 
    7905 VkResult vmaDefragment(
    7906  VmaAllocator allocator,
    7907  VmaAllocation* pAllocations,
    7908  size_t allocationCount,
    7909  VkBool32* pAllocationsChanged,
    7910  const VmaDefragmentationInfo *pDefragmentationInfo,
    7911  VmaDefragmentationStats* pDefragmentationStats)
    7912 {
    7913  VMA_ASSERT(allocator && pAllocations);
    7914 
    7915  VMA_DEBUG_LOG("vmaDefragment");
    7916 
    7917  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7918 
    7919  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    7920 }
    7921 
    7922 VkResult vmaCreateBuffer(
    7923  VmaAllocator allocator,
    7924  const VkBufferCreateInfo* pBufferCreateInfo,
    7925  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7926  VkBuffer* pBuffer,
    7927  VmaAllocation* pAllocation,
    7928  VmaAllocationInfo* pAllocationInfo)
    7929 {
    7930  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    7931 
    7932  VMA_DEBUG_LOG("vmaCreateBuffer");
    7933 
    7934  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7935 
    7936  *pBuffer = VK_NULL_HANDLE;
    7937  *pAllocation = VK_NULL_HANDLE;
    7938 
    7939  // 1. Create VkBuffer.
    7940  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    7941  allocator->m_hDevice,
    7942  pBufferCreateInfo,
    7943  allocator->GetAllocationCallbacks(),
    7944  pBuffer);
    7945  if(res >= 0)
    7946  {
    7947  // 2. vkGetBufferMemoryRequirements.
    7948  VkMemoryRequirements vkMemReq = {};
    7949  bool requiresDedicatedAllocation = false;
    7950  bool prefersDedicatedAllocation = false;
    7951  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    7952  requiresDedicatedAllocation, prefersDedicatedAllocation);
    7953 
    7954  // 3. Allocate memory using allocator.
    7955  res = allocator->AllocateMemory(
    7956  vkMemReq,
    7957  requiresDedicatedAllocation,
    7958  prefersDedicatedAllocation,
    7959  *pBuffer, // dedicatedBuffer
    7960  VK_NULL_HANDLE, // dedicatedImage
    7961  *pAllocationCreateInfo,
    7962  VMA_SUBALLOCATION_TYPE_BUFFER,
    7963  pAllocation);
    7964  if(res >= 0)
    7965  {
 7966  // 4. Bind buffer with memory.
    7967  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
    7968  allocator->m_hDevice,
    7969  *pBuffer,
    7970  (*pAllocation)->GetMemory(),
    7971  (*pAllocation)->GetOffset());
    7972  if(res >= 0)
    7973  {
    7974  // All steps succeeded.
    7975  if(pAllocationInfo != VMA_NULL)
    7976  {
    7977  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7978  }
    7979  return VK_SUCCESS;
    7980  }
    7981  allocator->FreeMemory(*pAllocation);
    7982  *pAllocation = VK_NULL_HANDLE;
    7983  return res;
    7984  }
    7985  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    7986  *pBuffer = VK_NULL_HANDLE;
    7987  return res;
    7988  }
    7989  return res;
    7990 }
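A usage sketch of the create/destroy pair, assuming only a valid `allocator`; passing VMA_NULL for the last parameter skips filling VmaAllocationInfo:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buffer, &allocation, VMA_NULL);
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation); // destroys the buffer and frees its memory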
    7991 
    7992 void vmaDestroyBuffer(
    7993  VmaAllocator allocator,
    7994  VkBuffer buffer,
    7995  VmaAllocation allocation)
    7996 {
    7997  if(buffer != VK_NULL_HANDLE)
    7998  {
    7999  VMA_ASSERT(allocator);
    8000 
    8001  VMA_DEBUG_LOG("vmaDestroyBuffer");
    8002 
    8003  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8004 
    8005  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    8006 
    8007  allocator->FreeMemory(allocation);
    8008  }
    8009 }
    8010 
    8011 VkResult vmaCreateImage(
    8012  VmaAllocator allocator,
    8013  const VkImageCreateInfo* pImageCreateInfo,
    8014  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8015  VkImage* pImage,
    8016  VmaAllocation* pAllocation,
    8017  VmaAllocationInfo* pAllocationInfo)
    8018 {
    8019  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    8020 
    8021  VMA_DEBUG_LOG("vmaCreateImage");
    8022 
    8023  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8024 
    8025  *pImage = VK_NULL_HANDLE;
    8026  *pAllocation = VK_NULL_HANDLE;
    8027 
    8028  // 1. Create VkImage.
    8029  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    8030  allocator->m_hDevice,
    8031  pImageCreateInfo,
    8032  allocator->GetAllocationCallbacks(),
    8033  pImage);
    8034  if(res >= 0)
    8035  {
    8036  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    8037  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    8038  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    8039 
    8040  // 2. Allocate memory using allocator.
    8041  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
    8042  if(res >= 0)
    8043  {
    8044  // 3. Bind image with memory.
    8045  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
    8046  allocator->m_hDevice,
    8047  *pImage,
    8048  (*pAllocation)->GetMemory(),
    8049  (*pAllocation)->GetOffset());
    8050  if(res >= 0)
    8051  {
    8052  // All steps succeeded.
    8053  if(pAllocationInfo != VMA_NULL)
    8054  {
    8055  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8056  }
    8057  return VK_SUCCESS;
    8058  }
    8059  allocator->FreeMemory(*pAllocation);
    8060  *pAllocation = VK_NULL_HANDLE;
    8061  return res;
    8062  }
    8063  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    8064  *pImage = VK_NULL_HANDLE;
    8065  return res;
    8066  }
    8067  return res;
    8068 }
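The analogous sketch for images; note how VK_IMAGE_TILING_OPTIMAL selects VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL in the code above (a valid `allocator` is assumed):

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent = { 1024, 1024, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    VkImage image = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
        &image, &allocation, VMA_NULL);
    // ... use the image ...
    vmaDestroyImage(allocator, image, allocation);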
    8069 
    8070 void vmaDestroyImage(
    8071  VmaAllocator allocator,
    8072  VkImage image,
    8073  VmaAllocation allocation)
    8074 {
    8075  if(image != VK_NULL_HANDLE)
    8076  {
    8077  VMA_ASSERT(allocator);
    8078 
    8079  VMA_DEBUG_LOG("vmaDestroyImage");
    8080 
    8081  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8082 
    8083  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    8084 
    8085  allocator->FreeMemory(allocation);
    8086  }
    8087 }
    8088 
    8089 #endif // #ifdef VMA_IMPLEMENTATION
    1 //
    2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    475 #include <vulkan/vulkan.h>
    476 
    477 VK_DEFINE_HANDLE(VmaAllocator)
    478 
    479 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    481  VmaAllocator allocator,
    482  uint32_t memoryType,
    483  VkDeviceMemory memory,
    484  VkDeviceSize size);
    486 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    487  VmaAllocator allocator,
    488  uint32_t memoryType,
    489  VkDeviceMemory memory,
    490  VkDeviceSize size);
    491 
    499 typedef struct VmaDeviceMemoryCallbacks {
     501  PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
     503  PFN_vmaFreeDeviceMemoryFunction pfnFree;
     504 } VmaDeviceMemoryCallbacks;
     505 
    541 
    544 typedef VkFlags VmaAllocatorCreateFlags;
    545 
    550 typedef struct VmaVulkanFunctions {
    551  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    552  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    553  PFN_vkAllocateMemory vkAllocateMemory;
    554  PFN_vkFreeMemory vkFreeMemory;
    555  PFN_vkMapMemory vkMapMemory;
    556  PFN_vkUnmapMemory vkUnmapMemory;
    557  PFN_vkBindBufferMemory vkBindBufferMemory;
    558  PFN_vkBindImageMemory vkBindImageMemory;
    559  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    560  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    561  PFN_vkCreateBuffer vkCreateBuffer;
    562  PFN_vkDestroyBuffer vkDestroyBuffer;
    563  PFN_vkCreateImage vkCreateImage;
    564  PFN_vkDestroyImage vkDestroyImage;
    565  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    566  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
     567 } VmaVulkanFunctions;
     568 
     570 typedef struct VmaAllocatorCreateInfo
     571 {
    573  VmaAllocatorCreateFlags flags;
    575 
    576  VkPhysicalDevice physicalDevice;
    578 
    579  VkDevice device;
    581 
     583  VkDeviceSize preferredLargeHeapBlockSize;
     584 
     586  VkDeviceSize preferredSmallHeapBlockSize;
     587 
    588  const VkAllocationCallbacks* pAllocationCallbacks;
    590 
    605  uint32_t frameInUseCount;
    623  const VkDeviceSize* pHeapSizeLimit;
     635  const VmaVulkanFunctions* pVulkanFunctions;
     636 } VmaAllocatorCreateInfo;
     637 
    639 VkResult vmaCreateAllocator(
    640  const VmaAllocatorCreateInfo* pCreateInfo,
    641  VmaAllocator* pAllocator);
    642 
     644 void vmaDestroyAllocator(
     645  VmaAllocator allocator);
    646 
     651 void vmaGetPhysicalDeviceProperties(
     652  VmaAllocator allocator,
    653  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    654 
     659 void vmaGetMemoryProperties(
     660  VmaAllocator allocator,
    661  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    662 
     669 void vmaGetMemoryTypeProperties(
     670  VmaAllocator allocator,
    671  uint32_t memoryTypeIndex,
    672  VkMemoryPropertyFlags* pFlags);
    673 
     682 void vmaSetCurrentFrameIndex(
     683  VmaAllocator allocator,
    684  uint32_t frameIndex);
    685 
    688 typedef struct VmaStatInfo
    689 {
    691  uint32_t blockCount;
    693  uint32_t allocationCount;
    697  VkDeviceSize usedBytes;
    699  VkDeviceSize unusedBytes;
    700  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    701  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
    702 } VmaStatInfo;
    703 
    705 typedef struct VmaStats
    706 {
    707  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    708  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
     709  VmaStatInfo total;
     710 } VmaStats;
    711 
    713 void vmaCalculateStats(
    714  VmaAllocator allocator,
    715  VmaStats* pStats);
    716 
    717 #define VMA_STATS_STRING_ENABLED 1
    718 
    719 #if VMA_STATS_STRING_ENABLED
    720 
    722 
     724 void vmaBuildStatsString(
     725  VmaAllocator allocator,
    726  char** ppStatsString,
    727  VkBool32 detailedMap);
    728 
    729 void vmaFreeStatsString(
    730  VmaAllocator allocator,
    731  char* pStatsString);
    732 
    733 #endif // #if VMA_STATS_STRING_ENABLED
    734 
    735 VK_DEFINE_HANDLE(VmaPool)
    736 
    737 typedef enum VmaMemoryUsage
    738 {
     743  VMA_MEMORY_USAGE_UNKNOWN = 0,
     744 
     746  VMA_MEMORY_USAGE_GPU_ONLY,
     747 
     749  VMA_MEMORY_USAGE_CPU_ONLY,
     750 
     753  VMA_MEMORY_USAGE_CPU_TO_GPU,
     754 
     768  VMA_MEMORY_USAGE_GPU_TO_CPU,
     769 
     770  VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
     771 } VmaMemoryUsage;
    814 
    817 typedef VkFlags VmaAllocationCreateFlags;
    818 
     819 typedef struct VmaAllocationCreateInfo
     820 {
    822  VmaAllocationCreateFlags flags;
     826  VmaMemoryUsage usage;
     833  VkMemoryPropertyFlags requiredFlags;
    839  VkMemoryPropertyFlags preferredFlags;
    841  void* pUserData;
    846  VmaPool pool;
     847 } VmaAllocationCreateInfo;
     848 
    863 VkResult vmaFindMemoryTypeIndex(
    864  VmaAllocator allocator,
    865  uint32_t memoryTypeBits,
    866  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    867  uint32_t* pMemoryTypeIndex);
    868 
    870 typedef enum VmaPoolCreateFlagBits {
    898 
    901 typedef VkFlags VmaPoolCreateFlags;
    902 
    905 typedef struct VmaPoolCreateInfo {
    908  uint32_t memoryTypeIndex;
    911  VmaPoolCreateFlags flags;
    916  VkDeviceSize blockSize;
    943  uint32_t frameInUseCount;
     944 } VmaPoolCreateInfo;
     945 
    948 typedef struct VmaPoolStats {
    951  VkDeviceSize size;
    954  VkDeviceSize unusedSize;
    967  VkDeviceSize unusedRangeSizeMax;
    968 } VmaPoolStats;
    969 
    976 VkResult vmaCreatePool(
    977  VmaAllocator allocator,
    978  const VmaPoolCreateInfo* pCreateInfo,
    979  VmaPool* pPool);
    980 
    983 void vmaDestroyPool(
    984  VmaAllocator allocator,
    985  VmaPool pool);
    986 
    993 void vmaGetPoolStats(
    994  VmaAllocator allocator,
    995  VmaPool pool,
    996  VmaPoolStats* pPoolStats);
    997 
     1004 void vmaMakePoolAllocationsLost(
     1005  VmaAllocator allocator,
    1006  VmaPool pool,
    1007  size_t* pLostAllocationCount);
    1008 
    1009 VK_DEFINE_HANDLE(VmaAllocation)
    1010 
    1011 
    1013 typedef struct VmaAllocationInfo {
    1018  uint32_t memoryType;
    1027  VkDeviceMemory deviceMemory;
    1032  VkDeviceSize offset;
    1037  VkDeviceSize size;
     1043  void* pMappedData;
     1048  void* pUserData;
     1049 } VmaAllocationInfo;
     1050 
    1061 VkResult vmaAllocateMemory(
    1062  VmaAllocator allocator,
    1063  const VkMemoryRequirements* pVkMemoryRequirements,
    1064  const VmaAllocationCreateInfo* pCreateInfo,
    1065  VmaAllocation* pAllocation,
    1066  VmaAllocationInfo* pAllocationInfo);
    1067 
     1074 VkResult vmaAllocateMemoryForBuffer(
     1075  VmaAllocator allocator,
    1076  VkBuffer buffer,
    1077  const VmaAllocationCreateInfo* pCreateInfo,
    1078  VmaAllocation* pAllocation,
    1079  VmaAllocationInfo* pAllocationInfo);
    1080 
    1082 VkResult vmaAllocateMemoryForImage(
    1083  VmaAllocator allocator,
    1084  VkImage image,
    1085  const VmaAllocationCreateInfo* pCreateInfo,
    1086  VmaAllocation* pAllocation,
    1087  VmaAllocationInfo* pAllocationInfo);
    1088 
    1090 void vmaFreeMemory(
    1091  VmaAllocator allocator,
    1092  VmaAllocation allocation);
    1093 
     1095 void vmaGetAllocationInfo(
     1096  VmaAllocator allocator,
    1097  VmaAllocation allocation,
    1098  VmaAllocationInfo* pAllocationInfo);
    1099 
     1101 void vmaSetAllocationUserData(
     1102  VmaAllocator allocator,
    1103  VmaAllocation allocation,
    1104  void* pUserData);
    1105 
     1116 void vmaCreateLostAllocation(
     1117  VmaAllocator allocator,
    1118  VmaAllocation* pAllocation);
    1119 
    1128 VkResult vmaMapMemory(
    1129  VmaAllocator allocator,
    1130  VmaAllocation allocation,
    1131  void** ppData);
    1132 
    1133 void vmaUnmapMemory(
    1134  VmaAllocator allocator,
    1135  VmaAllocation allocation);
    1136 
    1158 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);
    1159 
    1167 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);
    1168 
    1170 typedef struct VmaDefragmentationInfo {
    1175  VkDeviceSize maxBytesToMove;
     1180  uint32_t maxAllocationsToMove;
     1181 } VmaDefragmentationInfo;
     1182 
    1184 typedef struct VmaDefragmentationStats {
    1186  VkDeviceSize bytesMoved;
    1188  VkDeviceSize bytesFreed;
     1190  uint32_t allocationsMoved;
     1192  uint32_t deviceMemoryBlocksFreed;
     1193 } VmaDefragmentationStats;
     1194 
    1265 VkResult vmaDefragment(
    1266  VmaAllocator allocator,
    1267  VmaAllocation* pAllocations,
    1268  size_t allocationCount,
    1269  VkBool32* pAllocationsChanged,
    1270  const VmaDefragmentationInfo *pDefragmentationInfo,
    1271  VmaDefragmentationStats* pDefragmentationStats);
    1272 
    1291 VkResult vmaCreateBuffer(
    1292  VmaAllocator allocator,
    1293  const VkBufferCreateInfo* pBufferCreateInfo,
    1294  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1295  VkBuffer* pBuffer,
    1296  VmaAllocation* pAllocation,
    1297  VmaAllocationInfo* pAllocationInfo);
    1298 
    1307 void vmaDestroyBuffer(
    1308  VmaAllocator allocator,
    1309  VkBuffer buffer,
    1310  VmaAllocation allocation);
    1311 
    1313 VkResult vmaCreateImage(
    1314  VmaAllocator allocator,
    1315  const VkImageCreateInfo* pImageCreateInfo,
    1316  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1317  VkImage* pImage,
    1318  VmaAllocation* pAllocation,
    1319  VmaAllocationInfo* pAllocationInfo);
    1320 
    1329 void vmaDestroyImage(
    1330  VmaAllocator allocator,
    1331  VkImage image,
    1332  VmaAllocation allocation);
    1333 
    1334 #ifdef __cplusplus
    1335 }
    1336 #endif
    1337 
    1338 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    1339 
    1340 // For Visual Studio IntelliSense.
    1341 #ifdef __INTELLISENSE__
    1342 #define VMA_IMPLEMENTATION
    1343 #endif
    1344 
    1345 #ifdef VMA_IMPLEMENTATION
    1346 #undef VMA_IMPLEMENTATION
    1347 
    1348 #include <cstdint>
    1349 #include <cstdlib>
    1350 #include <cstring>
    1351 
    1352 /*******************************************************************************
    1353 CONFIGURATION SECTION
    1354 
    1355 Define some of these macros before each #include of this header or change them
     1356 here if you need behavior other than the default for your environment.
    1357 */
    1358 
    1359 /*
    1360 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    1361 internally, like:
    1362 
    1363  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    1364 
     1365 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    1366 VmaAllocatorCreateInfo::pVulkanFunctions.
    1367 */
    1368 #ifndef VMA_STATIC_VULKAN_FUNCTIONS
    1369 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    1370 #endif
    1371 
    1372 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    1373 //#define VMA_USE_STL_CONTAINERS 1
    1374 
     1375 /* Set this macro to 1 to make the library include and use STL containers:
     1376 std::pair, std::vector, std::list, std::unordered_map.
     1377 
     1378 Set it to 0 or leave it undefined to make the library use its own implementation
     1379 of the containers.
    1380 */
    1381 #if VMA_USE_STL_CONTAINERS
    1382  #define VMA_USE_STL_VECTOR 1
    1383  #define VMA_USE_STL_UNORDERED_MAP 1
    1384  #define VMA_USE_STL_LIST 1
    1385 #endif
    1386 
    1387 #if VMA_USE_STL_VECTOR
    1388  #include <vector>
    1389 #endif
    1390 
    1391 #if VMA_USE_STL_UNORDERED_MAP
    1392  #include <unordered_map>
    1393 #endif
    1394 
    1395 #if VMA_USE_STL_LIST
    1396  #include <list>
    1397 #endif
    1398 
    1399 /*
     1400 The following headers are used in this CONFIGURATION section only, so feel free to
    1401 remove them if not needed.
    1402 */
    1403 #include <cassert> // for assert
    1404 #include <algorithm> // for min, max
    1405 #include <mutex> // for std::mutex
    1406 #include <atomic> // for std::atomic
    1407 
    1408 #if !defined(_WIN32)
    1409  #include <malloc.h> // for aligned_alloc()
    1410 #endif
    1411 
    1412 // Normal assert to check for programmer's errors, especially in Debug configuration.
    1413 #ifndef VMA_ASSERT
    1414  #ifdef _DEBUG
    1415  #define VMA_ASSERT(expr) assert(expr)
    1416  #else
    1417  #define VMA_ASSERT(expr)
    1418  #endif
    1419 #endif
    1420 
     1421 // Assert that will be called very often, e.g. inside data structures like operator[].
     1422 // Making it non-empty can make the program slow.
    1423 #ifndef VMA_HEAVY_ASSERT
    1424  #ifdef _DEBUG
    1425  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    1426  #else
    1427  #define VMA_HEAVY_ASSERT(expr)
    1428  #endif
    1429 #endif
    1430 
    1431 #ifndef VMA_NULL
    1432  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    1433  #define VMA_NULL nullptr
    1434 #endif
    1435 
    1436 #ifndef VMA_ALIGN_OF
    1437  #define VMA_ALIGN_OF(type) (__alignof(type))
    1438 #endif
    1439 
    1440 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    1441  #if defined(_WIN32)
    1442  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    1443  #else
    1444  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    1445  #endif
    1446 #endif
    1447 
    1448 #ifndef VMA_SYSTEM_FREE
    1449  #if defined(_WIN32)
    1450  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    1451  #else
    1452  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    1453  #endif
    1454 #endif
    1455 
    1456 #ifndef VMA_MIN
    1457  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    1458 #endif
    1459 
    1460 #ifndef VMA_MAX
    1461  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    1462 #endif
    1463 
    1464 #ifndef VMA_SWAP
    1465  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    1466 #endif
    1467 
    1468 #ifndef VMA_SORT
    1469  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    1470 #endif
    1471 
    1472 #ifndef VMA_DEBUG_LOG
    1473  #define VMA_DEBUG_LOG(format, ...)
    1474  /*
    1475  #define VMA_DEBUG_LOG(format, ...) do { \
    1476  printf(format, __VA_ARGS__); \
    1477  printf("\n"); \
    1478  } while(false)
    1479  */
    1480 #endif
    1481 
    1482 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    1483 #if VMA_STATS_STRING_ENABLED
    1484  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    1485  {
    1486  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    1487  }
    1488  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    1489  {
    1490  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    1491  }
    1492  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    1493  {
    1494  snprintf(outStr, strLen, "%p", ptr);
    1495  }
    1496 #endif
    1497 
    1498 #ifndef VMA_MUTEX
    1499  class VmaMutex
    1500  {
    1501  public:
    1502  VmaMutex() { }
    1503  ~VmaMutex() { }
    1504  void Lock() { m_Mutex.lock(); }
    1505  void Unlock() { m_Mutex.unlock(); }
    1506  private:
    1507  std::mutex m_Mutex;
    1508  };
    1509  #define VMA_MUTEX VmaMutex
    1510 #endif
    1511 
    1512 /*
    1513 If providing your own implementation, you need to implement a subset of std::atomic:
    1514 
    1515 - Constructor(uint32_t desired)
    1516 - uint32_t load() const
    1517 - void store(uint32_t desired)
    1518 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    1519 */
    1520 #ifndef VMA_ATOMIC_UINT32
    1521  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    1522 #endif
    1523 
    1524 #ifndef VMA_BEST_FIT
    1525 
    1537  #define VMA_BEST_FIT (1)
    1538 #endif
    1539 
    1540 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    1541 
    1545  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    1546 #endif
    1547 
    1548 #ifndef VMA_DEBUG_ALIGNMENT
    1549 
    1553  #define VMA_DEBUG_ALIGNMENT (1)
    1554 #endif
    1555 
    1556 #ifndef VMA_DEBUG_MARGIN
    1557 
    1561  #define VMA_DEBUG_MARGIN (0)
    1562 #endif
    1563 
    1564 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    1565 
    1569  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    1570 #endif
    1571 
    1572 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    1573 
    1577  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    1578 #endif
    1579 
    1580 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    1581  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
    1583 #endif
    1584 
    1585 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    1586  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
    1588 #endif
    1589 
    1590 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    1591  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
    1593 #endif
    1594 
    1595 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    1596 
    1597 /*******************************************************************************
    1598 END OF CONFIGURATION
    1599 */
    1600 
    1601 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    1602  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    1603 
    1604 // Returns number of bits set to 1 in (v).
    1605 static inline uint32_t CountBitsSet(uint32_t v)
    1606 {
    1607  uint32_t c = v - ((v >> 1) & 0x55555555);
    1608  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    1609  c = ((c >> 4) + c) & 0x0F0F0F0F;
    1610  c = ((c >> 8) + c) & 0x00FF00FF;
    1611  c = ((c >> 16) + c) & 0x0000FFFF;
    1612  return c;
    1613 }
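This is the classic SWAR population count. A quick worked sketch of its results:

    assert(CountBitsSet(0x0000000Bu) == 3);  // binary 1011 -> 3 bits set
    assert(CountBitsSet(0xFFFFFFFFu) == 32); // all bits set
    assert(CountBitsSet(0u) == 0);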
    1614 
     1615 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
    1616 // Use types like uint32_t, uint64_t as T.
    1617 template <typename T>
    1618 static inline T VmaAlignUp(T val, T align)
    1619 {
    1620  return (val + align - 1) / align * align;
    1621 }
    1622 
    1623 // Division with mathematical rounding to nearest number.
    1624 template <typename T>
    1625 inline T VmaRoundDiv(T x, T y)
    1626 {
    1627  return (x + (y / (T)2)) / y;
    1628 }
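A few expected values for the two helpers above, as a sanity-check sketch:

    assert(VmaAlignUp<uint32_t>(11, 8) == 16); // rounded up to the next multiple of 8
    assert(VmaAlignUp<uint32_t>(16, 8) == 16); // already aligned values are unchanged
    assert(VmaRoundDiv<uint32_t>(7, 2) == 4);  // (7 + 1) / 2: 3.5 rounds to 4
    assert(VmaRoundDiv<uint32_t>(5, 2) == 3);  // exact halves round up: (5 + 1) / 2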
    1629 
    1630 #ifndef VMA_SORT
    1631 
    1632 template<typename Iterator, typename Compare>
    1633 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    1634 {
    1635  Iterator centerValue = end; --centerValue;
    1636  Iterator insertIndex = beg;
    1637  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    1638  {
    1639  if(cmp(*memTypeIndex, *centerValue))
    1640  {
    1641  if(insertIndex != memTypeIndex)
    1642  {
    1643  VMA_SWAP(*memTypeIndex, *insertIndex);
    1644  }
    1645  ++insertIndex;
    1646  }
    1647  }
    1648  if(insertIndex != centerValue)
    1649  {
    1650  VMA_SWAP(*insertIndex, *centerValue);
    1651  }
    1652  return insertIndex;
    1653 }
    1654 
    1655 template<typename Iterator, typename Compare>
    1656 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    1657 {
    1658  if(beg < end)
    1659  {
    1660  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    1661  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    1662  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    1663  }
    1664 }
    1665 
    1666 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    1667 
    1668 #endif // #ifndef VMA_SORT
    1669 
    1670 /*
    1671 Returns true if two memory blocks occupy overlapping pages.
     1672 ResourceA must be at a lower memory offset than ResourceB.
    1673 
    1674 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    1675 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    1676 */
    1677 static inline bool VmaBlocksOnSamePage(
    1678  VkDeviceSize resourceAOffset,
    1679  VkDeviceSize resourceASize,
    1680  VkDeviceSize resourceBOffset,
    1681  VkDeviceSize pageSize)
    1682 {
    1683  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    1684  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    1685  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    1686  VkDeviceSize resourceBStart = resourceBOffset;
    1687  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    1688  return resourceAEndPage == resourceBStartPage;
    1689 }
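A worked sketch, taking pageSize (bufferImageGranularity) = 1024; the bit-mask arithmetic assumes a power of two, which Vulkan guarantees for this limit:

    // Resource A occupies [0, 512): its last byte, offset 511, lies on page 0.
    assert(VmaBlocksOnSamePage(0, 512, 512, 1024)  == true);  // B at offset 512 is still on page 0
    assert(VmaBlocksOnSamePage(0, 512, 1024, 1024) == false); // B at offset 1024 starts page 1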
    1690 
    1691 enum VmaSuballocationType
    1692 {
    1693  VMA_SUBALLOCATION_TYPE_FREE = 0,
    1694  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    1695  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    1696  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    1697  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    1698  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    1699  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    1700 };
    1701 
    1702 /*
    1703 Returns true if given suballocation types could conflict and must respect
     1704 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
     1705 or linear image and the other is an optimal image. If a type is unknown, behave
     1706 conservatively.
    1707 */
    1708 static inline bool VmaIsBufferImageGranularityConflict(
    1709  VmaSuballocationType suballocType1,
    1710  VmaSuballocationType suballocType2)
    1711 {
    1712  if(suballocType1 > suballocType2)
    1713  {
    1714  VMA_SWAP(suballocType1, suballocType2);
    1715  }
    1716 
    1717  switch(suballocType1)
    1718  {
    1719  case VMA_SUBALLOCATION_TYPE_FREE:
    1720  return false;
    1721  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    1722  return true;
    1723  case VMA_SUBALLOCATION_TYPE_BUFFER:
    1724  return
    1725  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    1726  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1727  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    1728  return
    1729  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    1730  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    1731  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1732  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    1733  return
    1734  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    1735  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    1736  return false;
    1737  default:
    1738  VMA_ASSERT(0);
    1739  return true;
    1740  }
    1741 }
    1742 
    1743 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    1744 struct VmaMutexLock
    1745 {
    1746 public:
    1747  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    1748  m_pMutex(useMutex ? &mutex : VMA_NULL)
    1749  {
    1750  if(m_pMutex)
    1751  {
    1752  m_pMutex->Lock();
    1753  }
    1754  }
    1755 
    1756  ~VmaMutexLock()
    1757  {
    1758  if(m_pMutex)
    1759  {
    1760  m_pMutex->Unlock();
    1761  }
    1762  }
    1763 
    1764 private:
    1765  VMA_MUTEX* m_pMutex;
    1766 };
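Typical use is block-scoped, as in the allocator methods earlier in this file; a sketch:

    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex); // locks only when m_UseMutex is true
        // ... access state guarded by m_PoolsMutex ...
    } // unlocked here, on every exit path from the scope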
    1767 
    1768 #if VMA_DEBUG_GLOBAL_MUTEX
    1769  static VMA_MUTEX gDebugGlobalMutex;
    1770  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    1771 #else
    1772  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    1773 #endif
    1774 
    1775 // Minimum size of a free suballocation to register it in the free suballocation collection.
    1776 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    1777 
    1778 /*
     1779 Performs binary search and returns an iterator to the first element that is
     1780 greater than or equal to (key), according to comparison (cmp).
     1781 
     1782 Cmp should return true if the first argument is less than the second argument.
     1783 
     1784 The returned value is the found element, if present in the collection, or the
     1785 place where a new element with value (key) should be inserted.
    1786 */
    1787 template <typename IterT, typename KeyT, typename CmpT>
    1788 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
    1789 {
    1790  size_t down = 0, up = (end - beg);
    1791  while(down < up)
    1792  {
    1793  const size_t mid = (down + up) / 2;
    1794  if(cmp(*(beg+mid), key))
    1795  {
    1796  down = mid + 1;
    1797  }
    1798  else
    1799  {
    1800  up = mid;
    1801  }
    1802  }
    1803  return beg + down;
    1804 }
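A small sketch of this lower-bound behavior on a sorted array (the comparator type is illustrative):

    struct IntLess { bool operator()(int a, int b) const { return a < b; } };
    const int sorted[] = { 1, 3, 3, 7, 9 };
    const int* it = VmaBinaryFindFirstNotLess(sorted, sorted + 5, 5, IntLess());
    // it points at 7: the first element not less than 5, i.e. the position
    // where 5 would be inserted to keep the array sorted.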
    1805 
    1807 // Memory allocation
    1808 
    1809 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    1810 {
    1811  if((pAllocationCallbacks != VMA_NULL) &&
    1812  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    1813  {
    1814  return (*pAllocationCallbacks->pfnAllocation)(
    1815  pAllocationCallbacks->pUserData,
    1816  size,
    1817  alignment,
    1818  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    1819  }
    1820  else
    1821  {
    1822  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    1823  }
    1824 }
    1825 
    1826 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    1827 {
    1828  if((pAllocationCallbacks != VMA_NULL) &&
    1829  (pAllocationCallbacks->pfnFree != VMA_NULL))
    1830  {
    1831  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    1832  }
    1833  else
    1834  {
    1835  VMA_SYSTEM_FREE(ptr);
    1836  }
    1837 }
    1838 
    1839 template<typename T>
    1840 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    1841 {
    1842  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    1843 }
    1844 
    1845 template<typename T>
    1846 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    1847 {
    1848  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    1849 }
    1850 
    1851 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    1852 
    1853 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    1854 
    1855 template<typename T>
    1856 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    1857 {
    1858  ptr->~T();
    1859  VmaFree(pAllocationCallbacks, ptr);
    1860 }
    1861 
    1862 template<typename T>
    1863 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    1864 {
    1865  if(ptr != VMA_NULL)
    1866  {
    1867  for(size_t i = count; i--; )
    1868  {
    1869  ptr[i].~T();
    1870  }
    1871  VmaFree(pAllocationCallbacks, ptr);
    1872  }
    1873 }
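A usage sketch of these helpers; a null pAllocationCallbacks falls back to VMA_SYSTEM_ALIGNED_MALLOC / VMA_SYSTEM_FREE, and the struct Foo is illustrative:

    struct Foo { int x; };
    Foo* foo = vma_new(pAllocationCallbacks, Foo);   // allocate, then placement-new
    vma_delete(pAllocationCallbacks, foo);           // ~Foo(), then VmaFree()
    int* arr = vma_new_array(pAllocationCallbacks, int, 8);
    vma_delete_array(pAllocationCallbacks, arr, 8);  // destroys in reverse order, then frees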
    1874 
    1875 // STL-compatible allocator.
    1876 template<typename T>
    1877 class VmaStlAllocator
    1878 {
    1879 public:
    1880  const VkAllocationCallbacks* const m_pCallbacks;
    1881  typedef T value_type;
    1882 
    1883  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    1884  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    1885 
    1886  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    1887  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    1888 
    1889  template<typename U>
    1890  bool operator==(const VmaStlAllocator<U>& rhs) const
    1891  {
    1892  return m_pCallbacks == rhs.m_pCallbacks;
    1893  }
    1894  template<typename U>
    1895  bool operator!=(const VmaStlAllocator<U>& rhs) const
    1896  {
    1897  return m_pCallbacks != rhs.m_pCallbacks;
    1898  }
    1899 
    1900  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    1901 };
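A sketch of plugging this allocator into an STL container, assuming <vector> is included and pAllocationCallbacks is a valid const VkAllocationCallbacks*:

    VmaStlAllocator<uint32_t> stlAllocator(pAllocationCallbacks);
    std::vector<uint32_t, VmaStlAllocator<uint32_t> > v(stlAllocator);
    v.push_back(42); // element storage now goes through the user-provided callbacks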
    1902 
    1903 #if VMA_USE_STL_VECTOR
    1904 
    1905 #define VmaVector std::vector
    1906 
    1907 template<typename T, typename allocatorT>
    1908 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    1909 {
    1910  vec.insert(vec.begin() + index, item);
    1911 }
    1912 
    1913 template<typename T, typename allocatorT>
    1914 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    1915 {
    1916  vec.erase(vec.begin() + index);
    1917 }
    1918 
    1919 #else // #if VMA_USE_STL_VECTOR
    1920 
    1921 /* Class with interface compatible with subset of std::vector.
    1922 T must be POD because constructors and destructors are not called and memcpy is
    1923 used for these objects. */
    1924 template<typename T, typename AllocatorT>
    1925 class VmaVector
    1926 {
    1927 public:
    1928  typedef T value_type;
    1929 
    1930  VmaVector(const AllocatorT& allocator) :
    1931  m_Allocator(allocator),
    1932  m_pArray(VMA_NULL),
    1933  m_Count(0),
    1934  m_Capacity(0)
    1935  {
    1936  }
    1937 
    1938  VmaVector(size_t count, const AllocatorT& allocator) :
    1939  m_Allocator(allocator),
    1940  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    1941  m_Count(count),
    1942  m_Capacity(count)
    1943  {
    1944  }
    1945 
    1946  VmaVector(const VmaVector<T, AllocatorT>& src) :
    1947  m_Allocator(src.m_Allocator),
    1948  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    1949  m_Count(src.m_Count),
    1950  m_Capacity(src.m_Count)
    1951  {
    1952  if(m_Count != 0)
    1953  {
    1954  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    1955  }
    1956  }
    1957 
    1958  ~VmaVector()
    1959  {
    1960  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    1961  }
    1962 
    1963  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    1964  {
    1965  if(&rhs != this)
    1966  {
    1967  resize(rhs.m_Count);
    1968  if(m_Count != 0)
    1969  {
    1970  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    1971  }
    1972  }
    1973  return *this;
    1974  }
    1975 
    1976  bool empty() const { return m_Count == 0; }
    1977  size_t size() const { return m_Count; }
    1978  T* data() { return m_pArray; }
    1979  const T* data() const { return m_pArray; }
    1980 
    1981  T& operator[](size_t index)
    1982  {
    1983  VMA_HEAVY_ASSERT(index < m_Count);
    1984  return m_pArray[index];
    1985  }
    1986  const T& operator[](size_t index) const
    1987  {
    1988  VMA_HEAVY_ASSERT(index < m_Count);
    1989  return m_pArray[index];
    1990  }
    1991 
    1992  T& front()
    1993  {
    1994  VMA_HEAVY_ASSERT(m_Count > 0);
    1995  return m_pArray[0];
    1996  }
    1997  const T& front() const
    1998  {
    1999  VMA_HEAVY_ASSERT(m_Count > 0);
    2000  return m_pArray[0];
    2001  }
    2002  T& back()
    2003  {
    2004  VMA_HEAVY_ASSERT(m_Count > 0);
    2005  return m_pArray[m_Count - 1];
    2006  }
    2007  const T& back() const
    2008  {
    2009  VMA_HEAVY_ASSERT(m_Count > 0);
    2010  return m_pArray[m_Count - 1];
    2011  }
    2012 
    2013  void reserve(size_t newCapacity, bool freeMemory = false)
    2014  {
    2015  newCapacity = VMA_MAX(newCapacity, m_Count);
    2016 
    2017  if((newCapacity < m_Capacity) && !freeMemory)
    2018  {
    2019  newCapacity = m_Capacity;
    2020  }
    2021 
    2022  if(newCapacity != m_Capacity)
    2023  {
     2024  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    2025  if(m_Count != 0)
    2026  {
    2027  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    2028  }
    2029  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2030  m_Capacity = newCapacity;
    2031  m_pArray = newArray;
    2032  }
    2033  }
    2034 
    2035  void resize(size_t newCount, bool freeMemory = false)
    2036  {
    2037  size_t newCapacity = m_Capacity;
    2038  if(newCount > m_Capacity)
    2039  {
    2040  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    2041  }
    2042  else if(freeMemory)
    2043  {
    2044  newCapacity = newCount;
    2045  }
    2046 
    2047  if(newCapacity != m_Capacity)
    2048  {
    2049  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    2050  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    2051  if(elementsToCopy != 0)
    2052  {
    2053  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    2054  }
    2055  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2056  m_Capacity = newCapacity;
    2057  m_pArray = newArray;
    2058  }
    2059 
    2060  m_Count = newCount;
    2061  }
    2062 
    2063  void clear(bool freeMemory = false)
    2064  {
    2065  resize(0, freeMemory);
    2066  }
    2067 
    2068  void insert(size_t index, const T& src)
    2069  {
    2070  VMA_HEAVY_ASSERT(index <= m_Count);
    2071  const size_t oldCount = size();
    2072  resize(oldCount + 1);
    2073  if(index < oldCount)
    2074  {
    2075  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    2076  }
    2077  m_pArray[index] = src;
    2078  }
    2079 
    2080  void remove(size_t index)
    2081  {
    2082  VMA_HEAVY_ASSERT(index < m_Count);
    2083  const size_t oldCount = size();
    2084  if(index < oldCount - 1)
    2085  {
    2086  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    2087  }
    2088  resize(oldCount - 1);
    2089  }
    2090 
    2091  void push_back(const T& src)
    2092  {
    2093  const size_t newIndex = size();
    2094  resize(newIndex + 1);
    2095  m_pArray[newIndex] = src;
    2096  }
    2097 
    2098  void pop_back()
    2099  {
    2100  VMA_HEAVY_ASSERT(m_Count > 0);
    2101  resize(size() - 1);
    2102  }
    2103 
    2104  void push_front(const T& src)
    2105  {
    2106  insert(0, src);
    2107  }
    2108 
    2109  void pop_front()
    2110  {
    2111  VMA_HEAVY_ASSERT(m_Count > 0);
    2112  remove(0);
    2113  }
    2114 
    2115  typedef T* iterator;
    2116 
    2117  iterator begin() { return m_pArray; }
    2118  iterator end() { return m_pArray + m_Count; }
    2119 
    2120 private:
    2121  AllocatorT m_Allocator;
    2122  T* m_pArray;
    2123  size_t m_Count;
    2124  size_t m_Capacity;
    2125 };
    2126 
    2127 template<typename T, typename allocatorT>
    2128 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    2129 {
    2130  vec.insert(index, item);
    2131 }
    2132 
    2133 template<typename T, typename allocatorT>
    2134 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    2135 {
    2136  vec.remove(index);
    2137 }
    2138 
    2139 #endif // #if VMA_USE_STL_VECTOR
    2140 
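// Usage sketch (editor's illustration, not library code). 'allocCallbacks' is a
// hypothetical VkAllocationCallbacks pointer (may be null). The free functions
// above make the same snippet work with both VmaVector and the std::vector variant:
//
//   VmaVector< uint32_t, VmaStlAllocator<uint32_t> > vec(
//       VmaStlAllocator<uint32_t>(allocCallbacks));
//   vec.push_back(7);
//   VmaVectorInsert(vec, 0, 42u); // vec is now {42, 7}
//   VmaVectorRemove(vec, 1);      // vec is now {42}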
    2141 template<typename CmpLess, typename VectorT>
    2142 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    2143 {
    2144  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    2145  vector.data(),
    2146  vector.data() + vector.size(),
    2147  value,
    2148  CmpLess()) - vector.data();
    2149  VmaVectorInsert(vector, indexToInsert, value);
    2150  return indexToInsert;
    2151 }
    2152 
    2153 template<typename CmpLess, typename VectorT>
    2154 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    2155 {
    2156  CmpLess comparator;
    2157  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    2158  vector.begin(),
    2159  vector.end(),
    2160  value,
    2161  comparator);
    2162  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    2163  {
    2164  size_t indexToRemove = it - vector.begin();
    2165  VmaVectorRemove(vector, indexToRemove);
    2166  return true;
    2167  }
    2168  return false;
    2169 }
    2170 
    2171 template<typename CmpLess, typename VectorT>
    2172 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
    2173 {
    2174  CmpLess comparator;
    2175  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
    2176  vector.data(),
    2177  vector.data() + vector.size(),
    2178  value,
    2179  comparator);
    2180  if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
    2181  {
    2182  return it - vector.data();
    2183  }
    2184  else
    2185  {
    2186  return vector.size();
    2187  }
    2188 }
    2189 
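// Usage sketch (editor's illustration): keeping a VmaVector sorted with the three
// helpers above. 'UintLess' is a hypothetical strict-weak-ordering functor, e.g.
// struct UintLess { bool operator()(uint32_t a, uint32_t b) const { return a < b; } };
//
//   VmaVectorInsertSorted<UintLess>(vec, 13u);                // insert at sorted position
//   bool removed = VmaVectorRemoveSorted<UintLess>(vec, 13u); // true if an equal item was found
//   size_t pos = VmaVectorFindSorted<UintLess>(vec, 7u);      // vec.size() if absent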
    2190 ////////////////////////////////////////////////////////////////////////////////
    2191 // class VmaPoolAllocator
    2192 
    2193 /*
    2194 Allocator for objects of type T using a list of arrays (pools) to speed up
    2195 allocation. Number of elements that can be allocated is not bounded because
    2196 allocator can create multiple blocks.
    2197 */
    2198 template<typename T>
    2199 class VmaPoolAllocator
    2200 {
    2201 public:
    2202  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    2203  ~VmaPoolAllocator();
    2204  void Clear();
    2205  T* Alloc();
    2206  void Free(T* ptr);
    2207 
    2208 private:
    2209  union Item
    2210  {
    2211  uint32_t NextFreeIndex;
    2212  T Value;
    2213  };
    2214 
    2215  struct ItemBlock
    2216  {
    2217  Item* pItems;
    2218  uint32_t FirstFreeIndex;
    2219  };
    2220 
    2221  const VkAllocationCallbacks* m_pAllocationCallbacks;
    2222  size_t m_ItemsPerBlock;
    2223  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    2224 
    2225  ItemBlock& CreateNewBlock();
    2226 };
    2227 
    2228 template<typename T>
    2229 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    2230  m_pAllocationCallbacks(pAllocationCallbacks),
    2231  m_ItemsPerBlock(itemsPerBlock),
    2232  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    2233 {
    2234  VMA_ASSERT(itemsPerBlock > 0);
    2235 }
    2236 
    2237 template<typename T>
    2238 VmaPoolAllocator<T>::~VmaPoolAllocator()
    2239 {
    2240  Clear();
    2241 }
    2242 
    2243 template<typename T>
    2244 void VmaPoolAllocator<T>::Clear()
    2245 {
    2246  for(size_t i = m_ItemBlocks.size(); i--; )
    2247  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    2248  m_ItemBlocks.clear();
    2249 }
    2250 
    2251 template<typename T>
    2252 T* VmaPoolAllocator<T>::Alloc()
    2253 {
    2254  for(size_t i = m_ItemBlocks.size(); i--; )
    2255  {
    2256  ItemBlock& block = m_ItemBlocks[i];
    2257  // This block has some free items: Use first one.
    2258  if(block.FirstFreeIndex != UINT32_MAX)
    2259  {
    2260  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    2261  block.FirstFreeIndex = pItem->NextFreeIndex;
    2262  return &pItem->Value;
    2263  }
    2264  }
    2265 
    2266  // No block has a free item: create a new one and use it.
    2267  ItemBlock& newBlock = CreateNewBlock();
    2268  Item* const pItem = &newBlock.pItems[0];
    2269  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    2270  return &pItem->Value;
    2271 }
    2272 
    2273 template<typename T>
    2274 void VmaPoolAllocator<T>::Free(T* ptr)
    2275 {
    2276  // Search all memory blocks to find ptr.
    2277  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    2278  {
    2279  ItemBlock& block = m_ItemBlocks[i];
    2280 
    2281  // Cast ptr to Item* via memcpy of the pointer value (T is the union's member at offset 0).
    2282  Item* pItemPtr;
    2283  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    2284 
    2285  // Check if pItemPtr is in address range of this block.
    2286  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    2287  {
    2288  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    2289  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    2290  block.FirstFreeIndex = index;
    2291  return;
    2292  }
    2293  }
    2294  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    2295 }
    2296 
    2297 template<typename T>
    2298 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    2299 {
    2300  ItemBlock newBlock = {
    2301  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    2302 
    2303  m_ItemBlocks.push_back(newBlock);
    2304 
    2305  // Set up singly-linked list of all free items in this block.
    2306  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    2307  newBlock.pItems[i].NextFreeIndex = i + 1;
    2308  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    2309  return m_ItemBlocks.back();
    2310 }
    2311 
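// Usage sketch (editor's illustration): VmaPoolAllocator hands out raw storage;
// it never runs T's constructor or destructor - the caller does, as VmaRawList
// below does for its items. 'allocCallbacks' is a hypothetical callbacks pointer:
//
//   VmaPoolAllocator<uint64_t> pool(allocCallbacks, 128); // 128 items per block
//   uint64_t* p = pool.Alloc();
//   *p = 42;
//   pool.Free(p);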
    2312 ////////////////////////////////////////////////////////////////////////////////
    2313 // class VmaRawList, VmaList
    2314 
    2315 #if VMA_USE_STL_LIST
    2316 
    2317 #define VmaList std::list
    2318 
    2319 #else // #if VMA_USE_STL_LIST
    2320 
    2321 template<typename T>
    2322 struct VmaListItem
    2323 {
    2324  VmaListItem* pPrev;
    2325  VmaListItem* pNext;
    2326  T Value;
    2327 };
    2328 
    2329 // Doubly linked list.
    2330 template<typename T>
    2331 class VmaRawList
    2332 {
    2333 public:
    2334  typedef VmaListItem<T> ItemType;
    2335 
    2336  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    2337  ~VmaRawList();
    2338  void Clear();
    2339 
    2340  size_t GetCount() const { return m_Count; }
    2341  bool IsEmpty() const { return m_Count == 0; }
    2342 
    2343  ItemType* Front() { return m_pFront; }
    2344  const ItemType* Front() const { return m_pFront; }
    2345  ItemType* Back() { return m_pBack; }
    2346  const ItemType* Back() const { return m_pBack; }
    2347 
    2348  ItemType* PushBack();
    2349  ItemType* PushFront();
    2350  ItemType* PushBack(const T& value);
    2351  ItemType* PushFront(const T& value);
    2352  void PopBack();
    2353  void PopFront();
    2354 
    2355  // Item can be null - it means PushBack.
    2356  ItemType* InsertBefore(ItemType* pItem);
    2357  // Item can be null - it means PushFront.
    2358  ItemType* InsertAfter(ItemType* pItem);
    2359 
    2360  ItemType* InsertBefore(ItemType* pItem, const T& value);
    2361  ItemType* InsertAfter(ItemType* pItem, const T& value);
    2362 
    2363  void Remove(ItemType* pItem);
    2364 
    2365 private:
    2366  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    2367  VmaPoolAllocator<ItemType> m_ItemAllocator;
    2368  ItemType* m_pFront;
    2369  ItemType* m_pBack;
    2370  size_t m_Count;
    2371 
    2372  // Declared but not defined, to block the copy constructor and assignment operator.
    2373  VmaRawList(const VmaRawList<T>& src);
    2374  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
    2375 };
    2376 
    2377 template<typename T>
    2378 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    2379  m_pAllocationCallbacks(pAllocationCallbacks),
    2380  m_ItemAllocator(pAllocationCallbacks, 128),
    2381  m_pFront(VMA_NULL),
    2382  m_pBack(VMA_NULL),
    2383  m_Count(0)
    2384 {
    2385 }
    2386 
    2387 template<typename T>
    2388 VmaRawList<T>::~VmaRawList()
    2389 {
    2390  // Intentionally not calling Clear, because that would spend unnecessary
    2391  // computation returning all items to m_ItemAllocator as free.
    2392 }
    2393 
    2394 template<typename T>
    2395 void VmaRawList<T>::Clear()
    2396 {
    2397  if(IsEmpty() == false)
    2398  {
    2399  ItemType* pItem = m_pBack;
    2400  while(pItem != VMA_NULL)
    2401  {
    2402  ItemType* const pPrevItem = pItem->pPrev;
    2403  m_ItemAllocator.Free(pItem);
    2404  pItem = pPrevItem;
    2405  }
    2406  m_pFront = VMA_NULL;
    2407  m_pBack = VMA_NULL;
    2408  m_Count = 0;
    2409  }
    2410 }
    2411 
    2412 template<typename T>
    2413 VmaListItem<T>* VmaRawList<T>::PushBack()
    2414 {
    2415  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2416  pNewItem->pNext = VMA_NULL;
    2417  if(IsEmpty())
    2418  {
    2419  pNewItem->pPrev = VMA_NULL;
    2420  m_pFront = pNewItem;
    2421  m_pBack = pNewItem;
    2422  m_Count = 1;
    2423  }
    2424  else
    2425  {
    2426  pNewItem->pPrev = m_pBack;
    2427  m_pBack->pNext = pNewItem;
    2428  m_pBack = pNewItem;
    2429  ++m_Count;
    2430  }
    2431  return pNewItem;
    2432 }
    2433 
    2434 template<typename T>
    2435 VmaListItem<T>* VmaRawList<T>::PushFront()
    2436 {
    2437  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2438  pNewItem->pPrev = VMA_NULL;
    2439  if(IsEmpty())
    2440  {
    2441  pNewItem->pNext = VMA_NULL;
    2442  m_pFront = pNewItem;
    2443  m_pBack = pNewItem;
    2444  m_Count = 1;
    2445  }
    2446  else
    2447  {
    2448  pNewItem->pNext = m_pFront;
    2449  m_pFront->pPrev = pNewItem;
    2450  m_pFront = pNewItem;
    2451  ++m_Count;
    2452  }
    2453  return pNewItem;
    2454 }
    2455 
    2456 template<typename T>
    2457 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    2458 {
    2459  ItemType* const pNewItem = PushBack();
    2460  pNewItem->Value = value;
    2461  return pNewItem;
    2462 }
    2463 
    2464 template<typename T>
    2465 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    2466 {
    2467  ItemType* const pNewItem = PushFront();
    2468  pNewItem->Value = value;
    2469  return pNewItem;
    2470 }
    2471 
    2472 template<typename T>
    2473 void VmaRawList<T>::PopBack()
    2474 {
    2475  VMA_HEAVY_ASSERT(m_Count > 0);
    2476  ItemType* const pBackItem = m_pBack;
    2477  ItemType* const pPrevItem = pBackItem->pPrev;
    2478  if(pPrevItem != VMA_NULL)
    2479  {
    2480  pPrevItem->pNext = VMA_NULL;
    2481  }
    2482  m_pBack = pPrevItem;
    2483  m_ItemAllocator.Free(pBackItem);
    2484  --m_Count;
    2485 }
    2486 
    2487 template<typename T>
    2488 void VmaRawList<T>::PopFront()
    2489 {
    2490  VMA_HEAVY_ASSERT(m_Count > 0);
    2491  ItemType* const pFrontItem = m_pFront;
    2492  ItemType* const pNextItem = pFrontItem->pNext;
    2493  if(pNextItem != VMA_NULL)
    2494  {
    2495  pNextItem->pPrev = VMA_NULL;
    2496  }
    2497  m_pFront = pNextItem;
    2498  m_ItemAllocator.Free(pFrontItem);
    2499  --m_Count;
    2500 }
    2501 
    2502 template<typename T>
    2503 void VmaRawList<T>::Remove(ItemType* pItem)
    2504 {
    2505  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    2506  VMA_HEAVY_ASSERT(m_Count > 0);
    2507 
    2508  if(pItem->pPrev != VMA_NULL)
    2509  {
    2510  pItem->pPrev->pNext = pItem->pNext;
    2511  }
    2512  else
    2513  {
    2514  VMA_HEAVY_ASSERT(m_pFront == pItem);
    2515  m_pFront = pItem->pNext;
    2516  }
    2517 
    2518  if(pItem->pNext != VMA_NULL)
    2519  {
    2520  pItem->pNext->pPrev = pItem->pPrev;
    2521  }
    2522  else
    2523  {
    2524  VMA_HEAVY_ASSERT(m_pBack == pItem);
    2525  m_pBack = pItem->pPrev;
    2526  }
    2527 
    2528  m_ItemAllocator.Free(pItem);
    2529  --m_Count;
    2530 }
    2531 
    2532 template<typename T>
    2533 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    2534 {
    2535  if(pItem != VMA_NULL)
    2536  {
    2537  ItemType* const prevItem = pItem->pPrev;
    2538  ItemType* const newItem = m_ItemAllocator.Alloc();
    2539  newItem->pPrev = prevItem;
    2540  newItem->pNext = pItem;
    2541  pItem->pPrev = newItem;
    2542  if(prevItem != VMA_NULL)
    2543  {
    2544  prevItem->pNext = newItem;
    2545  }
    2546  else
    2547  {
    2548  VMA_HEAVY_ASSERT(m_pFront == pItem);
    2549  m_pFront = newItem;
    2550  }
    2551  ++m_Count;
    2552  return newItem;
    2553  }
    2554  else
    2555  return PushBack();
    2556 }
    2557 
    2558 template<typename T>
    2559 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    2560 {
    2561  if(pItem != VMA_NULL)
    2562  {
    2563  ItemType* const nextItem = pItem->pNext;
    2564  ItemType* const newItem = m_ItemAllocator.Alloc();
    2565  newItem->pNext = nextItem;
    2566  newItem->pPrev = pItem;
    2567  pItem->pNext = newItem;
    2568  if(nextItem != VMA_NULL)
    2569  {
    2570  nextItem->pPrev = newItem;
    2571  }
    2572  else
    2573  {
    2574  VMA_HEAVY_ASSERT(m_pBack == pItem);
    2575  m_pBack = newItem;
    2576  }
    2577  ++m_Count;
    2578  return newItem;
    2579  }
    2580  else
    2581  return PushFront();
    2582 }
    2583 
    2584 template<typename T>
    2585 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    2586 {
    2587  ItemType* const newItem = InsertBefore(pItem);
    2588  newItem->Value = value;
    2589  return newItem;
    2590 }
    2591 
    2592 template<typename T>
    2593 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    2594 {
    2595  ItemType* const newItem = InsertAfter(pItem);
    2596  newItem->Value = value;
    2597  return newItem;
    2598 }
    2599 
    2600 template<typename T, typename AllocatorT>
    2601 class VmaList
    2602 {
    2603 public:
    2604  class iterator
    2605  {
    2606  public:
    2607  iterator() :
    2608  m_pList(VMA_NULL),
    2609  m_pItem(VMA_NULL)
    2610  {
    2611  }
    2612 
    2613  T& operator*() const
    2614  {
    2615  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2616  return m_pItem->Value;
    2617  }
    2618  T* operator->() const
    2619  {
    2620  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2621  return &m_pItem->Value;
    2622  }
    2623 
    2624  iterator& operator++()
    2625  {
    2626  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2627  m_pItem = m_pItem->pNext;
    2628  return *this;
    2629  }
    2630  iterator& operator--()
    2631  {
    2632  if(m_pItem != VMA_NULL)
    2633  {
    2634  m_pItem = m_pItem->pPrev;
    2635  }
    2636  else
    2637  {
    2638  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    2639  m_pItem = m_pList->Back();
    2640  }
    2641  return *this;
    2642  }
    2643 
    2644  iterator operator++(int)
    2645  {
    2646  iterator result = *this;
    2647  ++*this;
    2648  return result;
    2649  }
    2650  iterator operator--(int)
    2651  {
    2652  iterator result = *this;
    2653  --*this;
    2654  return result;
    2655  }
    2656 
    2657  bool operator==(const iterator& rhs) const
    2658  {
    2659  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2660  return m_pItem == rhs.m_pItem;
    2661  }
    2662  bool operator!=(const iterator& rhs) const
    2663  {
    2664  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2665  return m_pItem != rhs.m_pItem;
    2666  }
    2667 
    2668  private:
    2669  VmaRawList<T>* m_pList;
    2670  VmaListItem<T>* m_pItem;
    2671 
    2672  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    2673  m_pList(pList),
    2674  m_pItem(pItem)
    2675  {
    2676  }
    2677 
    2678  friend class VmaList<T, AllocatorT>;
    2679  };
    2680 
    2681  class const_iterator
    2682  {
    2683  public:
    2684  const_iterator() :
    2685  m_pList(VMA_NULL),
    2686  m_pItem(VMA_NULL)
    2687  {
    2688  }
    2689 
    2690  const_iterator(const iterator& src) :
    2691  m_pList(src.m_pList),
    2692  m_pItem(src.m_pItem)
    2693  {
    2694  }
    2695 
    2696  const T& operator*() const
    2697  {
    2698  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2699  return m_pItem->Value;
    2700  }
    2701  const T* operator->() const
    2702  {
    2703  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2704  return &m_pItem->Value;
    2705  }
    2706 
    2707  const_iterator& operator++()
    2708  {
    2709  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    2710  m_pItem = m_pItem->pNext;
    2711  return *this;
    2712  }
    2713  const_iterator& operator--()
    2714  {
    2715  if(m_pItem != VMA_NULL)
    2716  {
    2717  m_pItem = m_pItem->pPrev;
    2718  }
    2719  else
    2720  {
    2721  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    2722  m_pItem = m_pList->Back();
    2723  }
    2724  return *this;
    2725  }
    2726 
    2727  const_iterator operator++(int)
    2728  {
    2729  const_iterator result = *this;
    2730  ++*this;
    2731  return result;
    2732  }
    2733  const_iterator operator--(int)
    2734  {
    2735  const_iterator result = *this;
    2736  --*this;
    2737  return result;
    2738  }
    2739 
    2740  bool operator==(const const_iterator& rhs) const
    2741  {
    2742  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2743  return m_pItem == rhs.m_pItem;
    2744  }
    2745  bool operator!=(const const_iterator& rhs) const
    2746  {
    2747  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    2748  return m_pItem != rhs.m_pItem;
    2749  }
    2750 
    2751  private:
    2752  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    2753  m_pList(pList),
    2754  m_pItem(pItem)
    2755  {
    2756  }
    2757 
    2758  const VmaRawList<T>* m_pList;
    2759  const VmaListItem<T>* m_pItem;
    2760 
    2761  friend class VmaList<T, AllocatorT>;
    2762  };
    2763 
    2764  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    2765 
    2766  bool empty() const { return m_RawList.IsEmpty(); }
    2767  size_t size() const { return m_RawList.GetCount(); }
    2768 
    2769  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    2770  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    2771 
    2772  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    2773  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    2774 
    2775  void clear() { m_RawList.Clear(); }
    2776  void push_back(const T& value) { m_RawList.PushBack(value); }
    2777  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    2778  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    2779 
    2780 private:
    2781  VmaRawList<T> m_RawList;
    2782 };
    2783 
    2784 #endif // #if VMA_USE_STL_LIST
    2785 
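// Usage sketch (editor's illustration): VmaList mirrors the subset of std::list
// the library needs, so this snippet compiles the same way when VMA_USE_STL_LIST
// is enabled. 'allocCallbacks' is a hypothetical callbacks pointer:
//
//   VmaList< uint32_t, VmaStlAllocator<uint32_t> > list(
//       VmaStlAllocator<uint32_t>(allocCallbacks));
//   list.push_back(1);
//   list.push_back(2);
//   for(VmaList< uint32_t, VmaStlAllocator<uint32_t> >::iterator it = list.begin();
//       it != list.end(); ++it)
//   {
//       const uint32_t value = *it;
//   }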
    2786 ////////////////////////////////////////////////////////////////////////////////
    2787 // class VmaMap
    2788 
    2789 // Unused in this version.
    2790 #if 0
    2791 
    2792 #if VMA_USE_STL_UNORDERED_MAP
    2793 
    2794 #define VmaPair std::pair
    2795 
    2796 #define VMA_MAP_TYPE(KeyT, ValueT) \
    2797  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    2798 
    2799 #else // #if VMA_USE_STL_UNORDERED_MAP
    2800 
    2801 template<typename T1, typename T2>
    2802 struct VmaPair
    2803 {
    2804  T1 first;
    2805  T2 second;
    2806 
    2807  VmaPair() : first(), second() { }
    2808  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    2809 };
    2810 
    2811 /* Class compatible with subset of interface of std::unordered_map.
    2812 KeyT, ValueT must be POD because they will be stored in VmaVector.
    2813 */
    2814 template<typename KeyT, typename ValueT>
    2815 class VmaMap
    2816 {
    2817 public:
    2818  typedef VmaPair<KeyT, ValueT> PairType;
    2819  typedef PairType* iterator;
    2820 
    2821  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    2822 
    2823  iterator begin() { return m_Vector.begin(); }
    2824  iterator end() { return m_Vector.end(); }
    2825 
    2826  void insert(const PairType& pair);
    2827  iterator find(const KeyT& key);
    2828  void erase(iterator it);
    2829 
    2830 private:
    2831  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    2832 };
    2833 
    2834 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    2835 
    2836 template<typename FirstT, typename SecondT>
    2837 struct VmaPairFirstLess
    2838 {
    2839  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    2840  {
    2841  return lhs.first < rhs.first;
    2842  }
    2843  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    2844  {
    2845  return lhs.first < rhsFirst;
    2846  }
    2847 };
    2848 
    2849 template<typename KeyT, typename ValueT>
    2850 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    2851 {
    2852  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    2853  m_Vector.data(),
    2854  m_Vector.data() + m_Vector.size(),
    2855  pair,
    2856  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    2857  VmaVectorInsert(m_Vector, indexToInsert, pair);
    2858 }
    2859 
    2860 template<typename KeyT, typename ValueT>
    2861 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    2862 {
    2863  PairType* it = VmaBinaryFindFirstNotLess(
    2864  m_Vector.data(),
    2865  m_Vector.data() + m_Vector.size(),
    2866  key,
    2867  VmaPairFirstLess<KeyT, ValueT>());
    2868  if((it != m_Vector.end()) && (it->first == key))
    2869  {
    2870  return it;
    2871  }
    2872  else
    2873  {
    2874  return m_Vector.end();
    2875  }
    2876 }
    2877 
    2878 template<typename KeyT, typename ValueT>
    2879 void VmaMap<KeyT, ValueT>::erase(iterator it)
    2880 {
    2881  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    2882 }
    2883 
    2884 #endif // #if VMA_USE_STL_UNORDERED_MAP
    2885 
    2886 #endif // #if 0
    2887 
    2888 ////////////////////////////////////////////////////////////////////////////////
    2889 
    2890 class VmaDeviceMemoryBlock;
    2891 
    2892 enum VMA_BLOCK_VECTOR_TYPE
    2893 {
    2894  VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    2895  VMA_BLOCK_VECTOR_TYPE_MAPPED,
    2896  VMA_BLOCK_VECTOR_TYPE_COUNT
    2897 };
    2898 
    2899 static VMA_BLOCK_VECTOR_TYPE VmaAllocationCreateFlagsToBlockVectorType(VmaAllocationCreateFlags flags)
    2900 {
    2901  return (flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 ?
    2902  VMA_BLOCK_VECTOR_TYPE_MAPPED :
    2903  VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
    2904 }
    2905 
    2906 struct VmaAllocation_T
    2907 {
    2908 public:
    2909  enum ALLOCATION_TYPE
    2910  {
    2911  ALLOCATION_TYPE_NONE,
    2912  ALLOCATION_TYPE_BLOCK,
    2913  ALLOCATION_TYPE_DEDICATED,
    2914  };
    2915 
    2916  VmaAllocation_T(uint32_t currentFrameIndex) :
    2917  m_Alignment(1),
    2918  m_Size(0),
    2919  m_pUserData(VMA_NULL),
    2920  m_Type(ALLOCATION_TYPE_NONE),
    2921  m_SuballocationType(VMA_SUBALLOCATION_TYPE_UNKNOWN),
    2922  m_LastUseFrameIndex(currentFrameIndex)
    2923  {
    2924  }
    2925 
    2926  void InitBlockAllocation(
    2927  VmaPool hPool,
    2928  VmaDeviceMemoryBlock* block,
    2929  VkDeviceSize offset,
    2930  VkDeviceSize alignment,
    2931  VkDeviceSize size,
    2932  VmaSuballocationType suballocationType,
    2933  void* pUserData,
    2934  bool canBecomeLost)
    2935  {
    2936  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    2937  VMA_ASSERT(block != VMA_NULL);
    2938  m_Type = ALLOCATION_TYPE_BLOCK;
    2939  m_Alignment = alignment;
    2940  m_Size = size;
    2941  m_pUserData = pUserData;
    2942  m_SuballocationType = suballocationType;
    2943  m_BlockAllocation.m_hPool = hPool;
    2944  m_BlockAllocation.m_Block = block;
    2945  m_BlockAllocation.m_Offset = offset;
    2946  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    2947  }
    2948 
    2949  void InitLost()
    2950  {
    2951  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    2952  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    2953  m_Type = ALLOCATION_TYPE_BLOCK;
    2954  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
    2955  m_BlockAllocation.m_Block = VMA_NULL;
    2956  m_BlockAllocation.m_Offset = 0;
    2957  m_BlockAllocation.m_CanBecomeLost = true;
    2958  }
    2959 
    2960  void ChangeBlockAllocation(
    2961  VmaDeviceMemoryBlock* block,
    2962  VkDeviceSize offset)
    2963  {
    2964  VMA_ASSERT(block != VMA_NULL);
    2965  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    2966  m_BlockAllocation.m_Block = block;
    2967  m_BlockAllocation.m_Offset = offset;
    2968  }
    2969 
    2970  void InitDedicatedAllocation(
    2971  uint32_t memoryTypeIndex,
    2972  VkDeviceMemory hMemory,
    2973  VmaSuballocationType suballocationType,
    2974  bool persistentMap,
    2975  void* pMappedData,
    2976  VkDeviceSize size,
    2977  void* pUserData)
    2978  {
    2979  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    2980  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    2981  m_Type = ALLOCATION_TYPE_DEDICATED;
    2982  m_Alignment = 0;
    2983  m_Size = size;
    2984  m_pUserData = pUserData;
    2985  m_SuballocationType = suballocationType;
    2986  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    2987  m_DedicatedAllocation.m_hMemory = hMemory;
    2988  m_DedicatedAllocation.m_PersistentMap = persistentMap;
    2989  m_DedicatedAllocation.m_pMappedData = pMappedData;
    2990  }
    2991 
    2992  ALLOCATION_TYPE GetType() const { return m_Type; }
    2993  VkDeviceSize GetAlignment() const { return m_Alignment; }
    2994  VkDeviceSize GetSize() const { return m_Size; }
    2995  void* GetUserData() const { return m_pUserData; }
    2996  void SetUserData(void* pUserData) { m_pUserData = pUserData; }
    2997  VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }
    2998 
    2999  VmaDeviceMemoryBlock* GetBlock() const
    3000  {
    3001  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    3002  return m_BlockAllocation.m_Block;
    3003  }
    3004  VkDeviceSize GetOffset() const;
    3005  VkDeviceMemory GetMemory() const;
    3006  uint32_t GetMemoryTypeIndex() const;
    3007  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
    3008  void* GetMappedData() const;
    3009  bool CanBecomeLost() const;
    3010  VmaPool GetPool() const;
    3011 
    3012  VkResult DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator);
    3013  void DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator);
    3014 
    3015  uint32_t GetLastUseFrameIndex() const
    3016  {
    3017  return m_LastUseFrameIndex.load();
    3018  }
    3019  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    3020  {
    3021  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    3022  }
    3023  /*
    3024  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    3025  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    3026  - Else, returns false.
    3027 
    3028  If hAllocation is already lost, asserts - you should not call it then.
    3029  Also asserts if hAllocation was not created with CAN_BECOME_LOST_BIT.
    3030  */
    3031  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    3032 
    3033  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    3034  {
    3035  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    3036  outInfo.blockCount = 1;
    3037  outInfo.allocationCount = 1;
    3038  outInfo.unusedRangeCount = 0;
    3039  outInfo.usedBytes = m_Size;
    3040  outInfo.unusedBytes = 0;
    3041  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    3042  outInfo.unusedRangeSizeMin = UINT64_MAX;
    3043  outInfo.unusedRangeSizeMax = 0;
    3044  }
    3045 
    3046 private:
    3047  VkDeviceSize m_Alignment;
    3048  VkDeviceSize m_Size;
    3049  void* m_pUserData;
    3050  ALLOCATION_TYPE m_Type;
    3051  VmaSuballocationType m_SuballocationType;
    3052  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    3053 
    3054  // Allocation out of VmaDeviceMemoryBlock.
    3055  struct BlockAllocation
    3056  {
    3057  VmaPool m_hPool; // Null if belongs to general memory.
    3058  VmaDeviceMemoryBlock* m_Block;
    3059  VkDeviceSize m_Offset;
    3060  bool m_CanBecomeLost;
    3061  };
    3062 
    3063  // Allocation for an object that has its own private VkDeviceMemory.
    3064  struct DedicatedAllocation
    3065  {
    3066  uint32_t m_MemoryTypeIndex;
    3067  VkDeviceMemory m_hMemory;
    3068  bool m_PersistentMap;
    3069  void* m_pMappedData;
    3070  };
    3071 
    3072  union
    3073  {
    3074  // Allocation out of VmaDeviceMemoryBlock.
    3075  BlockAllocation m_BlockAllocation;
    3076  // Allocation for an object that has its own private VkDeviceMemory.
    3077  DedicatedAllocation m_DedicatedAllocation;
    3078  };
    3079 };
    3080 
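// Editor's illustration of the two initialization paths (hypothetical locals).
// An allocation starts in ALLOCATION_TYPE_NONE state and is initialized exactly once:
//
//   VmaAllocation_T alloc(currentFrameIndex);
//   // a) sub-allocated out of a VmaDeviceMemoryBlock:
//   alloc.InitBlockAllocation(hPool, pBlock, offset, alignment, size,
//       VMA_SUBALLOCATION_TYPE_BUFFER, pUserData, canBecomeLost);
//   // b) or given its own, dedicated VkDeviceMemory instead:
//   // alloc.InitDedicatedAllocation(memTypeIndex, hMemory,
//   //     VMA_SUBALLOCATION_TYPE_BUFFER, false, VMA_NULL, size, pUserData);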
    3081 /*
    3082 Represents a region of a VmaDeviceMemoryBlock that is either free or assigned
    3083 and returned to the user as an allocated memory block.
    3084 */
    3085 struct VmaSuballocation
    3086 {
    3087  VkDeviceSize offset;
    3088  VkDeviceSize size;
    3089  VmaAllocation hAllocation;
    3090  VmaSuballocationType type;
    3091 };
    3092 
    3093 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    3094 
    3095 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
    3096 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    3097 
    3098 /*
    3099 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    3100 
    3101 If canMakeOtherLost was false:
    3102 - item points to a FREE suballocation.
    3103 - itemsToMakeLostCount is 0.
    3104 
    3105 If canMakeOtherLost was true:
    3106 - item points to the first of a sequence of suballocations, each of which is
    3107  either FREE or points to a VmaAllocation that can become lost.
    3108 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    3109  the requested allocation to succeed.
    3110 */
    3111 struct VmaAllocationRequest
    3112 {
    3113  VkDeviceSize offset;
    3114  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    3115  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    3116  VmaSuballocationList::iterator item;
    3117  size_t itemsToMakeLostCount;
    3118 
    3119  VkDeviceSize CalcCost() const
    3120  {
    3121  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    3122  }
    3123 };
    3124 
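// Worked example (editor's note): with VMA_LOST_ALLOCATION_COST = 1048576,
// a request with sumItemSize = 262144 and itemsToMakeLostCount = 2 has
// CalcCost() = 262144 + 2 * 1048576 = 2359296 equivalent bytes, i.e. making
// two allocations lost is weighed like wasting an extra 2 MiB of free space.
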
    3125 /*
    3126 Data structure used for bookkeeping of allocations and unused ranges of memory
    3127 in a single VkDeviceMemory block.
    3128 */
    3129 class VmaBlockMetadata
    3130 {
    3131 public:
    3132  VmaBlockMetadata(VmaAllocator hAllocator);
    3133  ~VmaBlockMetadata();
    3134  void Init(VkDeviceSize size);
    3135 
    3136  // Validates all data structures inside this object. If not valid, returns false.
    3137  bool Validate() const;
    3138  VkDeviceSize GetSize() const { return m_Size; }
    3139  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    3140  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    3141  VkDeviceSize GetUnusedRangeSizeMax() const;
    3142  // Returns true if this block is empty - contains only a single free suballocation.
    3143  bool IsEmpty() const;
    3144 
    3145  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    3146  void AddPoolStats(VmaPoolStats& inoutStats) const;
    3147 
    3148 #if VMA_STATS_STRING_ENABLED
    3149  void PrintDetailedMap(class VmaJsonWriter& json) const;
    3150 #endif
    3151 
    3152  // Creates a trivial request for the case when the block is empty.
    3153  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
    3154 
    3155  // Tries to find a place for suballocation with given parameters inside this block.
    3156  // If succeeded, fills pAllocationRequest and returns true.
    3157  // If failed, returns false.
    3158  bool CreateAllocationRequest(
    3159  uint32_t currentFrameIndex,
    3160  uint32_t frameInUseCount,
    3161  VkDeviceSize bufferImageGranularity,
    3162  VkDeviceSize allocSize,
    3163  VkDeviceSize allocAlignment,
    3164  VmaSuballocationType allocType,
    3165  bool canMakeOtherLost,
    3166  VmaAllocationRequest* pAllocationRequest);
    3167 
    3168  bool MakeRequestedAllocationsLost(
    3169  uint32_t currentFrameIndex,
    3170  uint32_t frameInUseCount,
    3171  VmaAllocationRequest* pAllocationRequest);
    3172 
    3173  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    3174 
    3175  // Makes actual allocation based on request. Request must already be checked and valid.
    3176  void Alloc(
    3177  const VmaAllocationRequest& request,
    3178  VmaSuballocationType type,
    3179  VkDeviceSize allocSize,
    3180  VmaAllocation hAllocation);
    3181 
    3182  // Frees suballocation assigned to given memory region.
    3183  void Free(const VmaAllocation allocation);
    3184 
    3185 private:
    3186  VkDeviceSize m_Size;
    3187  uint32_t m_FreeCount;
    3188  VkDeviceSize m_SumFreeSize;
    3189  VmaSuballocationList m_Suballocations;
    3190  // Suballocations that are free and have size greater than certain threshold.
    3191  // Sorted by size, ascending.
    3192  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    3193 
    3194  bool ValidateFreeSuballocationList() const;
    3195 
    3196  // Checks if requested suballocation with given parameters can be placed in given suballocItem.
    3197  // If yes, fills pOffset and returns true. If no, returns false.
    3198  bool CheckAllocation(
    3199  uint32_t currentFrameIndex,
    3200  uint32_t frameInUseCount,
    3201  VkDeviceSize bufferImageGranularity,
    3202  VkDeviceSize allocSize,
    3203  VkDeviceSize allocAlignment,
    3204  VmaSuballocationType allocType,
    3205  VmaSuballocationList::const_iterator suballocItem,
    3206  bool canMakeOtherLost,
    3207  VkDeviceSize* pOffset,
    3208  size_t* itemsToMakeLostCount,
    3209  VkDeviceSize* pSumFreeSize,
    3210  VkDeviceSize* pSumItemSize) const;
    3211  // Given a free suballocation, merges it with the following one, which must also be free.
    3212  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    3213  // Releases given suballocation, making it free.
    3214  // Merges it with adjacent free suballocations if applicable.
    3215  // Returns iterator to new free suballocation at this place.
    3216  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    3217  // Given a free suballocation, inserts it into the sorted list
    3218  // m_FreeSuballocationsBySize if it is large enough to qualify.
    3219  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    3220  // Given a free suballocation, removes it from the sorted list
    3221  // m_FreeSuballocationsBySize if it is registered there.
    3222  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    3223 };
    3224 
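// Editor's illustration: the two-phase flow a block vector runs against a block's
// metadata when sub-allocating (hypothetical locals):
//
//   VmaAllocationRequest request;
//   if(pBlock->m_Metadata.CreateAllocationRequest(
//       currentFrameIndex, frameInUseCount, bufferImageGranularity,
//       allocSize, allocAlignment, VMA_SUBALLOCATION_TYPE_BUFFER,
//       false, // canMakeOtherLost
//       &request))
//   {
//       pBlock->m_Metadata.Alloc(request, VMA_SUBALLOCATION_TYPE_BUFFER,
//           allocSize, hAllocation);
//   }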
    3225 /*
    3226 Represents a single block of device memory (`VkDeviceMemory`) with all the
    3227 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
    3228 
    3229 Thread-safety: This class must be externally synchronized.
    3230 */
    3231 class VmaDeviceMemoryBlock
    3232 {
    3233 public:
    3234  uint32_t m_MemoryTypeIndex;
    3235  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    3236  VkDeviceMemory m_hMemory;
    3237  bool m_PersistentMap;
    3238  void* m_pMappedData;
    3239  VmaBlockMetadata m_Metadata;
    3240 
    3241  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    3242 
    3243  ~VmaDeviceMemoryBlock()
    3244  {
    3245  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    3246  }
    3247 
    3248  // Always call after construction.
    3249  void Init(
    3250  uint32_t newMemoryTypeIndex,
    3251  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
    3252  VkDeviceMemory newMemory,
    3253  VkDeviceSize newSize,
    3254  bool persistentMap,
    3255  void* pMappedData);
    3256  // Always call before destruction.
    3257  void Destroy(VmaAllocator allocator);
    3258 
    3259  // Validates all data structures inside this object. If not valid, returns false.
    3260  bool Validate() const;
    3261 };
    3262 
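// Editor's illustration of a block's lifecycle (hypothetical locals). Destroy()
// must precede destruction - the destructor asserts m_hMemory is already null:
//
//   VmaDeviceMemoryBlock block(hAllocator);
//   block.Init(memTypeIndex, VMA_BLOCK_VECTOR_TYPE_UNMAPPED, hMemory, blockSize,
//       false, VMA_NULL); // persistentMap, pMappedData
//   // ... sub-allocate via block.m_Metadata ...
//   block.Destroy(hAllocator);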
    3263 struct VmaPointerLess
    3264 {
    3265  bool operator()(const void* lhs, const void* rhs) const
    3266  {
    3267  return lhs < rhs;
    3268  }
    3269 };
    3270 
    3271 class VmaDefragmentator;
    3272 
    3273 /*
    3274 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    3275 Vulkan memory type.
    3276 
    3277 Synchronized internally with a mutex.
    3278 */
    3279 struct VmaBlockVector
    3280 {
    3281  VmaBlockVector(
    3282  VmaAllocator hAllocator,
    3283  uint32_t memoryTypeIndex,
    3284  VMA_BLOCK_VECTOR_TYPE blockVectorType,
    3285  VkDeviceSize preferredBlockSize,
    3286  size_t minBlockCount,
    3287  size_t maxBlockCount,
    3288  VkDeviceSize bufferImageGranularity,
    3289  uint32_t frameInUseCount,
    3290  bool isCustomPool);
    3291  ~VmaBlockVector();
    3292 
    3293  VkResult CreateMinBlocks();
    3294 
    3295  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    3296  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    3297  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    3298  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    3299  VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const { return m_BlockVectorType; }
    3300 
    3301  void GetPoolStats(VmaPoolStats* pStats);
    3302 
    3303  bool IsEmpty() const { return m_Blocks.empty(); }
    3304 
    3305  VkResult Allocate(
    3306  VmaPool hCurrentPool,
    3307  uint32_t currentFrameIndex,
    3308  const VkMemoryRequirements& vkMemReq,
    3309  const VmaAllocationCreateInfo& createInfo,
    3310  VmaSuballocationType suballocType,
    3311  VmaAllocation* pAllocation);
    3312 
    3313  void Free(
    3314  VmaAllocation hAllocation);
    3315 
    3316  // Adds statistics of this BlockVector to pStats.
    3317  void AddStats(VmaStats* pStats);
    3318 
    3319 #if VMA_STATS_STRING_ENABLED
    3320  void PrintDetailedMap(class VmaJsonWriter& json);
    3321 #endif
    3322 
    3323  void UnmapPersistentlyMappedMemory();
    3324  VkResult MapPersistentlyMappedMemory();
    3325 
    3326  void MakePoolAllocationsLost(
    3327  uint32_t currentFrameIndex,
    3328  size_t* pLostAllocationCount);
    3329 
    3330  VmaDefragmentator* EnsureDefragmentator(
    3331  VmaAllocator hAllocator,
    3332  uint32_t currentFrameIndex);
    3333 
    3334  VkResult Defragment(
    3335  VmaDefragmentationStats* pDefragmentationStats,
    3336  VkDeviceSize& maxBytesToMove,
    3337  uint32_t& maxAllocationsToMove);
    3338 
    3339  void DestroyDefragmentator();
    3340 
    3341 private:
    3342  friend class VmaDefragmentator;
    3343 
    3344  const VmaAllocator m_hAllocator;
    3345  const uint32_t m_MemoryTypeIndex;
    3346  const VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    3347  const VkDeviceSize m_PreferredBlockSize;
    3348  const size_t m_MinBlockCount;
    3349  const size_t m_MaxBlockCount;
    3350  const VkDeviceSize m_BufferImageGranularity;
    3351  const uint32_t m_FrameInUseCount;
    3352  const bool m_IsCustomPool;
    3353  VMA_MUTEX m_Mutex;
    3354  // Incrementally sorted by sumFreeSize, ascending.
    3355  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    3356  /* There can be at most one block that is completely empty - a
    3357  hysteresis to avoid the pessimistic case of alternating creation and
    3358  destruction of a VkDeviceMemory. */
    3359  bool m_HasEmptyBlock;
    3360  VmaDefragmentator* m_pDefragmentator;
    3361 
    3362  // Finds and removes given block from vector.
    3363  void Remove(VmaDeviceMemoryBlock* pBlock);
    3364 
    3365  // Performs a single step of sorting m_Blocks. They may not be fully sorted
    3366  // after this call.
    3367  void IncrementallySortBlocks();
    3368 
    3369  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    3370 };
    3371 
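// Editor's illustration: allocating from a default (non-custom-pool) block vector;
// hCurrentPool is VK_NULL_HANDLE when the memory belongs to general pools
// (hypothetical locals):
//
//   VmaAllocation hAllocation = VK_NULL_HANDLE;
//   VkResult res = pBlockVector->Allocate(VK_NULL_HANDLE, currentFrameIndex,
//       vkMemReq, createInfo, VMA_SUBALLOCATION_TYPE_BUFFER, &hAllocation);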
    3372 struct VmaPool_T
    3373 {
    3374 public:
    3375  VmaBlockVector m_BlockVector;
    3376 
    3377  // Takes ownership.
    3378  VmaPool_T(
    3379  VmaAllocator hAllocator,
    3380  const VmaPoolCreateInfo& createInfo);
    3381  ~VmaPool_T();
    3382 
    3383  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
    3384 
    3385 #if VMA_STATS_STRING_ENABLED
    3386  //void PrintDetailedMap(class VmaStringBuilder& sb);
    3387 #endif
    3388 };
    3389 
    3390 class VmaDefragmentator
    3391 {
    3392  const VmaAllocator m_hAllocator;
    3393  VmaBlockVector* const m_pBlockVector;
    3394  uint32_t m_CurrentFrameIndex;
    3395  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    3396  VkDeviceSize m_BytesMoved;
    3397  uint32_t m_AllocationsMoved;
    3398 
    3399  struct AllocationInfo
    3400  {
    3401  VmaAllocation m_hAllocation;
    3402  VkBool32* m_pChanged;
    3403 
    3404  AllocationInfo() :
    3405  m_hAllocation(VK_NULL_HANDLE),
    3406  m_pChanged(VMA_NULL)
    3407  {
    3408  }
    3409  };
    3410 
    3411  struct AllocationInfoSizeGreater
    3412  {
    3413  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    3414  {
    3415  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    3416  }
    3417  };
    3418 
    3419  // Used between AddAllocation and Defragment.
    3420  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    3421 
    3422  struct BlockInfo
    3423  {
    3424  VmaDeviceMemoryBlock* m_pBlock;
    3425  bool m_HasNonMovableAllocations;
    3426  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    3427 
    3428  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    3429  m_pBlock(VMA_NULL),
    3430  m_HasNonMovableAllocations(true),
    3431  m_Allocations(pAllocationCallbacks),
    3432  m_pMappedDataForDefragmentation(VMA_NULL)
    3433  {
    3434  }
    3435 
    3436  void CalcHasNonMovableAllocations()
    3437  {
    3438  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
    3439  const size_t defragmentAllocCount = m_Allocations.size();
    3440  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    3441  }
    3442 
    3443  void SortAllocationsBySizeDescecnding()
    3444  {
    3445  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    3446  }
    3447 
    3448  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
    3449  void Unmap(VmaAllocator hAllocator);
    3450 
    3451  private:
    3452  // Not null if mapped for defragmentation only, not persistently mapped.
    3453  void* m_pMappedDataForDefragmentation;
    3454  };
    3455 
    3456  struct BlockPointerLess
    3457  {
    3458  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    3459  {
    3460  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    3461  }
    3462  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    3463  {
    3464  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    3465  }
    3466  };
    3467 
    3468  // 1. Blocks with some non-movable allocations go first.
    3469  // 2. Blocks with smaller sumFreeSize go first.
    3470  struct BlockInfoCompareMoveDestination
    3471  {
    3472  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    3473  {
    3474  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    3475  {
    3476  return true;
    3477  }
    3478  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    3479  {
    3480  return false;
    3481  }
    3482  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
    3483  {
    3484  return true;
    3485  }
    3486  return false;
    3487  }
    3488  };
    3489 
    3490  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    3491  BlockInfoVector m_Blocks;
    3492 
    3493  VkResult DefragmentRound(
    3494  VkDeviceSize maxBytesToMove,
    3495  uint32_t maxAllocationsToMove);
    3496 
    3497  static bool MoveMakesSense(
    3498  size_t dstBlockIndex, VkDeviceSize dstOffset,
    3499  size_t srcBlockIndex, VkDeviceSize srcOffset);
    3500 
    3501 public:
    3502  VmaDefragmentator(
    3503  VmaAllocator hAllocator,
    3504  VmaBlockVector* pBlockVector,
    3505  uint32_t currentFrameIndex);
    3506 
    3507  ~VmaDefragmentator();
    3508 
    3509  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    3510  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    3511 
    3512  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    3513 
    3514  VkResult Defragment(
    3515  VkDeviceSize maxBytesToMove,
    3516  uint32_t maxAllocationsToMove);
    3517 };
    3518 
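// Editor's illustration of the defragmentation flow (hypothetical locals):
//
//   VmaDefragmentator* pDefrag =
//       pBlockVector->EnsureDefragmentator(hAllocator, currentFrameIndex);
//   VkBool32 allocChanged = VK_FALSE;
//   pDefrag->AddAllocation(hAlloc, &allocChanged);
//   VkResult res = pDefrag->Defragment(maxBytesToMove, maxAllocationsToMove);
//   pBlockVector->DestroyDefragmentator();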
    3519 // Main allocator object.
    3520 struct VmaAllocator_T
    3521 {
    3522  bool m_UseMutex;
    3523  bool m_UseKhrDedicatedAllocation;
    3524  VkDevice m_hDevice;
    3525  bool m_AllocationCallbacksSpecified;
    3526  VkAllocationCallbacks m_AllocationCallbacks;
    3527  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    3528  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
    3529  // Counter to allow nested calls to these functions.
    3530  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
    3531 
    3532  // Number of bytes free out of limit, or VK_WHOLE_SIZE if there is no limit for that heap.
    3533  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    3534  VMA_MUTEX m_HeapSizeLimitMutex;
    3535 
    3536  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    3537  VkPhysicalDeviceMemoryProperties m_MemProps;
    3538 
    3539  // Default pools.
    3540  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
    3541 
    3542  // Each vector is sorted by memory (handle value).
    3543  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    3544  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
    3545  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    3546 
    3547  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    3548  ~VmaAllocator_T();
    3549 
    3550  const VkAllocationCallbacks* GetAllocationCallbacks() const
    3551  {
    3552  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    3553  }
    3554  const VmaVulkanFunctions& GetVulkanFunctions() const
    3555  {
    3556  return m_VulkanFunctions;
    3557  }
    3558 
    3559  VkDeviceSize GetBufferImageGranularity() const
    3560  {
    3561  return VMA_MAX(
    3562  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    3563  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    3564  }
    3565 
    3566  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    3567  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    3568 
    3569  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    3570  {
    3571  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    3572  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    3573  }
    3574 
    3575  void GetBufferMemoryRequirements(
    3576  VkBuffer hBuffer,
    3577  VkMemoryRequirements& memReq,
    3578  bool& requiresDedicatedAllocation,
    3579  bool& prefersDedicatedAllocation) const;
    3580  void GetImageMemoryRequirements(
    3581  VkImage hImage,
    3582  VkMemoryRequirements& memReq,
    3583  bool& requiresDedicatedAllocation,
    3584  bool& prefersDedicatedAllocation) const;
    3585 
    3586  // Main allocation function.
    3587  VkResult AllocateMemory(
    3588  const VkMemoryRequirements& vkMemReq,
    3589  bool requiresDedicatedAllocation,
    3590  bool prefersDedicatedAllocation,
    3591  VkBuffer dedicatedBuffer,
    3592  VkImage dedicatedImage,
    3593  const VmaAllocationCreateInfo& createInfo,
    3594  VmaSuballocationType suballocType,
    3595  VmaAllocation* pAllocation);
    3596 
    3597  // Main deallocation function.
    3598  void FreeMemory(const VmaAllocation allocation);
    3599 
    3600  void CalculateStats(VmaStats* pStats);
    3601 
    3602 #if VMA_STATS_STRING_ENABLED
    3603  void PrintDetailedMap(class VmaJsonWriter& json);
    3604 #endif
    3605 
    3606  void UnmapPersistentlyMappedMemory();
    3607  VkResult MapPersistentlyMappedMemory();
    3608 
    3609  VkResult Defragment(
    3610  VmaAllocation* pAllocations,
    3611  size_t allocationCount,
    3612  VkBool32* pAllocationsChanged,
    3613  const VmaDefragmentationInfo* pDefragmentationInfo,
    3614  VmaDefragmentationStats* pDefragmentationStats);
    3615 
    3616  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    3617 
    3618  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    3619  void DestroyPool(VmaPool pool);
    3620  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    3621 
    3622  void SetCurrentFrameIndex(uint32_t frameIndex);
    3623 
    3624  void MakePoolAllocationsLost(
    3625  VmaPool hPool,
    3626  size_t* pLostAllocationCount);
    3627 
    3628  void CreateLostAllocation(VmaAllocation* pAllocation);
    3629 
    3630  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    3631  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    3632 
    3633 private:
    3634  VkDeviceSize m_PreferredLargeHeapBlockSize;
    3635  VkDeviceSize m_PreferredSmallHeapBlockSize;
    3636 
    3637  VkPhysicalDevice m_PhysicalDevice;
    3638  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    3639 
    3640  VMA_MUTEX m_PoolsMutex;
    3641  // Protected by m_PoolsMutex. Sorted by pointer value.
    3642  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    3643 
    3644  VmaVulkanFunctions m_VulkanFunctions;
    3645 
    3646  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    3647 
    3648  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    3649 
    3650  VkResult AllocateMemoryOfType(
    3651  const VkMemoryRequirements& vkMemReq,
    3652  bool dedicatedAllocation,
    3653  VkBuffer dedicatedBuffer,
    3654  VkImage dedicatedImage,
    3655  const VmaAllocationCreateInfo& createInfo,
    3656  uint32_t memTypeIndex,
    3657  VmaSuballocationType suballocType,
    3658  VmaAllocation* pAllocation);
    3659 
    3660  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
    3661  VkResult AllocateDedicatedMemory(
    3662  VkDeviceSize size,
    3663  VmaSuballocationType suballocType,
    3664  uint32_t memTypeIndex,
    3665  bool map,
    3666  void* pUserData,
    3667  VkBuffer dedicatedBuffer,
    3668  VkImage dedicatedImage,
    3669  VmaAllocation* pAllocation);
    3670 
    3671  // Frees given allocation created as Dedicated Memory. Unregisters it and releases its VkDeviceMemory.
    3672  void FreeDedicatedMemory(VmaAllocation allocation);
    3673 };
    3674 
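// Editor's illustration: the path vmaCreateBuffer-style code takes through the
// allocator (hypothetical locals, error handling omitted). Passing the buffer
// handle is what lets AllocateMemory exploit VK_KHR_dedicated_allocation:
//
//   VkMemoryRequirements memReq;
//   bool requiresDedicated = false, prefersDedicated = false;
//   hAllocator->GetBufferMemoryRequirements(buffer, memReq,
//       requiresDedicated, prefersDedicated);
//   VmaAllocation hAllocation = VK_NULL_HANDLE;
//   VkResult res = hAllocator->AllocateMemory(memReq,
//       requiresDedicated, prefersDedicated,
//       buffer,         // dedicatedBuffer
//       VK_NULL_HANDLE, // dedicatedImage
//       createInfo, VMA_SUBALLOCATION_TYPE_BUFFER, &hAllocation);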
    3675 ////////////////////////////////////////////////////////////////////////////////
    3676 // Memory allocation #2 after VmaAllocator_T definition
    3677 
    3678 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    3679 {
    3680  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    3681 }
    3682 
    3683 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    3684 {
    3685  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    3686 }
    3687 
    3688 template<typename T>
    3689 static T* VmaAllocate(VmaAllocator hAllocator)
    3690 {
    3691  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    3692 }
    3693 
    3694 template<typename T>
    3695 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    3696 {
    3697  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    3698 }
    3699 
    3700 template<typename T>
    3701 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    3702 {
    3703  if(ptr != VMA_NULL)
    3704  {
    3705  ptr->~T();
    3706  VmaFree(hAllocator, ptr);
    3707  }
    3708 }
    3709 
    3710 template<typename T>
    3711 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    3712 {
    3713  if(ptr != VMA_NULL)
    3714  {
    3715  for(size_t i = count; i--; )
    3716  ptr[i].~T();
    3717  VmaFree(hAllocator, ptr);
    3718  }
    3719 }
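// Illustrative sketch (not part of the library): how the helpers above pair
// up. VmaAllocate<T> obtains raw memory through the user-provided allocation
// callbacks; construction is done with placement-new and teardown with
// vma_delete, mirroring how the library creates its internal objects.
// ExamplePayload is a hypothetical type; the block is disabled with #if 0.
#if 0
struct ExamplePayload
{
    uint32_t value;
};

static void ExampleAllocationHelpers(VmaAllocator hAllocator)
{
    ExamplePayload* const p = new(VmaAllocate<ExamplePayload>(hAllocator)) ExamplePayload();
    p->value = 7;
    vma_delete(hAllocator, p); // Runs ~ExamplePayload() and frees via VmaFree().
}
#endif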
    3720 
    3722 // VmaStringBuilder
    3723 
    3724 #if VMA_STATS_STRING_ENABLED
    3725 
    3726 class VmaStringBuilder
    3727 {
    3728 public:
    3729  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    3730  size_t GetLength() const { return m_Data.size(); }
    3731  const char* GetData() const { return m_Data.data(); }
    3732 
    3733  void Add(char ch) { m_Data.push_back(ch); }
    3734  void Add(const char* pStr);
    3735  void AddNewLine() { Add('\n'); }
    3736  void AddNumber(uint32_t num);
    3737  void AddNumber(uint64_t num);
    3738  void AddPointer(const void* ptr);
    3739 
    3740 private:
    3741  VmaVector< char, VmaStlAllocator<char> > m_Data;
    3742 };
    3743 
    3744 void VmaStringBuilder::Add(const char* pStr)
    3745 {
    3746  const size_t strLen = strlen(pStr);
    3747  if(strLen > 0)
    3748  {
    3749  const size_t oldCount = m_Data.size();
    3750  m_Data.resize(oldCount + strLen);
    3751  memcpy(m_Data.data() + oldCount, pStr, strLen);
    3752  }
    3753 }
    3754 
    3755 void VmaStringBuilder::AddNumber(uint32_t num)
    3756 {
    3757  char buf[11];
    3758  VmaUint32ToStr(buf, sizeof(buf), num);
    3759  Add(buf);
    3760 }
    3761 
    3762 void VmaStringBuilder::AddNumber(uint64_t num)
    3763 {
    3764  char buf[21];
    3765  VmaUint64ToStr(buf, sizeof(buf), num);
    3766  Add(buf);
    3767 }
    3768 
    3769 void VmaStringBuilder::AddPointer(const void* ptr)
    3770 {
    3771  char buf[21];
    3772  VmaPtrToStr(buf, sizeof(buf), ptr);
    3773  Add(buf);
    3774 }
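// Illustrative usage of VmaStringBuilder (not part of the library). Note
// that GetData() is not NUL-terminated, so GetLength() must accompany it.
// Assumes `allocator` is a valid VmaAllocator; disabled with #if 0.
#if 0
static void ExampleStringBuilder(VmaAllocator allocator)
{
    VmaStringBuilder sb(allocator);
    sb.Add("Allocations: ");
    sb.AddNumber(42u); // Dispatches to the uint32_t overload.
    sb.AddNewLine();
    fwrite(sb.GetData(), 1, sb.GetLength(), stdout);
}
#endif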
    3775 
    3776 #endif // #if VMA_STATS_STRING_ENABLED
    3777 
    3779 // VmaJsonWriter
    3780 
    3781 #if VMA_STATS_STRING_ENABLED
    3782 
    3783 class VmaJsonWriter
    3784 {
    3785 public:
    3786  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    3787  ~VmaJsonWriter();
    3788 
    3789  void BeginObject(bool singleLine = false);
    3790  void EndObject();
    3791 
    3792  void BeginArray(bool singleLine = false);
    3793  void EndArray();
    3794 
    3795  void WriteString(const char* pStr);
    3796  void BeginString(const char* pStr = VMA_NULL);
    3797  void ContinueString(const char* pStr);
    3798  void ContinueString(uint32_t n);
    3799  void ContinueString(uint64_t n);
    3800  void EndString(const char* pStr = VMA_NULL);
    3801 
    3802  void WriteNumber(uint32_t n);
    3803  void WriteNumber(uint64_t n);
    3804  void WriteBool(bool b);
    3805  void WriteNull();
    3806 
    3807 private:
    3808  static const char* const INDENT;
    3809 
    3810  enum COLLECTION_TYPE
    3811  {
    3812  COLLECTION_TYPE_OBJECT,
    3813  COLLECTION_TYPE_ARRAY,
    3814  };
    3815  struct StackItem
    3816  {
    3817  COLLECTION_TYPE type;
    3818  uint32_t valueCount;
    3819  bool singleLineMode;
    3820  };
    3821 
    3822  VmaStringBuilder& m_SB;
    3823  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    3824  bool m_InsideString;
    3825 
    3826  void BeginValue(bool isString);
    3827  void WriteIndent(bool oneLess = false);
    3828 };
    3829 
    3830 const char* const VmaJsonWriter::INDENT = " ";
    3831 
    3832 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    3833  m_SB(sb),
    3834  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    3835  m_InsideString(false)
    3836 {
    3837 }
    3838 
    3839 VmaJsonWriter::~VmaJsonWriter()
    3840 {
    3841  VMA_ASSERT(!m_InsideString);
    3842  VMA_ASSERT(m_Stack.empty());
    3843 }
    3844 
    3845 void VmaJsonWriter::BeginObject(bool singleLine)
    3846 {
    3847  VMA_ASSERT(!m_InsideString);
    3848 
    3849  BeginValue(false);
    3850  m_SB.Add('{');
    3851 
    3852  StackItem item;
    3853  item.type = COLLECTION_TYPE_OBJECT;
    3854  item.valueCount = 0;
    3855  item.singleLineMode = singleLine;
    3856  m_Stack.push_back(item);
    3857 }
    3858 
    3859 void VmaJsonWriter::EndObject()
    3860 {
    3861  VMA_ASSERT(!m_InsideString);
    3862 
    3863  WriteIndent(true);
    3864  m_SB.Add('}');
    3865 
    3866  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    3867  m_Stack.pop_back();
    3868 }
    3869 
    3870 void VmaJsonWriter::BeginArray(bool singleLine)
    3871 {
    3872  VMA_ASSERT(!m_InsideString);
    3873 
    3874  BeginValue(false);
    3875  m_SB.Add('[');
    3876 
    3877  StackItem item;
    3878  item.type = COLLECTION_TYPE_ARRAY;
    3879  item.valueCount = 0;
    3880  item.singleLineMode = singleLine;
    3881  m_Stack.push_back(item);
    3882 }
    3883 
    3884 void VmaJsonWriter::EndArray()
    3885 {
    3886  VMA_ASSERT(!m_InsideString);
    3887 
    3888  WriteIndent(true);
    3889  m_SB.Add(']');
    3890 
    3891  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    3892  m_Stack.pop_back();
    3893 }
    3894 
    3895 void VmaJsonWriter::WriteString(const char* pStr)
    3896 {
    3897  BeginString(pStr);
    3898  EndString();
    3899 }
    3900 
    3901 void VmaJsonWriter::BeginString(const char* pStr)
    3902 {
    3903  VMA_ASSERT(!m_InsideString);
    3904 
    3905  BeginValue(true);
    3906  m_SB.Add('"');
    3907  m_InsideString = true;
    3908  if(pStr != VMA_NULL && pStr[0] != '\0')
    3909  {
    3910  ContinueString(pStr);
    3911  }
    3912 }
    3913 
    3914 void VmaJsonWriter::ContinueString(const char* pStr)
    3915 {
    3916  VMA_ASSERT(m_InsideString);
    3917 
    3918  const size_t strLen = strlen(pStr);
    3919  for(size_t i = 0; i < strLen; ++i)
    3920  {
    3921  char ch = pStr[i];
    3922  if(ch == '\\')
    3923  {
    3924  m_SB.Add("\\\\");
    3925  }
    3926  else if(ch == '"')
    3927  {
    3928  m_SB.Add("\\\"");
    3929  }
    3930  else if(ch >= 32)
    3931  {
    3932  m_SB.Add(ch);
    3933  }
    3934  else switch(ch)
    3935  {
    3936  case '\n':
    3937  m_SB.Add("\\n");
    3938  break;
    3939  case '\r':
    3940  m_SB.Add("\\r");
    3941  break;
    3942  case '\t':
    3943  m_SB.Add("\\t");
    3944  break;
    3945  default:
    3946  VMA_ASSERT(0 && "Character not currently supported.");
    3947  break;
    3948  }
    3949  }
    3950 }
    3951 
    3952 void VmaJsonWriter::ContinueString(uint32_t n)
    3953 {
    3954  VMA_ASSERT(m_InsideString);
    3955  m_SB.AddNumber(n);
    3956 }
    3957 
    3958 void VmaJsonWriter::ContinueString(uint64_t n)
    3959 {
    3960  VMA_ASSERT(m_InsideString);
    3961  m_SB.AddNumber(n);
    3962 }
    3963 
    3964 void VmaJsonWriter::EndString(const char* pStr)
    3965 {
    3966  VMA_ASSERT(m_InsideString);
    3967  if(pStr != VMA_NULL && pStr[0] != '\0')
    3968  {
    3969  ContinueString(pStr);
    3970  }
    3971  m_SB.Add('"');
    3972  m_InsideString = false;
    3973 }
    3974 
    3975 void VmaJsonWriter::WriteNumber(uint32_t n)
    3976 {
    3977  VMA_ASSERT(!m_InsideString);
    3978  BeginValue(false);
    3979  m_SB.AddNumber(n);
    3980 }
    3981 
    3982 void VmaJsonWriter::WriteNumber(uint64_t n)
    3983 {
    3984  VMA_ASSERT(!m_InsideString);
    3985  BeginValue(false);
    3986  m_SB.AddNumber(n);
    3987 }
    3988 
    3989 void VmaJsonWriter::WriteBool(bool b)
    3990 {
    3991  VMA_ASSERT(!m_InsideString);
    3992  BeginValue(false);
    3993  m_SB.Add(b ? "true" : "false");
    3994 }
    3995 
    3996 void VmaJsonWriter::WriteNull()
    3997 {
    3998  VMA_ASSERT(!m_InsideString);
    3999  BeginValue(false);
    4000  m_SB.Add("null");
    4001 }
    4002 
    4003 void VmaJsonWriter::BeginValue(bool isString)
    4004 {
    4005  if(!m_Stack.empty())
    4006  {
    4007  StackItem& currItem = m_Stack.back();
    4008  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    4009  currItem.valueCount % 2 == 0)
    4010  {
    4011  VMA_ASSERT(isString);
    4012  }
    4013 
    4014  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    4015  currItem.valueCount % 2 != 0)
    4016  {
    4017  m_SB.Add(": ");
    4018  }
    4019  else if(currItem.valueCount > 0)
    4020  {
    4021  m_SB.Add(", ");
    4022  WriteIndent();
    4023  }
    4024  else
    4025  {
    4026  WriteIndent();
    4027  }
    4028  ++currItem.valueCount;
    4029  }
    4030 }
    4031 
    4032 void VmaJsonWriter::WriteIndent(bool oneLess)
    4033 {
    4034  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    4035  {
    4036  m_SB.AddNewLine();
    4037 
    4038  size_t count = m_Stack.size();
    4039  if(count > 0 && oneLess)
    4040  {
    4041  --count;
    4042  }
    4043  for(size_t i = 0; i < count; ++i)
    4044  {
    4045  m_SB.Add(INDENT);
    4046  }
    4047  }
    4048 }
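// Illustrative sketch (not part of the library): driving VmaJsonWriter.
// Inside an object, keys and values must alternate - BeginValue() asserts
// that every even-numbered value is a string. Assumes `allocator` is a
// valid VmaAllocator; disabled with #if 0.
#if 0
static void ExampleJsonWriter(VmaAllocator allocator)
{
    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();
        json.WriteString("Name");    // Key.
        json.WriteString("Example"); // Value.
        json.WriteString("Sizes");   // Key.
        json.BeginArray(true);       // singleLine = true: no newlines or indent.
        json.WriteNumber(256u);
        json.WriteNumber(4096u);
        json.EndArray();
        json.EndObject();
    } // ~VmaJsonWriter() asserts that all objects/arrays were closed.
    fwrite(sb.GetData(), 1, sb.GetLength(), stdout);
}
#endif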
    4049 
    4050 #endif // #if VMA_STATS_STRING_ENABLED
    4051 
    4053 
    4054 VkDeviceSize VmaAllocation_T::GetOffset() const
    4055 {
    4056  switch(m_Type)
    4057  {
    4058  case ALLOCATION_TYPE_BLOCK:
    4059  return m_BlockAllocation.m_Offset;
    4060  case ALLOCATION_TYPE_DEDICATED:
    4061  return 0;
    4062  default:
    4063  VMA_ASSERT(0);
    4064  return 0;
    4065  }
    4066 }
    4067 
    4068 VkDeviceMemory VmaAllocation_T::GetMemory() const
    4069 {
    4070  switch(m_Type)
    4071  {
    4072  case ALLOCATION_TYPE_BLOCK:
    4073  return m_BlockAllocation.m_Block->m_hMemory;
    4074  case ALLOCATION_TYPE_DEDICATED:
    4075  return m_DedicatedAllocation.m_hMemory;
    4076  default:
    4077  VMA_ASSERT(0);
    4078  return VK_NULL_HANDLE;
    4079  }
    4080 }
    4081 
    4082 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    4083 {
    4084  switch(m_Type)
    4085  {
    4086  case ALLOCATION_TYPE_BLOCK:
    4087  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
    4088  case ALLOCATION_TYPE_DEDICATED:
    4089  return m_DedicatedAllocation.m_MemoryTypeIndex;
    4090  default:
    4091  VMA_ASSERT(0);
    4092  return UINT32_MAX;
    4093  }
    4094 }
    4095 
    4096 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
    4097 {
    4098  switch(m_Type)
    4099  {
    4100  case ALLOCATION_TYPE_BLOCK:
    4101  return m_BlockAllocation.m_Block->m_BlockVectorType;
    4102  case ALLOCATION_TYPE_DEDICATED:
    4103  return (m_DedicatedAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
    4104  default:
    4105  VMA_ASSERT(0);
    4106  return VMA_BLOCK_VECTOR_TYPE_COUNT;
    4107  }
    4108 }
    4109 
    4110 void* VmaAllocation_T::GetMappedData() const
    4111 {
    4112  switch(m_Type)
    4113  {
    4114  case ALLOCATION_TYPE_BLOCK:
    4115  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
    4116  {
    4117  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
    4118  }
    4119  else
    4120  {
    4121  return VMA_NULL;
    4122  }
    4123  break;
    4124  case ALLOCATION_TYPE_DEDICATED:
    4125  return m_DedicatedAllocation.m_pMappedData;
    4126  default:
    4127  VMA_ASSERT(0);
    4128  return VMA_NULL;
    4129  }
    4130 }
    4131 
    4132 bool VmaAllocation_T::CanBecomeLost() const
    4133 {
    4134  switch(m_Type)
    4135  {
    4136  case ALLOCATION_TYPE_BLOCK:
    4137  return m_BlockAllocation.m_CanBecomeLost;
    4138  case ALLOCATION_TYPE_DEDICATED:
    4139  return false;
    4140  default:
    4141  VMA_ASSERT(0);
    4142  return false;
    4143  }
    4144 }
    4145 
    4146 VmaPool VmaAllocation_T::GetPool() const
    4147 {
    4148  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    4149  return m_BlockAllocation.m_hPool;
    4150 }
    4151 
    4152 VkResult VmaAllocation_T::DedicatedAllocMapPersistentlyMappedMemory(VmaAllocator hAllocator)
    4153 {
    4154  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    4155  if(m_DedicatedAllocation.m_PersistentMap)
    4156  {
    4157  return (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    4158  hAllocator->m_hDevice,
    4159  m_DedicatedAllocation.m_hMemory,
    4160  0,
    4161  VK_WHOLE_SIZE,
    4162  0,
    4163  &m_DedicatedAllocation.m_pMappedData);
    4164  }
    4165  return VK_SUCCESS;
    4166 }
    4167 void VmaAllocation_T::DedicatedAllocUnmapPersistentlyMappedMemory(VmaAllocator hAllocator)
    4168 {
    4169  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    4170  if(m_DedicatedAllocation.m_pMappedData)
    4171  {
    4172  VMA_ASSERT(m_DedicatedAllocation.m_PersistentMap);
    4173  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_DedicatedAllocation.m_hMemory);
    4174  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    4175  }
    4176 }
    4177 
    4178 
    4179 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    4180 {
    4181  VMA_ASSERT(CanBecomeLost());
    4182 
    4183  /*
    4184  Warning: This is a carefully designed algorithm.
    4185  Do not modify unless you really know what you're doing :)
    4186  */
    4187  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    4188  for(;;)
    4189  {
    4190  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    4191  {
    4192  VMA_ASSERT(0);
    4193  return false;
    4194  }
    4195  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    4196  {
    4197  return false;
    4198  }
    4199  else // Last use time earlier than current time.
    4200  {
    4201  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    4202  {
    4203  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    4204  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    4205  return true;
    4206  }
    4207  }
    4208  }
    4209 }
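// Worked example of the compare-exchange loop above (illustrative): with
// frameInUseCount = 2 and an allocation last used in frame 10, MakeLost()
// can succeed only once currentFrameIndex >= 13 (10 + 2 < 13). If another
// thread bumps the last-use frame index between the read and the
// compare-exchange, the CAS fails, localLastUseFrameIndex is refreshed, and
// the checks repeat - so a concurrently used allocation is never marked lost.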
    4210 
    4211 #if VMA_STATS_STRING_ENABLED
    4212 
    4213 // Corresponds to values of enum VmaSuballocationType.
    4214 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    4215  "FREE",
    4216  "UNKNOWN",
    4217  "BUFFER",
    4218  "IMAGE_UNKNOWN",
    4219  "IMAGE_LINEAR",
    4220  "IMAGE_OPTIMAL",
    4221 };
    4222 
    4223 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    4224 {
    4225  json.BeginObject();
    4226 
    4227  json.WriteString("Blocks");
    4228  json.WriteNumber(stat.blockCount);
    4229 
    4230  json.WriteString("Allocations");
    4231  json.WriteNumber(stat.allocationCount);
    4232 
    4233  json.WriteString("UnusedRanges");
    4234  json.WriteNumber(stat.unusedRangeCount);
    4235 
    4236  json.WriteString("UsedBytes");
    4237  json.WriteNumber(stat.usedBytes);
    4238 
    4239  json.WriteString("UnusedBytes");
    4240  json.WriteNumber(stat.unusedBytes);
    4241 
    4242  if(stat.allocationCount > 1)
    4243  {
    4244  json.WriteString("AllocationSize");
    4245  json.BeginObject(true);
    4246  json.WriteString("Min");
    4247  json.WriteNumber(stat.allocationSizeMin);
    4248  json.WriteString("Avg");
    4249  json.WriteNumber(stat.allocationSizeAvg);
    4250  json.WriteString("Max");
    4251  json.WriteNumber(stat.allocationSizeMax);
    4252  json.EndObject();
    4253  }
    4254 
    4255  if(stat.unusedRangeCount > 1)
    4256  {
    4257  json.WriteString("UnusedRangeSize");
    4258  json.BeginObject(true);
    4259  json.WriteString("Min");
    4260  json.WriteNumber(stat.unusedRangeSizeMin);
    4261  json.WriteString("Avg");
    4262  json.WriteNumber(stat.unusedRangeSizeAvg);
    4263  json.WriteString("Max");
    4264  json.WriteNumber(stat.unusedRangeSizeMax);
    4265  json.EndObject();
    4266  }
    4267 
    4268  json.EndObject();
    4269 }
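// Shape of the JSON emitted above (illustrative values):
// { "Blocks": 1, "Allocations": 2, "UnusedRanges": 1,
//   "UsedBytes": 1024, "UnusedBytes": 512,
//   "AllocationSize": { "Min": 256, "Avg": 512, "Max": 768 } }
// The "AllocationSize" / "UnusedRangeSize" sub-objects appear only when
// there is more than one allocation / unused range, respectively.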
    4270 
    4271 #endif // #if VMA_STATS_STRING_ENABLED
    4272 
    4273 struct VmaSuballocationItemSizeLess
    4274 {
    4275  bool operator()(
    4276  const VmaSuballocationList::iterator lhs,
    4277  const VmaSuballocationList::iterator rhs) const
    4278  {
    4279  return lhs->size < rhs->size;
    4280  }
    4281  bool operator()(
    4282  const VmaSuballocationList::iterator lhs,
    4283  VkDeviceSize rhsSize) const
    4284  {
    4285  return lhs->size < rhsSize;
    4286  }
    4287 };
    4288 
    4290 // class VmaBlockMetadata
    4291 
    4292 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    4293  m_Size(0),
    4294  m_FreeCount(0),
    4295  m_SumFreeSize(0),
    4296  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    4297  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    4298 {
    4299 }
    4300 
    4301 VmaBlockMetadata::~VmaBlockMetadata()
    4302 {
    4303 }
    4304 
    4305 void VmaBlockMetadata::Init(VkDeviceSize size)
    4306 {
    4307  m_Size = size;
    4308  m_FreeCount = 1;
    4309  m_SumFreeSize = size;
    4310 
    4311  VmaSuballocation suballoc = {};
    4312  suballoc.offset = 0;
    4313  suballoc.size = size;
    4314  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4315  suballoc.hAllocation = VK_NULL_HANDLE;
    4316 
    4317  m_Suballocations.push_back(suballoc);
    4318  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    4319  --suballocItem;
    4320  m_FreeSuballocationsBySize.push_back(suballocItem);
    4321 }
    4322 
    4323 bool VmaBlockMetadata::Validate() const
    4324 {
    4325  if(m_Suballocations.empty())
    4326  {
    4327  return false;
    4328  }
    4329 
    4330  // Expected offset of new suballocation as calculated from previous ones.
    4331  VkDeviceSize calculatedOffset = 0;
    4332  // Expected number of free suballocations as calculated from traversing their list.
    4333  uint32_t calculatedFreeCount = 0;
    4334  // Expected sum size of free suballocations as calculated from traversing their list.
    4335  VkDeviceSize calculatedSumFreeSize = 0;
    4336  // Expected number of free suballocations that should be registered in
    4337  // m_FreeSuballocationsBySize calculated from traversing their list.
    4338  size_t freeSuballocationsToRegister = 0;
    4339  // True if previously visited suballocation was free.
    4340  bool prevFree = false;
    4341 
    4342  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4343  suballocItem != m_Suballocations.cend();
    4344  ++suballocItem)
    4345  {
    4346  const VmaSuballocation& subAlloc = *suballocItem;
    4347 
    4348  // Actual offset of this suballocation doesn't match expected one.
    4349  if(subAlloc.offset != calculatedOffset)
    4350  {
    4351  return false;
    4352  }
    4353 
    4354  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4355  // Two adjacent free suballocations are invalid. They should be merged.
    4356  if(prevFree && currFree)
    4357  {
    4358  return false;
    4359  }
    4360  prevFree = currFree;
    4361 
    4362  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
    4363  {
    4364  return false;
    4365  }
    4366 
    4367  if(currFree)
    4368  {
    4369  calculatedSumFreeSize += subAlloc.size;
    4370  ++calculatedFreeCount;
    4371  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    4372  {
    4373  ++freeSuballocationsToRegister;
    4374  }
    4375  }
    4376 
    4377  calculatedOffset += subAlloc.size;
    4378  }
    4379 
    4380  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    4381  // match expected one.
    4382  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    4383  {
    4384  return false;
    4385  }
    4386 
    4387  VkDeviceSize lastSize = 0;
    4388  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    4389  {
    4390  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    4391 
    4392  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    4393  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
    4394  {
    4395  return false;
    4396  }
    4397  // They must be sorted by size ascending.
    4398  if(suballocItem->size < lastSize)
    4399  {
    4400  return false;
    4401  }
    4402 
    4403  lastSize = suballocItem->size;
    4404  }
    4405 
    4406  // Check if totals match calculated values.
    4407  return
    4408  ValidateFreeSuballocationList() &&
    4409  (calculatedOffset == m_Size) &&
    4410  (calculatedSumFreeSize == m_SumFreeSize) &&
    4411  (calculatedFreeCount == m_FreeCount);
    4412 }
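// Examples of states Validate() rejects (illustrative): two adjacent FREE
// suballocations (they must have been merged); a suballocation whose offset
// differs from the running sum of preceding sizes; and cached totals
// (m_FreeCount, m_SumFreeSize) that disagree with the values recomputed by
// traversing the list.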
    4413 
    4414 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
    4415 {
    4416  if(!m_FreeSuballocationsBySize.empty())
    4417  {
    4418  return m_FreeSuballocationsBySize.back()->size;
    4419  }
    4420  else
    4421  {
    4422  return 0;
    4423  }
    4424 }
    4425 
    4426 bool VmaBlockMetadata::IsEmpty() const
    4427 {
    4428  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    4429 }
    4430 
    4431 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    4432 {
    4433  outInfo.blockCount = 1;
    4434 
    4435  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    4436  outInfo.allocationCount = rangeCount - m_FreeCount;
    4437  outInfo.unusedRangeCount = m_FreeCount;
    4438 
    4439  outInfo.unusedBytes = m_SumFreeSize;
    4440  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
    4441 
    4442  outInfo.allocationSizeMin = UINT64_MAX;
    4443  outInfo.allocationSizeMax = 0;
    4444  outInfo.unusedRangeSizeMin = UINT64_MAX;
    4445  outInfo.unusedRangeSizeMax = 0;
    4446 
    4447  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4448  suballocItem != m_Suballocations.cend();
    4449  ++suballocItem)
    4450  {
    4451  const VmaSuballocation& suballoc = *suballocItem;
    4452  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    4453  {
    4454  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    4455  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    4456  }
    4457  else
    4458  {
    4459  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    4460  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    4461  }
    4462  }
    4463 }
    4464 
    4465 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
    4466 {
    4467  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    4468 
    4469  inoutStats.size += m_Size;
    4470  inoutStats.unusedSize += m_SumFreeSize;
    4471  inoutStats.allocationCount += rangeCount - m_FreeCount;
    4472  inoutStats.unusedRangeCount += m_FreeCount;
    4473  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    4474 }
    4475 
    4476 #if VMA_STATS_STRING_ENABLED
    4477 
    4478 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
    4479 {
    4480  json.BeginObject();
    4481 
    4482  json.WriteString("TotalBytes");
    4483  json.WriteNumber(m_Size);
    4484 
    4485  json.WriteString("UnusedBytes");
    4486  json.WriteNumber(m_SumFreeSize);
    4487 
    4488  json.WriteString("Allocations");
    4489  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
    4490 
    4491  json.WriteString("UnusedRanges");
    4492  json.WriteNumber(m_FreeCount);
    4493 
    4494  json.WriteString("Suballocations");
    4495  json.BeginArray();
    4496  size_t i = 0;
    4497  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4498  suballocItem != m_Suballocations.cend();
    4499  ++suballocItem, ++i)
    4500  {
    4501  json.BeginObject(true);
    4502 
    4503  json.WriteString("Type");
    4504  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
    4505 
    4506  json.WriteString("Size");
    4507  json.WriteNumber(suballocItem->size);
    4508 
    4509  json.WriteString("Offset");
    4510  json.WriteNumber(suballocItem->offset);
    4511 
    4512  json.EndObject();
    4513  }
    4514  json.EndArray();
    4515 
    4516  json.EndObject();
    4517 }
    4518 
    4519 #endif // #if VMA_STATS_STRING_ENABLED
    4520 
    4521 /*
    4522 How many suitable free suballocations to analyze before choosing the best one.
    4523 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
    4524  be chosen.
    4525 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
    4526  suballocations will be analyzed and the best one will be chosen.
    4527 - Any other value is also acceptable.
    4528 */
    4529 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
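// Worked illustration of the two strategies (not part of the library),
// searching a plain sorted array of sizes the way the code below searches
// m_FreeSuballocationsBySize: with sizes {16, 32, 64, 128} and
// allocSize = 48, best-fit binary-searches to the first size >= 48 (the 64),
// while the worst-fit path iterates from the largest (128) downward.
#if 0
static size_t ExampleBestFitIndex(
    const VkDeviceSize* sizes, size_t count, VkDeviceSize allocSize)
{
    // Equivalent of VmaBinaryFindFirstNotLess over a plain array.
    size_t lo = 0, hi = count;
    while(lo < hi)
    {
        const size_t mid = lo + (hi - lo) / 2;
        if(sizes[mid] < allocSize)
            lo = mid + 1;
        else
            hi = mid;
    }
    return lo; // First index with sizes[lo] >= allocSize, or count if none fits.
}
#endif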
    4530 
    4531 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
    4532 {
    4533  VMA_ASSERT(IsEmpty());
    4534  pAllocationRequest->offset = 0;
    4535  pAllocationRequest->sumFreeSize = m_SumFreeSize;
    4536  pAllocationRequest->sumItemSize = 0;
    4537  pAllocationRequest->item = m_Suballocations.begin();
    4538  pAllocationRequest->itemsToMakeLostCount = 0;
    4539 }
    4540 
    4541 bool VmaBlockMetadata::CreateAllocationRequest(
    4542  uint32_t currentFrameIndex,
    4543  uint32_t frameInUseCount,
    4544  VkDeviceSize bufferImageGranularity,
    4545  VkDeviceSize allocSize,
    4546  VkDeviceSize allocAlignment,
    4547  VmaSuballocationType allocType,
    4548  bool canMakeOtherLost,
    4549  VmaAllocationRequest* pAllocationRequest)
    4550 {
    4551  VMA_ASSERT(allocSize > 0);
    4552  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    4553  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    4554  VMA_HEAVY_ASSERT(Validate());
    4555 
    4556  // There is not enough total free space in this block to fulfill the request: Early return.
    4557  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
    4558  {
    4559  return false;
    4560  }
    4561 
    4562  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
    4563  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    4564  if(freeSuballocCount > 0)
    4565  {
    4566  if(VMA_BEST_FIT)
    4567  {
    4568  // Find first free suballocation with size not less than allocSize.
    4569  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    4570  m_FreeSuballocationsBySize.data(),
    4571  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    4572  allocSize,
    4573  VmaSuballocationItemSizeLess());
    4574  size_t index = it - m_FreeSuballocationsBySize.data();
    4575  for(; index < freeSuballocCount; ++index)
    4576  {
    4577  if(CheckAllocation(
    4578  currentFrameIndex,
    4579  frameInUseCount,
    4580  bufferImageGranularity,
    4581  allocSize,
    4582  allocAlignment,
    4583  allocType,
    4584  m_FreeSuballocationsBySize[index],
    4585  false, // canMakeOtherLost
    4586  &pAllocationRequest->offset,
    4587  &pAllocationRequest->itemsToMakeLostCount,
    4588  &pAllocationRequest->sumFreeSize,
    4589  &pAllocationRequest->sumItemSize))
    4590  {
    4591  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    4592  return true;
    4593  }
    4594  }
    4595  }
    4596  else
    4597  {
    4598  // Search starting from the biggest suballocations.
    4599  for(size_t index = freeSuballocCount; index--; )
    4600  {
    4601  if(CheckAllocation(
    4602  currentFrameIndex,
    4603  frameInUseCount,
    4604  bufferImageGranularity,
    4605  allocSize,
    4606  allocAlignment,
    4607  allocType,
    4608  m_FreeSuballocationsBySize[index],
    4609  false, // canMakeOtherLost
    4610  &pAllocationRequest->offset,
    4611  &pAllocationRequest->itemsToMakeLostCount,
    4612  &pAllocationRequest->sumFreeSize,
    4613  &pAllocationRequest->sumItemSize))
    4614  {
    4615  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    4616  return true;
    4617  }
    4618  }
    4619  }
    4620  }
    4621 
    4622  if(canMakeOtherLost)
    4623  {
    4624  // Brute-force algorithm. TODO: Come up with something better.
    4625 
    4626  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    4627  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    4628 
    4629  VmaAllocationRequest tmpAllocRequest = {};
    4630  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    4631  suballocIt != m_Suballocations.end();
    4632  ++suballocIt)
    4633  {
    4634  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    4635  suballocIt->hAllocation->CanBecomeLost())
    4636  {
    4637  if(CheckAllocation(
    4638  currentFrameIndex,
    4639  frameInUseCount,
    4640  bufferImageGranularity,
    4641  allocSize,
    4642  allocAlignment,
    4643  allocType,
    4644  suballocIt,
    4645  canMakeOtherLost,
    4646  &tmpAllocRequest.offset,
    4647  &tmpAllocRequest.itemsToMakeLostCount,
    4648  &tmpAllocRequest.sumFreeSize,
    4649  &tmpAllocRequest.sumItemSize))
    4650  {
    4651  tmpAllocRequest.item = suballocIt;
    4652 
    4653  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    4654  {
    4655  *pAllocationRequest = tmpAllocRequest;
    4656  }
    4657  }
    4658  }
    4659  }
    4660 
    4661  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    4662  {
    4663  return true;
    4664  }
    4665  }
    4666 
    4667  return false;
    4668 }
    4669 
    4670 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
    4671  uint32_t currentFrameIndex,
    4672  uint32_t frameInUseCount,
    4673  VmaAllocationRequest* pAllocationRequest)
    4674 {
    4675  while(pAllocationRequest->itemsToMakeLostCount > 0)
    4676  {
    4677  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    4678  {
    4679  ++pAllocationRequest->item;
    4680  }
    4681  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    4682  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    4683  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    4684  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    4685  {
    4686  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    4687  --pAllocationRequest->itemsToMakeLostCount;
    4688  }
    4689  else
    4690  {
    4691  return false;
    4692  }
    4693  }
    4694 
    4695  VMA_HEAVY_ASSERT(Validate());
    4696  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    4697  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    4698 
    4699  return true;
    4700 }
    4701 
    4702 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    4703 {
    4704  uint32_t lostAllocationCount = 0;
    4705  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    4706  it != m_Suballocations.end();
    4707  ++it)
    4708  {
    4709  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    4710  it->hAllocation->CanBecomeLost() &&
    4711  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    4712  {
    4713  it = FreeSuballocation(it);
    4714  ++lostAllocationCount;
    4715  }
    4716  }
    4717  return lostAllocationCount;
    4718 }
    4719 
    4720 void VmaBlockMetadata::Alloc(
    4721  const VmaAllocationRequest& request,
    4722  VmaSuballocationType type,
    4723  VkDeviceSize allocSize,
    4724  VmaAllocation hAllocation)
    4725 {
    4726  VMA_ASSERT(request.item != m_Suballocations.end());
    4727  VmaSuballocation& suballoc = *request.item;
    4728  // Given suballocation is a free block.
    4729  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4730  // Given offset is inside this suballocation.
    4731  VMA_ASSERT(request.offset >= suballoc.offset);
    4732  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    4733  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    4734  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    4735 
    4736  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    4737  // it to become used.
    4738  UnregisterFreeSuballocation(request.item);
    4739 
    4740  suballoc.offset = request.offset;
    4741  suballoc.size = allocSize;
    4742  suballoc.type = type;
    4743  suballoc.hAllocation = hAllocation;
    4744 
    4745  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    4746  if(paddingEnd)
    4747  {
    4748  VmaSuballocation paddingSuballoc = {};
    4749  paddingSuballoc.offset = request.offset + allocSize;
    4750  paddingSuballoc.size = paddingEnd;
    4751  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4752  VmaSuballocationList::iterator next = request.item;
    4753  ++next;
    4754  const VmaSuballocationList::iterator paddingEndItem =
    4755  m_Suballocations.insert(next, paddingSuballoc);
    4756  RegisterFreeSuballocation(paddingEndItem);
    4757  }
    4758 
    4759  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    4760  if(paddingBegin)
    4761  {
    4762  VmaSuballocation paddingSuballoc = {};
    4763  paddingSuballoc.offset = request.offset - paddingBegin;
    4764  paddingSuballoc.size = paddingBegin;
    4765  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4766  const VmaSuballocationList::iterator paddingBeginItem =
    4767  m_Suballocations.insert(request.item, paddingSuballoc);
    4768  RegisterFreeSuballocation(paddingBeginItem);
    4769  }
    4770 
    4771  // Update totals.
    4772  m_FreeCount = m_FreeCount - 1;
    4773  if(paddingBegin > 0)
    4774  {
    4775  ++m_FreeCount;
    4776  }
    4777  if(paddingEnd > 0)
    4778  {
    4779  ++m_FreeCount;
    4780  }
    4781  m_SumFreeSize -= allocSize;
    4782 }
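// Worked example of the split above (illustrative): a FREE suballocation
// [0..256) serving a request with offset 64 and allocSize 128 becomes
// [FREE 0..64)[USED 64..192)[FREE 192..256). paddingBegin = 64 and
// paddingEnd = 64 each insert a new free suballocation, so m_FreeCount goes
// 1 -> 2 and m_SumFreeSize drops by exactly allocSize = 128.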
    4783 
    4784 void VmaBlockMetadata::Free(const VmaAllocation allocation)
    4785 {
    4786  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    4787  suballocItem != m_Suballocations.end();
    4788  ++suballocItem)
    4789  {
    4790  VmaSuballocation& suballoc = *suballocItem;
    4791  if(suballoc.hAllocation == allocation)
    4792  {
    4793  FreeSuballocation(suballocItem);
    4794  VMA_HEAVY_ASSERT(Validate());
    4795  return;
    4796  }
    4797  }
    4798  VMA_ASSERT(0 && "Not found!");
    4799 }
    4800 
    4801 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
    4802 {
    4803  VkDeviceSize lastSize = 0;
    4804  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    4805  {
    4806  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    4807 
    4808  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    4809  {
    4810  VMA_ASSERT(0);
    4811  return false;
    4812  }
    4813  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    4814  {
    4815  VMA_ASSERT(0);
    4816  return false;
    4817  }
    4818  if(it->size < lastSize)
    4819  {
    4820  VMA_ASSERT(0);
    4821  return false;
    4822  }
    4823 
    4824  lastSize = it->size;
    4825  }
    4826  return true;
    4827 }
    4828 
    4829 bool VmaBlockMetadata::CheckAllocation(
    4830  uint32_t currentFrameIndex,
    4831  uint32_t frameInUseCount,
    4832  VkDeviceSize bufferImageGranularity,
    4833  VkDeviceSize allocSize,
    4834  VkDeviceSize allocAlignment,
    4835  VmaSuballocationType allocType,
    4836  VmaSuballocationList::const_iterator suballocItem,
    4837  bool canMakeOtherLost,
    4838  VkDeviceSize* pOffset,
    4839  size_t* itemsToMakeLostCount,
    4840  VkDeviceSize* pSumFreeSize,
    4841  VkDeviceSize* pSumItemSize) const
    4842 {
    4843  VMA_ASSERT(allocSize > 0);
    4844  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    4845  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    4846  VMA_ASSERT(pOffset != VMA_NULL);
    4847 
    4848  *itemsToMakeLostCount = 0;
    4849  *pSumFreeSize = 0;
    4850  *pSumItemSize = 0;
    4851 
    4852  if(canMakeOtherLost)
    4853  {
    4854  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    4855  {
    4856  *pSumFreeSize = suballocItem->size;
    4857  }
    4858  else
    4859  {
    4860  if(suballocItem->hAllocation->CanBecomeLost() &&
    4861  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    4862  {
    4863  ++*itemsToMakeLostCount;
    4864  *pSumItemSize = suballocItem->size;
    4865  }
    4866  else
    4867  {
    4868  return false;
    4869  }
    4870  }
    4871 
    4872  // Remaining size is too small for this request: Early return.
    4873  if(m_Size - suballocItem->offset < allocSize)
    4874  {
    4875  return false;
    4876  }
    4877 
    4878  // Start from offset equal to beginning of this suballocation.
    4879  *pOffset = suballocItem->offset;
    4880 
    4881  // Apply VMA_DEBUG_MARGIN at the beginning.
    4882  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    4883  {
    4884  *pOffset += VMA_DEBUG_MARGIN;
    4885  }
    4886 
    4887  // Apply alignment.
    4888  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    4889  *pOffset = VmaAlignUp(*pOffset, alignment);
    4890 
    4891  // Check previous suballocations for BufferImageGranularity conflicts.
    4892  // Make bigger alignment if necessary.
    4893  if(bufferImageGranularity > 1)
    4894  {
    4895  bool bufferImageGranularityConflict = false;
    4896  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    4897  while(prevSuballocItem != m_Suballocations.cbegin())
    4898  {
    4899  --prevSuballocItem;
    4900  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    4901  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    4902  {
    4903  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    4904  {
    4905  bufferImageGranularityConflict = true;
    4906  break;
    4907  }
    4908  }
    4909  else
    4910  // Already on previous page.
    4911  break;
    4912  }
    4913  if(bufferImageGranularityConflict)
    4914  {
    4915  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    4916  }
    4917  }
    4918 
    4919  // Now that we have final *pOffset, check if we are past suballocItem.
    4920  // If yes, return false - this function should be called for another suballocItem as starting point.
    4921  if(*pOffset >= suballocItem->offset + suballocItem->size)
    4922  {
    4923  return false;
    4924  }
    4925 
    4926  // Calculate padding at the beginning based on current offset.
    4927  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    4928 
    4929  // Calculate required margin at the end if this is not last suballocation.
    4930  VmaSuballocationList::const_iterator next = suballocItem;
    4931  ++next;
    4932  const VkDeviceSize requiredEndMargin =
    4933  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    4934 
    4935  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    4936  // Another early return check.
    4937  if(suballocItem->offset + totalSize > m_Size)
    4938  {
    4939  return false;
    4940  }
    4941 
    4942  // Advance lastSuballocItem until desired size is reached.
    4943  // Update itemsToMakeLostCount.
    4944  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    4945  if(totalSize > suballocItem->size)
    4946  {
    4947  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    4948  while(remainingSize > 0)
    4949  {
    4950  ++lastSuballocItem;
    4951  if(lastSuballocItem == m_Suballocations.cend())
    4952  {
    4953  return false;
    4954  }
    4955  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    4956  {
    4957  *pSumFreeSize += lastSuballocItem->size;
    4958  }
    4959  else
    4960  {
    4961  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    4962  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    4963  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    4964  {
    4965  ++*itemsToMakeLostCount;
    4966  *pSumItemSize += lastSuballocItem->size;
    4967  }
    4968  else
    4969  {
    4970  return false;
    4971  }
    4972  }
    4973  remainingSize = (lastSuballocItem->size < remainingSize) ?
    4974  remainingSize - lastSuballocItem->size : 0;
    4975  }
    4976  }
    4977 
    4978  // Check next suballocations for BufferImageGranularity conflicts.
    4979  // If conflict exists, we must mark more allocations lost or fail.
    4980  if(bufferImageGranularity > 1)
    4981  {
    4982  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    4983  ++nextSuballocItem;
    4984  while(nextSuballocItem != m_Suballocations.cend())
    4985  {
    4986  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    4987  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    4988  {
    4989  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    4990  {
    4991  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    4992  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    4993  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    4994  {
    4995  ++*itemsToMakeLostCount;
    4996  }
    4997  else
    4998  {
    4999  return false;
    5000  }
    5001  }
    5002  }
    5003  else
    5004  {
    5005  // Already on next page.
    5006  break;
    5007  }
    5008  ++nextSuballocItem;
    5009  }
    5010  }
    5011  }
    5012  else
    5013  {
    5014  const VmaSuballocation& suballoc = *suballocItem;
    5015  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    5016 
    5017  *pSumFreeSize = suballoc.size;
    5018 
    5019  // Size of this suballocation is too small for this request: Early return.
    5020  if(suballoc.size < allocSize)
    5021  {
    5022  return false;
    5023  }
    5024 
    5025  // Start from offset equal to beginning of this suballocation.
    5026  *pOffset = suballoc.offset;
    5027 
    5028  // Apply VMA_DEBUG_MARGIN at the beginning.
    5029  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    5030  {
    5031  *pOffset += VMA_DEBUG_MARGIN;
    5032  }
    5033 
    5034  // Apply alignment.
    5035  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    5036  *pOffset = VmaAlignUp(*pOffset, alignment);
    5037 
    5038  // Check previous suballocations for BufferImageGranularity conflicts.
    5039  // Make bigger alignment if necessary.
    5040  if(bufferImageGranularity > 1)
    5041  {
    5042  bool bufferImageGranularityConflict = false;
    5043  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    5044  while(prevSuballocItem != m_Suballocations.cbegin())
    5045  {
    5046  --prevSuballocItem;
    5047  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    5048  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    5049  {
    5050  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    5051  {
    5052  bufferImageGranularityConflict = true;
    5053  break;
    5054  }
    5055  }
    5056  else
    5057  // Already on previous page.
    5058  break;
    5059  }
    5060  if(bufferImageGranularityConflict)
    5061  {
    5062  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    5063  }
    5064  }
    5065 
    5066  // Calculate padding at the beginning based on current offset.
    5067  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    5068 
    5069  // Calculate required margin at the end if this is not last suballocation.
    5070  VmaSuballocationList::const_iterator next = suballocItem;
    5071  ++next;
    5072  const VkDeviceSize requiredEndMargin =
    5073  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    5074 
    5075  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    5076  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    5077  {
    5078  return false;
    5079  }
    5080 
    5081  // Check next suballocations for BufferImageGranularity conflicts.
    5082  // If conflict exists, allocation cannot be made here.
    5083  if(bufferImageGranularity > 1)
    5084  {
    5085  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    5086  ++nextSuballocItem;
    5087  while(nextSuballocItem != m_Suballocations.cend())
    5088  {
    5089  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    5090  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    5091  {
    5092  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    5093  {
    5094  return false;
    5095  }
    5096  }
    5097  else
    5098  {
    5099  // Already on next page.
    5100  break;
    5101  }
    5102  ++nextSuballocItem;
    5103  }
    5104  }
    5105  }
    5106 
    5107  // All tests passed: Success. pOffset is already filled.
    5108  return true;
    5109 }
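// Worked example of the bufferImageGranularity handling above (illustrative):
// with bufferImageGranularity = 4096, a candidate offset that shares a
// 4096-byte "page" with a preceding suballocation of conflicting type (e.g.
// a buffer right after an IMAGE_OPTIMAL) is aligned up to the next page; a
// conflicting suballocation on the page past the allocation's end either
// adds to itemsToMakeLostCount (when canMakeOtherLost) or fails the check.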
    5110 
    5111 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
    5112 {
    5113  VMA_ASSERT(item != m_Suballocations.end());
    5114  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5115 
    5116  VmaSuballocationList::iterator nextItem = item;
    5117  ++nextItem;
    5118  VMA_ASSERT(nextItem != m_Suballocations.end());
    5119  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    5120 
    5121  item->size += nextItem->size;
    5122  --m_FreeCount;
    5123  m_Suballocations.erase(nextItem);
    5124 }
    5125 
    5126 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    5127 {
    5128  // Change this suballocation to be marked as free.
    5129  VmaSuballocation& suballoc = *suballocItem;
    5130  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5131  suballoc.hAllocation = VK_NULL_HANDLE;
    5132 
    5133  // Update totals.
    5134  ++m_FreeCount;
    5135  m_SumFreeSize += suballoc.size;
    5136 
    5137  // Merge with previous and/or next suballocation if it's also free.
    5138  bool mergeWithNext = false;
    5139  bool mergeWithPrev = false;
    5140 
    5141  VmaSuballocationList::iterator nextItem = suballocItem;
    5142  ++nextItem;
    5143  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    5144  {
    5145  mergeWithNext = true;
    5146  }
    5147 
    5148  VmaSuballocationList::iterator prevItem = suballocItem;
    5149  if(suballocItem != m_Suballocations.begin())
    5150  {
    5151  --prevItem;
    5152  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    5153  {
    5154  mergeWithPrev = true;
    5155  }
    5156  }
    5157 
    5158  if(mergeWithNext)
    5159  {
    5160  UnregisterFreeSuballocation(nextItem);
    5161  MergeFreeWithNext(suballocItem);
    5162  }
    5163 
    5164  if(mergeWithPrev)
    5165  {
    5166  UnregisterFreeSuballocation(prevItem);
    5167  MergeFreeWithNext(prevItem);
    5168  RegisterFreeSuballocation(prevItem);
    5169  return prevItem;
    5170  }
    5171  else
    5172  {
    5173  RegisterFreeSuballocation(suballocItem);
    5174  return suballocItem;
    5175  }
    5176 }
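// Worked example of the merging above (illustrative): freeing the middle of
// [FREE 16][USED 32][FREE 64] first folds the trailing FREE 64 into the
// freed item (size 96), then folds that into the leading FREE 16 (size 112),
// re-registers the survivor under its new size, and returns the iterator of
// the single remaining [FREE 112] suballocation.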
    5177 
    5178 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    5179 {
    5180  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5181  VMA_ASSERT(item->size > 0);
    5182 
    5183  // You may want to enable this validation at the beginning or at the end of
    5184  // this function, depending on what you want to check.
    5185  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5186 
    5187  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5188  {
    5189  if(m_FreeSuballocationsBySize.empty())
    5190  {
    5191  m_FreeSuballocationsBySize.push_back(item);
    5192  }
    5193  else
    5194  {
    5195  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    5196  }
    5197  }
    5198 
    5199  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5200 }
    5201 
    5202 
    5203 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    5204 {
    5205  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5206  VMA_ASSERT(item->size > 0);
    5207 
    5208  // You may want to enable this validation at the beginning or at the end of
    5209  // this function, depending on what you want to check.
    5210  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5211 
    5212  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5213  {
    5214  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    5215  m_FreeSuballocationsBySize.data(),
    5216  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    5217  item,
    5218  VmaSuballocationItemSizeLess());
    5219  for(size_t index = it - m_FreeSuballocationsBySize.data();
    5220  index < m_FreeSuballocationsBySize.size();
    5221  ++index)
    5222  {
    5223  if(m_FreeSuballocationsBySize[index] == item)
    5224  {
    5225  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    5226  return;
    5227  }
    5228  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    5229  }
    5230  VMA_ASSERT(0 && "Not found.");
    5231  }
    5232 
    5233  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5234 }
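// Worked example of the lookup above (illustrative): with registered sizes
// {32, 32, 32, 64}, removing a particular 32-byte item binary-searches to
// the first 32, then walks the run of equal sizes comparing iterators until
// the exact item is found; the assert fires if the walk leaves that
// equal-size run without finding it.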
    5235 
    5237 // class VmaDeviceMemoryBlock
    5238 
    5239 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    5240  m_MemoryTypeIndex(UINT32_MAX),
    5241  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
    5242  m_hMemory(VK_NULL_HANDLE),
    5243  m_PersistentMap(false),
    5244  m_pMappedData(VMA_NULL),
    5245  m_Metadata(hAllocator)
    5246 {
    5247 }
    5248 
    5249 void VmaDeviceMemoryBlock::Init(
    5250  uint32_t newMemoryTypeIndex,
    5251  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
    5252  VkDeviceMemory newMemory,
    5253  VkDeviceSize newSize,
    5254  bool persistentMap,
    5255  void* pMappedData)
    5256 {
    5257  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    5258 
    5259  m_MemoryTypeIndex = newMemoryTypeIndex;
    5260  m_BlockVectorType = newBlockVectorType;
    5261  m_hMemory = newMemory;
    5262  m_PersistentMap = persistentMap;
    5263  m_pMappedData = pMappedData;
    5264 
    5265  m_Metadata.Init(newSize);
    5266 }
    5267 
    5268 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    5269 {
    5270  // This is the most important assert in the entire library.
    5271  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    5272  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    5273 
    5274  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    5275  if(m_pMappedData != VMA_NULL)
    5276  {
    5277  (allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, m_hMemory);
    5278  m_pMappedData = VMA_NULL;
    5279  }
    5280 
    5281  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
    5282  m_hMemory = VK_NULL_HANDLE;
    5283 }
    5284 
    5285 bool VmaDeviceMemoryBlock::Validate() const
    5286 {
    5287  if((m_hMemory == VK_NULL_HANDLE) ||
    5288  (m_Metadata.GetSize() == 0))
    5289  {
    5290  return false;
    5291  }
    5292 
    5293  return m_Metadata.Validate();
    5294 }
    5295 
    5296 static void InitStatInfo(VmaStatInfo& outInfo)
    5297 {
    5298  memset(&outInfo, 0, sizeof(outInfo));
    5299  outInfo.allocationSizeMin = UINT64_MAX;
    5300  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5301 }
    5302 
    5303 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    5304 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    5305 {
    5306  inoutInfo.blockCount += srcInfo.blockCount;
    5307  inoutInfo.allocationCount += srcInfo.allocationCount;
    5308  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    5309  inoutInfo.usedBytes += srcInfo.usedBytes;
    5310  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    5311  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    5312  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    5313  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    5314  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    5315 }
    5316 
    5317 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    5318 {
    5319  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    5320  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    5321  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    5322  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    5323 }
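// Worked example of the averaging above (illustrative): VmaRoundDiv rounds
// to nearest instead of truncating, so usedBytes = 10 over
// allocationCount = 4 yields an average of (10 + 2) / 4 = 3 rather than 2.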
    5324 
    5325 VmaPool_T::VmaPool_T(
    5326  VmaAllocator hAllocator,
    5327  const VmaPoolCreateInfo& createInfo) :
    5328  m_BlockVector(
    5329  hAllocator,
    5330  createInfo.memoryTypeIndex,
    5331  (createInfo.flags & VMA_POOL_CREATE_PERSISTENT_MAP_BIT) != 0 ?
    5332  VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    5333  createInfo.blockSize,
    5334  createInfo.minBlockCount,
    5335  createInfo.maxBlockCount,
    5336  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    5337  createInfo.frameInUseCount,
    5338  true) // isCustomPool
    5339 {
    5340 }
    5341 
    5342 VmaPool_T::~VmaPool_T()
    5343 {
    5344 }
    5345 
    5346 #if VMA_STATS_STRING_ENABLED
    5347 
    5348 #endif // #if VMA_STATS_STRING_ENABLED
    5349 
    5350 VmaBlockVector::VmaBlockVector(
    5351  VmaAllocator hAllocator,
    5352  uint32_t memoryTypeIndex,
    5353  VMA_BLOCK_VECTOR_TYPE blockVectorType,
    5354  VkDeviceSize preferredBlockSize,
    5355  size_t minBlockCount,
    5356  size_t maxBlockCount,
    5357  VkDeviceSize bufferImageGranularity,
    5358  uint32_t frameInUseCount,
    5359  bool isCustomPool) :
    5360  m_hAllocator(hAllocator),
    5361  m_MemoryTypeIndex(memoryTypeIndex),
    5362  m_BlockVectorType(blockVectorType),
    5363  m_PreferredBlockSize(preferredBlockSize),
    5364  m_MinBlockCount(minBlockCount),
    5365  m_MaxBlockCount(maxBlockCount),
    5366  m_BufferImageGranularity(bufferImageGranularity),
    5367  m_FrameInUseCount(frameInUseCount),
    5368  m_IsCustomPool(isCustomPool),
    5369  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    5370  m_HasEmptyBlock(false),
    5371  m_pDefragmentator(VMA_NULL)
    5372 {
    5373 }
    5374 
    5375 VmaBlockVector::~VmaBlockVector()
    5376 {
    5377  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    5378 
    5379  for(size_t i = m_Blocks.size(); i--; )
    5380  {
    5381  m_Blocks[i]->Destroy(m_hAllocator);
    5382  vma_delete(m_hAllocator, m_Blocks[i]);
    5383  }
    5384 }
    5385 
    5386 VkResult VmaBlockVector::CreateMinBlocks()
    5387 {
    5388  for(size_t i = 0; i < m_MinBlockCount; ++i)
    5389  {
    5390  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    5391  if(res != VK_SUCCESS)
    5392  {
    5393  return res;
    5394  }
    5395  }
    5396  return VK_SUCCESS;
    5397 }
    5398 
    5399 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    5400 {
    5401  pStats->size = 0;
    5402  pStats->unusedSize = 0;
    5403  pStats->allocationCount = 0;
    5404  pStats->unusedRangeCount = 0;
    5405  pStats->unusedRangeSizeMax = 0;
    5406 
    5407  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5408 
    5409  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5410  {
    5411  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5412  VMA_ASSERT(pBlock);
    5413  VMA_HEAVY_ASSERT(pBlock->Validate());
    5414  pBlock->m_Metadata.AddPoolStats(*pStats);
    5415  }
    5416 }
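
// Illustrative usage sketch: the per-pool statistics gathered above are exposed
// through the public vmaGetPoolStats() entry point (a valid `allocator` and
// `pool` are assumed):
static void PoolStatsExample(VmaAllocator allocator, VmaPool pool)
{
    VmaPoolStats poolStats = {};
    vmaGetPoolStats(allocator, pool, &poolStats);
    // poolStats.unusedRangeSizeMax bounds the largest allocation that can still
    // fit into the pool's existing blocks without creating a new one.
}
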
    5417 
    5418 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    5419 
    5420 VkResult VmaBlockVector::Allocate(
    5421  VmaPool hCurrentPool,
    5422  uint32_t currentFrameIndex,
    5423  const VkMemoryRequirements& vkMemReq,
    5424  const VmaAllocationCreateInfo& createInfo,
    5425  VmaSuballocationType suballocType,
    5426  VmaAllocation* pAllocation)
    5427 {
    5428  // Validate flags.
    5429  if(createInfo.pool != VK_NULL_HANDLE &&
    5430  ((createInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0) != (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED))
    5431  {
    5432  VMA_ASSERT(0 && "Usage of VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT must match VMA_POOL_CREATE_PERSISTENT_MAP_BIT.");
    5433  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    5434  }
    5435 
    5436  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5437 
    5438  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    5439  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    5440  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    5441  {
    5442  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    5443  VMA_ASSERT(pCurrBlock);
    5444  VmaAllocationRequest currRequest = {};
    5445  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    5446  currentFrameIndex,
    5447  m_FrameInUseCount,
    5448  m_BufferImageGranularity,
    5449  vkMemReq.size,
    5450  vkMemReq.alignment,
    5451  suballocType,
    5452  false, // canMakeOtherLost
    5453  &currRequest))
    5454  {
    5455  // Allocate from pCurrBlock.
    5456  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    5457 
    5458  // We no longer have an empty Allocation.
    5459  if(pCurrBlock->m_Metadata.IsEmpty())
    5460  {
    5461  m_HasEmptyBlock = false;
    5462  }
    5463 
    5464  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5465  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
    5466  (*pAllocation)->InitBlockAllocation(
    5467  hCurrentPool,
    5468  pCurrBlock,
    5469  currRequest.offset,
    5470  vkMemReq.alignment,
    5471  vkMemReq.size,
    5472  suballocType,
    5473  createInfo.pUserData,
    5474  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5475  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
    5476  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    5477  return VK_SUCCESS;
    5478  }
    5479  }
    5480 
    5481  const bool canCreateNewBlock =
    5482  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    5483  (m_Blocks.size() < m_MaxBlockCount);
    5484 
    5485  // 2. Try to create new block.
    5486  if(canCreateNewBlock)
    5487  {
    5488  // 2.1. Start with full preferredBlockSize.
    5489  VkDeviceSize blockSize = m_PreferredBlockSize;
    5490  size_t newBlockIndex = 0;
    5491  VkResult res = CreateBlock(blockSize, &newBlockIndex);
    5492  // Allocating blocks of other sizes is allowed only in default pools.
    5493  // In custom pools block size is fixed.
    5494  if(res < 0 && m_IsCustomPool == false)
    5495  {
    5496  // 2.2. Try half the size.
    5497  blockSize /= 2;
    5498  if(blockSize >= vkMemReq.size)
    5499  {
    5500  res = CreateBlock(blockSize, &newBlockIndex);
    5501  if(res < 0)
    5502  {
    5503  // 2.3. Try quarter the size.
    5504  blockSize /= 2;
    5505  if(blockSize >= vkMemReq.size)
    5506  {
    5507  res = CreateBlock(blockSize, &newBlockIndex);
    5508  }
    5509  }
    5510  }
    5511  }
    5512  if(res == VK_SUCCESS)
    5513  {
    5514  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    5515  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
    5516 
    5517  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
    5518  VmaAllocationRequest allocRequest;
    5519  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
    5520  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5521  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
    5522  (*pAllocation)->InitBlockAllocation(
    5523  hCurrentPool,
    5524  pBlock,
    5525  allocRequest.offset,
    5526  vkMemReq.alignment,
    5527  vkMemReq.size,
    5528  suballocType,
    5529  createInfo.pUserData,
    5530  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5531  VMA_HEAVY_ASSERT(pBlock->Validate());
    5532  VMA_DEBUG_LOG(" Created new block Size=%llu", blockSize);
    5533 
    5534  return VK_SUCCESS;
    5535  }
    5536  }
    5537 
    5538  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    5539 
    5540  // 3. Try to allocate from existing blocks with making other allocations lost.
    5541  if(canMakeOtherLost)
    5542  {
    5543  uint32_t tryIndex = 0;
    5544  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    5545  {
    5546  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    5547  VmaAllocationRequest bestRequest = {};
    5548  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    5549 
    5550  // 1. Search existing allocations.
    5551  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    5552  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    5553  {
    5554  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    5555  VMA_ASSERT(pCurrBlock);
    5556  VmaAllocationRequest currRequest = {};
    5557  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    5558  currentFrameIndex,
    5559  m_FrameInUseCount,
    5560  m_BufferImageGranularity,
    5561  vkMemReq.size,
    5562  vkMemReq.alignment,
    5563  suballocType,
    5564  canMakeOtherLost,
    5565  &currRequest))
    5566  {
    5567  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    5568  if(pBestRequestBlock == VMA_NULL ||
    5569  currRequestCost < bestRequestCost)
    5570  {
    5571  pBestRequestBlock = pCurrBlock;
    5572  bestRequest = currRequest;
    5573  bestRequestCost = currRequestCost;
    5574 
    5575  if(bestRequestCost == 0)
    5576  {
    5577  break;
    5578  }
    5579  }
    5580  }
    5581  }
    5582 
    5583  if(pBestRequestBlock != VMA_NULL)
    5584  {
    5585  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
    5586  currentFrameIndex,
    5587  m_FrameInUseCount,
    5588  &bestRequest))
    5589  {
    5590  // We no longer have an empty Allocation.
    5591  if(pBestRequestBlock->m_Metadata.IsEmpty())
    5592  {
    5593  m_HasEmptyBlock = false;
    5594  }
    5595  // Allocate from this pBlock.
    5596  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex);
    5597  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
    5598  (*pAllocation)->InitBlockAllocation(
    5599  hCurrentPool,
    5600  pBestRequestBlock,
    5601  bestRequest.offset,
    5602  vkMemReq.alignment,
    5603  vkMemReq.size,
    5604  suballocType,
    5605  createInfo.pUserData,
    5606  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    5607  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    5608  VMA_DEBUG_LOG(" Returned from existing block");
    5609  return VK_SUCCESS;
    5610  }
    5611  // else: Some allocations must have been touched while we are here. Next try.
    5612  }
    5613  else
    5614  {
    5615  // Could not find place in any of the blocks - break outer loop.
    5616  break;
    5617  }
    5618  }
    5619  /* Maximum number of tries exceeded - a very unlikely event when many other
    5620  threads are simultaneously touching allocations, making it impossible to mark
    5621  them as lost while we try to allocate. */
    5622  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    5623  {
    5624  return VK_ERROR_TOO_MANY_OBJECTS;
    5625  }
    5626  }
    5627 
    5628  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    5629 }
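
// Illustrative sketch (not part of vk_mem_alloc.h): the three-phase strategy
// above in miniature. The Try* helpers are hypothetical stand-ins for the
// steps marked 1., 2. and 3. in the function body.
static bool TryExistingBlocks() { /* 1. search m_Blocks without evicting */ return false; }
static bool TryCreateNewBlock() { /* 2. preferred size, then 1/2, then 1/4 */ return false; }
static bool TryMakeOtherLost()  { /* 3. evict allocations that can become lost */ return false; }
static VkResult AllocateSketch()
{
    if(TryExistingBlocks() || TryCreateNewBlock() || TryMakeOtherLost())
    {
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
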
    5630 
    5631 void VmaBlockVector::Free(
    5632  VmaAllocation hAllocation)
    5633 {
    5634  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    5635 
    5636  // Scope for lock.
    5637  {
    5638  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5639 
    5640  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    5641 
    5642  pBlock->m_Metadata.Free(hAllocation);
    5643  VMA_HEAVY_ASSERT(pBlock->Validate());
    5644 
    5645  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
    5646 
    5647  // pBlock became empty after this deallocation.
    5648  if(pBlock->m_Metadata.IsEmpty())
    5649  {
    5650  // We already have an empty Allocation. We don't want two, so delete this one.
    5651  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    5652  {
    5653  pBlockToDelete = pBlock;
    5654  Remove(pBlock);
    5655  }
    5656  // We now have our first empty Allocation.
    5657  else
    5658  {
    5659  m_HasEmptyBlock = true;
    5660  }
    5661  }
    5662  // pBlock didn't become empty, but we have another empty block - find and free that one.
    5663  // (This is optional, heuristics.)
    5664  else if(m_HasEmptyBlock)
    5665  {
    5666  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    5667  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    5668  {
    5669  pBlockToDelete = pLastBlock;
    5670  m_Blocks.pop_back();
    5671  m_HasEmptyBlock = false;
    5672  }
    5673  }
    5674 
    5675  IncrementallySortBlocks();
    5676  }
    5677 
    5678  // Destruction of a free Allocation. Deferred until this point, outside of the
    5679  // mutex lock, for performance reasons.
    5680  if(pBlockToDelete != VMA_NULL)
    5681  {
    5682  VMA_DEBUG_LOG(" Deleted empty allocation");
    5683  pBlockToDelete->Destroy(m_hAllocator);
    5684  vma_delete(m_hAllocator, pBlockToDelete);
    5685  }
    5686 }
    5687 
    5688 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    5689 {
    5690  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5691  {
    5692  if(m_Blocks[blockIndex] == pBlock)
    5693  {
    5694  VmaVectorRemove(m_Blocks, blockIndex);
    5695  return;
    5696  }
    5697  }
    5698  VMA_ASSERT(0);
    5699 }
    5700 
    5701 void VmaBlockVector::IncrementallySortBlocks()
    5702 {
    5703  // Bubble sort only until first swap.
    5704  for(size_t i = 1; i < m_Blocks.size(); ++i)
    5705  {
    5706  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
    5707  {
    5708  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    5709  return;
    5710  }
    5711  }
    5712 }
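
// Illustrative sketch (not part of vk_mem_alloc.h): the same "single bubble
// pass, stop at first swap" idea on a plain vector. Each call costs O(n), and
// repeated calls converge to ascending order - m_Blocks stays roughly sorted
// by free space without ever paying for a full sort at once.
#include <utility>
#include <vector>
static void IncrementalSortStep(std::vector<int>& v)
{
    for(size_t i = 1; i < v.size(); ++i)
    {
        if(v[i - 1] > v[i])
        {
            std::swap(v[i - 1], v[i]); // fix one inversion...
            return;                    // ...then stop, like the function above
        }
    }
}
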
    5713 
    5714 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    5715 {
    5716  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    5717  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    5718  allocInfo.allocationSize = blockSize;
    5719  VkDeviceMemory mem = VK_NULL_HANDLE;
    5720  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    5721  if(res < 0)
    5722  {
    5723  return res;
    5724  }
    5725 
    5726  // New VkDeviceMemory successfully created.
    5727 
    5728  // Map memory if needed.
    5729  void* pMappedData = VMA_NULL;
    5730  const bool persistentMap = (m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED);
    5731  if(persistentMap && m_hAllocator->m_UnmapPersistentlyMappedMemoryCounter == 0)
    5732  {
    5733  res = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
    5734  m_hAllocator->m_hDevice,
    5735  mem,
    5736  0,
    5737  VK_WHOLE_SIZE,
    5738  0,
    5739  &pMappedData);
    5740  if(res < 0)
    5741  {
    5742  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    5743  m_hAllocator->FreeVulkanMemory(m_MemoryTypeIndex, blockSize, mem);
    5744  return res;
    5745  }
    5746  }
    5747 
    5748  // Create new Allocation for it.
    5749  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    5750  pBlock->Init(
    5751  m_MemoryTypeIndex,
    5752  (VMA_BLOCK_VECTOR_TYPE)m_BlockVectorType,
    5753  mem,
    5754  allocInfo.allocationSize,
    5755  persistentMap,
    5756  pMappedData);
    5757 
    5758  m_Blocks.push_back(pBlock);
    5759  if(pNewBlockIndex != VMA_NULL)
    5760  {
    5761  *pNewBlockIndex = m_Blocks.size() - 1;
    5762  }
    5763 
    5764  return VK_SUCCESS;
    5765 }
    5766 
    5767 #if VMA_STATS_STRING_ENABLED
    5768 
    5769 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    5770 {
    5771  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5772 
    5773  json.BeginObject();
    5774 
    5775  if(m_IsCustomPool)
    5776  {
    5777  json.WriteString("MemoryTypeIndex");
    5778  json.WriteNumber(m_MemoryTypeIndex);
    5779 
    5780  if(m_BlockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
    5781  {
    5782  json.WriteString("Mapped");
    5783  json.WriteBool(true);
    5784  }
    5785 
    5786  json.WriteString("BlockSize");
    5787  json.WriteNumber(m_PreferredBlockSize);
    5788 
    5789  json.WriteString("BlockCount");
    5790  json.BeginObject(true);
    5791  if(m_MinBlockCount > 0)
    5792  {
    5793  json.WriteString("Min");
    5794  json.WriteNumber(m_MinBlockCount);
    5795  }
    5796  if(m_MaxBlockCount < SIZE_MAX)
    5797  {
    5798  json.WriteString("Max");
    5799  json.WriteNumber(m_MaxBlockCount);
    5800  }
    5801  json.WriteString("Cur");
    5802  json.WriteNumber(m_Blocks.size());
    5803  json.EndObject();
    5804 
    5805  if(m_FrameInUseCount > 0)
    5806  {
    5807  json.WriteString("FrameInUseCount");
    5808  json.WriteNumber(m_FrameInUseCount);
    5809  }
    5810  }
    5811  else
    5812  {
    5813  json.WriteString("PreferredBlockSize");
    5814  json.WriteNumber(m_PreferredBlockSize);
    5815  }
    5816 
    5817  json.WriteString("Blocks");
    5818  json.BeginArray();
    5819  for(size_t i = 0; i < m_Blocks.size(); ++i)
    5820  {
    5821  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
    5822  }
    5823  json.EndArray();
    5824 
    5825  json.EndObject();
    5826 }
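
// Illustrative usage sketch: this JSON writer backs the public
// vmaBuildStatsString() entry point when detailedMap is enabled (a valid
// `allocator` is assumed):
static void DumpStatsExample(VmaAllocator allocator)
{
    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // ... write statsString to a file for offline inspection ...
    vmaFreeStatsString(allocator, statsString);
}
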
    5827 
    5828 #endif // #if VMA_STATS_STRING_ENABLED
    5829 
    5830 void VmaBlockVector::UnmapPersistentlyMappedMemory()
    5831 {
    5832  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5833 
    5834  for(size_t i = m_Blocks.size(); i--; )
    5835  {
    5836  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
    5837  if(pBlock->m_pMappedData != VMA_NULL)
    5838  {
    5839  VMA_ASSERT(pBlock->m_PersistentMap);
    5840  (m_hAllocator->GetVulkanFunctions().vkUnmapMemory)(m_hAllocator->m_hDevice, pBlock->m_hMemory);
    5841  pBlock->m_pMappedData = VMA_NULL;
    5842  }
    5843  }
    5844 }
    5845 
    5846 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
    5847 {
    5848  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5849 
    5850  VkResult finalResult = VK_SUCCESS;
    5851  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    5852  {
    5853  VmaDeviceMemoryBlock* pBlock = m_Blocks[i];
    5854  if(pBlock->m_PersistentMap)
    5855  {
    5856  VMA_ASSERT(pBlock->m_pMappedData == VMA_NULL);
    5857  VkResult localResult = (*m_hAllocator->GetVulkanFunctions().vkMapMemory)(
    5858  m_hAllocator->m_hDevice,
    5859  pBlock->m_hMemory,
    5860  0,
    5861  VK_WHOLE_SIZE,
    5862  0,
    5863  &pBlock->m_pMappedData);
    5864  if(localResult != VK_SUCCESS)
    5865  {
    5866  finalResult = localResult;
    5867  }
    5868  }
    5869  }
    5870  return finalResult;
    5871 }
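
// Illustrative usage sketch, assuming this revision exposes the
// vmaUnmapPersistentlyMappedMemory() / vmaMapPersistentlyMappedMemory() pair
// that the m_UnmapPersistentlyMappedMemoryCounter logic further down suggests:
static void FocusLossExample(VmaAllocator allocator)
{
    vmaUnmapPersistentlyMappedMemory(allocator);
    // ... e.g. the application is minimized / loses exclusive fullscreen ...
    VkResult res = vmaMapPersistentlyMappedMemory(allocator);
    (void)res; // VK_SUCCESS unless some vkMapMemory call failed
}
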
    5872 
    5873 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    5874  VmaAllocator hAllocator,
    5875  uint32_t currentFrameIndex)
    5876 {
    5877  if(m_pDefragmentator == VMA_NULL)
    5878  {
    5879  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    5880  hAllocator,
    5881  this,
    5882  currentFrameIndex);
    5883  }
    5884 
    5885  return m_pDefragmentator;
    5886 }
    5887 
    5888 VkResult VmaBlockVector::Defragment(
    5889  VmaDefragmentationStats* pDefragmentationStats,
    5890  VkDeviceSize& maxBytesToMove,
    5891  uint32_t& maxAllocationsToMove)
    5892 {
    5893  if(m_pDefragmentator == VMA_NULL)
    5894  {
    5895  return VK_SUCCESS;
    5896  }
    5897 
    5898  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5899 
    5900  // Defragment.
    5901  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
    5902 
    5903  // Accumulate statistics.
    5904  if(pDefragmentationStats != VMA_NULL)
    5905  {
    5906  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
    5907  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
    5908  pDefragmentationStats->bytesMoved += bytesMoved;
    5909  pDefragmentationStats->allocationsMoved += allocationsMoved;
    5910  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    5911  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    5912  maxBytesToMove -= bytesMoved;
    5913  maxAllocationsToMove -= allocationsMoved;
    5914  }
    5915 
    5916  // Free empty blocks.
    5917  m_HasEmptyBlock = false;
    5918  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    5919  {
    5920  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    5921  if(pBlock->m_Metadata.IsEmpty())
    5922  {
    5923  if(m_Blocks.size() > m_MinBlockCount)
    5924  {
    5925  if(pDefragmentationStats != VMA_NULL)
    5926  {
    5927  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    5928  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
    5929  }
    5930 
    5931  VmaVectorRemove(m_Blocks, blockIndex);
    5932  pBlock->Destroy(m_hAllocator);
    5933  vma_delete(m_hAllocator, pBlock);
    5934  }
    5935  else
    5936  {
    5937  m_HasEmptyBlock = true;
    5938  }
    5939  }
    5940  }
    5941 
    5942  return result;
    5943 }
    5944 
    5945 void VmaBlockVector::DestroyDefragmentator()
    5946 {
    5947  if(m_pDefragmentator != VMA_NULL)
    5948  {
    5949  vma_delete(m_hAllocator, m_pDefragmentator);
    5950  m_pDefragmentator = VMA_NULL;
    5951  }
    5952 }
    5953 
    5954 void VmaBlockVector::MakePoolAllocationsLost(
    5955  uint32_t currentFrameIndex,
    5956  size_t* pLostAllocationCount)
    5957 {
    5958  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5959 
    5960  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5961  {
    5962  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5963  VMA_ASSERT(pBlock);
    5964  pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    5965  }
    5966 }
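
// Illustrative usage sketch, assuming vmaMakePoolAllocationsLost() is the
// public entry point that drives this routine for custom pools (a valid
// `allocator` and `pool` are assumed):
static void MakeLostExample(VmaAllocator allocator, VmaPool pool)
{
    size_t lostCount = 0;
    vmaMakePoolAllocationsLost(allocator, pool, &lostCount);
}
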
    5967 
    5968 void VmaBlockVector::AddStats(VmaStats* pStats)
    5969 {
    5970  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    5971  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    5972 
    5973  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    5974 
    5975  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    5976  {
    5977  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    5978  VMA_ASSERT(pBlock);
    5979  VMA_HEAVY_ASSERT(pBlock->Validate());
    5980  VmaStatInfo allocationStatInfo;
    5981  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
    5982  VmaAddStatInfo(pStats->total, allocationStatInfo);
    5983  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    5984  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    5985  }
    5986 }
    5987 
    5988 ////////////////////////////////////////////////////////////////////////////////
    5989 // VmaDefragmentator members definition
    5990 
    5991 VmaDefragmentator::VmaDefragmentator(
    5992  VmaAllocator hAllocator,
    5993  VmaBlockVector* pBlockVector,
    5994  uint32_t currentFrameIndex) :
    5995  m_hAllocator(hAllocator),
    5996  m_pBlockVector(pBlockVector),
    5997  m_CurrentFrameIndex(currentFrameIndex),
    5998  m_BytesMoved(0),
    5999  m_AllocationsMoved(0),
    6000  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    6001  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    6002 {
    6003 }
    6004 
    6005 VmaDefragmentator::~VmaDefragmentator()
    6006 {
    6007  for(size_t i = m_Blocks.size(); i--; )
    6008  {
    6009  vma_delete(m_hAllocator, m_Blocks[i]);
    6010  }
    6011 }
    6012 
    6013 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    6014 {
    6015  AllocationInfo allocInfo;
    6016  allocInfo.m_hAllocation = hAlloc;
    6017  allocInfo.m_pChanged = pChanged;
    6018  m_Allocations.push_back(allocInfo);
    6019 }
    6020 
    6021 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    6022 {
    6023  // It has already been mapped for defragmentation.
    6024  if(m_pMappedDataForDefragmentation)
    6025  {
    6026  *ppMappedData = m_pMappedDataForDefragmentation;
    6027  return VK_SUCCESS;
    6028  }
    6029 
    6030  // It is persistently mapped.
    6031  if(m_pBlock->m_PersistentMap)
    6032  {
    6033  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
    6034  *ppMappedData = m_pBlock->m_pMappedData;
    6035  return VK_SUCCESS;
    6036  }
    6037 
    6038  // Map on first usage.
    6039  VkResult res = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    6040  hAllocator->m_hDevice,
    6041  m_pBlock->m_hMemory,
    6042  0,
    6043  VK_WHOLE_SIZE,
    6044  0,
    6045  &m_pMappedDataForDefragmentation);
    6046  *ppMappedData = m_pMappedDataForDefragmentation;
    6047  return res;
    6048 }
    6049 
    6050 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    6051 {
    6052  if(m_pMappedDataForDefragmentation != VMA_NULL)
    6053  {
    6054  (hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_pBlock->m_hMemory);
    6055  }
    6056 }
    6057 
    6058 VkResult VmaDefragmentator::DefragmentRound(
    6059  VkDeviceSize maxBytesToMove,
    6060  uint32_t maxAllocationsToMove)
    6061 {
    6062  if(m_Blocks.empty())
    6063  {
    6064  return VK_SUCCESS;
    6065  }
    6066 
    6067  size_t srcBlockIndex = m_Blocks.size() - 1;
    6068  size_t srcAllocIndex = SIZE_MAX;
    6069  for(;;)
    6070  {
    6071  // 1. Find next allocation to move.
    6072  // 1.1. Walk m_Blocks from last to first - they are sorted from most "destination" to most "source".
    6073  // 1.2. Within a block, walk m_Allocations from last to first - they are sorted from largest to smallest.
    6074  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    6075  {
    6076  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    6077  {
    6078  // Finished: no more allocations to process.
    6079  if(srcBlockIndex == 0)
    6080  {
    6081  return VK_SUCCESS;
    6082  }
    6083  else
    6084  {
    6085  --srcBlockIndex;
    6086  srcAllocIndex = SIZE_MAX;
    6087  }
    6088  }
    6089  else
    6090  {
    6091  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    6092  }
    6093  }
    6094 
    6095  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    6096  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    6097 
    6098  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    6099  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    6100  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    6101  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    6102 
    6103  // 2. Try to find new place for this allocation in preceding or current block.
    6104  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    6105  {
    6106  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    6107  VmaAllocationRequest dstAllocRequest;
    6108  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
    6109  m_CurrentFrameIndex,
    6110  m_pBlockVector->GetFrameInUseCount(),
    6111  m_pBlockVector->GetBufferImageGranularity(),
    6112  size,
    6113  alignment,
    6114  suballocType,
    6115  false, // canMakeOtherLost
    6116  &dstAllocRequest) &&
    6117  MoveMakesSense(
    6118  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    6119  {
    6120  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    6121 
    6122  // Reached limit on number of allocations or bytes to move.
    6123  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    6124  (m_BytesMoved + size > maxBytesToMove))
    6125  {
    6126  return VK_INCOMPLETE;
    6127  }
    6128 
    6129  void* pDstMappedData = VMA_NULL;
    6130  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    6131  if(res != VK_SUCCESS)
    6132  {
    6133  return res;
    6134  }
    6135 
    6136  void* pSrcMappedData = VMA_NULL;
    6137  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    6138  if(res != VK_SUCCESS)
    6139  {
    6140  return res;
    6141  }
    6142 
    6143  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    6144  memcpy(
    6145  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    6146  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    6147  static_cast<size_t>(size));
    6148 
    6149  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
    6150  pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);
    6151 
    6152  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    6153 
    6154  if(allocInfo.m_pChanged != VMA_NULL)
    6155  {
    6156  *allocInfo.m_pChanged = VK_TRUE;
    6157  }
    6158 
    6159  ++m_AllocationsMoved;
    6160  m_BytesMoved += size;
    6161 
    6162  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    6163 
    6164  break;
    6165  }
    6166  }
    6167 
    6168  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for next round.
    6169 
    6170  if(srcAllocIndex > 0)
    6171  {
    6172  --srcAllocIndex;
    6173  }
    6174  else
    6175  {
    6176  if(srcBlockIndex > 0)
    6177  {
    6178  --srcBlockIndex;
    6179  srcAllocIndex = SIZE_MAX;
    6180  }
    6181  else
    6182  {
    6183  return VK_SUCCESS;
    6184  }
    6185  }
    6186  }
    6187 }
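
// Illustrative sketch (not part of vk_mem_alloc.h): a round is in essence a
// two-pointer compaction - items are taken from the back ("source" blocks) and
// re-placed toward the front ("destination" blocks). A minimal analogue on
// vectors of ints with a fixed per-block capacity:
#include <vector>
static void CompactToFront(std::vector<std::vector<int>>& blocks, size_t capacity)
{
    for(size_t src = blocks.size(); src-- > 0; )                   // back to front
    {
        while(!blocks[src].empty())
        {
            size_t dst = 0;                                        // front to back
            while(dst < src && blocks[dst].size() >= capacity) ++dst;
            if(dst >= src) break;                                  // no earlier space left
            blocks[dst].push_back(blocks[src].back());             // "move" one item
            blocks[src].pop_back();
        }
    }
}
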
    6188 
    6189 VkResult VmaDefragmentator::Defragment(
    6190  VkDeviceSize maxBytesToMove,
    6191  uint32_t maxAllocationsToMove)
    6192 {
    6193  if(m_Allocations.empty())
    6194  {
    6195  return VK_SUCCESS;
    6196  }
    6197 
    6198  // Create block info for each block.
    6199  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    6200  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6201  {
    6202  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    6203  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    6204  m_Blocks.push_back(pBlockInfo);
    6205  }
    6206 
    6207  // Sort them by m_pBlock pointer value.
    6208  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    6209 
    6210  // Move allocation infos from m_Allocations to the matching BlockInfo's m_Allocations, found by block pointer.
    6211  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    6212  {
    6213  AllocationInfo& allocInfo = m_Allocations[blockIndex];
    6214  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
    6215  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    6216  {
    6217  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
    6218  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    6219  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    6220  {
    6221  (*it)->m_Allocations.push_back(allocInfo);
    6222  }
    6223  else
    6224  {
    6225  VMA_ASSERT(0);
    6226  }
    6227  }
    6228  }
    6229  m_Allocations.clear();
    6230 
    6231  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6232  {
    6233  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    6234  pBlockInfo->CalcHasNonMovableAllocations();
    6235  pBlockInfo->SortAllocationsBySizeDescecnding();
    6236  }
    6237 
    6238  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    6239  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    6240 
    6241  // Execute defragmentation rounds (the main part).
    6242  VkResult result = VK_SUCCESS;
    6243  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    6244  {
    6245  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    6246  }
    6247 
    6248  // Unmap blocks that were mapped for defragmentation.
    6249  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6250  {
    6251  m_Blocks[blockIndex]->Unmap(m_hAllocator);
    6252  }
    6253 
    6254  return result;
    6255 }
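
// Illustrative usage sketch: applications drive this machinery through the
// public vmaDefragment() entry point. ALLOC_COUNT and the `allocs` array are
// hypothetical and stand for allocations the application gathered; a valid
// `allocator` is assumed.
static void DefragmentExample(VmaAllocator allocator)
{
    static const size_t ALLOC_COUNT = 16;      // hypothetical
    VmaAllocation allocs[ALLOC_COUNT] = {};    // assumed gathered by the application
    VkBool32 changed[ALLOC_COUNT] = {};
    VmaDefragmentationStats defragStats = {};
    VkResult res = vmaDefragment(allocator, allocs, ALLOC_COUNT, changed, VMA_NULL, &defragStats);
    // Where changed[i] == VK_TRUE, allocs[i] moved: destroy and recreate any
    // buffers/images bound to it before the next use.
    (void)res;
}
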
    6256 
    6257 bool VmaDefragmentator::MoveMakesSense(
    6258  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6259  size_t srcBlockIndex, VkDeviceSize srcOffset)
    6260 {
    6261  if(dstBlockIndex < srcBlockIndex)
    6262  {
    6263  return true;
    6264  }
    6265  if(dstBlockIndex > srcBlockIndex)
    6266  {
    6267  return false;
    6268  }
    6269  if(dstOffset < srcOffset)
    6270  {
    6271  return true;
    6272  }
    6273  return false;
    6274 }
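
// Illustrative sketch (not part of vk_mem_alloc.h): the chain of ifs above is
// a lexicographic "less-than" on (blockIndex, offset) pairs, so moves only
// ever go toward the front. Equivalent formulation (uint64_t standing in for
// VkDeviceSize):
#include <cstdint>
#include <tuple>
static bool MoveMakesSenseTie(size_t dstBlockIndex, uint64_t dstOffset,
                              size_t srcBlockIndex, uint64_t srcOffset)
{
    return std::tie(dstBlockIndex, dstOffset) < std::tie(srcBlockIndex, srcOffset);
}
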
    6275 
    6276 ////////////////////////////////////////////////////////////////////////////////
    6277 // VmaAllocator_T
    6278 
    6279 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    6280  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    6281  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    6282  m_PhysicalDevice(pCreateInfo->physicalDevice),
    6283  m_hDevice(pCreateInfo->device),
    6284  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    6285  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    6286  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    6287  m_UnmapPersistentlyMappedMemoryCounter(0),
    6288  m_PreferredLargeHeapBlockSize(0),
    6289  m_PreferredSmallHeapBlockSize(0),
    6290  m_CurrentFrameIndex(0),
    6291  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
    6292 {
    6293  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    6294 
    6295  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    6296  memset(&m_MemProps, 0, sizeof(m_MemProps));
    6297  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    6298 
    6299  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    6300  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    6301 
    6302  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    6303  {
    6304  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    6305  }
    6306 
    6307  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    6308  {
    6309  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    6310  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    6311  }
    6312 
    6313  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    6314 
    6315  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    6316  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    6317 
    6318  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    6319  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    6320  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
    6321  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
    6322 
    6323  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    6324  {
    6325  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    6326  {
    6327  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    6328  if(limit != VK_WHOLE_SIZE)
    6329  {
    6330  m_HeapSizeLimit[heapIndex] = limit;
    6331  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    6332  {
    6333  m_MemProps.memoryHeaps[heapIndex].size = limit;
    6334  }
    6335  }
    6336  }
    6337  }
    6338 
    6339  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6340  {
    6341  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    6342 
    6343  for(size_t blockVectorTypeIndex = 0; blockVectorTypeIndex < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorTypeIndex)
    6344  {
    6345  m_pBlockVectors[memTypeIndex][blockVectorTypeIndex] = vma_new(this, VmaBlockVector)(
    6346  this,
    6347  memTypeIndex,
    6348  static_cast<VMA_BLOCK_VECTOR_TYPE>(blockVectorTypeIndex),
    6349  preferredBlockSize,
    6350  0,
    6351  SIZE_MAX,
    6352  GetBufferImageGranularity(),
    6353  pCreateInfo->frameInUseCount,
    6354  false); // isCustomPool
    6355  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    6356  // because minBlockCount is 0.
    6357  m_pDedicatedAllocations[memTypeIndex][blockVectorTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    6358  }
    6359  }
    6360 }
    6361 
    6362 VmaAllocator_T::~VmaAllocator_T()
    6363 {
    6364  VMA_ASSERT(m_Pools.empty());
    6365 
    6366  for(size_t i = GetMemoryTypeCount(); i--; )
    6367  {
    6368  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
    6369  {
    6370  vma_delete(this, m_pDedicatedAllocations[i][j]);
    6371  vma_delete(this, m_pBlockVectors[i][j]);
    6372  }
    6373  }
    6374 }
    6375 
    6376 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    6377 {
    6378 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    6379  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    6380  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    6381  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    6382  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    6383  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    6384  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    6385  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    6386  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    6387  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    6388  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    6389  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    6390  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    6391  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    6392  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    6393  // Ignoring vkGetBufferMemoryRequirements2KHR.
    6394  // Ignoring vkGetImageMemoryRequirements2KHR.
    6395 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    6396 
    6397 #define VMA_COPY_IF_NOT_NULL(funcName) \
    6398  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    6399 
    6400  if(pVulkanFunctions != VMA_NULL)
    6401  {
    6402  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    6403  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    6404  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    6405  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    6406  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    6407  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    6408  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    6409  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    6410  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    6411  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    6412  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    6413  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    6414  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    6415  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    6416  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    6417  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    6418  }
    6419 
    6420 #undef VMA_COPY_IF_NOT_NULL
    6421 
    6422  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    6423  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    6424  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    6425  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    6426  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    6427  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    6428  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    6429  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    6430  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    6431  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    6432  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    6433  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    6434  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    6435  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    6436  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    6437  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    6438  if(m_UseKhrDedicatedAllocation)
    6439  {
    6440  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    6441  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    6442  }
    6443 }
    6444 
    6445 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    6446 {
    6447  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6448  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    6449  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
    6450  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
    6451 }
    6452 
    6453 VkResult VmaAllocator_T::AllocateMemoryOfType(
    6454  const VkMemoryRequirements& vkMemReq,
    6455  bool dedicatedAllocation,
    6456  VkBuffer dedicatedBuffer,
    6457  VkImage dedicatedImage,
    6458  const VmaAllocationCreateInfo& createInfo,
    6459  uint32_t memTypeIndex,
    6460  VmaSuballocationType suballocType,
    6461  VmaAllocation* pAllocation)
    6462 {
    6463  VMA_ASSERT(pAllocation != VMA_NULL);
    6464  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    6465 
    6466  uint32_t blockVectorType = VmaAllocationCreateFlagsToBlockVectorType(createInfo.flags);
    6467  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6468  VMA_ASSERT(blockVector);
    6469 
    6470  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    6471 
    6472  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    6473  bool preferDedicatedMemory =
    6474  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    6475  dedicatedAllocation ||
    6476  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
    6477  vkMemReq.size > preferredBlockSize / 2;
    6478 
    6479  if(preferDedicatedMemory &&
    6480  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    6481  finalCreateInfo.pool == VK_NULL_HANDLE)
    6482  {
    6483  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    6484  }
    6485 
    6486  // If memory type is not HOST_VISIBLE, disable PERSISTENT_MAP.
    6487  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0 &&
    6488  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    6489  {
    6490  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;
    6491  }
    6492 
    6493  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    6494  {
    6495  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6496  {
    6497  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6498  }
    6499  else
    6500  {
    6501  return AllocateDedicatedMemory(
    6502  vkMemReq.size,
    6503  suballocType,
    6504  memTypeIndex,
    6505  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
    6506  finalCreateInfo.pUserData,
    6507  dedicatedBuffer,
    6508  dedicatedImage,
    6509  pAllocation);
    6510  }
    6511  }
    6512  else
    6513  {
    6514  VkResult res = blockVector->Allocate(
    6515  VK_NULL_HANDLE, // hCurrentPool
    6516  m_CurrentFrameIndex.load(),
    6517  vkMemReq,
    6518  finalCreateInfo,
    6519  suballocType,
    6520  pAllocation);
    6521  if(res == VK_SUCCESS)
    6522  {
    6523  return res;
    6524  }
    6525 
    6526  // Try dedicated memory as a fallback.
    6527  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6528  {
    6529  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6530  }
    6531  else
    6532  {
    6533  res = AllocateDedicatedMemory(
    6534  vkMemReq.size,
    6535  suballocType,
    6536  memTypeIndex,
    6537  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT) != 0,
    6538  finalCreateInfo.pUserData,
    6539  dedicatedBuffer,
    6540  dedicatedImage,
    6541  pAllocation);
    6542  if(res == VK_SUCCESS)
    6543  {
    6544  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
    6545  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    6546  return VK_SUCCESS;
    6547  }
    6548  else
    6549  {
    6550  // Everything failed: Return error code.
    6551  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    6552  return res;
    6553  }
    6554  }
    6555  }
    6556 }
    6557 
    6558 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    6559  VkDeviceSize size,
    6560  VmaSuballocationType suballocType,
    6561  uint32_t memTypeIndex,
    6562  bool map,
    6563  void* pUserData,
    6564  VkBuffer dedicatedBuffer,
    6565  VkImage dedicatedImage,
    6566  VmaAllocation* pAllocation)
    6567 {
    6568  VMA_ASSERT(pAllocation);
    6569 
    6570  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    6571  allocInfo.memoryTypeIndex = memTypeIndex;
    6572  allocInfo.allocationSize = size;
    6573 
    6574  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    6575  if(m_UseKhrDedicatedAllocation)
    6576  {
    6577  if(dedicatedBuffer != VK_NULL_HANDLE)
    6578  {
    6579  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    6580  dedicatedAllocInfo.buffer = dedicatedBuffer;
    6581  allocInfo.pNext = &dedicatedAllocInfo;
    6582  }
    6583  else if(dedicatedImage != VK_NULL_HANDLE)
    6584  {
    6585  dedicatedAllocInfo.image = dedicatedImage;
    6586  allocInfo.pNext = &dedicatedAllocInfo;
    6587  }
    6588  }
    6589 
    6590  // Allocate VkDeviceMemory.
    6591  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    6592  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    6593  if(res < 0)
    6594  {
    6595  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    6596  return res;
    6597  }
    6598 
    6599  void* pMappedData = VMA_NULL;
    6600  if(map)
    6601  {
    6602  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
    6603  {
    6604  res = (*m_VulkanFunctions.vkMapMemory)(
    6605  m_hDevice,
    6606  hMemory,
    6607  0,
    6608  VK_WHOLE_SIZE,
    6609  0,
    6610  &pMappedData);
    6611  if(res < 0)
    6612  {
    6613  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    6614  FreeVulkanMemory(memTypeIndex, size, hMemory);
    6615  return res;
    6616  }
    6617  }
    6618  }
    6619 
    6620  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load());
    6621  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
    6622 
    6623  // Register it in m_pDedicatedAllocations.
    6624  {
    6625  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6626  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
    6627  VMA_ASSERT(pDedicatedAllocations);
    6628  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    6629  }
    6630 
    6631  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
    6632 
    6633  return VK_SUCCESS;
    6634 }
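
// Illustrative usage sketch: an application can force this code path with the
// dedicated-memory flag (a valid `allocator` and a filled `bufCreateInfo` are
// assumed):
static void DedicatedBufferExample(VmaAllocator allocator, const VkBufferCreateInfo& bufCreateInfo)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = VMA_NULL;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, VMA_NULL);
}
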
    6635 
    6636 void VmaAllocator_T::GetBufferMemoryRequirements(
    6637  VkBuffer hBuffer,
    6638  VkMemoryRequirements& memReq,
    6639  bool& requiresDedicatedAllocation,
    6640  bool& prefersDedicatedAllocation) const
    6641 {
    6642  if(m_UseKhrDedicatedAllocation)
    6643  {
    6644  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    6645  memReqInfo.buffer = hBuffer;
    6646 
    6647  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    6648 
    6649  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    6650  memReq2.pNext = &memDedicatedReq;
    6651 
    6652  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    6653 
    6654  memReq = memReq2.memoryRequirements;
    6655  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    6656  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    6657  }
    6658  else
    6659  {
    6660  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    6661  requiresDedicatedAllocation = false;
    6662  prefersDedicatedAllocation = false;
    6663  }
    6664 }
    6665 
    6666 void VmaAllocator_T::GetImageMemoryRequirements(
    6667  VkImage hImage,
    6668  VkMemoryRequirements& memReq,
    6669  bool& requiresDedicatedAllocation,
    6670  bool& prefersDedicatedAllocation) const
    6671 {
    6672  if(m_UseKhrDedicatedAllocation)
    6673  {
    6674  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    6675  memReqInfo.image = hImage;
    6676 
    6677  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    6678 
    6679  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    6680  memReq2.pNext = &memDedicatedReq;
    6681 
    6682  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    6683 
    6684  memReq = memReq2.memoryRequirements;
    6685  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    6686  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    6687  }
    6688  else
    6689  {
    6690  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    6691  requiresDedicatedAllocation = false;
    6692  prefersDedicatedAllocation = false;
    6693  }
    6694 }
    6695 
    6696 VkResult VmaAllocator_T::AllocateMemory(
    6697  const VkMemoryRequirements& vkMemReq,
    6698  bool requiresDedicatedAllocation,
    6699  bool prefersDedicatedAllocation,
    6700  VkBuffer dedicatedBuffer,
    6701  VkImage dedicatedImage,
    6702  const VmaAllocationCreateInfo& createInfo,
    6703  VmaSuballocationType suballocType,
    6704  VmaAllocation* pAllocation)
    6705 {
    6706  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    6707  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6708  {
    6709  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    6710  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6711  }
    6712  if(requiresDedicatedAllocation)
    6713  {
    6714  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    6715  {
    6716  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    6717  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6718  }
    6719  if(createInfo.pool != VK_NULL_HANDLE)
    6720  {
    6721  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    6722  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6723  }
    6724  }
    6725  if((createInfo.pool != VK_NULL_HANDLE) &&
    6726  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    6727  {
    6728  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    6729  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6730  }
    6731 
    6732  if(createInfo.pool != VK_NULL_HANDLE)
    6733  {
    6734  return createInfo.pool->m_BlockVector.Allocate(
    6735  createInfo.pool,
    6736  m_CurrentFrameIndex.load(),
    6737  vkMemReq,
    6738  createInfo,
    6739  suballocType,
    6740  pAllocation);
    6741  }
    6742  else
    6743  {
    6744  // Bit mask of Vulkan memory types acceptable for this allocation.
    6745  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    6746  uint32_t memTypeIndex = UINT32_MAX;
    6747  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    6748  if(res == VK_SUCCESS)
    6749  {
    6750  res = AllocateMemoryOfType(
    6751  vkMemReq,
    6752  requiresDedicatedAllocation || prefersDedicatedAllocation,
    6753  dedicatedBuffer,
    6754  dedicatedImage,
    6755  createInfo,
    6756  memTypeIndex,
    6757  suballocType,
    6758  pAllocation);
    6759  // Succeeded on first try.
    6760  if(res == VK_SUCCESS)
    6761  {
    6762  return res;
    6763  }
    6764  // Allocation from this memory type failed. Try other compatible memory types.
    6765  else
    6766  {
    6767  for(;;)
    6768  {
    6769  // Remove old memTypeIndex from list of possibilities.
    6770  memoryTypeBits &= ~(1u << memTypeIndex);
    6771  // Find alternative memTypeIndex.
    6772  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    6773  if(res == VK_SUCCESS)
    6774  {
    6775  res = AllocateMemoryOfType(
    6776  vkMemReq,
    6777  requiresDedicatedAllocation || prefersDedicatedAllocation,
    6778  dedicatedBuffer,
    6779  dedicatedImage,
    6780  createInfo,
    6781  memTypeIndex,
    6782  suballocType,
    6783  pAllocation);
    6784  // Allocation from this alternative memory type succeeded.
    6785  if(res == VK_SUCCESS)
    6786  {
    6787  return res;
    6788  }
    6789  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    6790  }
    6791  // No other matching memory type index could be found.
    6792  else
    6793  {
    6794  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    6795  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6796  }
    6797  }
    6798  }
    6799  }
    6800  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    6801  else
    6802  return res;
    6803  }
    6804 }
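
// Illustrative sketch (not part of vk_mem_alloc.h): the fallback loop above
// mirrors what an application could do by hand with vmaFindMemoryTypeIndex(),
// masking out each failed type. TryAllocateFromType is a hypothetical helper;
// `allocator`, `memReq` and `createInfo` are assumed.
static bool TryAllocateFromType(uint32_t memTypeIndex) { (void)memTypeIndex; return false; }
static void FallbackLoopExample(VmaAllocator allocator, const VkMemoryRequirements& memReq,
                                const VmaAllocationCreateInfo& createInfo)
{
    uint32_t memoryTypeBits = memReq.memoryTypeBits;
    uint32_t memTypeIndex = UINT32_MAX;
    while(vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &createInfo, &memTypeIndex) == VK_SUCCESS)
    {
        if(TryAllocateFromType(memTypeIndex))
        {
            break; // succeeded
        }
        memoryTypeBits &= ~(1u << memTypeIndex); // exclude the failed type, retry
    }
}
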
    6805 
    6806 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    6807 {
    6808  VMA_ASSERT(allocation);
    6809 
    6810  if(allocation->CanBecomeLost() == false ||
    6811  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    6812  {
    6813  switch(allocation->GetType())
    6814  {
    6815  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    6816  {
    6817  VmaBlockVector* pBlockVector = VMA_NULL;
    6818  VmaPool hPool = allocation->GetPool();
    6819  if(hPool != VK_NULL_HANDLE)
    6820  {
    6821  pBlockVector = &hPool->m_BlockVector;
    6822  }
    6823  else
    6824  {
    6825  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    6826  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
    6827  pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6828  }
    6829  pBlockVector->Free(allocation);
    6830  }
    6831  break;
    6832  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    6833  FreeDedicatedMemory(allocation);
    6834  break;
    6835  default:
    6836  VMA_ASSERT(0);
    6837  }
    6838  }
    6839 
    6840  vma_delete(this, allocation);
    6841 }
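
// Illustrative usage sketch: applications reach this through vmaFreeMemory(),
// or implicitly through vmaDestroyBuffer()/vmaDestroyImage() (a valid
// `allocator` and `allocation` are assumed):
static void FreeExample(VmaAllocator allocator, VmaAllocation allocation)
{
    vmaFreeMemory(allocator, allocation);
}
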
    6842 
    6843 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    6844 {
    6845  // Initialize.
    6846  InitStatInfo(pStats->total);
    6847  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    6848  InitStatInfo(pStats->memoryType[i]);
    6849  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    6850  InitStatInfo(pStats->memoryHeap[i]);
    6851 
    6852  // Process default pools.
    6853  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6854  {
    6855  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6856  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    6857  {
    6858  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
    6859  VMA_ASSERT(pBlockVector);
    6860  pBlockVector->AddStats(pStats);
    6861  }
    6862  }
    6863 
    6864  // Process custom pools.
    6865  {
    6866  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6867  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6868  {
    6869  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
    6870  }
    6871  }
    6872 
    6873  // Process dedicated allocations.
    6874  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    6875  {
    6876  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    6877  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6878  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    6879  {
    6880  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
    6881  VMA_ASSERT(pDedicatedAllocVector);
    6882  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    6883  {
    6884  VmaStatInfo allocationStatInfo;
    6885  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    6886  VmaAddStatInfo(pStats->total, allocationStatInfo);
    6887  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    6888  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    6889  }
    6890  }
    6891  }
    6892 
    6893  // Postprocess.
    6894  VmaPostprocessCalcStatInfo(pStats->total);
    6895  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    6896  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    6897  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    6898  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    6899 }
    6900 
    6901 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // PCI vendor ID 0x1002
    6902 
    6903 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
    6904 {
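     // Calls are reference-counted: only the outermost Unmap actually unmaps
     // memory. Nested calls just increment the counter, and
     // MapPersistentlyMappedMemory() remaps when it drops back to zero.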
    6905  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
    6906  {
    6907  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
    6908  {
    6909  for(uint32_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
    6910  {
    6911  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    6912  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
    6913  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    6914  {
    6915  // Process DedicatedAllocations.
    6916  {
    6917  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6918  AllocationVectorType* pDedicatedAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6919  for(size_t dedicatedAllocIndex = pDedicatedAllocationsVector->size(); dedicatedAllocIndex--; )
    6920  {
    6921  VmaAllocation hAlloc = (*pDedicatedAllocationsVector)[dedicatedAllocIndex];
    6922  hAlloc->DedicatedAllocUnmapPersistentlyMappedMemory(this);
    6923  }
    6924  }
    6925 
    6926  // Process normal Allocations.
    6927  {
    6928  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6929  pBlockVector->UnmapPersistentlyMappedMemory();
    6930  }
    6931  }
    6932  }
    6933 
    6934  // Process custom pools.
    6935  {
    6936  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6937  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6938  {
    6939  m_Pools[poolIndex]->GetBlockVector().UnmapPersistentlyMappedMemory();
    6940  }
    6941  }
    6942  }
    6943  }
    6944 }
    6945 
    6946 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
    6947 {
    6948  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
    6949  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
    6950  {
    6951  VkResult finalResult = VK_SUCCESS;
    6952  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
    6953  {
    6954  // Process custom pools.
    6955  {
    6956  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    6957  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    6958  {
    6959  m_Pools[poolIndex]->GetBlockVector().MapPersistentlyMappedMemory();
    6960  }
    6961  }
    6962 
    6963  for(uint32_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
    6964  {
    6965  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    6966  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
    6967  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    6968  {
    6969  // Process DedicatedAllocations.
    6970  {
    6971  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    6972  AllocationVectorType* pAllocationsVector = m_pDedicatedAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6973  for(size_t dedicatedAllocIndex = 0, dedicatedAllocCount = pAllocationsVector->size(); dedicatedAllocIndex < dedicatedAllocCount; ++dedicatedAllocIndex)
    6974  {
    6975  VmaAllocation hAlloc = (*pAllocationsVector)[dedicatedAllocIndex];
    6976  hAlloc->DedicatedAllocMapPersistentlyMappedMemory(this);
    6977  }
    6978  }
    6979 
    6980  // Process normal Allocations.
    6981  {
    6982  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
    6983  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
    6984  if(localResult != VK_SUCCESS)
    6985  {
    6986  finalResult = localResult;
    6987  }
    6988  }
    6989  }
    6990  }
    6991  }
    6992  return finalResult;
    6993  }
    6994  else
    6995  return VK_SUCCESS;
    6996 }
    6997 
    6998 VkResult VmaAllocator_T::Defragment(
    6999  VmaAllocation* pAllocations,
    7000  size_t allocationCount,
    7001  VkBool32* pAllocationsChanged,
    7002  const VmaDefragmentationInfo* pDefragmentationInfo,
    7003  VmaDefragmentationStats* pDefragmentationStats)
    7004 {
    7005  if(pAllocationsChanged != VMA_NULL)
    7006  {
    7007  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
    7008  }
    7009  if(pDefragmentationStats != VMA_NULL)
    7010  {
    7011  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    7012  }
    7013 
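     // Defragmentation copies allocation contents through mapped pointers,
     // so it cannot run while persistently mapped memory is unmapped.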
    7014  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
    7015  {
    7016  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
    7017  return VK_ERROR_MEMORY_MAP_FAILED;
    7018  }
    7019 
    7020  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    7021 
    7022  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    7023 
    7024  const size_t poolCount = m_Pools.size();
    7025 
    7026  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    7027  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    7028  {
    7029  VmaAllocation hAlloc = pAllocations[allocIndex];
    7030  VMA_ASSERT(hAlloc);
    7031  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    7032  // DedicatedAlloc cannot be defragmented.
    7033  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    7034  // Only HOST_VISIBLE memory types can be defragmented.
    7035  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
    7036  // Lost allocation cannot be defragmented.
    7037  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    7038  {
    7039  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    7040 
    7041  const VmaPool hAllocPool = hAlloc->GetPool();
    7042  // This allocation belongs to custom pool.
    7043  if(hAllocPool != VK_NULL_HANDLE)
    7044  {
    7045  pAllocBlockVector = &hAllocPool->GetBlockVector();
    7046  }
    7047  // This allocation belongs to general pool.
    7048  else
    7049  {
    7050  pAllocBlockVector = m_pBlockVectors[memTypeIndex][hAlloc->GetBlockVectorType()];
    7051  }
    7052 
    7053  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    7054 
    7055  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    7056  &pAllocationsChanged[allocIndex] : VMA_NULL;
    7057  pDefragmentator->AddAllocation(hAlloc, pChanged);
    7058  }
    7059  }
    7060 
    7061  VkResult result = VK_SUCCESS;
    7062 
    7063  // ======== Main processing.
    7064 
    7065  VkDeviceSize maxBytesToMove = SIZE_MAX;
    7066  uint32_t maxAllocationsToMove = UINT32_MAX;
    7067  if(pDefragmentationInfo != VMA_NULL)
    7068  {
    7069  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    7070  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    7071  }
    7072 
    7073  // Process standard memory.
    7074  for(uint32_t memTypeIndex = 0;
    7075  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    7076  ++memTypeIndex)
    7077  {
    7078  // Only HOST_VISIBLE memory types can be defragmented.
    7079  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7080  {
    7081  for(uint32_t blockVectorType = 0;
    7082  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
    7083  ++blockVectorType)
    7084  {
    7085  result = m_pBlockVectors[memTypeIndex][blockVectorType]->Defragment(
    7086  pDefragmentationStats,
    7087  maxBytesToMove,
    7088  maxAllocationsToMove);
    7089  }
    7090  }
    7091  }
    7092 
    7093  // Process custom pools.
    7094  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    7095  {
    7096  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
    7097  pDefragmentationStats,
    7098  maxBytesToMove,
    7099  maxAllocationsToMove);
    7100  }
    7101 
    7102  // ======== Destroy defragmentators.
    7103 
    7104  // Process custom pools.
    7105  for(size_t poolIndex = poolCount; poolIndex--; )
    7106  {
    7107  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
    7108  }
    7109 
    7110  // Process standard memory.
    7111  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    7112  {
    7113  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7114  {
    7115  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
    7116  {
    7117  m_pBlockVectors[memTypeIndex][blockVectorType]->DestroyDefragmentator();
    7118  }
    7119  }
    7120  }
    7121 
    7122  return result;
    7123 }
    7124 
    7125 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    7126 {
    7127  if(hAllocation->CanBecomeLost())
    7128  {
    7129  /*
    7130  Warning: This is a carefully designed algorithm.
    7131  Do not modify unless you really know what you're doing :)
    7132  */
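     // Lock-free loop: keep trying to atomically bump the allocation's
     // last-use frame index to the current frame until the exchange succeeds
     // or the allocation turns out to be lost.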
    7133  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    7134  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    7135  for(;;)
    7136  {
    7137  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    7138  {
    7139  pAllocationInfo->memoryType = UINT32_MAX;
    7140  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    7141  pAllocationInfo->offset = 0;
    7142  pAllocationInfo->size = hAllocation->GetSize();
    7143  pAllocationInfo->pMappedData = VMA_NULL;
    7144  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7145  return;
    7146  }
    7147  else if(localLastUseFrameIndex == localCurrFrameIndex)
    7148  {
    7149  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    7150  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    7151  pAllocationInfo->offset = hAllocation->GetOffset();
    7152  pAllocationInfo->size = hAllocation->GetSize();
    7153  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    7154  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7155  return;
    7156  }
    7157  else // Last use time earlier than current time.
    7158  {
    7159  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    7160  {
    7161  localLastUseFrameIndex = localCurrFrameIndex;
    7162  }
    7163  }
    7164  }
    7165  }
    7166  // We could reuse the code above, but for performance reasons we skip the hAllocation.LastUseFrameIndex atomics here - this allocation cannot become lost.
    7167  else
    7168  {
    7169  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    7170  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    7171  pAllocationInfo->offset = hAllocation->GetOffset();
    7172  pAllocationInfo->size = hAllocation->GetSize();
    7173  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    7174  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7175  }
    7176 }
    7177 
    7178 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    7179 {
    7180  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
    7181 
    7182  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    7183 
    7184  if(newCreateInfo.maxBlockCount == 0)
    7185  {
    7186  newCreateInfo.maxBlockCount = SIZE_MAX;
    7187  }
    7188  if(newCreateInfo.blockSize == 0)
    7189  {
    7190  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    7191  }
    7192 
    7193  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
    7194 
    7195  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    7196  if(res != VK_SUCCESS)
    7197  {
    7198  vma_delete(this, *pPool);
    7199  *pPool = VMA_NULL;
    7200  return res;
    7201  }
    7202 
    7203  // Add to m_Pools.
    7204  {
    7205  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7206  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    7207  }
    7208 
    7209  return VK_SUCCESS;
    7210 }
    7211 
    7212 void VmaAllocator_T::DestroyPool(VmaPool pool)
    7213 {
    7214  // Remove from m_Pools.
    7215  {
    7216  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7217  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    7218  VMA_ASSERT(success && "Pool not found in Allocator.");
    7219  }
    7220 
    7221  vma_delete(this, pool);
    7222 }
    7223 
    7224 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    7225 {
    7226  pool->m_BlockVector.GetPoolStats(pPoolStats);
    7227 }
    7228 
    7229 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    7230 {
    7231  m_CurrentFrameIndex.store(frameIndex);
    7232 }
    7233 
    7234 void VmaAllocator_T::MakePoolAllocationsLost(
    7235  VmaPool hPool,
    7236  size_t* pLostAllocationCount)
    7237 {
    7238  hPool->m_BlockVector.MakePoolAllocationsLost(
    7239  m_CurrentFrameIndex.load(),
    7240  pLostAllocationCount);
    7241 }
    7242 
    7243 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    7244 {
    7245  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST);
    7246  (*pAllocation)->InitLost();
    7247 }
    7248 
    7249 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    7250 {
    7251  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    7252 
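     // If a size limit is set for this heap, charge the allocation against the
     // remaining budget under a mutex and fail early when the budget is exhausted.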
    7253  VkResult res;
    7254  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7255  {
    7256  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7257  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    7258  {
    7259  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7260  if(res == VK_SUCCESS)
    7261  {
    7262  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    7263  }
    7264  }
    7265  else
    7266  {
    7267  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7268  }
    7269  }
    7270  else
    7271  {
    7272  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7273  }
    7274 
    7275  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    7276  {
    7277  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    7278  }
    7279 
    7280  return res;
    7281 }
    7282 
    7283 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    7284 {
    7285  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    7286  {
    7287  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    7288  }
    7289 
    7290  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    7291 
    7292  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    7293  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7294  {
    7295  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7296  m_HeapSizeLimit[heapIndex] += size;
    7297  }
    7298 }
    7299 
    7300 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    7301 {
    7302  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    7303 
    7304  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    7305  {
    7306  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7307  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex][allocation->GetBlockVectorType()];
    7308  VMA_ASSERT(pDedicatedAllocations);
    7309  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    7310  VMA_ASSERT(success);
    7311  }
    7312 
    7313  VkDeviceMemory hMemory = allocation->GetMemory();
    7314 
    7315  if(allocation->GetMappedData() != VMA_NULL)
    7316  {
    7317  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    7318  }
    7319 
    7320  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    7321 
    7322  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    7323 }
    7324 
    7325 #if VMA_STATS_STRING_ENABLED
    7326 
    7327 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    7328 {
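     // The "DedicatedAllocations" JSON object is opened lazily, only when the
     // first non-empty vector is found, so empty sections are omitted entirely.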
    7329  bool dedicatedAllocationsStarted = false;
    7330  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7331  {
    7332  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7333  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    7334  {
    7335  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex][blockVectorType];
    7336  VMA_ASSERT(pDedicatedAllocVector);
    7337  if(pDedicatedAllocVector->empty() == false)
    7338  {
    7339  if(dedicatedAllocationsStarted == false)
    7340  {
    7341  dedicatedAllocationsStarted = true;
    7342  json.WriteString("DedicatedAllocations");
    7343  json.BeginObject();
    7344  }
    7345 
    7346  json.BeginString("Type ");
    7347  json.ContinueString(memTypeIndex);
    7348  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
    7349  {
    7350  json.ContinueString(" Mapped");
    7351  }
    7352  json.EndString();
    7353 
    7354  json.BeginArray();
    7355 
    7356  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    7357  {
    7358  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    7359  json.BeginObject(true);
    7360 
    7361  json.WriteString("Size");
    7362  json.WriteNumber(hAlloc->GetSize());
    7363 
    7364  json.WriteString("Type");
    7365  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
    7366 
    7367  json.EndObject();
    7368  }
    7369 
    7370  json.EndArray();
    7371  }
    7372  }
    7373  }
    7374  if(dedicatedAllocationsStarted)
    7375  {
    7376  json.EndObject();
    7377  }
    7378 
    7379  {
    7380  bool allocationsStarted = false;
    7381  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7382  {
    7383  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
    7384  {
    7385  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
    7386  {
    7387  if(allocationsStarted == false)
    7388  {
    7389  allocationsStarted = true;
    7390  json.WriteString("DefaultPools");
    7391  json.BeginObject();
    7392  }
    7393 
    7394  json.BeginString("Type ");
    7395  json.ContinueString(memTypeIndex);
    7396  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
    7397  {
    7398  json.ContinueString(" Mapped");
    7399  }
    7400  json.EndString();
    7401 
    7402  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(json);
    7403  }
    7404  }
    7405  }
    7406  if(allocationsStarted)
    7407  {
    7408  json.EndObject();
    7409  }
    7410  }
    7411 
    7412  {
    7413  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7414  const size_t poolCount = m_Pools.size();
    7415  if(poolCount > 0)
    7416  {
    7417  json.WriteString("Pools");
    7418  json.BeginArray();
    7419  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    7420  {
    7421  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    7422  }
    7423  json.EndArray();
    7424  }
    7425  }
    7426 }
    7427 
    7428 #endif // #if VMA_STATS_STRING_ENABLED
    7429 
    7430 static VkResult AllocateMemoryForImage(
    7431  VmaAllocator allocator,
    7432  VkImage image,
    7433  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7434  VmaSuballocationType suballocType,
    7435  VmaAllocation* pAllocation)
    7436 {
    7437  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
    7438 
    7439  VkMemoryRequirements vkMemReq = {};
    7440  bool requiresDedicatedAllocation = false;
    7441  bool prefersDedicatedAllocation = false;
    7442  allocator->GetImageMemoryRequirements(image, vkMemReq,
    7443  requiresDedicatedAllocation, prefersDedicatedAllocation);
    7444 
    7445  return allocator->AllocateMemory(
    7446  vkMemReq,
    7447  requiresDedicatedAllocation,
    7448  prefersDedicatedAllocation,
    7449  VK_NULL_HANDLE, // dedicatedBuffer
    7450  image, // dedicatedImage
    7451  *pAllocationCreateInfo,
    7452  suballocType,
    7453  pAllocation);
    7454 }
    7455 
    7456 ////////////////////////////////////////////////////////////////////////////////
    7457 // Public interface
    7458 
    7459 VkResult vmaCreateAllocator(
    7460  const VmaAllocatorCreateInfo* pCreateInfo,
    7461  VmaAllocator* pAllocator)
    7462 {
    7463  VMA_ASSERT(pCreateInfo && pAllocator);
    7464  VMA_DEBUG_LOG("vmaCreateAllocator");
    7465  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    7466  return VK_SUCCESS;
    7467 }
    7468 
    7469 void vmaDestroyAllocator(
    7470  VmaAllocator allocator)
    7471 {
    7472  if(allocator != VK_NULL_HANDLE)
    7473  {
    7474  VMA_DEBUG_LOG("vmaDestroyAllocator");
    7475  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    7476  vma_delete(&allocationCallbacks, allocator);
    7477  }
    7478 }
    7479 
    7480 void vmaGetPhysicalDeviceProperties(
    7481  VmaAllocator allocator,
    7482  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    7483 {
    7484  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    7485  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    7486 }
    7487 
    7488 void vmaGetMemoryProperties(
    7489  VmaAllocator allocator,
    7490  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    7491 {
    7492  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    7493  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    7494 }
    7495 
    7496 void vmaGetMemoryTypeProperties(
    7497  VmaAllocator allocator,
    7498  uint32_t memoryTypeIndex,
    7499  VkMemoryPropertyFlags* pFlags)
    7500 {
    7501  VMA_ASSERT(allocator && pFlags);
    7502  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    7503  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    7504 }
    7505 
    7506 void vmaSetCurrentFrameIndex(
    7507  VmaAllocator allocator,
    7508  uint32_t frameIndex)
    7509 {
    7510  VMA_ASSERT(allocator);
    7511  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    7512 
    7513  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7514 
    7515  allocator->SetCurrentFrameIndex(frameIndex);
    7516 }
    7517 
    7518 void vmaCalculateStats(
    7519  VmaAllocator allocator,
    7520  VmaStats* pStats)
    7521 {
    7522  VMA_ASSERT(allocator && pStats);
    7523  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7524  allocator->CalculateStats(pStats);
    7525 }
    7526 
    7527 #if VMA_STATS_STRING_ENABLED
    7528 
    7529 void vmaBuildStatsString(
    7530  VmaAllocator allocator,
    7531  char** ppStatsString,
    7532  VkBool32 detailedMap)
    7533 {
    7534  VMA_ASSERT(allocator && ppStatsString);
    7535  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7536 
    7537  VmaStringBuilder sb(allocator);
    7538  {
    7539  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    7540  json.BeginObject();
    7541 
    7542  VmaStats stats;
    7543  allocator->CalculateStats(&stats);
    7544 
    7545  json.WriteString("Total");
    7546  VmaPrintStatInfo(json, stats.total);
    7547 
    7548  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    7549  {
    7550  json.BeginString("Heap ");
    7551  json.ContinueString(heapIndex);
    7552  json.EndString();
    7553  json.BeginObject();
    7554 
    7555  json.WriteString("Size");
    7556  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    7557 
    7558  json.WriteString("Flags");
    7559  json.BeginArray(true);
    7560  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    7561  {
    7562  json.WriteString("DEVICE_LOCAL");
    7563  }
    7564  json.EndArray();
    7565 
    7566  if(stats.memoryHeap[heapIndex].blockCount > 0)
    7567  {
    7568  json.WriteString("Stats");
    7569  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    7570  }
    7571 
    7572  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    7573  {
    7574  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    7575  {
    7576  json.BeginString("Type ");
    7577  json.ContinueString(typeIndex);
    7578  json.EndString();
    7579 
    7580  json.BeginObject();
    7581 
    7582  json.WriteString("Flags");
    7583  json.BeginArray(true);
    7584  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    7585  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    7586  {
    7587  json.WriteString("DEVICE_LOCAL");
    7588  }
    7589  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7590  {
    7591  json.WriteString("HOST_VISIBLE");
    7592  }
    7593  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    7594  {
    7595  json.WriteString("HOST_COHERENT");
    7596  }
    7597  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    7598  {
    7599  json.WriteString("HOST_CACHED");
    7600  }
    7601  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    7602  {
    7603  json.WriteString("LAZILY_ALLOCATED");
    7604  }
    7605  json.EndArray();
    7606 
    7607  if(stats.memoryType[typeIndex].blockCount > 0)
    7608  {
    7609  json.WriteString("Stats");
    7610  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    7611  }
    7612 
    7613  json.EndObject();
    7614  }
    7615  }
    7616 
    7617  json.EndObject();
    7618  }
    7619  if(detailedMap == VK_TRUE)
    7620  {
    7621  allocator->PrintDetailedMap(json);
    7622  }
    7623 
    7624  json.EndObject();
    7625  }
    7626 
    7627  const size_t len = sb.GetLength();
    7628  char* const pChars = vma_new_array(allocator, char, len + 1);
    7629  if(len > 0)
    7630  {
    7631  memcpy(pChars, sb.GetData(), len);
    7632  }
    7633  pChars[len] = '\0';
    7634  *ppStatsString = pChars;
    7635 }
    7636 
    7637 void vmaFreeStatsString(
    7638  VmaAllocator allocator,
    7639  char* pStatsString)
    7640 {
    7641  if(pStatsString != VMA_NULL)
    7642  {
    7643  VMA_ASSERT(allocator);
    7644  size_t len = strlen(pStatsString);
    7645  vma_delete_array(allocator, pStatsString, len + 1);
    7646  }
    7647 }
    7648 
    7649 #endif // #if VMA_STATS_STRING_ENABLED
    7650 
    7653 VkResult vmaFindMemoryTypeIndex(
    7654  VmaAllocator allocator,
    7655  uint32_t memoryTypeBits,
    7656  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7657  uint32_t* pMemoryTypeIndex)
    7658 {
    7659  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    7660  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    7661  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    7662 
    7663  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    7664  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    7665  if(preferredFlags == 0)
    7666  {
    7667  preferredFlags = requiredFlags;
    7668  }
    7669  // preferredFlags, if not 0, must be a superset of requiredFlags.
    7670  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
    7671 
    7672  // Convert usage to requiredFlags and preferredFlags.
    7673  switch(pAllocationCreateInfo->usage)
    7674  {
    7675  case VMA_MEMORY_USAGE_UNKNOWN:
    7676  break;
    7677  case VMA_MEMORY_USAGE_GPU_ONLY:
    7678  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    7679  break;
    7680  case VMA_MEMORY_USAGE_CPU_ONLY:
    7681  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    7682  break;
    7683  case VMA_MEMORY_USAGE_CPU_TO_GPU:
    7684  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    7685  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    7686  break;
    7687  case VMA_MEMORY_USAGE_GPU_TO_CPU:
    7688  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    7689  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    7690  break;
    7691  default:
    7692  break;
    7693  }
    7694 
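     // Among memory types allowed by memoryTypeBits that contain all requiredFlags,
     // pick the one missing the fewest preferredFlags bits; a full match (cost 0)
     // returns immediately.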
    7695  *pMemoryTypeIndex = UINT32_MAX;
    7696  uint32_t minCost = UINT32_MAX;
    7697  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    7698  memTypeIndex < allocator->GetMemoryTypeCount();
    7699  ++memTypeIndex, memTypeBit <<= 1)
    7700  {
    7701  // This memory type is acceptable according to memoryTypeBits bitmask.
    7702  if((memTypeBit & memoryTypeBits) != 0)
    7703  {
    7704  const VkMemoryPropertyFlags currFlags =
    7705  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    7706  // This memory type contains requiredFlags.
    7707  if((requiredFlags & ~currFlags) == 0)
    7708  {
    7709  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    7710  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
    7711  // Remember memory type with lowest cost.
    7712  if(currCost < minCost)
    7713  {
    7714  *pMemoryTypeIndex = memTypeIndex;
    7715  if(currCost == 0)
    7716  {
    7717  return VK_SUCCESS;
    7718  }
    7719  minCost = currCost;
    7720  }
    7721  }
    7722  }
    7723  }
    7724  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    7725 }
    7726 
    7727 VkResult vmaCreatePool(
    7728  VmaAllocator allocator,
    7729  const VmaPoolCreateInfo* pCreateInfo,
    7730  VmaPool* pPool)
    7731 {
    7732  VMA_ASSERT(allocator && pCreateInfo && pPool);
    7733 
    7734  VMA_DEBUG_LOG("vmaCreatePool");
    7735 
    7736  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7737 
    7738  return allocator->CreatePool(pCreateInfo, pPool);
    7739 }
    7740 
    7741 void vmaDestroyPool(
    7742  VmaAllocator allocator,
    7743  VmaPool pool)
    7744 {
    7745  VMA_ASSERT(allocator && pool);
    7746 
    7747  VMA_DEBUG_LOG("vmaDestroyPool");
    7748 
    7749  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7750 
    7751  allocator->DestroyPool(pool);
    7752 }
    7753 
    7754 void vmaGetPoolStats(
    7755  VmaAllocator allocator,
    7756  VmaPool pool,
    7757  VmaPoolStats* pPoolStats)
    7758 {
    7759  VMA_ASSERT(allocator && pool && pPoolStats);
    7760 
    7761  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7762 
    7763  allocator->GetPoolStats(pool, pPoolStats);
    7764 }
    7765 
    7766 void vmaMakePoolAllocationsLost(
    7767  VmaAllocator allocator,
    7768  VmaPool pool,
    7769  size_t* pLostAllocationCount)
    7770 {
    7771  VMA_ASSERT(allocator && pool);
    7772 
    7773  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7774 
    7775  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    7776 }
    7777 
    7778 VkResult vmaAllocateMemory(
    7779  VmaAllocator allocator,
    7780  const VkMemoryRequirements* pVkMemoryRequirements,
    7781  const VmaAllocationCreateInfo* pCreateInfo,
    7782  VmaAllocation* pAllocation,
    7783  VmaAllocationInfo* pAllocationInfo)
    7784 {
    7785  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    7786 
    7787  VMA_DEBUG_LOG("vmaAllocateMemory");
    7788 
    7789  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7790 
    7791  VkResult result = allocator->AllocateMemory(
    7792  *pVkMemoryRequirements,
    7793  false, // requiresDedicatedAllocation
    7794  false, // prefersDedicatedAllocation
    7795  VK_NULL_HANDLE, // dedicatedBuffer
    7796  VK_NULL_HANDLE, // dedicatedImage
    7797  *pCreateInfo,
    7798  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    7799  pAllocation);
    7800 
    7801  if(pAllocationInfo && result == VK_SUCCESS)
    7802  {
    7803  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7804  }
    7805 
    7806  return result;
    7807 }
    7808 
    7809 VkResult vmaAllocateMemoryForBuffer(
    7810  VmaAllocator allocator,
    7811  VkBuffer buffer,
    7812  const VmaAllocationCreateInfo* pCreateInfo,
    7813  VmaAllocation* pAllocation,
    7814  VmaAllocationInfo* pAllocationInfo)
    7815 {
    7816  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    7817 
    7818  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    7819 
    7820  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7821 
    7822  VkMemoryRequirements vkMemReq = {};
    7823  bool requiresDedicatedAllocation = false;
    7824  bool prefersDedicatedAllocation = false;
    7825  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    7826  requiresDedicatedAllocation,
    7827  prefersDedicatedAllocation);
    7828 
    7829  VkResult result = allocator->AllocateMemory(
    7830  vkMemReq,
    7831  requiresDedicatedAllocation,
    7832  prefersDedicatedAllocation,
    7833  buffer, // dedicatedBuffer
    7834  VK_NULL_HANDLE, // dedicatedImage
    7835  *pCreateInfo,
    7836  VMA_SUBALLOCATION_TYPE_BUFFER,
    7837  pAllocation);
    7838 
    7839  if(pAllocationInfo && result == VK_SUCCESS)
    7840  {
    7841  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7842  }
    7843 
    7844  return result;
    7845 }
    7846 
    7847 VkResult vmaAllocateMemoryForImage(
    7848  VmaAllocator allocator,
    7849  VkImage image,
    7850  const VmaAllocationCreateInfo* pCreateInfo,
    7851  VmaAllocation* pAllocation,
    7852  VmaAllocationInfo* pAllocationInfo)
    7853 {
    7854  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    7855 
    7856  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    7857 
    7858  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7859 
    7860  VkResult result = AllocateMemoryForImage(
    7861  allocator,
    7862  image,
    7863  pCreateInfo,
    7864  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    7865  pAllocation);
    7866 
    7867  if(pAllocationInfo && result == VK_SUCCESS)
    7868  {
    7869  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    7870  }
    7871 
    7872  return result;
    7873 }
    7874 
    7875 void vmaFreeMemory(
    7876  VmaAllocator allocator,
    7877  VmaAllocation allocation)
    7878 {
    7879  VMA_ASSERT(allocator && allocation);
    7880 
    7881  VMA_DEBUG_LOG("vmaFreeMemory");
    7882 
    7883  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7884 
    7885  allocator->FreeMemory(allocation);
    7886 }
    7887 
    7888 void vmaGetAllocationInfo(
    7889  VmaAllocator allocator,
    7890  VmaAllocation allocation,
    7891  VmaAllocationInfo* pAllocationInfo)
    7892 {
    7893  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    7894 
    7895  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7896 
    7897  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    7898 }
    7899 
    7900 void vmaSetAllocationUserData(
    7901  VmaAllocator allocator,
    7902  VmaAllocation allocation,
    7903  void* pUserData)
    7904 {
    7905  VMA_ASSERT(allocator && allocation);
    7906 
    7907  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7908 
    7909  allocation->SetUserData(pUserData);
    7910 }
    7911 
    7912 void vmaCreateLostAllocation(
    7913  VmaAllocator allocator,
    7914  VmaAllocation* pAllocation)
    7915 {
    7916  VMA_ASSERT(allocator && pAllocation);
    7917 
    7918  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7919 
    7920  allocator->CreateLostAllocation(pAllocation);
    7921 }
    7922 
    7923 VkResult vmaMapMemory(
    7924  VmaAllocator allocator,
    7925  VmaAllocation allocation,
    7926  void** ppData)
    7927 {
    7928  VMA_ASSERT(allocator && allocation && ppData);
    7929 
    7930  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7931 
    7932  return (*allocator->GetVulkanFunctions().vkMapMemory)(
    7933  allocator->m_hDevice,
    7934  allocation->GetMemory(),
    7935  allocation->GetOffset(),
    7936  allocation->GetSize(),
    7937  0,
    7938  ppData);
    7939 }
    7940 
    7941 void vmaUnmapMemory(
    7942  VmaAllocator allocator,
    7943  VmaAllocation allocation)
    7944 {
    7945  VMA_ASSERT(allocator && allocation);
    7946 
    7947  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7948 
    7949  (*allocator->GetVulkanFunctions().vkUnmapMemory)(allocator->m_hDevice, allocation->GetMemory());
    7950 }
    7951 
    7952 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
    7953 {
    7954  VMA_ASSERT(allocator);
    7955 
    7956  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7957 
    7958  allocator->UnmapPersistentlyMappedMemory();
    7959 }
    7960 
    7961 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
    7962 {
    7963  VMA_ASSERT(allocator);
    7964 
    7965  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7966 
    7967  return allocator->MapPersistentlyMappedMemory();
    7968 }
    7969 
    7970 VkResult vmaDefragment(
    7971  VmaAllocator allocator,
    7972  VmaAllocation* pAllocations,
    7973  size_t allocationCount,
    7974  VkBool32* pAllocationsChanged,
    7975  const VmaDefragmentationInfo *pDefragmentationInfo,
    7976  VmaDefragmentationStats* pDefragmentationStats)
    7977 {
    7978  VMA_ASSERT(allocator && pAllocations);
    7979 
    7980  VMA_DEBUG_LOG("vmaDefragment");
    7981 
    7982  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    7983 
    7984  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    7985 }
    7986 
    7987 VkResult vmaCreateBuffer(
    7988  VmaAllocator allocator,
    7989  const VkBufferCreateInfo* pBufferCreateInfo,
    7990  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    7991  VkBuffer* pBuffer,
    7992  VmaAllocation* pAllocation,
    7993  VmaAllocationInfo* pAllocationInfo)
    7994 {
    7995  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    7996 
    7997  VMA_DEBUG_LOG("vmaCreateBuffer");
    7998 
    7999  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8000 
    8001  *pBuffer = VK_NULL_HANDLE;
    8002  *pAllocation = VK_NULL_HANDLE;
    8003 
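     // Each step below rolls back the previous ones on failure: a failed bind
     // frees the allocation, a failed allocation destroys the buffer.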
    8004  // 1. Create VkBuffer.
    8005  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    8006  allocator->m_hDevice,
    8007  pBufferCreateInfo,
    8008  allocator->GetAllocationCallbacks(),
    8009  pBuffer);
    8010  if(res >= 0)
    8011  {
    8012  // 2. vkGetBufferMemoryRequirements.
    8013  VkMemoryRequirements vkMemReq = {};
    8014  bool requiresDedicatedAllocation = false;
    8015  bool prefersDedicatedAllocation = false;
    8016  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    8017  requiresDedicatedAllocation, prefersDedicatedAllocation);
    8018 
    8019  // 3. Allocate memory using allocator.
    8020  res = allocator->AllocateMemory(
    8021  vkMemReq,
    8022  requiresDedicatedAllocation,
    8023  prefersDedicatedAllocation,
    8024  *pBuffer, // dedicatedBuffer
    8025  VK_NULL_HANDLE, // dedicatedImage
    8026  *pAllocationCreateInfo,
    8027  VMA_SUBALLOCATION_TYPE_BUFFER,
    8028  pAllocation);
    8029  if(res >= 0)
    8030  {
    8031  // 4. Bind buffer with memory.
    8032  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
    8033  allocator->m_hDevice,
    8034  *pBuffer,
    8035  (*pAllocation)->GetMemory(),
    8036  (*pAllocation)->GetOffset());
    8037  if(res >= 0)
    8038  {
    8039  // All steps succeeded.
    8040  if(pAllocationInfo != VMA_NULL)
    8041  {
    8042  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8043  }
    8044  return VK_SUCCESS;
    8045  }
    8046  allocator->FreeMemory(*pAllocation);
    8047  *pAllocation = VK_NULL_HANDLE;
    8048  return res;
    8049  }
    8050  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    8051  *pBuffer = VK_NULL_HANDLE;
    8052  return res;
    8053  }
    8054  return res;
    8055 }
    8056 
    8057 void vmaDestroyBuffer(
    8058  VmaAllocator allocator,
    8059  VkBuffer buffer,
    8060  VmaAllocation allocation)
    8061 {
    8062  if(buffer != VK_NULL_HANDLE)
    8063  {
    8064  VMA_ASSERT(allocator);
    8065 
    8066  VMA_DEBUG_LOG("vmaDestroyBuffer");
    8067 
    8068  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8069 
    8070  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    8071 
    8072  allocator->FreeMemory(allocation);
    8073  }
    8074 }
    8075 
    8076 VkResult vmaCreateImage(
    8077  VmaAllocator allocator,
    8078  const VkImageCreateInfo* pImageCreateInfo,
    8079  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8080  VkImage* pImage,
    8081  VmaAllocation* pAllocation,
    8082  VmaAllocationInfo* pAllocationInfo)
    8083 {
    8084  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    8085 
    8086  VMA_DEBUG_LOG("vmaCreateImage");
    8087 
    8088  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8089 
    8090  *pImage = VK_NULL_HANDLE;
    8091  *pAllocation = VK_NULL_HANDLE;
    8092 
    8093  // 1. Create VkImage.
    8094  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    8095  allocator->m_hDevice,
    8096  pImageCreateInfo,
    8097  allocator->GetAllocationCallbacks(),
    8098  pImage);
    8099  if(res >= 0)
    8100  {
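     // Optimal-tiling images get a separate suballocation type so they can be
     // kept apart from linear resources, as required by bufferImageGranularity.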
    8101  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    8102  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    8103  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    8104 
    8105  // 2. Allocate memory using allocator.
    8106  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
    8107  if(res >= 0)
    8108  {
    8109  // 3. Bind image with memory.
    8110  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
    8111  allocator->m_hDevice,
    8112  *pImage,
    8113  (*pAllocation)->GetMemory(),
    8114  (*pAllocation)->GetOffset());
    8115  if(res >= 0)
    8116  {
    8117  // All steps succeeded.
    8118  if(pAllocationInfo != VMA_NULL)
    8119  {
    8120  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8121  }
    8122  return VK_SUCCESS;
    8123  }
    8124  allocator->FreeMemory(*pAllocation);
    8125  *pAllocation = VK_NULL_HANDLE;
    8126  return res;
    8127  }
    8128  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    8129  *pImage = VK_NULL_HANDLE;
    8130  return res;
    8131  }
    8132  return res;
    8133 }
    8134 
    8135 void vmaDestroyImage(
    8136  VmaAllocator allocator,
    8137  VkImage image,
    8138  VmaAllocation allocation)
    8139 {
    8140  if(image != VK_NULL_HANDLE)
    8141  {
    8142  VMA_ASSERT(allocator);
    8143 
    8144  VMA_DEBUG_LOG("vmaDestroyImage");
    8145 
    8146  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8147 
    8148  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    8149 
    8150  allocator->FreeMemory(allocation);
    8151  }
    8152 }
    8153 
    8154 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:551
    +
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:768
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    -
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:511
    +
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:576
    VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
    Compacts memory by moving allocations.
    -
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:496
    +
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:561
    void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
    struct VmaStats VmaStats
    General statistics from current state of Allocator.
    -
    Memory will be used on device only, so faster access from the device is preferred. No need to be mappable on host.
    Definition: vk_mem_alloc.h:677
    -
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:490
    -
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:962
    -
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:508
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:1115
    -
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:832
    +
    Memory will be used on device only, so faster access from the device is preferred. No need to be mappable on host.
    Definition: vk_mem_alloc.h:742
    +
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:555
    +
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:1027
    +
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:573
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:1180
    +
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:897
    void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
    Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
    -
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:886
    -
    Definition: vk_mem_alloc.h:741
    -
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:479
    -
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:774
    -
    Definition: vk_mem_alloc.h:687
    -
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks.
    Definition: vk_mem_alloc.h:523
    +
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:951
    +
    Definition: vk_mem_alloc.h:806
    +
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:544
    +
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:839
    +
    Definition: vk_mem_alloc.h:752
    +
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks.
    Definition: vk_mem_alloc.h:588
    void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
    Retrieves statistics from current state of the Allocator.
    -
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:570
    -
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:505
    -
    VkDeviceSize preferredSmallHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from small heaps <= 512 MB...
    Definition: vk_mem_alloc.h:520
    +
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:635
    +
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:570
    +
    VkDeviceSize preferredSmallHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from small heaps <= 512 MB...
    Definition: vk_mem_alloc.h:585
    void vmaDestroyAllocator(VmaAllocator allocator)
    Destroys allocator object.
    -
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:691
    +
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:756
    void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
    Returns current information about specified allocation.
    -
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:635
    -
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:493
    -
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:634
    -
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
    Definition: vk_mem_alloc.h:501
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:1119
    +
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:700
    +
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:558
    +
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:699
    +
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
    Definition: vk_mem_alloc.h:566
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:1184
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:540
    -
    VmaStatInfo total
    Definition: vk_mem_alloc.h:644
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:1127
    -
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:757
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:1110
    -
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:494
    -
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:415
    -
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:514
    -
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:840
    -
    Definition: vk_mem_alloc.h:834
    -
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:972
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:605
    +
    VmaStatInfo total
    Definition: vk_mem_alloc.h:709
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:1192
    +
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:822
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:1175
    +
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:559
    +
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:480
    +
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:579
    +
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:905
    +
    Definition: vk_mem_alloc.h:899
    +
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:1037
    void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
    Given Memory Type Index, returns Property Flags of this memory type.
    -
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:491
    -
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:776
    -
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:856
    -
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:892
    +
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:556
    +
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:841
    +
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:921
    +
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:957
    struct VmaVulkanFunctions VmaVulkanFunctions
    Pointers to some Vulkan functions - a subset used by the library.
    -
    Definition: vk_mem_alloc.h:477
    -
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:843
    +
    Definition: vk_mem_alloc.h:542
    +
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:908
    VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    -
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:672
    +
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:737
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    -
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:1105
    +
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:1170
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:1123
    -
    Memory will be used for frequent (dynamic) updates from host and reads on device (upload).
    Definition: vk_mem_alloc.h:683
    -
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:492
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:1188
    +
    Memory will be used for frequent (dynamic) updates from host and reads on device (upload).
    Definition: vk_mem_alloc.h:748
    +
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:557
    void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
    Retrieves statistics of existing VmaPool object.
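A minimal sketch of inspecting a pool, assuming pool was created earlier with vmaCreatePool():
VmaPoolStats poolStats = {};
vmaGetPoolStats(allocator, pool, &poolStats);
printf("Allocations: %zu, unused bytes: %llu, largest free range: %llu\n",
    poolStats.allocationCount,
    (unsigned long long)poolStats.unusedSize,
    (unsigned long long)poolStats.unusedRangeSizeMax);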
    struct VmaDefragmentationInfo VmaDefragmentationInfo
    Optional configuration parameters to be passed to function vmaDefragment().
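A hedged sketch of a defragmentation call. The maxBytesToMove and maxAllocationsToMove member names and the exact vmaDefragment() signature are assumptions to be verified against vk_mem_alloc.h; allocations and allocationsChanged are arrays you own:
VmaDefragmentationInfo defragInfo = {};
defragInfo.maxBytesToMove = 64ull * 1024 * 1024; // Assumed member: limit data moved to 64 MiB.
defragInfo.maxAllocationsToMove = 100;           // Assumed member.
VmaDefragmentationStats defragStats = {};
VkResult res = vmaDefragment(allocator, allocations, allocationCount,
    allocationsChanged, &defragInfo, &defragStats);
// allocationsChanged[i] == VK_TRUE means allocations[i] was moved, so any
// buffer or image bound to it must be recreated.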
    -
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:640
    -
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:421
    +
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:705
    +
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:486
    void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in the given allocation to a new value.
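For example, pUserData can point to your own metadata object. A minimal sketch; MyMetadata is a hypothetical user-defined type:
MyMetadata* meta = new MyMetadata();
vmaSetAllocationUserData(allocator, allocation, meta);
// Later readable through VmaAllocationInfo::pUserData - remember to delete it
// before destroying the allocation.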
    VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
    Allocates Vulkan device memory and creates VmaPool object.
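A minimal sketch of the create/destroy pair, using only the members listed on this page (memTypeIndex as found e.g. by vmaFindMemoryTypeIndex()):
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 128ull * 1024 * 1024; // One 128 MiB VkDeviceMemory block.
poolCreateInfo.minBlockCount = 1; // Keep at least one block allocated.
poolCreateInfo.maxBlockCount = 4; // Never grow beyond four blocks.
VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... create allocations with VmaAllocationCreateInfo::pool = pool ...
vmaDestroyPool(allocator, pool);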
    -
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:442
    +
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:507
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    -
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:447
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:1125
    +
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:512
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:1190
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates a new allocation that is in lost state from the beginning.
    -
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:768
    -
    VkDeviceSize unusedRangeSizeMax
Size of the largest contiguous free memory region.
    Definition: vk_mem_alloc.h:902
    +
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:833
    +
    VkDeviceSize unusedRangeSizeMax
Size of the largest contiguous free memory region.
    Definition: vk_mem_alloc.h:967
    void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as a string in JSON format.
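A minimal sketch of dumping statistics; the string is assumed to be released with the matching vmaFreeStatsString():
char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);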
    -
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:487
    -
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:623
    -
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
    Definition: vk_mem_alloc.h:851
    -
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:434
    +
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:552
    +
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:688
    +
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
    Definition: vk_mem_alloc.h:916
    +
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:499
    VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
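A minimal sketch of the typical create/destroy pair; buffer parameters are illustrative and VMA_MEMORY_USAGE_GPU_ONLY is assumed to be available in this version of the enum:
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, nullptr); // Pass a VmaAllocationInfo* instead of nullptr if needed.
// ...
vmaDestroyBuffer(allocator, buffer, allocation);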
    -
    Definition: vk_mem_alloc.h:748
    -
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:636
    -
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:438
    -
    VmaPoolCreateFlags flags
Use a combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:846
    -
    Memory will be used for frequent writing on device and readback on host (download).
    Definition: vk_mem_alloc.h:686
    +
    Definition: vk_mem_alloc.h:813
    +
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:701
    +
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:503
    +
    VmaPoolCreateFlags flags
Use a combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:911
    +
    Memory will be used for frequent writing on device and readback on host (download).
    Definition: vk_mem_alloc.h:751
    struct VmaPoolStats VmaPoolStats
Describes parameters of an existing VmaPool.
    VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaCreateBuffer().
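A corresponding sketch for images, reusing allocCreateInfo from the buffer example above; image parameters are illustrative:
VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.extent = { 1024, 1024, 1 };
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
VkImage image;
VmaAllocation allocation;
VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
    &image, &allocation, nullptr);
// ...
vmaDestroyImage(allocator, image, allocation);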
    -
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:763
    -
    Definition: vk_mem_alloc.h:754
    -
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:626
    -
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:489
    -
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool.
    Definition: vk_mem_alloc.h:864
    -
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory.
    Definition: vk_mem_alloc.h:526
    -
    size_t unusedRangeCount
Number of contiguous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:895
    +
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:828
    +
    Definition: vk_mem_alloc.h:819
    +
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:691
    +
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:554
    +
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool.
    Definition: vk_mem_alloc.h:929
    +
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory.
    Definition: vk_mem_alloc.h:591
    +
    size_t unusedRangeCount
Number of contiguous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:960
    VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
    Maps back persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
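These two functions are meant to be used as a pair. A hedged sketch - exactly when the unmap/map cycle is required is platform-specific, so treat this as an illustration only:
vmaUnmapPersistentlyMappedMemory(allocator);
// ... perform work that requires HOST_COHERENT + DEVICE_LOCAL memory
// to be temporarily unmapped ...
VkResult res = vmaMapPersistentlyMappedMemory(allocator); // Map it back.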
    -
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:752
    -
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:781
    +
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:817
    +
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:846
    void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
    -
    const VkDeviceSize * pHeapSizeLimit
    Either NULL or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:558
    -
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:642
    -
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:635
    -
    Definition: vk_mem_alloc.h:814
    -
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:498
    -
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:436
    -
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:497
    +
    const VkDeviceSize * pHeapSizeLimit
    Either NULL or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:623
    +
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:707
    +
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:700
    +
    Definition: vk_mem_alloc.h:879
    +
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:563
    +
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:501
    +
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:562
    VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
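A minimal sketch of a temporary map of a host-visible allocation. The matching vmaUnmapMemory(allocator, allocation) is assumed to exist in this version of the header; srcData/srcDataSize stand for your CPU-side data:
void* mappedData = nullptr;
VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
if(res == VK_SUCCESS)
{
    memcpy(mappedData, srcData, srcDataSize);
    vmaUnmapMemory(allocator, allocation);
}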
    -
    uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as the current frame.
    Definition: vk_mem_alloc.h:878
    +
    uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as the current frame.
    Definition: vk_mem_alloc.h:943
    VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaAllocateMemoryForBuffer().
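A sketch of allocating and binding memory for an image created directly with vkCreateImage(). VmaAllocationInfo::deviceMemory is assumed from the description of offset below:
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
VmaAllocation allocation;
VmaAllocationInfo allocInfo = {};
VkResult res = vmaAllocateMemoryForImage(allocator, image, &allocCreateInfo,
    &allocation, &allocInfo);
if(res == VK_SUCCESS)
    vkBindImageMemory(device, image, allocInfo.deviceMemory, allocInfo.offset);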
    struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
    -
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:983
    -
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps. ...
    Definition: vk_mem_alloc.h:517
    -
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:635
    -
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:632
    +
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:1048
    +
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps. ...
    Definition: vk_mem_alloc.h:582
    +
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:700
    +
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:697
    struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
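A sketch of installing informative callbacks. PFN_vmaAllocateDeviceMemoryFunction is assumed to take the same parameters as PFN_vmaFreeDeviceMemoryFunction shown above:
VKAPI_ATTR void VKAPI_CALL MyAllocateCallback(VmaAllocator allocator,
    uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
{
    printf("Allocated %llu bytes from memory type %u\n", (unsigned long long)size, memoryType);
}
VKAPI_ATTR void VKAPI_CALL MyFreeCallback(VmaAllocator allocator,
    uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
{
    printf("Freeing %llu bytes from memory type %u\n", (unsigned long long)size, memoryType);
}
VmaDeviceMemoryCallbacks deviceMemoryCallbacks = {};
deviceMemoryCallbacks.pfnAllocate = &MyAllocateCallback;
deviceMemoryCallbacks.pfnFree = &MyFreeCallback;
// Then set allocatorInfo.pDeviceMemoryCallbacks = &deviceMemoryCallbacks;
// before calling vmaCreateAllocator().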
    -
Describes parameters of an existing VmaPool.
    Definition: vk_mem_alloc.h:883
    -
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:967
    -
    Definition: vk_mem_alloc.h:750
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:1121
    -
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:485
    +
Describes parameters of an existing VmaPool.
    Definition: vk_mem_alloc.h:948
    +
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:1032
    +
    Definition: vk_mem_alloc.h:815
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:1186
    +
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:550
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates an Allocator object.
    -
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
    Definition: vk_mem_alloc.h:500
    -
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:630
    -
    No intended memory usage specified. Use other members of VmaAllocationCreateInfo to specify your requ...
    Definition: vk_mem_alloc.h:675
    -
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:836
    +
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
    Definition: vk_mem_alloc.h:565
    +
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:695
    +
    No intended memory usage specified. Use other members of VmaAllocationCreateInfo to specify your requ...
    Definition: vk_mem_alloc.h:740
    +
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:901
    void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    -
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:628
    -
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:495
    -
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:499
    -
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:714
    -
    Memory will be mapped on host. Could be used for transfer to/from device.
    Definition: vk_mem_alloc.h:680
    -
    void * pMappedData
Pointer to the beginning of this allocation as mapped data. Null if this allocation is not persistent...
    Definition: vk_mem_alloc.h:978
    +
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:693
    +
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:560
    +
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:564
    +
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:779
    +
    Memory will be mapped on host. Could be used for transfer to/from device.
    Definition: vk_mem_alloc.h:745
    +
    void * pMappedData
Pointer to the beginning of this allocation as mapped data. Null if this allocation is not persistent...
    Definition: vk_mem_alloc.h:1043
    void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
    Destroys Vulkan image and frees allocated memory.
    -
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:475
    +
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:540
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    -
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:488
    -
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:948
    +
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:553
    +
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:1013
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General-purpose memory allocation.
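A sketch of this general-purpose path for a buffer created directly with vkCreateBuffer(); VmaAllocationInfo::deviceMemory is assumed as above:
VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buffer, &memReq);
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
VmaAllocation allocation;
VmaAllocationInfo allocInfo = {};
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo,
    &allocation, &allocInfo);
if(res == VK_SUCCESS)
    vkBindBufferMemory(device, buffer, allocInfo.deviceMemory, allocInfo.offset);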
    -
Set this flag to use memory that will be persistently mapped and retrieve a pointer to it...
    Definition: vk_mem_alloc.h:730
    +
Set this flag to use memory that will be persistently mapped and retrieve a pointer to it...
    Definition: vk_mem_alloc.h:795
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
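This matters mainly for lost allocations, together with VmaPoolCreateInfo::frameInUseCount. A minimal sketch of a main loop:
uint32_t frameIndex = 0;
for(;;) // Main loop.
{
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    // ... record and submit commands for this frame ...
    ++frameIndex;
}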
    struct VmaAllocationCreateInfo VmaAllocationCreateInfo
    VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:805
    -
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:636
    -
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:643
    +
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:870
    +
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:701
    +
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:708
    void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
    Destroys Vulkan buffer and frees allocated memory.
    -
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:889
    -
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:636
    +
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:954
    +
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:701
    void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
    Unmaps persistently mapped memory of types that are HOST_COHERENT and DEVICE_LOCAL.
    -
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:953
    +
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:1018