diff --git a/docs/html/globals.html b/docs/html/globals.html
index 7f943ab..95f9354 100644
--- a/docs/html/globals.html
+++ b/docs/html/globals.html
@@ -267,7 +267,7 @@ $(function() {
 : vk_mem_alloc.h
   • vmaTouchAllocation()
-: vk_mem_alloc.h
+: vk_mem_alloc.h
   • vmaUnmapMemory()
 : vk_mem_alloc.h

diff --git a/docs/html/globals_func.html b/docs/html/globals_func.html
index b97dc86..c7268fa 100644
--- a/docs/html/globals_func.html
+++ b/docs/html/globals_func.html
@@ -149,7 +149,7 @@ $(function() {
 : vk_mem_alloc.h
   • vmaTouchAllocation()
-: vk_mem_alloc.h
+: vk_mem_alloc.h
   • vmaUnmapMemory()
 : vk_mem_alloc.h

diff --git a/docs/html/search/all_e.js b/docs/html/search/all_e.js
index 98c5060..ba33b86 100644
--- a/docs/html/search/all_e.js
+++ b/docs/html/search/all_e.js
@@ -84,7 +84,7 @@ var searchData=
   ['vmasetcurrentframeindex',['vmaSetCurrentFrameIndex',['../vk__mem__alloc_8h.html#ade56bf8dc9f5a5eaddf5f119ed525236',1,'vk_mem_alloc.h']]],
   ['vmastatinfo',['VmaStatInfo',['../struct_vma_stat_info.html',1,'VmaStatInfo'],['../vk__mem__alloc_8h.html#a810b009a788ee8aac72a25b42ffbe31c',1,'VmaStatInfo(): vk_mem_alloc.h']]],
   ['vmastats',['VmaStats',['../struct_vma_stats.html',1,'VmaStats'],['../vk__mem__alloc_8h.html#a732be855fb4a7c248e6853d928a729af',1,'VmaStats(): vk_mem_alloc.h']]],
-  ['vmatouchallocation',['vmaTouchAllocation',['../vk__mem__alloc_8h.html#a108cb6dcb9ad32b81f0d61c08d1b4323',1,'vk_mem_alloc.h']]],
+  ['vmatouchallocation',['vmaTouchAllocation',['../vk__mem__alloc_8h.html#a43d8ba9673c846f049089a5029d5c73a',1,'vk_mem_alloc.h']]],
   ['vmaunmapmemory',['vmaUnmapMemory',['../vk__mem__alloc_8h.html#a9bc268595cb33f6ec4d519cfce81ff45',1,'vk_mem_alloc.h']]],
   ['vmavulkanfunctions',['VmaVulkanFunctions',['../struct_vma_vulkan_functions.html',1,'VmaVulkanFunctions'],['../vk__mem__alloc_8h.html#a97064a1a271b0061ebfc3a079862d0c5',1,'VmaVulkanFunctions(): vk_mem_alloc.h']]]
 ];

diff --git a/docs/html/search/functions_0.js b/docs/html/search/functions_0.js
index cce33d7..e918fc3 100644
--- a/docs/html/search/functions_0.js
+++ b/docs/html/search/functions_0.js
@@ -29,6 +29,6 @@ var searchData=
   ['vmamapmemory',['vmaMapMemory',['../vk__mem__alloc_8h.html#ad5bd1243512d099706de88168992f069',1,'vk_mem_alloc.h']]],
   ['vmasetallocationuserdata',['vmaSetAllocationUserData',['../vk__mem__alloc_8h.html#af9147d31ffc11d62fc187bde283ed14f',1,'vk_mem_alloc.h']]],
   ['vmasetcurrentframeindex',['vmaSetCurrentFrameIndex',['../vk__mem__alloc_8h.html#ade56bf8dc9f5a5eaddf5f119ed525236',1,'vk_mem_alloc.h']]],
-  ['vmatouchallocation',['vmaTouchAllocation',['../vk__mem__alloc_8h.html#a108cb6dcb9ad32b81f0d61c08d1b4323',1,'vk_mem_alloc.h']]],
+  ['vmatouchallocation',['vmaTouchAllocation',['../vk__mem__alloc_8h.html#a43d8ba9673c846f049089a5029d5c73a',1,'vk_mem_alloc.h']]],
   ['vmaunmapmemory',['vmaUnmapMemory',['../vk__mem__alloc_8h.html#a9bc268595cb33f6ec4d519cfce81ff45',1,'vk_mem_alloc.h']]]
 ];

diff --git a/docs/html/vk__mem__alloc_8h.html b/docs/html/vk__mem__alloc_8h.html
index ce34209..a7b3265 100644
--- a/docs/html/vk__mem__alloc_8h.html
+++ b/docs/html/vk__mem__alloc_8h.html
@@ -269,9 +269,9 @@ Functions
 void vmaGetAllocationInfo (VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
  Returns current information about specified allocation. More...
-bool vmaTouchAllocation (VmaAllocator allocator, VmaAllocation allocation)
- TODO finish documentation... More...
+VkBool32 vmaTouchAllocation (VmaAllocator allocator, VmaAllocation allocation)
+ TODO finish documentation... More...
 void vmaSetAllocationUserData (VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
  Sets pUserData in given allocation to new value. More...
@@ -1975,14 +1975,14 @@ Functions
-◆ vmaTouchAllocation()
+◆ vmaTouchAllocation()

diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html
index dc19ca4..bdec8ad 100644
--- a/docs/html/vk__mem__alloc_8h_source.html
+++ b/docs/html/vk__mem__alloc_8h_source.html
@@ -62,7 +62,7 @@ $(function() {
    vk_mem_alloc.h
Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    826 #include <vulkan/vulkan.h>
    827 
    828 VK_DEFINE_HANDLE(VmaAllocator)
    829 
    830 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    832  VmaAllocator allocator,
    833  uint32_t memoryType,
    834  VkDeviceMemory memory,
    835  VkDeviceSize size);
    837 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    838  VmaAllocator allocator,
    839  uint32_t memoryType,
    840  VkDeviceMemory memory,
    841  VkDeviceSize size);
    842 
    850 typedef struct VmaDeviceMemoryCallbacks {
    856 
    886 
    889 typedef VkFlags VmaAllocatorCreateFlags;
    890 
    895 typedef struct VmaVulkanFunctions {
    896  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    897  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    898  PFN_vkAllocateMemory vkAllocateMemory;
    899  PFN_vkFreeMemory vkFreeMemory;
    900  PFN_vkMapMemory vkMapMemory;
    901  PFN_vkUnmapMemory vkUnmapMemory;
    902  PFN_vkBindBufferMemory vkBindBufferMemory;
    903  PFN_vkBindImageMemory vkBindImageMemory;
    904  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    905  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    906  PFN_vkCreateBuffer vkCreateBuffer;
    907  PFN_vkDestroyBuffer vkDestroyBuffer;
    908  PFN_vkCreateImage vkCreateImage;
    909  PFN_vkDestroyImage vkDestroyImage;
    910  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    911  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    913 
    916 {
    918  VmaAllocatorCreateFlags flags;
    920 
    921  VkPhysicalDevice physicalDevice;
    923 
    924  VkDevice device;
    926 
    929 
    930  const VkAllocationCallbacks* pAllocationCallbacks;
    932 
    947  uint32_t frameInUseCount;
    971  const VkDeviceSize* pHeapSizeLimit;
    985 
    987 VkResult vmaCreateAllocator(
    988  const VmaAllocatorCreateInfo* pCreateInfo,
    989  VmaAllocator* pAllocator);
    990 
    993  VmaAllocator allocator);
    994 
    1000  VmaAllocator allocator,
    1001  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1002 
    1008  VmaAllocator allocator,
    1009  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1010 
    1018  VmaAllocator allocator,
    1019  uint32_t memoryTypeIndex,
    1020  VkMemoryPropertyFlags* pFlags);
    1021 
    1031  VmaAllocator allocator,
    1032  uint32_t frameIndex);
    1033 
    1036 typedef struct VmaStatInfo
    1037 {
    1039  uint32_t blockCount;
    1045  VkDeviceSize usedBytes;
    1047  VkDeviceSize unusedBytes;
    1048  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    1049  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
    1050 } VmaStatInfo;
    1051 
    1053 typedef struct VmaStats
    1054 {
    1055  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1056  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1058 } VmaStats;
    1059 
    1061 void vmaCalculateStats(
    1062  VmaAllocator allocator,
    1063  VmaStats* pStats);
    1064 
    1065 #define VMA_STATS_STRING_ENABLED 1
    1066 
    1067 #if VMA_STATS_STRING_ENABLED
    1068 
    1070 
    1072 void vmaBuildStatsString(
    1073  VmaAllocator allocator,
    1074  char** ppStatsString,
    1075  VkBool32 detailedMap);
    1076 
    1077 void vmaFreeStatsString(
    1078  VmaAllocator allocator,
    1079  char* pStatsString);
    1080 
    1081 #endif // #if VMA_STATS_STRING_ENABLED
    1082 
    1083 VK_DEFINE_HANDLE(VmaPool)
    1084 
    1085 typedef enum VmaMemoryUsage
    1086 {
    1135 } VmaMemoryUsage;
    1136 
    1151 
    1201 
    1205 
    1207 {
    1209  VmaAllocationCreateFlags flags;
    1220  VkMemoryPropertyFlags requiredFlags;
    1225  VkMemoryPropertyFlags preferredFlags;
    1233  uint32_t memoryTypeBits;
    1239  VmaPool pool;
    1246  void* pUserData;
    1248 
    1265 VkResult vmaFindMemoryTypeIndex(
    1266  VmaAllocator allocator,
    1267  uint32_t memoryTypeBits,
    1268  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1269  uint32_t* pMemoryTypeIndex);
    1270 
    1284  VmaAllocator allocator,
    1285  const VkBufferCreateInfo* pBufferCreateInfo,
    1286  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1287  uint32_t* pMemoryTypeIndex);
    1288 
    1302  VmaAllocator allocator,
    1303  const VkImageCreateInfo* pImageCreateInfo,
    1304  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1305  uint32_t* pMemoryTypeIndex);
    1306 
    1327 
    1330 typedef VkFlags VmaPoolCreateFlags;
    1331 
    1334 typedef struct VmaPoolCreateInfo {
    1340  VmaPoolCreateFlags flags;
    1345  VkDeviceSize blockSize;
    1374 
    1377 typedef struct VmaPoolStats {
    1380  VkDeviceSize size;
    1383  VkDeviceSize unusedSize;
    1396  VkDeviceSize unusedRangeSizeMax;
    1397 } VmaPoolStats;
    1398 
    1405 VkResult vmaCreatePool(
    1406  VmaAllocator allocator,
    1407  const VmaPoolCreateInfo* pCreateInfo,
    1408  VmaPool* pPool);
    1409 
    1412 void vmaDestroyPool(
    1413  VmaAllocator allocator,
    1414  VmaPool pool);
    1415 
    1422 void vmaGetPoolStats(
    1423  VmaAllocator allocator,
    1424  VmaPool pool,
    1425  VmaPoolStats* pPoolStats);
    1426 
    1434  VmaAllocator allocator,
    1435  VmaPool pool,
    1436  size_t* pLostAllocationCount);
    1437 
    1438 VK_DEFINE_HANDLE(VmaAllocation)
    1439 
    1440 
    1442 typedef struct VmaAllocationInfo {
    1447  uint32_t memoryType;
    1456  VkDeviceMemory deviceMemory;
    1461  VkDeviceSize offset;
    1466  VkDeviceSize size;
    1480  void* pUserData;
    1482 
    1493 VkResult vmaAllocateMemory(
    1494  VmaAllocator allocator,
    1495  const VkMemoryRequirements* pVkMemoryRequirements,
    1496  const VmaAllocationCreateInfo* pCreateInfo,
    1497  VmaAllocation* pAllocation,
    1498  VmaAllocationInfo* pAllocationInfo);
    1499 
    1507  VmaAllocator allocator,
    1508  VkBuffer buffer,
    1509  const VmaAllocationCreateInfo* pCreateInfo,
    1510  VmaAllocation* pAllocation,
    1511  VmaAllocationInfo* pAllocationInfo);
    1512 
    1514 VkResult vmaAllocateMemoryForImage(
    1515  VmaAllocator allocator,
    1516  VkImage image,
    1517  const VmaAllocationCreateInfo* pCreateInfo,
    1518  VmaAllocation* pAllocation,
    1519  VmaAllocationInfo* pAllocationInfo);
    1520 
    1522 void vmaFreeMemory(
    1523  VmaAllocator allocator,
    1524  VmaAllocation allocation);
    1525 
    1531  VmaAllocator allocator,
    1532  VmaAllocation allocation,
    1533  VmaAllocationInfo* pAllocationInfo);
    1534 
1537 VkBool32 vmaTouchAllocation(
    1538  VmaAllocator allocator,
    1539  VmaAllocation allocation);
    1540 
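// Editorial usage sketch (not part of the original file): with allocations that can
// become lost, vmaTouchAllocation() both checks that the allocation is still valid
// and marks it as used in the current frame. The recreate step is a placeholder.
static void ExampleTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
{
    if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
    {
        // Allocation is lost: the resource bound to it must be recreated.
    }
}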
    1555  VmaAllocator allocator,
    1556  VmaAllocation allocation,
    1557  void* pUserData);
    1558 
    1570  VmaAllocator allocator,
    1571  VmaAllocation* pAllocation);
    1572 
    1607 VkResult vmaMapMemory(
    1608  VmaAllocator allocator,
    1609  VmaAllocation allocation,
    1610  void** ppData);
    1611 
    1616 void vmaUnmapMemory(
    1617  VmaAllocator allocator,
    1618  VmaAllocation allocation);
    1619 
    1621 typedef struct VmaDefragmentationInfo {
    1626  VkDeviceSize maxBytesToMove;
    1633 
    1635 typedef struct VmaDefragmentationStats {
    1637  VkDeviceSize bytesMoved;
    1639  VkDeviceSize bytesFreed;
    1645 
    1728 VkResult vmaDefragment(
    1729  VmaAllocator allocator,
    1730  VmaAllocation* pAllocations,
    1731  size_t allocationCount,
    1732  VkBool32* pAllocationsChanged,
    1733  const VmaDefragmentationInfo *pDefragmentationInfo,
    1734  VmaDefragmentationStats* pDefragmentationStats);
    1735 
    1762 VkResult vmaCreateBuffer(
    1763  VmaAllocator allocator,
    1764  const VkBufferCreateInfo* pBufferCreateInfo,
    1765  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1766  VkBuffer* pBuffer,
    1767  VmaAllocation* pAllocation,
    1768  VmaAllocationInfo* pAllocationInfo);
    1769 
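// Editorial usage sketch (not part of the original file): creating a buffer and
// allocating memory for it in a single call. VMA_MEMORY_USAGE_GPU_ONLY is assumed
// to be one of the VmaMemoryUsage values collapsed from this listing.
static VkResult ExampleCreateVertexBuffer(
    VmaAllocator allocator,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    // Buffer, memory allocation and the binding between them are created together;
    // vmaDestroyBuffer() later destroys both.
    return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, pBuffer, pAllocation, nullptr);
}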
    1781 void vmaDestroyBuffer(
    1782  VmaAllocator allocator,
    1783  VkBuffer buffer,
    1784  VmaAllocation allocation);
    1785 
    1787 VkResult vmaCreateImage(
    1788  VmaAllocator allocator,
    1789  const VkImageCreateInfo* pImageCreateInfo,
    1790  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1791  VkImage* pImage,
    1792  VmaAllocation* pAllocation,
    1793  VmaAllocationInfo* pAllocationInfo);
    1794 
    1806 void vmaDestroyImage(
    1807  VmaAllocator allocator,
    1808  VkImage image,
    1809  VmaAllocation allocation);
    1810 
    1811 #ifdef __cplusplus
    1812 }
    1813 #endif
    1814 
    1815 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    1816 
    1817 // For Visual Studio IntelliSense.
    1818 #ifdef __INTELLISENSE__
    1819 #define VMA_IMPLEMENTATION
    1820 #endif
    1821 
    1822 #ifdef VMA_IMPLEMENTATION
    1823 #undef VMA_IMPLEMENTATION
    1824 
    1825 #include <cstdint>
    1826 #include <cstdlib>
    1827 #include <cstring>
    1828 
    1829 /*******************************************************************************
    1830 CONFIGURATION SECTION
    1831 
    1832 Define some of these macros before each #include of this header or change them
1833 here if you need other than default behavior, depending on your environment.
    1834 */
    1835 
    1836 /*
    1837 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    1838 internally, like:
    1839 
    1840  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    1841 
1842 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    1843 VmaAllocatorCreateInfo::pVulkanFunctions.
    1844 */
    1845 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    1846 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    1847 #endif
    1848 
    1849 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    1850 //#define VMA_USE_STL_CONTAINERS 1
    1851 
1852 /* Set this macro to 1 to make the library include and use STL containers:
    1853 std::pair, std::vector, std::list, std::unordered_map.
    1854 
1855 Set it to 0 or leave it undefined to make the library use its own implementation of
    1856 the containers.
    1857 */
    1858 #if VMA_USE_STL_CONTAINERS
    1859  #define VMA_USE_STL_VECTOR 1
    1860  #define VMA_USE_STL_UNORDERED_MAP 1
    1861  #define VMA_USE_STL_LIST 1
    1862 #endif
    1863 
    1864 #if VMA_USE_STL_VECTOR
    1865  #include <vector>
    1866 #endif
    1867 
    1868 #if VMA_USE_STL_UNORDERED_MAP
    1869  #include <unordered_map>
    1870 #endif
    1871 
    1872 #if VMA_USE_STL_LIST
    1873  #include <list>
    1874 #endif
    1875 
    1876 /*
1877 The following headers are used in this CONFIGURATION section only, so feel free to
    1878 remove them if not needed.
    1879 */
    1880 #include <cassert> // for assert
    1881 #include <algorithm> // for min, max
    1882 #include <mutex> // for std::mutex
    1883 #include <atomic> // for std::atomic
    1884 
    1885 #if !defined(_WIN32) && !defined(__APPLE__)
    1886  #include <malloc.h> // for aligned_alloc()
    1887 #endif
    1888 
    1889 #ifndef VMA_NULL
    1890  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    1891  #define VMA_NULL nullptr
    1892 #endif
    1893 
    1894 #if defined(__APPLE__)
    1895 #include <cstdlib>
    1896 void *aligned_alloc(size_t alignment, size_t size)
    1897 {
    1898  // alignment must be >= sizeof(void*)
    1899  if(alignment < sizeof(void*))
    1900  {
    1901  alignment = sizeof(void*);
    1902  }
    1903 
    1904  void *pointer;
    1905  if(posix_memalign(&pointer, alignment, size) == 0)
    1906  return pointer;
    1907  return VMA_NULL;
    1908 }
    1909 #endif
    1910 
    1911 // Normal assert to check for programmer's errors, especially in Debug configuration.
    1912 #ifndef VMA_ASSERT
    1913  #ifdef _DEBUG
    1914  #define VMA_ASSERT(expr) assert(expr)
    1915  #else
    1916  #define VMA_ASSERT(expr)
    1917  #endif
    1918 #endif
    1919 
    1920 // Assert that will be called very often, like inside data structures e.g. operator[].
1921 // Making it non-empty can make the program slow.
    1922 #ifndef VMA_HEAVY_ASSERT
    1923  #ifdef _DEBUG
    1924  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    1925  #else
    1926  #define VMA_HEAVY_ASSERT(expr)
    1927  #endif
    1928 #endif
    1929 
    1930 #ifndef VMA_ALIGN_OF
    1931  #define VMA_ALIGN_OF(type) (__alignof(type))
    1932 #endif
    1933 
    1934 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    1935  #if defined(_WIN32)
    1936  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    1937  #else
    1938  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    1939  #endif
    1940 #endif
    1941 
    1942 #ifndef VMA_SYSTEM_FREE
    1943  #if defined(_WIN32)
    1944  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    1945  #else
    1946  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    1947  #endif
    1948 #endif
    1949 
    1950 #ifndef VMA_MIN
    1951  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    1952 #endif
    1953 
    1954 #ifndef VMA_MAX
    1955  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    1956 #endif
    1957 
    1958 #ifndef VMA_SWAP
    1959  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    1960 #endif
    1961 
    1962 #ifndef VMA_SORT
    1963  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    1964 #endif
    1965 
    1966 #ifndef VMA_DEBUG_LOG
    1967  #define VMA_DEBUG_LOG(format, ...)
    1968  /*
    1969  #define VMA_DEBUG_LOG(format, ...) do { \
    1970  printf(format, __VA_ARGS__); \
    1971  printf("\n"); \
    1972  } while(false)
    1973  */
    1974 #endif
    1975 
    1976 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    1977 #if VMA_STATS_STRING_ENABLED
    1978  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    1979  {
    1980  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    1981  }
    1982  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    1983  {
    1984  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    1985  }
    1986  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    1987  {
    1988  snprintf(outStr, strLen, "%p", ptr);
    1989  }
    1990 #endif
    1991 
    1992 #ifndef VMA_MUTEX
    1993  class VmaMutex
    1994  {
    1995  public:
    1996  VmaMutex() { }
    1997  ~VmaMutex() { }
    1998  void Lock() { m_Mutex.lock(); }
    1999  void Unlock() { m_Mutex.unlock(); }
    2000  private:
    2001  std::mutex m_Mutex;
    2002  };
    2003  #define VMA_MUTEX VmaMutex
    2004 #endif
    2005 
    2006 /*
    2007 If providing your own implementation, you need to implement a subset of std::atomic:
    2008 
    2009 - Constructor(uint32_t desired)
    2010 - uint32_t load() const
    2011 - void store(uint32_t desired)
    2012 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2013 */
    2014 #ifndef VMA_ATOMIC_UINT32
    2015  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2016 #endif
    2017 
    2018 #ifndef VMA_BEST_FIT
    2019 
    2031  #define VMA_BEST_FIT (1)
    2032 #endif
    2033 
    2034 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2035 
    2039  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2040 #endif
    2041 
    2042 #ifndef VMA_DEBUG_ALIGNMENT
    2043 
    2047  #define VMA_DEBUG_ALIGNMENT (1)
    2048 #endif
    2049 
    2050 #ifndef VMA_DEBUG_MARGIN
    2051 
    2055  #define VMA_DEBUG_MARGIN (0)
    2056 #endif
    2057 
    2058 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    2059 
    2063  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    2064 #endif
    2065 
    2066 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    2067 
    2071  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    2072 #endif
    2073 
    2074 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    2075  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    2077 #endif
    2078 
    2079 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    2080  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    2082 #endif
    2083 
    2084 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    2085 
    2086 /*******************************************************************************
    2087 END OF CONFIGURATION
    2088 */
    2089 
    2090 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    2091  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    2092 
    2093 // Returns number of bits set to 1 in (v).
    2094 static inline uint32_t VmaCountBitsSet(uint32_t v)
    2095 {
    2096  uint32_t c = v - ((v >> 1) & 0x55555555);
    2097  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    2098  c = ((c >> 4) + c) & 0x0F0F0F0F;
    2099  c = ((c >> 8) + c) & 0x00FF00FF;
    2100  c = ((c >> 16) + c) & 0x0000FFFF;
    2101  return c;
    2102 }
    2103 
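// Editorial note: the SWAR reduction above is equivalent to counting bits one at a
// time, e.g. VmaCountBitsSet(0xB) == 3 (binary 1011). A naive reference version:
static inline uint32_t VmaCountBitsSetNaive(uint32_t v)
{
    uint32_t count = 0;
    for(; v != 0; v >>= 1)
    {
        count += (v & 1u); // add the lowest bit, then shift it out
    }
    return count;
}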
2104 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
    2105 // Use types like uint32_t, uint64_t as T.
    2106 template <typename T>
    2107 static inline T VmaAlignUp(T val, T align)
    2108 {
    2109  return (val + align - 1) / align * align;
    2110 }
    2111 
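// Editorial worked example: VmaAlignUp<uint32_t>(11, 8) = (11 + 7) / 8 * 8 = 16,
// while an already-aligned value is unchanged: VmaAlignUp<uint32_t>(16, 8) = 16.
// Unlike mask-based rounding, this integer-division form works for any positive
// align, not only powers of 2.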
2112 // Division with mathematical rounding to the nearest integer.
    2113 template <typename T>
    2114 inline T VmaRoundDiv(T x, T y)
    2115 {
    2116  return (x + (y / (T)2)) / y;
    2117 }
    2118 
    2119 #ifndef VMA_SORT
    2120 
    2121 template<typename Iterator, typename Compare>
    2122 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    2123 {
    2124  Iterator centerValue = end; --centerValue;
    2125  Iterator insertIndex = beg;
    2126  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    2127  {
    2128  if(cmp(*memTypeIndex, *centerValue))
    2129  {
    2130  if(insertIndex != memTypeIndex)
    2131  {
    2132  VMA_SWAP(*memTypeIndex, *insertIndex);
    2133  }
    2134  ++insertIndex;
    2135  }
    2136  }
    2137  if(insertIndex != centerValue)
    2138  {
    2139  VMA_SWAP(*insertIndex, *centerValue);
    2140  }
    2141  return insertIndex;
    2142 }
    2143 
    2144 template<typename Iterator, typename Compare>
    2145 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    2146 {
    2147  if(beg < end)
    2148  {
    2149  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    2150  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    2151  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    2152  }
    2153 }
    2154 
    2155 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    2156 
    2157 #endif // #ifndef VMA_SORT
    2158 
    2159 /*
    2160 Returns true if two memory blocks occupy overlapping pages.
2161 ResourceA must be at a lower memory offset than ResourceB.
    2162 
    2163 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    2164 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    2165 */
    2166 static inline bool VmaBlocksOnSamePage(
    2167  VkDeviceSize resourceAOffset,
    2168  VkDeviceSize resourceASize,
    2169  VkDeviceSize resourceBOffset,
    2170  VkDeviceSize pageSize)
    2171 {
    2172  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    2173  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    2174  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    2175  VkDeviceSize resourceBStart = resourceBOffset;
    2176  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    2177  return resourceAEndPage == resourceBStartPage;
    2178 }
    2179 
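// Editorial worked example: with pageSize = 4096, resource A at offset 0 and size
// 4000 ends on page 0. A resource B starting at offset 4096 begins on page 1, so
// VmaBlocksOnSamePage(0, 4000, 4096, 4096) == false; for B at offset 4000 the
// function returns true. The bit masking assumes pageSize is a power of 2.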
    2180 enum VmaSuballocationType
    2181 {
    2182  VMA_SUBALLOCATION_TYPE_FREE = 0,
    2183  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    2184  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    2185  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    2186  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    2187  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    2188  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    2189 };
    2190 
    2191 /*
    2192 Returns true if given suballocation types could conflict and must respect
2193 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
2194 or a linear image and the other is an optimal image. If a type is unknown, behave
2195 conservatively.
    2196 */
    2197 static inline bool VmaIsBufferImageGranularityConflict(
    2198  VmaSuballocationType suballocType1,
    2199  VmaSuballocationType suballocType2)
    2200 {
    2201  if(suballocType1 > suballocType2)
    2202  {
    2203  VMA_SWAP(suballocType1, suballocType2);
    2204  }
    2205 
    2206  switch(suballocType1)
    2207  {
    2208  case VMA_SUBALLOCATION_TYPE_FREE:
    2209  return false;
    2210  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    2211  return true;
    2212  case VMA_SUBALLOCATION_TYPE_BUFFER:
    2213  return
    2214  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    2215  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    2216  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    2217  return
    2218  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    2219  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    2220  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    2221  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    2222  return
    2223  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    2224  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    2225  return false;
    2226  default:
    2227  VMA_ASSERT(0);
    2228  return true;
    2229  }
    2230 }
    2231 
    2232 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    2233 struct VmaMutexLock
    2234 {
    2235 public:
    2236  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    2237  m_pMutex(useMutex ? &mutex : VMA_NULL)
    2238  {
    2239  if(m_pMutex)
    2240  {
    2241  m_pMutex->Lock();
    2242  }
    2243  }
    2244 
    2245  ~VmaMutexLock()
    2246  {
    2247  if(m_pMutex)
    2248  {
    2249  m_pMutex->Unlock();
    2250  }
    2251  }
    2252 
    2253 private:
    2254  VMA_MUTEX* m_pMutex;
    2255 };
    2256 
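// Editorial usage sketch: because the lock is scoped, the mutex is released on every
// return path. The global counter and mutex here are illustrative only.
static VMA_MUTEX gExampleMutex;
static uint32_t gExampleSharedCounter = 0;
static uint32_t ExampleIncrementUnderLock(bool useMutex)
{
    VmaMutexLock lock(gExampleMutex, useMutex);
    return ++gExampleSharedCounter; // mutex unlocked when `lock` goes out of scope
}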
    2257 #if VMA_DEBUG_GLOBAL_MUTEX
    2258  static VMA_MUTEX gDebugGlobalMutex;
    2259  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    2260 #else
    2261  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    2262 #endif
    2263 
    2264 // Minimum size of a free suballocation to register it in the free suballocation collection.
    2265 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    2266 
    2267 /*
2268 Performs a binary search and returns an iterator to the first element that is
2269 greater than or equal to (key), according to comparison (cmp).
2270 
2271 Cmp should return true if its first argument is less than its second argument.
2272 
2273 The returned iterator points to the found element, if present in the collection,
2274 or to the place where a new element with value (key) should be inserted.
    2275 */
    2276 template <typename IterT, typename KeyT, typename CmpT>
    2277 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
    2278 {
    2279  size_t down = 0, up = (end - beg);
    2280  while(down < up)
    2281  {
    2282  const size_t mid = (down + up) / 2;
    2283  if(cmp(*(beg+mid), key))
    2284  {
    2285  down = mid + 1;
    2286  }
    2287  else
    2288  {
    2289  up = mid;
    2290  }
    2291  }
    2292  return beg + down;
    2293 }
    2294 
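// Editorial worked example: in the sorted range {1, 3, 3, 7} with cmp = operator<,
// searching for key 3 returns an iterator to index 1 (the first 3), and searching
// for key 4 returns an iterator to index 3 (the position where 4 would be inserted).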
    2296 // Memory allocation
    2297 
    2298 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    2299 {
    2300  if((pAllocationCallbacks != VMA_NULL) &&
    2301  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    2302  {
    2303  return (*pAllocationCallbacks->pfnAllocation)(
    2304  pAllocationCallbacks->pUserData,
    2305  size,
    2306  alignment,
    2307  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    2308  }
    2309  else
    2310  {
    2311  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    2312  }
    2313 }
    2314 
    2315 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    2316 {
    2317  if((pAllocationCallbacks != VMA_NULL) &&
    2318  (pAllocationCallbacks->pfnFree != VMA_NULL))
    2319  {
    2320  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    2321  }
    2322  else
    2323  {
    2324  VMA_SYSTEM_FREE(ptr);
    2325  }
    2326 }
    2327 
    2328 template<typename T>
    2329 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    2330 {
    2331  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    2332 }
    2333 
    2334 template<typename T>
    2335 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    2336 {
    2337  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    2338 }
    2339 
    2340 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    2341 
    2342 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    2343 
    2344 template<typename T>
    2345 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    2346 {
    2347  ptr->~T();
    2348  VmaFree(pAllocationCallbacks, ptr);
    2349 }
    2350 
    2351 template<typename T>
    2352 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    2353 {
    2354  if(ptr != VMA_NULL)
    2355  {
    2356  for(size_t i = count; i--; )
    2357  {
    2358  ptr[i].~T();
    2359  }
    2360  VmaFree(pAllocationCallbacks, ptr);
    2361  }
    2362 }
    2363 
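// Editorial usage sketch: vma_new/vma_delete pair placement-new over the
// callback-aware VmaMalloc/VmaFree with an explicit destructor call.
struct VmaExamplePayload { uint32_t value; }; // illustrative POD type
static void ExampleVmaNewDelete(const VkAllocationCallbacks* pAllocationCallbacks)
{
    VmaExamplePayload* p = vma_new(pAllocationCallbacks, VmaExamplePayload)();
    p->value = 42;
    vma_delete(pAllocationCallbacks, p); // runs ~VmaExamplePayload(), then VmaFree()
}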
    2364 // STL-compatible allocator.
    2365 template<typename T>
    2366 class VmaStlAllocator
    2367 {
    2368 public:
    2369  const VkAllocationCallbacks* const m_pCallbacks;
    2370  typedef T value_type;
    2371 
    2372  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    2373  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    2374 
    2375  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    2376  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    2377 
    2378  template<typename U>
    2379  bool operator==(const VmaStlAllocator<U>& rhs) const
    2380  {
    2381  return m_pCallbacks == rhs.m_pCallbacks;
    2382  }
    2383  template<typename U>
    2384  bool operator!=(const VmaStlAllocator<U>& rhs) const
    2385  {
    2386  return m_pCallbacks != rhs.m_pCallbacks;
    2387  }
    2388 
    2389  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    2390 };
    2391 
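// Editorial note: this class provides the minimal allocator interface (value_type,
// allocate, deallocate, converting constructor, equality operators) that the
// library's own containers below rely on; with the VMA_USE_STL_* options it also
// backs the corresponding STL containers.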
    2392 #if VMA_USE_STL_VECTOR
    2393 
    2394 #define VmaVector std::vector
    2395 
    2396 template<typename T, typename allocatorT>
    2397 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    2398 {
    2399  vec.insert(vec.begin() + index, item);
    2400 }
    2401 
    2402 template<typename T, typename allocatorT>
    2403 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    2404 {
    2405  vec.erase(vec.begin() + index);
    2406 }
    2407 
    2408 #else // #if VMA_USE_STL_VECTOR
    2409 
2410 /* Class with interface compatible with a subset of std::vector.
    2411 T must be POD because constructors and destructors are not called and memcpy is
    2412 used for these objects. */
    2413 template<typename T, typename AllocatorT>
    2414 class VmaVector
    2415 {
    2416 public:
    2417  typedef T value_type;
    2418 
    2419  VmaVector(const AllocatorT& allocator) :
    2420  m_Allocator(allocator),
    2421  m_pArray(VMA_NULL),
    2422  m_Count(0),
    2423  m_Capacity(0)
    2424  {
    2425  }
    2426 
    2427  VmaVector(size_t count, const AllocatorT& allocator) :
    2428  m_Allocator(allocator),
    2429  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    2430  m_Count(count),
    2431  m_Capacity(count)
    2432  {
    2433  }
    2434 
    2435  VmaVector(const VmaVector<T, AllocatorT>& src) :
    2436  m_Allocator(src.m_Allocator),
    2437  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    2438  m_Count(src.m_Count),
    2439  m_Capacity(src.m_Count)
    2440  {
    2441  if(m_Count != 0)
    2442  {
    2443  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    2444  }
    2445  }
    2446 
    2447  ~VmaVector()
    2448  {
    2449  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2450  }
    2451 
    2452  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    2453  {
    2454  if(&rhs != this)
    2455  {
    2456  resize(rhs.m_Count);
    2457  if(m_Count != 0)
    2458  {
    2459  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    2460  }
    2461  }
    2462  return *this;
    2463  }
    2464 
    2465  bool empty() const { return m_Count == 0; }
    2466  size_t size() const { return m_Count; }
    2467  T* data() { return m_pArray; }
    2468  const T* data() const { return m_pArray; }
    2469 
    2470  T& operator[](size_t index)
    2471  {
    2472  VMA_HEAVY_ASSERT(index < m_Count);
    2473  return m_pArray[index];
    2474  }
    2475  const T& operator[](size_t index) const
    2476  {
    2477  VMA_HEAVY_ASSERT(index < m_Count);
    2478  return m_pArray[index];
    2479  }
    2480 
    2481  T& front()
    2482  {
    2483  VMA_HEAVY_ASSERT(m_Count > 0);
    2484  return m_pArray[0];
    2485  }
    2486  const T& front() const
    2487  {
    2488  VMA_HEAVY_ASSERT(m_Count > 0);
    2489  return m_pArray[0];
    2490  }
    2491  T& back()
    2492  {
    2493  VMA_HEAVY_ASSERT(m_Count > 0);
    2494  return m_pArray[m_Count - 1];
    2495  }
    2496  const T& back() const
    2497  {
    2498  VMA_HEAVY_ASSERT(m_Count > 0);
    2499  return m_pArray[m_Count - 1];
    2500  }
    2501 
    2502  void reserve(size_t newCapacity, bool freeMemory = false)
    2503  {
    2504  newCapacity = VMA_MAX(newCapacity, m_Count);
    2505 
    2506  if((newCapacity < m_Capacity) && !freeMemory)
    2507  {
    2508  newCapacity = m_Capacity;
    2509  }
    2510 
    2511  if(newCapacity != m_Capacity)
    2512  {
2513 T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    2514  if(m_Count != 0)
    2515  {
    2516  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    2517  }
    2518  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2519  m_Capacity = newCapacity;
    2520  m_pArray = newArray;
    2521  }
    2522  }
    2523 
    2524  void resize(size_t newCount, bool freeMemory = false)
    2525  {
    2526  size_t newCapacity = m_Capacity;
    2527  if(newCount > m_Capacity)
    2528  {
    2529  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    2530  }
    2531  else if(freeMemory)
    2532  {
    2533  newCapacity = newCount;
    2534  }
    2535 
    2536  if(newCapacity != m_Capacity)
    2537  {
    2538  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    2539  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    2540  if(elementsToCopy != 0)
    2541  {
    2542  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    2543  }
    2544  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2545  m_Capacity = newCapacity;
    2546  m_pArray = newArray;
    2547  }
    2548 
    2549  m_Count = newCount;
    2550  }
    2551 
    2552  void clear(bool freeMemory = false)
    2553  {
    2554  resize(0, freeMemory);
    2555  }
    2556 
    2557  void insert(size_t index, const T& src)
    2558  {
    2559  VMA_HEAVY_ASSERT(index <= m_Count);
    2560  const size_t oldCount = size();
    2561  resize(oldCount + 1);
    2562  if(index < oldCount)
    2563  {
    2564  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    2565  }
    2566  m_pArray[index] = src;
    2567  }
    2568 
    2569  void remove(size_t index)
    2570  {
    2571  VMA_HEAVY_ASSERT(index < m_Count);
    2572  const size_t oldCount = size();
    2573  if(index < oldCount - 1)
    2574  {
    2575  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    2576  }
    2577  resize(oldCount - 1);
    2578  }
    2579 
    2580  void push_back(const T& src)
    2581  {
    2582  const size_t newIndex = size();
    2583  resize(newIndex + 1);
    2584  m_pArray[newIndex] = src;
    2585  }
    2586 
    2587  void pop_back()
    2588  {
    2589  VMA_HEAVY_ASSERT(m_Count > 0);
    2590  resize(size() - 1);
    2591  }
    2592 
    2593  void push_front(const T& src)
    2594  {
    2595  insert(0, src);
    2596  }
    2597 
    2598  void pop_front()
    2599  {
    2600  VMA_HEAVY_ASSERT(m_Count > 0);
    2601  remove(0);
    2602  }
    2603 
    2604  typedef T* iterator;
    2605 
    2606  iterator begin() { return m_pArray; }
    2607  iterator end() { return m_pArray + m_Count; }
    2608 
    2609 private:
    2610  AllocatorT m_Allocator;
    2611  T* m_pArray;
    2612  size_t m_Count;
    2613  size_t m_Capacity;
    2614 };
    2615 
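// Editorial usage sketch: VmaVector stores POD elements through the callback-aware
// allocation functions; constructors and destructors of T are intentionally not run.
static void ExampleVmaVector(const VkAllocationCallbacks* pCallbacks)
{
    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v(VmaStlAllocator<uint32_t>(pCallbacks));
    v.push_back(10); // v = {10}
    v.insert(0, 5);  // v = {5, 10}
    v.remove(1);     // v = {5}
}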
    2616 template<typename T, typename allocatorT>
    2617 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    2618 {
    2619  vec.insert(index, item);
    2620 }
    2621 
    2622 template<typename T, typename allocatorT>
    2623 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    2624 {
    2625  vec.remove(index);
    2626 }
    2627 
    2628 #endif // #if VMA_USE_STL_VECTOR
    2629 
    2630 template<typename CmpLess, typename VectorT>
    2631 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    2632 {
    2633  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    2634  vector.data(),
    2635  vector.data() + vector.size(),
    2636  value,
    2637  CmpLess()) - vector.data();
    2638  VmaVectorInsert(vector, indexToInsert, value);
    2639  return indexToInsert;
    2640 }
    2641 
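// Editorial worked example: given a vector holding {1, 3, 7} and a less-than
// comparator, VmaVectorInsertSorted(vector, 5) inserts at index 2 and returns 2,
// leaving {1, 3, 5, 7}.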
    2642 template<typename CmpLess, typename VectorT>
    2643 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    2644 {
    2645  CmpLess comparator;
    2646  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    2647  vector.begin(),
    2648  vector.end(),
    2649  value,
    2650  comparator);
    2651  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    2652  {
    2653  size_t indexToRemove = it - vector.begin();
    2654  VmaVectorRemove(vector, indexToRemove);
    2655  return true;
    2656  }
    2657  return false;
    2658 }
    2659 
    2660 template<typename CmpLess, typename VectorT>
    2661 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
    2662 {
    2663  CmpLess comparator;
2664 const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
    2665  vector.data(),
    2666  vector.data() + vector.size(),
    2667  value,
    2668  comparator);
2669 if(it != vector.data() + vector.size() && !comparator(*it, value) && !comparator(value, *it))
    2670  {
2671 return it - vector.data();
    2672  }
    2673  else
    2674  {
    2675  return vector.size();
    2676  }
    2677 }
    2678 
    2680 // class VmaPoolAllocator
    2681 
    2682 /*
    2683 Allocator for objects of type T using a list of arrays (pools) to speed up
2684 allocation. The number of elements that can be allocated is not bounded, because
2685 the allocator can create multiple blocks.
    2686 */
    2687 template<typename T>
    2688 class VmaPoolAllocator
    2689 {
    2690 public:
    2691  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    2692  ~VmaPoolAllocator();
    2693  void Clear();
    2694  T* Alloc();
    2695  void Free(T* ptr);
    2696 
    2697 private:
    2698  union Item
    2699  {
    2700  uint32_t NextFreeIndex;
    2701  T Value;
    2702  };
    2703 
    2704  struct ItemBlock
    2705  {
    2706  Item* pItems;
    2707  uint32_t FirstFreeIndex;
    2708  };
    2709 
    2710  const VkAllocationCallbacks* m_pAllocationCallbacks;
    2711  size_t m_ItemsPerBlock;
    2712  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    2713 
    2714  ItemBlock& CreateNewBlock();
    2715 };
    2716 
    2717 template<typename T>
    2718 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    2719  m_pAllocationCallbacks(pAllocationCallbacks),
    2720  m_ItemsPerBlock(itemsPerBlock),
    2721  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    2722 {
    2723  VMA_ASSERT(itemsPerBlock > 0);
    2724 }
    2725 
    2726 template<typename T>
    2727 VmaPoolAllocator<T>::~VmaPoolAllocator()
    2728 {
    2729  Clear();
    2730 }
    2731 
    2732 template<typename T>
    2733 void VmaPoolAllocator<T>::Clear()
    2734 {
    2735  for(size_t i = m_ItemBlocks.size(); i--; )
    2736  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    2737  m_ItemBlocks.clear();
    2738 }
    2739 
    2740 template<typename T>
    2741 T* VmaPoolAllocator<T>::Alloc()
    2742 {
    2743  for(size_t i = m_ItemBlocks.size(); i--; )
    2744  {
    2745  ItemBlock& block = m_ItemBlocks[i];
2746 // This block has some free items: use the first one.
    2747  if(block.FirstFreeIndex != UINT32_MAX)
    2748  {
    2749  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    2750  block.FirstFreeIndex = pItem->NextFreeIndex;
    2751  return &pItem->Value;
    2752  }
    2753  }
    2754 
2755 // No block has a free item: create a new one and use it.
    2756  ItemBlock& newBlock = CreateNewBlock();
    2757  Item* const pItem = &newBlock.pItems[0];
    2758  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    2759  return &pItem->Value;
    2760 }
    2761 
    2762 template<typename T>
    2763 void VmaPoolAllocator<T>::Free(T* ptr)
    2764 {
    2765  // Search all memory blocks to find ptr.
    2766  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    2767  {
    2768  ItemBlock& block = m_ItemBlocks[i];
    2769 
    2770  // Casting to union.
    2771  Item* pItemPtr;
    2772  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    2773 
    2774  // Check if pItemPtr is in address range of this block.
    2775  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    2776  {
    2777  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    2778  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    2779  block.FirstFreeIndex = index;
    2780  return;
    2781  }
    2782  }
    2783  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    2784 }
    2785 
    2786 template<typename T>
    2787 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    2788 {
    2789  ItemBlock newBlock = {
    2790  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    2791 
    2792  m_ItemBlocks.push_back(newBlock);
    2793 
2794 // Set up a singly-linked list of all free items in this block.
    2795  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    2796  newBlock.pItems[i].NextFreeIndex = i + 1;
    2797  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    2798  return m_ItemBlocks.back();
    2799 }
    2800 
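// Editorial usage sketch: items are served from pooled blocks and recycled through
// the per-block free list instead of going back to the heap.
static void ExampleVmaPoolAllocator(const VkAllocationCallbacks* pCallbacks)
{
    VmaPoolAllocator<uint32_t> pool(pCallbacks, 32); // 32 items per block (arbitrary)
    uint32_t* item = pool.Alloc();
    *item = 7;
    pool.Free(item); // pushed onto the block's free list, reused by the next Alloc()
}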
    2802 // class VmaRawList, VmaList
    2803 
    2804 #if VMA_USE_STL_LIST
    2805 
    2806 #define VmaList std::list
    2807 
    2808 #else // #if VMA_USE_STL_LIST
    2809 
    2810 template<typename T>
    2811 struct VmaListItem
    2812 {
    2813  VmaListItem* pPrev;
    2814  VmaListItem* pNext;
    2815  T Value;
    2816 };
    2817 
    2818 // Doubly linked list.
    2819 template<typename T>
    2820 class VmaRawList
    2821 {
    2822 public:
    2823  typedef VmaListItem<T> ItemType;
    2824 
    2825  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    2826  ~VmaRawList();
    2827  void Clear();
    2828 
    2829  size_t GetCount() const { return m_Count; }
    2830  bool IsEmpty() const { return m_Count == 0; }
    2831 
    2832  ItemType* Front() { return m_pFront; }
    2833  const ItemType* Front() const { return m_pFront; }
    2834  ItemType* Back() { return m_pBack; }
    2835  const ItemType* Back() const { return m_pBack; }
    2836 
    2837  ItemType* PushBack();
    2838  ItemType* PushFront();
    2839  ItemType* PushBack(const T& value);
    2840  ItemType* PushFront(const T& value);
    2841  void PopBack();
    2842  void PopFront();
    2843 
    2844  // Item can be null - it means PushBack.
    2845  ItemType* InsertBefore(ItemType* pItem);
    2846  // Item can be null - it means PushFront.
    2847  ItemType* InsertAfter(ItemType* pItem);
    2848 
    2849  ItemType* InsertBefore(ItemType* pItem, const T& value);
    2850  ItemType* InsertAfter(ItemType* pItem, const T& value);
    2851 
    2852  void Remove(ItemType* pItem);
    2853 
    2854 private:
    2855  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    2856  VmaPoolAllocator<ItemType> m_ItemAllocator;
    2857  ItemType* m_pFront;
    2858  ItemType* m_pBack;
    2859  size_t m_Count;
    2860 
2861 // Declared but not defined, to block the copy constructor and assignment operator.
    2862  VmaRawList(const VmaRawList<T>& src);
    2863  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
    2864 };
    2865 
    2866 template<typename T>
    2867 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    2868  m_pAllocationCallbacks(pAllocationCallbacks),
    2869  m_ItemAllocator(pAllocationCallbacks, 128),
    2870  m_pFront(VMA_NULL),
    2871  m_pBack(VMA_NULL),
    2872  m_Count(0)
    2873 {
    2874 }
    2875 
    2876 template<typename T>
    2877 VmaRawList<T>::~VmaRawList()
    2878 {
2879 // Intentionally not calling Clear, because that would spend time on unnecessary
2880 // computation just to return all items to m_ItemAllocator as free.
    2881 }
    2882 
    2883 template<typename T>
    2884 void VmaRawList<T>::Clear()
    2885 {
    2886  if(IsEmpty() == false)
    2887  {
    2888  ItemType* pItem = m_pBack;
    2889  while(pItem != VMA_NULL)
    2890  {
    2891  ItemType* const pPrevItem = pItem->pPrev;
    2892  m_ItemAllocator.Free(pItem);
    2893  pItem = pPrevItem;
    2894  }
    2895  m_pFront = VMA_NULL;
    2896  m_pBack = VMA_NULL;
    2897  m_Count = 0;
    2898  }
    2899 }
    2900 
    2901 template<typename T>
    2902 VmaListItem<T>* VmaRawList<T>::PushBack()
    2903 {
    2904  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2905  pNewItem->pNext = VMA_NULL;
    2906  if(IsEmpty())
    2907  {
    2908  pNewItem->pPrev = VMA_NULL;
    2909  m_pFront = pNewItem;
    2910  m_pBack = pNewItem;
    2911  m_Count = 1;
    2912  }
    2913  else
    2914  {
    2915  pNewItem->pPrev = m_pBack;
    2916  m_pBack->pNext = pNewItem;
    2917  m_pBack = pNewItem;
    2918  ++m_Count;
    2919  }
    2920  return pNewItem;
    2921 }
    2922 
    2923 template<typename T>
    2924 VmaListItem<T>* VmaRawList<T>::PushFront()
    2925 {
    2926  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2927  pNewItem->pPrev = VMA_NULL;
    2928  if(IsEmpty())
    2929  {
    2930  pNewItem->pNext = VMA_NULL;
    2931  m_pFront = pNewItem;
    2932  m_pBack = pNewItem;
    2933  m_Count = 1;
    2934  }
    2935  else
    2936  {
    2937  pNewItem->pNext = m_pFront;
    2938  m_pFront->pPrev = pNewItem;
    2939  m_pFront = pNewItem;
    2940  ++m_Count;
    2941  }
    2942  return pNewItem;
    2943 }
    2944 
    2945 template<typename T>
    2946 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    2947 {
    2948  ItemType* const pNewItem = PushBack();
    2949  pNewItem->Value = value;
    2950  return pNewItem;
    2951 }
    2952 
    2953 template<typename T>
    2954 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    2955 {
    2956  ItemType* const pNewItem = PushFront();
    2957  pNewItem->Value = value;
    2958  return pNewItem;
    2959 }
    2960 
    2961 template<typename T>
    2962 void VmaRawList<T>::PopBack()
    2963 {
    2964  VMA_HEAVY_ASSERT(m_Count > 0);
    2965  ItemType* const pBackItem = m_pBack;
    2966  ItemType* const pPrevItem = pBackItem->pPrev;
    2967  if(pPrevItem != VMA_NULL)
    2968  {
    2969  pPrevItem->pNext = VMA_NULL;
    2970  }
    2971  m_pBack = pPrevItem;
    2972  m_ItemAllocator.Free(pBackItem);
    2973  --m_Count;
    2974 }
    2975 
    2976 template<typename T>
    2977 void VmaRawList<T>::PopFront()
    2978 {
    2979  VMA_HEAVY_ASSERT(m_Count > 0);
    2980  ItemType* const pFrontItem = m_pFront;
    2981  ItemType* const pNextItem = pFrontItem->pNext;
    2982  if(pNextItem != VMA_NULL)
    2983  {
    2984  pNextItem->pPrev = VMA_NULL;
    2985  }
    2986  m_pFront = pNextItem;
    2987  m_ItemAllocator.Free(pFrontItem);
    2988  --m_Count;
    2989 }
    2990 
    2991 template<typename T>
    2992 void VmaRawList<T>::Remove(ItemType* pItem)
    2993 {
    2994  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    2995  VMA_HEAVY_ASSERT(m_Count > 0);
    2996 
    2997  if(pItem->pPrev != VMA_NULL)
    2998  {
    2999  pItem->pPrev->pNext = pItem->pNext;
    3000  }
    3001  else
    3002  {
    3003  VMA_HEAVY_ASSERT(m_pFront == pItem);
    3004  m_pFront = pItem->pNext;
    3005  }
    3006 
    3007  if(pItem->pNext != VMA_NULL)
    3008  {
    3009  pItem->pNext->pPrev = pItem->pPrev;
    3010  }
    3011  else
    3012  {
    3013  VMA_HEAVY_ASSERT(m_pBack == pItem);
    3014  m_pBack = pItem->pPrev;
    3015  }
    3016 
    3017  m_ItemAllocator.Free(pItem);
    3018  --m_Count;
    3019 }
    3020 
    3021 template<typename T>
    3022 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    3023 {
    3024  if(pItem != VMA_NULL)
    3025  {
    3026  ItemType* const prevItem = pItem->pPrev;
    3027  ItemType* const newItem = m_ItemAllocator.Alloc();
    3028  newItem->pPrev = prevItem;
    3029  newItem->pNext = pItem;
    3030  pItem->pPrev = newItem;
    3031  if(prevItem != VMA_NULL)
    3032  {
    3033  prevItem->pNext = newItem;
    3034  }
    3035  else
    3036  {
    3037  VMA_HEAVY_ASSERT(m_pFront == pItem);
    3038  m_pFront = newItem;
    3039  }
    3040  ++m_Count;
    3041  return newItem;
    3042  }
    3043  else
    3044  return PushBack();
    3045 }
    3046 
    3047 template<typename T>
    3048 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    3049 {
    3050  if(pItem != VMA_NULL)
    3051  {
    3052  ItemType* const nextItem = pItem->pNext;
    3053  ItemType* const newItem = m_ItemAllocator.Alloc();
    3054  newItem->pNext = nextItem;
    3055  newItem->pPrev = pItem;
    3056  pItem->pNext = newItem;
    3057  if(nextItem != VMA_NULL)
    3058  {
    3059  nextItem->pPrev = newItem;
    3060  }
    3061  else
    3062  {
    3063  VMA_HEAVY_ASSERT(m_pBack == pItem);
    3064  m_pBack = newItem;
    3065  }
    3066  ++m_Count;
    3067  return newItem;
    3068  }
    3069  else
    3070  return PushFront();
    3071 }
    3072 
    3073 template<typename T>
    3074 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    3075 {
    3076  ItemType* const newItem = InsertBefore(pItem);
    3077  newItem->Value = value;
    3078  return newItem;
    3079 }
    3080 
    3081 template<typename T>
    3082 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    3083 {
    3084  ItemType* const newItem = InsertAfter(pItem);
    3085  newItem->Value = value;
    3086  return newItem;
    3087 }
    3088 
    3089 template<typename T, typename AllocatorT>
    3090 class VmaList
    3091 {
    3092 public:
    3093  class iterator
    3094  {
    3095  public:
    3096  iterator() :
    3097  m_pList(VMA_NULL),
    3098  m_pItem(VMA_NULL)
    3099  {
    3100  }
    3101 
    3102  T& operator*() const
    3103  {
    3104  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3105  return m_pItem->Value;
    3106  }
    3107  T* operator->() const
    3108  {
    3109  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3110  return &m_pItem->Value;
    3111  }
    3112 
    3113  iterator& operator++()
    3114  {
    3115  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3116  m_pItem = m_pItem->pNext;
    3117  return *this;
    3118  }
    3119  iterator& operator--()
    3120  {
    3121  if(m_pItem != VMA_NULL)
    3122  {
    3123  m_pItem = m_pItem->pPrev;
    3124  }
    3125  else
    3126  {
    3127  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    3128  m_pItem = m_pList->Back();
    3129  }
    3130  return *this;
    3131  }
    3132 
    3133  iterator operator++(int)
    3134  {
    3135  iterator result = *this;
    3136  ++*this;
    3137  return result;
    3138  }
    3139  iterator operator--(int)
    3140  {
    3141  iterator result = *this;
    3142  --*this;
    3143  return result;
    3144  }
    3145 
    3146  bool operator==(const iterator& rhs) const
    3147  {
    3148  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    3149  return m_pItem == rhs.m_pItem;
    3150  }
    3151  bool operator!=(const iterator& rhs) const
    3152  {
    3153  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    3154  return m_pItem != rhs.m_pItem;
    3155  }
    3156 
    3157  private:
    3158  VmaRawList<T>* m_pList;
    3159  VmaListItem<T>* m_pItem;
    3160 
    3161  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    3162  m_pList(pList),
    3163  m_pItem(pItem)
    3164  {
    3165  }
    3166 
    3167  friend class VmaList<T, AllocatorT>;
    3168  };
    3169 
    3170  class const_iterator
    3171  {
    3172  public:
    3173  const_iterator() :
    3174  m_pList(VMA_NULL),
    3175  m_pItem(VMA_NULL)
    3176  {
    3177  }
    3178 
    3179  const_iterator(const iterator& src) :
    3180  m_pList(src.m_pList),
    3181  m_pItem(src.m_pItem)
    3182  {
    3183  }
    3184 
    3185  const T& operator*() const
    3186  {
    3187  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3188  return m_pItem->Value;
    3189  }
    3190  const T* operator->() const
    3191  {
    3192  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3193  return &m_pItem->Value;
    3194  }
    3195 
    3196  const_iterator& operator++()
    3197  {
    3198  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3199  m_pItem = m_pItem->pNext;
    3200  return *this;
    3201  }
    3202  const_iterator& operator--()
    3203  {
    3204  if(m_pItem != VMA_NULL)
    3205  {
    3206  m_pItem = m_pItem->pPrev;
    3207  }
    3208  else
    3209  {
    3210  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    3211  m_pItem = m_pList->Back();
    3212  }
    3213  return *this;
    3214  }
    3215 
    3216  const_iterator operator++(int)
    3217  {
    3218  const_iterator result = *this;
    3219  ++*this;
    3220  return result;
    3221  }
    3222  const_iterator operator--(int)
    3223  {
    3224  const_iterator result = *this;
    3225  --*this;
    3226  return result;
    3227  }
    3228 
    3229  bool operator==(const const_iterator& rhs) const
    3230  {
    3231  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    3232  return m_pItem == rhs.m_pItem;
    3233  }
    3234  bool operator!=(const const_iterator& rhs) const
    3235  {
    3236  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    3237  return m_pItem != rhs.m_pItem;
    3238  }
    3239 
    3240  private:
    3241  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    3242  m_pList(pList),
    3243  m_pItem(pItem)
    3244  {
    3245  }
    3246 
    3247  const VmaRawList<T>* m_pList;
    3248  const VmaListItem<T>* m_pItem;
    3249 
    3250  friend class VmaList<T, AllocatorT>;
    3251  };
    3252 
    3253  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    3254 
    3255  bool empty() const { return m_RawList.IsEmpty(); }
    3256  size_t size() const { return m_RawList.GetCount(); }
    3257 
    3258  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    3259  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    3260 
    3261  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    3262  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    3263 
    3264  void clear() { m_RawList.Clear(); }
    3265  void push_back(const T& value) { m_RawList.PushBack(value); }
    3266  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    3267  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    3268 
    3269 private:
    3270  VmaRawList<T> m_RawList;
    3271 };
    3272 
    3273 #endif // #if VMA_USE_STL_LIST
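// Illustrative sketch (not part of the library): exercising the std::list-like
// subset that VmaList exposes, whichever implementation is selected above.
// `pCallbacks` stands for any VkAllocationCallbacks pointer (may be null).
#if 0
static void VmaListUsageSketch(const VkAllocationCallbacks* pCallbacks)
{
    const VmaStlAllocator<uint32_t> allocator(pCallbacks);
    VmaList< uint32_t, VmaStlAllocator<uint32_t> > list(allocator);
    list.push_back(1);
    list.push_back(2);
    list.insert(list.begin(), 0); // Insert before the first element.
    // Iterates over 0, 1, 2.
    for(VmaList< uint32_t, VmaStlAllocator<uint32_t> >::iterator it = list.begin();
        it != list.end();
        ++it)
    {
        const uint32_t value = *it;
        (void)value;
    }
    list.erase(list.begin()); // Removes 0.
    list.clear();
}
#endif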
    3274 
3275 ////////////////////////////////////////////////////////////////////////////////
3276 // class VmaMap
    3277 
    3278 // Unused in this version.
    3279 #if 0
    3280 
    3281 #if VMA_USE_STL_UNORDERED_MAP
    3282 
    3283 #define VmaPair std::pair
    3284 
    3285 #define VMA_MAP_TYPE(KeyT, ValueT) \
    3286  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    3287 
    3288 #else // #if VMA_USE_STL_UNORDERED_MAP
    3289 
    3290 template<typename T1, typename T2>
    3291 struct VmaPair
    3292 {
    3293  T1 first;
    3294  T2 second;
    3295 
    3296  VmaPair() : first(), second() { }
    3297  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    3298 };
    3299 
    3300 /* Class compatible with subset of interface of std::unordered_map.
    3301 KeyT, ValueT must be POD because they will be stored in VmaVector.
    3302 */
    3303 template<typename KeyT, typename ValueT>
    3304 class VmaMap
    3305 {
    3306 public:
    3307  typedef VmaPair<KeyT, ValueT> PairType;
    3308  typedef PairType* iterator;
    3309 
    3310  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    3311 
    3312  iterator begin() { return m_Vector.begin(); }
    3313  iterator end() { return m_Vector.end(); }
    3314 
    3315  void insert(const PairType& pair);
    3316  iterator find(const KeyT& key);
    3317  void erase(iterator it);
    3318 
    3319 private:
    3320  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    3321 };
    3322 
    3323 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    3324 
    3325 template<typename FirstT, typename SecondT>
    3326 struct VmaPairFirstLess
    3327 {
    3328  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    3329  {
    3330  return lhs.first < rhs.first;
    3331  }
    3332  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    3333  {
    3334  return lhs.first < rhsFirst;
    3335  }
    3336 };
    3337 
    3338 template<typename KeyT, typename ValueT>
    3339 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    3340 {
    3341  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3342  m_Vector.data(),
    3343  m_Vector.data() + m_Vector.size(),
    3344  pair,
    3345  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    3346  VmaVectorInsert(m_Vector, indexToInsert, pair);
    3347 }
    3348 
    3349 template<typename KeyT, typename ValueT>
    3350 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    3351 {
    3352  PairType* it = VmaBinaryFindFirstNotLess(
    3353  m_Vector.data(),
    3354  m_Vector.data() + m_Vector.size(),
    3355  key,
    3356  VmaPairFirstLess<KeyT, ValueT>());
    3357  if((it != m_Vector.end()) && (it->first == key))
    3358  {
    3359  return it;
    3360  }
    3361  else
    3362  {
    3363  return m_Vector.end();
    3364  }
    3365 }
    3366 
    3367 template<typename KeyT, typename ValueT>
    3368 void VmaMap<KeyT, ValueT>::erase(iterator it)
    3369 {
    3370  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    3371 }
    3372 
    3373 #endif // #if VMA_USE_STL_UNORDERED_MAP
    3374 
    3375 #endif // #if 0
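// Illustrative sketch (not part of the library): the sorted-vector technique
// the disabled VmaMap above is built on. Pairs stay sorted by key, so both
// insert() and find() reduce to binary search over contiguous storage rather
// than hashing. Values are made up for the example.
#if 0
static void VmaMapTechniqueSketch(const VkAllocationCallbacks* pCallbacks)
{
    typedef VmaPair<uint32_t, uint32_t> PairT;
    const VmaStlAllocator<PairT> allocator(pCallbacks);
    VmaMap<uint32_t, uint32_t> map(allocator);
    map.insert(PairT(5, 50));
    map.insert(PairT(2, 20)); // Lands in front of (5, 50) to keep the order.
    VmaMap<uint32_t, uint32_t>::iterator it = map.find(5);
    if(it != map.end()) // Found: it->second == 50 here.
    {
        map.erase(it);
    }
}
#endif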
    3376 
3377 ////////////////////////////////////////////////////////////////////////////////
3378 
    3379 class VmaDeviceMemoryBlock;
    3380 
    3381 struct VmaAllocation_T
    3382 {
    3383 private:
    3384  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    3385 
    3386  enum FLAGS
    3387  {
    3388  FLAG_USER_DATA_STRING = 0x01,
    3389  };
    3390 
    3391 public:
    3392  enum ALLOCATION_TYPE
    3393  {
    3394  ALLOCATION_TYPE_NONE,
    3395  ALLOCATION_TYPE_BLOCK,
    3396  ALLOCATION_TYPE_DEDICATED,
    3397  };
    3398 
    3399  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
    3400  m_Alignment(1),
    3401  m_Size(0),
    3402  m_pUserData(VMA_NULL),
    3403  m_LastUseFrameIndex(currentFrameIndex),
    3404  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
    3405  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
    3406  m_MapCount(0),
    3407  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    3408  {
    3409  }
    3410 
    3411  ~VmaAllocation_T()
    3412  {
    3413  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
    3414 
    3415  // Check if owned string was freed.
    3416  VMA_ASSERT(m_pUserData == VMA_NULL);
    3417  }
    3418 
    3419  void InitBlockAllocation(
    3420  VmaPool hPool,
    3421  VmaDeviceMemoryBlock* block,
    3422  VkDeviceSize offset,
    3423  VkDeviceSize alignment,
    3424  VkDeviceSize size,
    3425  VmaSuballocationType suballocationType,
    3426  bool mapped,
    3427  bool canBecomeLost)
    3428  {
    3429  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    3430  VMA_ASSERT(block != VMA_NULL);
    3431  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    3432  m_Alignment = alignment;
    3433  m_Size = size;
    3434  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    3435  m_SuballocationType = (uint8_t)suballocationType;
    3436  m_BlockAllocation.m_hPool = hPool;
    3437  m_BlockAllocation.m_Block = block;
    3438  m_BlockAllocation.m_Offset = offset;
    3439  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    3440  }
    3441 
    3442  void InitLost()
    3443  {
    3444  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    3445  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    3446  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    3447  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
    3448  m_BlockAllocation.m_Block = VMA_NULL;
    3449  m_BlockAllocation.m_Offset = 0;
    3450  m_BlockAllocation.m_CanBecomeLost = true;
    3451  }
    3452 
    3453  void ChangeBlockAllocation(
    3454  VmaAllocator hAllocator,
    3455  VmaDeviceMemoryBlock* block,
    3456  VkDeviceSize offset);
    3457 
    3458  // pMappedData not null means allocation is created with MAPPED flag.
    3459  void InitDedicatedAllocation(
    3460  uint32_t memoryTypeIndex,
    3461  VkDeviceMemory hMemory,
    3462  VmaSuballocationType suballocationType,
    3463  void* pMappedData,
    3464  VkDeviceSize size)
    3465  {
    3466  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    3467  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    3468  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
    3469  m_Alignment = 0;
    3470  m_Size = size;
    3471  m_SuballocationType = (uint8_t)suballocationType;
    3472  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    3473  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    3474  m_DedicatedAllocation.m_hMemory = hMemory;
    3475  m_DedicatedAllocation.m_pMappedData = pMappedData;
    3476  }
    3477 
    3478  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    3479  VkDeviceSize GetAlignment() const { return m_Alignment; }
    3480  VkDeviceSize GetSize() const { return m_Size; }
    3481  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    3482  void* GetUserData() const { return m_pUserData; }
    3483  void SetUserData(VmaAllocator hAllocator, void* pUserData);
    3484  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
    3485 
    3486  VmaDeviceMemoryBlock* GetBlock() const
    3487  {
    3488  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    3489  return m_BlockAllocation.m_Block;
    3490  }
    3491  VkDeviceSize GetOffset() const;
    3492  VkDeviceMemory GetMemory() const;
    3493  uint32_t GetMemoryTypeIndex() const;
    3494  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    3495  void* GetMappedData() const;
    3496  bool CanBecomeLost() const;
    3497  VmaPool GetPool() const;
    3498 
    3499  uint32_t GetLastUseFrameIndex() const
    3500  {
    3501  return m_LastUseFrameIndex.load();
    3502  }
    3503  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    3504  {
    3505  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    3506  }
    3507  /*
    3508  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    3509  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    3510  - Else, returns false.
    3511 
    3512  If hAllocation is already lost, assert - you should not call it then.
    3513  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    3514  */
    3515  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    3516 
    3517  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    3518  {
    3519  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    3520  outInfo.blockCount = 1;
    3521  outInfo.allocationCount = 1;
    3522  outInfo.unusedRangeCount = 0;
    3523  outInfo.usedBytes = m_Size;
    3524  outInfo.unusedBytes = 0;
    3525  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    3526  outInfo.unusedRangeSizeMin = UINT64_MAX;
    3527  outInfo.unusedRangeSizeMax = 0;
    3528  }
    3529 
    3530  void BlockAllocMap();
    3531  void BlockAllocUnmap();
    3532  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    3533  void DedicatedAllocUnmap(VmaAllocator hAllocator);
    3534 
    3535 private:
    3536  VkDeviceSize m_Alignment;
    3537  VkDeviceSize m_Size;
    3538  void* m_pUserData;
    3539  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    3540  uint8_t m_Type; // ALLOCATION_TYPE
    3541  uint8_t m_SuballocationType; // VmaSuballocationType
    3542  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    3543  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    3544  uint8_t m_MapCount;
    3545  uint8_t m_Flags; // enum FLAGS
    3546 
    3547  // Allocation out of VmaDeviceMemoryBlock.
    3548  struct BlockAllocation
    3549  {
    3550  VmaPool m_hPool; // Null if belongs to general memory.
    3551  VmaDeviceMemoryBlock* m_Block;
    3552  VkDeviceSize m_Offset;
    3553  bool m_CanBecomeLost;
    3554  };
    3555 
    3556  // Allocation for an object that has its own private VkDeviceMemory.
    3557  struct DedicatedAllocation
    3558  {
    3559  uint32_t m_MemoryTypeIndex;
    3560  VkDeviceMemory m_hMemory;
    3561  void* m_pMappedData; // Not null means memory is mapped.
    3562  };
    3563 
    3564  union
    3565  {
    3566  // Allocation out of VmaDeviceMemoryBlock.
    3567  BlockAllocation m_BlockAllocation;
    3568  // Allocation for an object that has its own private VkDeviceMemory.
    3569  DedicatedAllocation m_DedicatedAllocation;
    3570  };
    3571 
    3572  void FreeUserDataString(VmaAllocator hAllocator);
    3573 };
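// Illustrative sketch (not part of the library): how the m_MapCount byte above
// packs two pieces of state, per the comments on its declaration. Bit 0x80
// (MAP_COUNT_FLAG_PERSISTENT_MAP) marks an allocation created with
// VMA_ALLOCATION_CREATE_MAPPED_BIT; the low 7 bits count vmaMapMemory() calls
// not yet matched by vmaUnmapMemory().
#if 0
static void VmaMapCountPackingSketch(uint8_t mapCount)
{
    const bool persistentlyMapped = (mapCount & 0x80) != 0;
    const uint8_t explicitMapRefCount = mapCount & 0x7F; // At most 0x7F simultaneous mappings.
    const bool isMappedNow = persistentlyMapped || (explicitMapRefCount > 0);
    (void)isMappedNow;
}
#endif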
    3574 
    3575 /*
3576 Represents a region of a VmaDeviceMemoryBlock that is either assigned to an
3577 allocation and returned to the user, or free.
    3578 */
    3579 struct VmaSuballocation
    3580 {
    3581  VkDeviceSize offset;
    3582  VkDeviceSize size;
    3583  VmaAllocation hAllocation;
    3584  VmaSuballocationType type;
    3585 };
    3586 
    3587 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    3588 
    3589 // Cost of one additional allocation lost, as equivalent in bytes.
    3590 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    3591 
    3592 /*
    3593 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    3594 
    3595 If canMakeOtherLost was false:
    3596 - item points to a FREE suballocation.
    3597 - itemsToMakeLostCount is 0.
    3598 
    3599 If canMakeOtherLost was true:
    3600 - item points to first of sequence of suballocations, which are either FREE,
    3601  or point to VmaAllocations that can become lost.
    3602 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    3603  the requested allocation to succeed.
    3604 */
    3605 struct VmaAllocationRequest
    3606 {
    3607  VkDeviceSize offset;
    3608  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    3609  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    3610  VmaSuballocationList::iterator item;
    3611  size_t itemsToMakeLostCount;
    3612 
    3613  VkDeviceSize CalcCost() const
    3614  {
    3615  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    3616  }
    3617 };
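// Worked example for CalcCost() above: a request overlapping 3 MiB of
// allocations that would have to be made lost (sumItemSize = 3145728), two of
// them (itemsToMakeLostCount = 2), costs 3145728 + 2 * 1048576 = 5242880
// equivalent bytes. Candidate requests are compared by this cost, so losing
// fewer and smaller allocations is preferred.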
    3618 
    3619 /*
    3620 Data structure used for bookkeeping of allocations and unused ranges of memory
    3621 in a single VkDeviceMemory block.
    3622 */
    3623 class VmaBlockMetadata
    3624 {
    3625 public:
    3626  VmaBlockMetadata(VmaAllocator hAllocator);
    3627  ~VmaBlockMetadata();
    3628  void Init(VkDeviceSize size);
    3629 
    3630  // Validates all data structures inside this object. If not valid, returns false.
    3631  bool Validate() const;
    3632  VkDeviceSize GetSize() const { return m_Size; }
    3633  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    3634  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    3635  VkDeviceSize GetUnusedRangeSizeMax() const;
    3636  // Returns true if this block is empty - contains only single free suballocation.
    3637  bool IsEmpty() const;
    3638 
    3639  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    3640  void AddPoolStats(VmaPoolStats& inoutStats) const;
    3641 
    3642 #if VMA_STATS_STRING_ENABLED
    3643  void PrintDetailedMap(class VmaJsonWriter& json) const;
    3644 #endif
    3645 
    3646  // Creates trivial request for case when block is empty.
    3647  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
    3648 
    3649  // Tries to find a place for suballocation with given parameters inside this block.
    3650  // If succeeded, fills pAllocationRequest and returns true.
    3651  // If failed, returns false.
    3652  bool CreateAllocationRequest(
    3653  uint32_t currentFrameIndex,
    3654  uint32_t frameInUseCount,
    3655  VkDeviceSize bufferImageGranularity,
    3656  VkDeviceSize allocSize,
    3657  VkDeviceSize allocAlignment,
    3658  VmaSuballocationType allocType,
    3659  bool canMakeOtherLost,
    3660  VmaAllocationRequest* pAllocationRequest);
    3661 
    3662  bool MakeRequestedAllocationsLost(
    3663  uint32_t currentFrameIndex,
    3664  uint32_t frameInUseCount,
    3665  VmaAllocationRequest* pAllocationRequest);
    3666 
    3667  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    3668 
    3669  // Makes actual allocation based on request. Request must already be checked and valid.
    3670  void Alloc(
    3671  const VmaAllocationRequest& request,
    3672  VmaSuballocationType type,
    3673  VkDeviceSize allocSize,
    3674  VmaAllocation hAllocation);
    3675 
    3676  // Frees suballocation assigned to given memory region.
    3677  void Free(const VmaAllocation allocation);
    3678  void FreeAtOffset(VkDeviceSize offset);
    3679 
    3680 private:
    3681  VkDeviceSize m_Size;
    3682  uint32_t m_FreeCount;
    3683  VkDeviceSize m_SumFreeSize;
    3684  VmaSuballocationList m_Suballocations;
    3685  // Suballocations that are free and have size greater than certain threshold.
    3686  // Sorted by size, ascending.
    3687  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    3688 
    3689  bool ValidateFreeSuballocationList() const;
    3690 
3691  // Checks if requested suballocation with given parameters can be placed in given suballocItem.
    3692  // If yes, fills pOffset and returns true. If no, returns false.
    3693  bool CheckAllocation(
    3694  uint32_t currentFrameIndex,
    3695  uint32_t frameInUseCount,
    3696  VkDeviceSize bufferImageGranularity,
    3697  VkDeviceSize allocSize,
    3698  VkDeviceSize allocAlignment,
    3699  VmaSuballocationType allocType,
    3700  VmaSuballocationList::const_iterator suballocItem,
    3701  bool canMakeOtherLost,
    3702  VkDeviceSize* pOffset,
    3703  size_t* itemsToMakeLostCount,
    3704  VkDeviceSize* pSumFreeSize,
    3705  VkDeviceSize* pSumItemSize) const;
3706  // Merges given free suballocation with the following one, which must also be free.
    3707  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    3708  // Releases given suballocation, making it free.
    3709  // Merges it with adjacent free suballocations if applicable.
    3710  // Returns iterator to new free suballocation at this place.
    3711  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3712  // Inserts given free suballocation into the sorted list
3713  // m_FreeSuballocationsBySize, if it's suitable.
    3714  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3715  // Removes given free suballocation from the sorted list
3716  // m_FreeSuballocationsBySize, if it's suitable.
    3717  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    3718 };
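// Illustrative sketch (not part of the library): why m_FreeSuballocationsBySize
// above is kept sorted by size, ascending. A best-fit lookup becomes a binary
// search for the smallest free range that still fits, instead of a scan over
// the whole suballocation list. Sketched over a plain vector of sizes; names
// are made up.
#if 0
static VkDeviceSize VmaBestFitSketch(
    const VmaVector< VkDeviceSize, VmaStlAllocator<VkDeviceSize> >& freeSizesAscending,
    VkDeviceSize allocSize)
{
    // Lower bound: first element not less than allocSize.
    size_t lo = 0, hi = freeSizesAscending.size();
    while(lo < hi)
    {
        const size_t mid = lo + (hi - lo) / 2;
        if(freeSizesAscending[mid] < allocSize)
            lo = mid + 1;
        else
            hi = mid;
    }
    return (lo < freeSizesAscending.size()) ? freeSizesAscending[lo] : 0; // 0 = nothing fits.
}
#endif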
    3719 
    3720 // Helper class that represents mapped memory. Synchronized internally.
    3721 class VmaDeviceMemoryMapping
    3722 {
    3723 public:
    3724  VmaDeviceMemoryMapping();
    3725  ~VmaDeviceMemoryMapping();
    3726 
    3727  void* GetMappedData() const { return m_pMappedData; }
    3728 
    3729  // ppData can be null.
    3730  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData);
    3731  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count);
    3732 
    3733 private:
    3734  VMA_MUTEX m_Mutex;
    3735  uint32_t m_MapCount;
    3736  void* m_pMappedData;
    3737 };
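// Illustrative sketch (not part of the library): the reference-counting
// contract behind Map()/Unmap() above, simplified to single increments (the
// real methods take a `count` and lock m_Mutex). Only the 0 -> 1 transition
// maps the VkDeviceMemory and only the last Unmap unmaps it, so all
// allocations within one block share a single mapped pointer.
#if 0
struct VmaRefCountedMappingSketch
{
    uint32_t m_MapCount;
    void* m_pMappedData;

    VmaRefCountedMappingSketch() : m_MapCount(0), m_pMappedData(VMA_NULL) { }

    void* Map()
    {
        if(m_MapCount++ == 0)
        {
            // The real implementation would call vkMapMemory(device, hMemory,
            // 0, VK_WHOLE_SIZE, 0, &m_pMappedData) here - cf. DedicatedAllocMap below.
        }
        return m_pMappedData;
    }
    void Unmap()
    {
        VMA_ASSERT(m_MapCount > 0 && "Unbalanced Unmap.");
        if(--m_MapCount == 0)
        {
            // The real implementation would call vkUnmapMemory(device, hMemory) here.
            m_pMappedData = VMA_NULL;
        }
    }
};
#endif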
    3738 
    3739 /*
    3740 Represents a single block of device memory (`VkDeviceMemory`) with all the
    3741 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
    3742 
    3743 Thread-safety: This class must be externally synchronized.
    3744 */
    3745 class VmaDeviceMemoryBlock
    3746 {
    3747 public:
    3748  uint32_t m_MemoryTypeIndex;
    3749  VkDeviceMemory m_hMemory;
    3750  VmaDeviceMemoryMapping m_Mapping;
    3751  VmaBlockMetadata m_Metadata;
    3752 
    3753  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    3754 
    3755  ~VmaDeviceMemoryBlock()
    3756  {
    3757  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    3758  }
    3759 
    3760  // Always call after construction.
    3761  void Init(
    3762  uint32_t newMemoryTypeIndex,
    3763  VkDeviceMemory newMemory,
    3764  VkDeviceSize newSize);
    3765  // Always call before destruction.
    3766  void Destroy(VmaAllocator allocator);
    3767 
    3768  // Validates all data structures inside this object. If not valid, returns false.
    3769  bool Validate() const;
    3770 
    3771  // ppData can be null.
    3772  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    3773  void Unmap(VmaAllocator hAllocator, uint32_t count);
    3774 };
    3775 
    3776 struct VmaPointerLess
    3777 {
    3778  bool operator()(const void* lhs, const void* rhs) const
    3779  {
    3780  return lhs < rhs;
    3781  }
    3782 };
    3783 
    3784 class VmaDefragmentator;
    3785 
    3786 /*
    3787 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    3788 Vulkan memory type.
    3789 
    3790 Synchronized internally with a mutex.
    3791 */
    3792 struct VmaBlockVector
    3793 {
    3794  VmaBlockVector(
    3795  VmaAllocator hAllocator,
    3796  uint32_t memoryTypeIndex,
    3797  VkDeviceSize preferredBlockSize,
    3798  size_t minBlockCount,
    3799  size_t maxBlockCount,
    3800  VkDeviceSize bufferImageGranularity,
    3801  uint32_t frameInUseCount,
    3802  bool isCustomPool);
    3803  ~VmaBlockVector();
    3804 
    3805  VkResult CreateMinBlocks();
    3806 
    3807  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    3808  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    3809  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    3810  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    3811 
    3812  void GetPoolStats(VmaPoolStats* pStats);
    3813 
    3814  bool IsEmpty() const { return m_Blocks.empty(); }
    3815 
    3816  VkResult Allocate(
    3817  VmaPool hCurrentPool,
    3818  uint32_t currentFrameIndex,
    3819  const VkMemoryRequirements& vkMemReq,
    3820  const VmaAllocationCreateInfo& createInfo,
    3821  VmaSuballocationType suballocType,
    3822  VmaAllocation* pAllocation);
    3823 
    3824  void Free(
    3825  VmaAllocation hAllocation);
    3826 
    3827  // Adds statistics of this BlockVector to pStats.
    3828  void AddStats(VmaStats* pStats);
    3829 
    3830 #if VMA_STATS_STRING_ENABLED
    3831  void PrintDetailedMap(class VmaJsonWriter& json);
    3832 #endif
    3833 
    3834  void MakePoolAllocationsLost(
    3835  uint32_t currentFrameIndex,
    3836  size_t* pLostAllocationCount);
    3837 
    3838  VmaDefragmentator* EnsureDefragmentator(
    3839  VmaAllocator hAllocator,
    3840  uint32_t currentFrameIndex);
    3841 
    3842  VkResult Defragment(
    3843  VmaDefragmentationStats* pDefragmentationStats,
    3844  VkDeviceSize& maxBytesToMove,
    3845  uint32_t& maxAllocationsToMove);
    3846 
    3847  void DestroyDefragmentator();
    3848 
    3849 private:
    3850  friend class VmaDefragmentator;
    3851 
    3852  const VmaAllocator m_hAllocator;
    3853  const uint32_t m_MemoryTypeIndex;
    3854  const VkDeviceSize m_PreferredBlockSize;
    3855  const size_t m_MinBlockCount;
    3856  const size_t m_MaxBlockCount;
    3857  const VkDeviceSize m_BufferImageGranularity;
    3858  const uint32_t m_FrameInUseCount;
    3859  const bool m_IsCustomPool;
    3860  VMA_MUTEX m_Mutex;
    3861  // Incrementally sorted by sumFreeSize, ascending.
    3862  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3863  /* There can be at most one block that is completely empty - a
3864  hysteresis to avoid the pessimistic case of alternating creation and
3865  destruction of a VkDeviceMemory. */
    3866  bool m_HasEmptyBlock;
    3867  VmaDefragmentator* m_pDefragmentator;
    3868 
    3869  size_t CalcMaxBlockSize() const;
    3870 
    3871  // Finds and removes given block from vector.
    3872  void Remove(VmaDeviceMemoryBlock* pBlock);
    3873 
    3874  // Performs single step in sorting m_Blocks. They may not be fully sorted
    3875  // after this call.
    3876  void IncrementallySortBlocks();
    3877 
    3878  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    3879 };
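// Illustrative sketch (not part of the library): one way to realize the
// "incrementally sorted by sumFreeSize" invariant noted on m_Blocks above.
// Instead of fully re-sorting after every allocation or free, each call fixes
// at most one out-of-order pair, so the vector converges toward sorted order
// over repeated calls. Sketched over a plain vector of sizes.
#if 0
static void VmaIncrementalSortStepSketch(
    VmaVector< VkDeviceSize, VmaStlAllocator<VkDeviceSize> >& sumFreeSizes)
{
    for(size_t i = 1; i < sumFreeSizes.size(); ++i)
    {
        if(sumFreeSizes[i - 1] > sumFreeSizes[i])
        {
            VMA_SWAP(sumFreeSizes[i - 1], sumFreeSizes[i]);
            return; // One step per call is enough to amortize the cost.
        }
    }
}
#endif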
    3880 
    3881 struct VmaPool_T
    3882 {
    3883 public:
    3884  VmaBlockVector m_BlockVector;
    3885 
    3886  // Takes ownership.
    3887  VmaPool_T(
    3888  VmaAllocator hAllocator,
    3889  const VmaPoolCreateInfo& createInfo);
    3890  ~VmaPool_T();
    3891 
    3892  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
    3893 
    3894 #if VMA_STATS_STRING_ENABLED
    3895  //void PrintDetailedMap(class VmaStringBuilder& sb);
    3896 #endif
    3897 };
    3898 
    3899 class VmaDefragmentator
    3900 {
    3901  const VmaAllocator m_hAllocator;
    3902  VmaBlockVector* const m_pBlockVector;
    3903  uint32_t m_CurrentFrameIndex;
    3904  VkDeviceSize m_BytesMoved;
    3905  uint32_t m_AllocationsMoved;
    3906 
    3907  struct AllocationInfo
    3908  {
    3909  VmaAllocation m_hAllocation;
    3910  VkBool32* m_pChanged;
    3911 
    3912  AllocationInfo() :
    3913  m_hAllocation(VK_NULL_HANDLE),
    3914  m_pChanged(VMA_NULL)
    3915  {
    3916  }
    3917  };
    3918 
    3919  struct AllocationInfoSizeGreater
    3920  {
    3921  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    3922  {
    3923  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    3924  }
    3925  };
    3926 
    3927  // Used between AddAllocation and Defragment.
    3928  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    3929 
    3930  struct BlockInfo
    3931  {
    3932  VmaDeviceMemoryBlock* m_pBlock;
    3933  bool m_HasNonMovableAllocations;
    3934  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    3935 
    3936  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    3937  m_pBlock(VMA_NULL),
    3938  m_HasNonMovableAllocations(true),
    3939  m_Allocations(pAllocationCallbacks),
    3940  m_pMappedDataForDefragmentation(VMA_NULL)
    3941  {
    3942  }
    3943 
    3944  void CalcHasNonMovableAllocations()
    3945  {
    3946  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
    3947  const size_t defragmentAllocCount = m_Allocations.size();
    3948  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    3949  }
    3950 
    3951  void SortAllocationsBySizeDescecnding()
    3952  {
    3953  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    3954  }
    3955 
    3956  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
    3957  void Unmap(VmaAllocator hAllocator);
    3958 
    3959  private:
    3960  // Not null if mapped for defragmentation only, not originally mapped.
    3961  void* m_pMappedDataForDefragmentation;
    3962  };
    3963 
    3964  struct BlockPointerLess
    3965  {
    3966  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    3967  {
    3968  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    3969  }
    3970  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    3971  {
    3972  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    3973  }
    3974  };
    3975 
    3976  // 1. Blocks with some non-movable allocations go first.
    3977  // 2. Blocks with smaller sumFreeSize go first.
    3978  struct BlockInfoCompareMoveDestination
    3979  {
    3980  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    3981  {
    3982  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    3983  {
    3984  return true;
    3985  }
    3986  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    3987  {
    3988  return false;
    3989  }
    3990  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
    3991  {
    3992  return true;
    3993  }
    3994  return false;
    3995  }
    3996  };
    3997 
    3998  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    3999  BlockInfoVector m_Blocks;
    4000 
    4001  VkResult DefragmentRound(
    4002  VkDeviceSize maxBytesToMove,
    4003  uint32_t maxAllocationsToMove);
    4004 
    4005  static bool MoveMakesSense(
    4006  size_t dstBlockIndex, VkDeviceSize dstOffset,
    4007  size_t srcBlockIndex, VkDeviceSize srcOffset);
    4008 
    4009 public:
    4010  VmaDefragmentator(
    4011  VmaAllocator hAllocator,
    4012  VmaBlockVector* pBlockVector,
    4013  uint32_t currentFrameIndex);
    4014 
    4015  ~VmaDefragmentator();
    4016 
    4017  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    4018  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    4019 
    4020  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    4021 
    4022  VkResult Defragment(
    4023  VkDeviceSize maxBytesToMove,
    4024  uint32_t maxAllocationsToMove);
    4025 };
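// Illustrative sketch (not part of the library): the calling sequence
// VmaDefragmentator is designed for, as driven by VmaBlockVector::Defragment().
// Variable names are made up.
#if 0
static void VmaDefragmentatorFlowSketch(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    VmaAllocation* pAllocations,
    VkBool32* pAllocationsChanged,
    size_t allocationCount)
{
    VmaDefragmentator defragmentator(hAllocator, pBlockVector, currentFrameIndex);

    // 1. Register candidate allocations. Each pChanged receives VK_TRUE if the
    //    corresponding allocation ends up moved.
    for(size_t i = 0; i < allocationCount; ++i)
    {
        defragmentator.AddAllocation(pAllocations[i], pAllocationsChanged + i);
    }

    // 2. Move allocations until the byte / move budget is exhausted.
    const VkResult res = defragmentator.Defragment(VK_WHOLE_SIZE, UINT32_MAX);

    // 3. Collect statistics.
    const VkDeviceSize bytesMoved = defragmentator.GetBytesMoved();
    const uint32_t allocationsMoved = defragmentator.GetAllocationsMoved();
    (void)res; (void)bytesMoved; (void)allocationsMoved;
}
#endif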
    4026 
    4027 // Main allocator object.
    4028 struct VmaAllocator_T
    4029 {
    4030  bool m_UseMutex;
    4031  bool m_UseKhrDedicatedAllocation;
    4032  VkDevice m_hDevice;
    4033  bool m_AllocationCallbacksSpecified;
    4034  VkAllocationCallbacks m_AllocationCallbacks;
    4035  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    4036 
4037  // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
    4038  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    4039  VMA_MUTEX m_HeapSizeLimitMutex;
    4040 
    4041  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    4042  VkPhysicalDeviceMemoryProperties m_MemProps;
    4043 
    4044  // Default pools.
    4045  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    4046 
    4047  // Each vector is sorted by memory (handle value).
    4048  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    4049  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    4050  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    4051 
    4052  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    4053  ~VmaAllocator_T();
    4054 
    4055  const VkAllocationCallbacks* GetAllocationCallbacks() const
    4056  {
    4057  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    4058  }
    4059  const VmaVulkanFunctions& GetVulkanFunctions() const
    4060  {
    4061  return m_VulkanFunctions;
    4062  }
    4063 
    4064  VkDeviceSize GetBufferImageGranularity() const
    4065  {
    4066  return VMA_MAX(
    4067  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    4068  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    4069  }
    4070 
    4071  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    4072  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    4073 
    4074  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    4075  {
    4076  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    4077  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    4078  }
    4079 
    4080  void GetBufferMemoryRequirements(
    4081  VkBuffer hBuffer,
    4082  VkMemoryRequirements& memReq,
    4083  bool& requiresDedicatedAllocation,
    4084  bool& prefersDedicatedAllocation) const;
    4085  void GetImageMemoryRequirements(
    4086  VkImage hImage,
    4087  VkMemoryRequirements& memReq,
    4088  bool& requiresDedicatedAllocation,
    4089  bool& prefersDedicatedAllocation) const;
    4090 
    4091  // Main allocation function.
    4092  VkResult AllocateMemory(
    4093  const VkMemoryRequirements& vkMemReq,
    4094  bool requiresDedicatedAllocation,
    4095  bool prefersDedicatedAllocation,
    4096  VkBuffer dedicatedBuffer,
    4097  VkImage dedicatedImage,
    4098  const VmaAllocationCreateInfo& createInfo,
    4099  VmaSuballocationType suballocType,
    4100  VmaAllocation* pAllocation);
    4101 
    4102  // Main deallocation function.
    4103  void FreeMemory(const VmaAllocation allocation);
    4104 
    4105  void CalculateStats(VmaStats* pStats);
    4106 
    4107 #if VMA_STATS_STRING_ENABLED
    4108  void PrintDetailedMap(class VmaJsonWriter& json);
    4109 #endif
    4110 
    4111  VkResult Defragment(
    4112  VmaAllocation* pAllocations,
    4113  size_t allocationCount,
    4114  VkBool32* pAllocationsChanged,
    4115  const VmaDefragmentationInfo* pDefragmentationInfo,
    4116  VmaDefragmentationStats* pDefragmentationStats);
    4117 
    4118  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    4119  bool TouchAllocation(VmaAllocation hAllocation);
    4120 
    4121  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    4122  void DestroyPool(VmaPool pool);
    4123  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    4124 
    4125  void SetCurrentFrameIndex(uint32_t frameIndex);
    4126 
    4127  void MakePoolAllocationsLost(
    4128  VmaPool hPool,
    4129  size_t* pLostAllocationCount);
    4130 
    4131  void CreateLostAllocation(VmaAllocation* pAllocation);
    4132 
    4133  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    4134  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    4135 
    4136  VkResult Map(VmaAllocation hAllocation, void** ppData);
    4137  void Unmap(VmaAllocation hAllocation);
    4138 
    4139 private:
    4140  VkDeviceSize m_PreferredLargeHeapBlockSize;
    4141 
    4142  VkPhysicalDevice m_PhysicalDevice;
    4143  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    4144 
    4145  VMA_MUTEX m_PoolsMutex;
    4146  // Protected by m_PoolsMutex. Sorted by pointer value.
    4147  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    4148 
    4149  VmaVulkanFunctions m_VulkanFunctions;
    4150 
    4151  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    4152 
    4153  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    4154 
    4155  VkResult AllocateMemoryOfType(
    4156  const VkMemoryRequirements& vkMemReq,
    4157  bool dedicatedAllocation,
    4158  VkBuffer dedicatedBuffer,
    4159  VkImage dedicatedImage,
    4160  const VmaAllocationCreateInfo& createInfo,
    4161  uint32_t memTypeIndex,
    4162  VmaSuballocationType suballocType,
    4163  VmaAllocation* pAllocation);
    4164 
    4165  // Allocates and registers new VkDeviceMemory specifically for single allocation.
    4166  VkResult AllocateDedicatedMemory(
    4167  VkDeviceSize size,
    4168  VmaSuballocationType suballocType,
    4169  uint32_t memTypeIndex,
    4170  bool map,
    4171  bool isUserDataString,
    4172  void* pUserData,
    4173  VkBuffer dedicatedBuffer,
    4174  VkImage dedicatedImage,
    4175  VmaAllocation* pAllocation);
    4176 
4177  // Frees given allocation, created as dedicated memory: unregisters it and releases its VkDeviceMemory.
    4178  void FreeDedicatedMemory(VmaAllocation allocation);
    4179 };
    4180 
4181 ////////////////////////////////////////////////////////////////////////////////
4182 // Memory allocation #2 after VmaAllocator_T definition
    4183 
    4184 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    4185 {
    4186  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    4187 }
    4188 
    4189 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    4190 {
    4191  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    4192 }
    4193 
    4194 template<typename T>
    4195 static T* VmaAllocate(VmaAllocator hAllocator)
    4196 {
    4197  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    4198 }
    4199 
    4200 template<typename T>
    4201 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    4202 {
    4203  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    4204 }
    4205 
    4206 template<typename T>
    4207 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    4208 {
    4209  if(ptr != VMA_NULL)
    4210  {
    4211  ptr->~T();
    4212  VmaFree(hAllocator, ptr);
    4213  }
    4214 }
    4215 
    4216 template<typename T>
    4217 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    4218 {
    4219  if(ptr != VMA_NULL)
    4220  {
    4221  for(size_t i = count; i--; )
    4222  ptr[i].~T();
    4223  VmaFree(hAllocator, ptr);
    4224  }
    4225 }
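// Illustrative sketch (not part of the library): pairing the typed helpers
// above. VmaAllocate<T>() returns raw, suitably aligned storage from the
// allocator's callbacks (no constructor is run); vma_delete() runs the
// destructor and returns the storage through VmaFree().
#if 0
static void VmaTypedAllocSketch(VmaAllocator hAllocator)
{
    uint32_t* const p = VmaAllocate<uint32_t>(hAllocator);
    *p = 7; // Trivial type, so plain assignment without placement-new is fine here.
    vma_delete(hAllocator, p); // ~uint32_t() is a no-op; then VmaFree().
}
#endif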
    4226 
4227 ////////////////////////////////////////////////////////////////////////////////
4228 // VmaStringBuilder
    4229 
    4230 #if VMA_STATS_STRING_ENABLED
    4231 
    4232 class VmaStringBuilder
    4233 {
    4234 public:
    4235  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    4236  size_t GetLength() const { return m_Data.size(); }
    4237  const char* GetData() const { return m_Data.data(); }
    4238 
    4239  void Add(char ch) { m_Data.push_back(ch); }
    4240  void Add(const char* pStr);
    4241  void AddNewLine() { Add('\n'); }
    4242  void AddNumber(uint32_t num);
    4243  void AddNumber(uint64_t num);
    4244  void AddPointer(const void* ptr);
    4245 
    4246 private:
    4247  VmaVector< char, VmaStlAllocator<char> > m_Data;
    4248 };
    4249 
    4250 void VmaStringBuilder::Add(const char* pStr)
    4251 {
    4252  const size_t strLen = strlen(pStr);
    4253  if(strLen > 0)
    4254  {
    4255  const size_t oldCount = m_Data.size();
    4256  m_Data.resize(oldCount + strLen);
    4257  memcpy(m_Data.data() + oldCount, pStr, strLen);
    4258  }
    4259 }
    4260 
    4261 void VmaStringBuilder::AddNumber(uint32_t num)
    4262 {
    4263  char buf[11];
    4264  VmaUint32ToStr(buf, sizeof(buf), num);
    4265  Add(buf);
    4266 }
    4267 
    4268 void VmaStringBuilder::AddNumber(uint64_t num)
    4269 {
    4270  char buf[21];
    4271  VmaUint64ToStr(buf, sizeof(buf), num);
    4272  Add(buf);
    4273 }
    4274 
    4275 void VmaStringBuilder::AddPointer(const void* ptr)
    4276 {
    4277  char buf[21];
    4278  VmaPtrToStr(buf, sizeof(buf), ptr);
    4279  Add(buf);
    4280 }
    4281 
    4282 #endif // #if VMA_STATS_STRING_ENABLED
    4283 
4284 ////////////////////////////////////////////////////////////////////////////////
4285 // VmaJsonWriter
    4286 
    4287 #if VMA_STATS_STRING_ENABLED
    4288 
    4289 class VmaJsonWriter
    4290 {
    4291 public:
    4292  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    4293  ~VmaJsonWriter();
    4294 
    4295  void BeginObject(bool singleLine = false);
    4296  void EndObject();
    4297 
    4298  void BeginArray(bool singleLine = false);
    4299  void EndArray();
    4300 
    4301  void WriteString(const char* pStr);
    4302  void BeginString(const char* pStr = VMA_NULL);
    4303  void ContinueString(const char* pStr);
    4304  void ContinueString(uint32_t n);
    4305  void ContinueString(uint64_t n);
    4306  void ContinueString_Pointer(const void* ptr);
    4307  void EndString(const char* pStr = VMA_NULL);
    4308 
    4309  void WriteNumber(uint32_t n);
    4310  void WriteNumber(uint64_t n);
    4311  void WriteBool(bool b);
    4312  void WriteNull();
    4313 
    4314 private:
    4315  static const char* const INDENT;
    4316 
    4317  enum COLLECTION_TYPE
    4318  {
    4319  COLLECTION_TYPE_OBJECT,
    4320  COLLECTION_TYPE_ARRAY,
    4321  };
    4322  struct StackItem
    4323  {
    4324  COLLECTION_TYPE type;
    4325  uint32_t valueCount;
    4326  bool singleLineMode;
    4327  };
    4328 
    4329  VmaStringBuilder& m_SB;
    4330  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    4331  bool m_InsideString;
    4332 
    4333  void BeginValue(bool isString);
    4334  void WriteIndent(bool oneLess = false);
    4335 };
    4336 
    4337 const char* const VmaJsonWriter::INDENT = " ";
    4338 
    4339 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    4340  m_SB(sb),
    4341  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    4342  m_InsideString(false)
    4343 {
    4344 }
    4345 
    4346 VmaJsonWriter::~VmaJsonWriter()
    4347 {
    4348  VMA_ASSERT(!m_InsideString);
    4349  VMA_ASSERT(m_Stack.empty());
    4350 }
    4351 
    4352 void VmaJsonWriter::BeginObject(bool singleLine)
    4353 {
    4354  VMA_ASSERT(!m_InsideString);
    4355 
    4356  BeginValue(false);
    4357  m_SB.Add('{');
    4358 
    4359  StackItem item;
    4360  item.type = COLLECTION_TYPE_OBJECT;
    4361  item.valueCount = 0;
    4362  item.singleLineMode = singleLine;
    4363  m_Stack.push_back(item);
    4364 }
    4365 
    4366 void VmaJsonWriter::EndObject()
    4367 {
    4368  VMA_ASSERT(!m_InsideString);
    4369 
    4370  WriteIndent(true);
    4371  m_SB.Add('}');
    4372 
    4373  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    4374  m_Stack.pop_back();
    4375 }
    4376 
    4377 void VmaJsonWriter::BeginArray(bool singleLine)
    4378 {
    4379  VMA_ASSERT(!m_InsideString);
    4380 
    4381  BeginValue(false);
    4382  m_SB.Add('[');
    4383 
    4384  StackItem item;
    4385  item.type = COLLECTION_TYPE_ARRAY;
    4386  item.valueCount = 0;
    4387  item.singleLineMode = singleLine;
    4388  m_Stack.push_back(item);
    4389 }
    4390 
    4391 void VmaJsonWriter::EndArray()
    4392 {
    4393  VMA_ASSERT(!m_InsideString);
    4394 
    4395  WriteIndent(true);
    4396  m_SB.Add(']');
    4397 
    4398  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    4399  m_Stack.pop_back();
    4400 }
    4401 
    4402 void VmaJsonWriter::WriteString(const char* pStr)
    4403 {
    4404  BeginString(pStr);
    4405  EndString();
    4406 }
    4407 
    4408 void VmaJsonWriter::BeginString(const char* pStr)
    4409 {
    4410  VMA_ASSERT(!m_InsideString);
    4411 
    4412  BeginValue(true);
    4413  m_SB.Add('"');
    4414  m_InsideString = true;
    4415  if(pStr != VMA_NULL && pStr[0] != '\0')
    4416  {
    4417  ContinueString(pStr);
    4418  }
    4419 }
    4420 
    4421 void VmaJsonWriter::ContinueString(const char* pStr)
    4422 {
    4423  VMA_ASSERT(m_InsideString);
    4424 
    4425  const size_t strLen = strlen(pStr);
    4426  for(size_t i = 0; i < strLen; ++i)
    4427  {
    4428  char ch = pStr[i];
4429  if(ch == '\\')
    4430  {
    4431  m_SB.Add("\\\\");
    4432  }
    4433  else if(ch == '"')
    4434  {
    4435  m_SB.Add("\\\"");
    4436  }
    4437  else if(ch >= 32)
    4438  {
    4439  m_SB.Add(ch);
    4440  }
    4441  else switch(ch)
    4442  {
    4443  case '\b':
    4444  m_SB.Add("\\b");
    4445  break;
    4446  case '\f':
    4447  m_SB.Add("\\f");
    4448  break;
    4449  case '\n':
    4450  m_SB.Add("\\n");
    4451  break;
    4452  case '\r':
    4453  m_SB.Add("\\r");
    4454  break;
    4455  case '\t':
    4456  m_SB.Add("\\t");
    4457  break;
    4458  default:
    4459  VMA_ASSERT(0 && "Character not currently supported.");
    4460  break;
    4461  }
    4462  }
    4463 }
    4464 
    4465 void VmaJsonWriter::ContinueString(uint32_t n)
    4466 {
    4467  VMA_ASSERT(m_InsideString);
    4468  m_SB.AddNumber(n);
    4469 }
    4470 
    4471 void VmaJsonWriter::ContinueString(uint64_t n)
    4472 {
    4473  VMA_ASSERT(m_InsideString);
    4474  m_SB.AddNumber(n);
    4475 }
    4476 
    4477 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    4478 {
    4479  VMA_ASSERT(m_InsideString);
    4480  m_SB.AddPointer(ptr);
    4481 }
    4482 
    4483 void VmaJsonWriter::EndString(const char* pStr)
    4484 {
    4485  VMA_ASSERT(m_InsideString);
    4486  if(pStr != VMA_NULL && pStr[0] != '\0')
    4487  {
    4488  ContinueString(pStr);
    4489  }
    4490  m_SB.Add('"');
    4491  m_InsideString = false;
    4492 }
    4493 
    4494 void VmaJsonWriter::WriteNumber(uint32_t n)
    4495 {
    4496  VMA_ASSERT(!m_InsideString);
    4497  BeginValue(false);
    4498  m_SB.AddNumber(n);
    4499 }
    4500 
    4501 void VmaJsonWriter::WriteNumber(uint64_t n)
    4502 {
    4503  VMA_ASSERT(!m_InsideString);
    4504  BeginValue(false);
    4505  m_SB.AddNumber(n);
    4506 }
    4507 
    4508 void VmaJsonWriter::WriteBool(bool b)
    4509 {
    4510  VMA_ASSERT(!m_InsideString);
    4511  BeginValue(false);
    4512  m_SB.Add(b ? "true" : "false");
    4513 }
    4514 
    4515 void VmaJsonWriter::WriteNull()
    4516 {
    4517  VMA_ASSERT(!m_InsideString);
    4518  BeginValue(false);
    4519  m_SB.Add("null");
    4520 }
    4521 
    4522 void VmaJsonWriter::BeginValue(bool isString)
    4523 {
    4524  if(!m_Stack.empty())
    4525  {
    4526  StackItem& currItem = m_Stack.back();
    4527  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    4528  currItem.valueCount % 2 == 0)
    4529  {
    4530  VMA_ASSERT(isString);
    4531  }
    4532 
    4533  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    4534  currItem.valueCount % 2 != 0)
    4535  {
    4536  m_SB.Add(": ");
    4537  }
    4538  else if(currItem.valueCount > 0)
    4539  {
    4540  m_SB.Add(", ");
    4541  WriteIndent();
    4542  }
    4543  else
    4544  {
    4545  WriteIndent();
    4546  }
    4547  ++currItem.valueCount;
    4548  }
    4549 }
    4550 
    4551 void VmaJsonWriter::WriteIndent(bool oneLess)
    4552 {
    4553  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    4554  {
    4555  m_SB.AddNewLine();
    4556 
    4557  size_t count = m_Stack.size();
    4558  if(count > 0 && oneLess)
    4559  {
    4560  --count;
    4561  }
    4562  for(size_t i = 0; i < count; ++i)
    4563  {
    4564  m_SB.Add(INDENT);
    4565  }
    4566  }
    4567 }
    4568 
    4569 #endif // #if VMA_STATS_STRING_ENABLED
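// Illustrative sketch (not part of the library): composing output with
// VmaJsonWriter. Inside an object, calls alternate key (a string) / value,
// which is exactly what the valueCount % 2 checks in BeginValue() enforce.
#if 0
static void VmaJsonWriterSketch(const VkAllocationCallbacks* pCallbacks, VmaStringBuilder& sb)
{
    VmaJsonWriter json(pCallbacks, sb);
    json.BeginObject();
    json.WriteString("Count"); // Key...
    json.WriteNumber(42u);     // ...then value.
    json.WriteString("Flags");
    json.BeginArray(true);     // singleLine = true: no line breaks inside.
    json.WriteNumber(1u);
    json.WriteNumber(2u);
    json.EndArray();
    json.EndObject();
    // sb now holds approximately: { "Count": 42, "Flags": [1, 2] }
}
#endif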
    4570 
4571 ////////////////////////////////////////////////////////////////////////////////
4572 
    4573 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    4574 {
    4575  if(IsUserDataString())
    4576  {
    4577  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    4578 
    4579  FreeUserDataString(hAllocator);
    4580 
    4581  if(pUserData != VMA_NULL)
    4582  {
    4583  const char* const newStrSrc = (char*)pUserData;
    4584  const size_t newStrLen = strlen(newStrSrc);
    4585  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    4586  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    4587  m_pUserData = newStrDst;
    4588  }
    4589  }
    4590  else
    4591  {
    4592  m_pUserData = pUserData;
    4593  }
    4594 }
    4595 
    4596 void VmaAllocation_T::ChangeBlockAllocation(
    4597  VmaAllocator hAllocator,
    4598  VmaDeviceMemoryBlock* block,
    4599  VkDeviceSize offset)
    4600 {
    4601  VMA_ASSERT(block != VMA_NULL);
    4602  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    4603 
    4604  // Move mapping reference counter from old block to new block.
    4605  if(block != m_BlockAllocation.m_Block)
    4606  {
    4607  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    4608  if(IsPersistentMap())
    4609  ++mapRefCount;
    4610  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    4611  block->Map(hAllocator, mapRefCount, VMA_NULL);
    4612  }
    4613 
    4614  m_BlockAllocation.m_Block = block;
    4615  m_BlockAllocation.m_Offset = offset;
    4616 }
    4617 
    4618 VkDeviceSize VmaAllocation_T::GetOffset() const
    4619 {
    4620  switch(m_Type)
    4621  {
    4622  case ALLOCATION_TYPE_BLOCK:
    4623  return m_BlockAllocation.m_Offset;
    4624  case ALLOCATION_TYPE_DEDICATED:
    4625  return 0;
    4626  default:
    4627  VMA_ASSERT(0);
    4628  return 0;
    4629  }
    4630 }
    4631 
    4632 VkDeviceMemory VmaAllocation_T::GetMemory() const
    4633 {
    4634  switch(m_Type)
    4635  {
    4636  case ALLOCATION_TYPE_BLOCK:
    4637  return m_BlockAllocation.m_Block->m_hMemory;
    4638  case ALLOCATION_TYPE_DEDICATED:
    4639  return m_DedicatedAllocation.m_hMemory;
    4640  default:
    4641  VMA_ASSERT(0);
    4642  return VK_NULL_HANDLE;
    4643  }
    4644 }
    4645 
    4646 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    4647 {
    4648  switch(m_Type)
    4649  {
    4650  case ALLOCATION_TYPE_BLOCK:
    4651  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
    4652  case ALLOCATION_TYPE_DEDICATED:
    4653  return m_DedicatedAllocation.m_MemoryTypeIndex;
    4654  default:
    4655  VMA_ASSERT(0);
    4656  return UINT32_MAX;
    4657  }
    4658 }
    4659 
    4660 void* VmaAllocation_T::GetMappedData() const
    4661 {
    4662  switch(m_Type)
    4663  {
    4664  case ALLOCATION_TYPE_BLOCK:
    4665  if(m_MapCount != 0)
    4666  {
    4667  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
    4668  VMA_ASSERT(pBlockData != VMA_NULL);
    4669  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    4670  }
    4671  else
    4672  {
    4673  return VMA_NULL;
    4674  }
    4675  break;
    4676  case ALLOCATION_TYPE_DEDICATED:
    4677  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    4678  return m_DedicatedAllocation.m_pMappedData;
    4679  default:
    4680  VMA_ASSERT(0);
    4681  return VMA_NULL;
    4682  }
    4683 }
    4684 
    4685 bool VmaAllocation_T::CanBecomeLost() const
    4686 {
    4687  switch(m_Type)
    4688  {
    4689  case ALLOCATION_TYPE_BLOCK:
    4690  return m_BlockAllocation.m_CanBecomeLost;
    4691  case ALLOCATION_TYPE_DEDICATED:
    4692  return false;
    4693  default:
    4694  VMA_ASSERT(0);
    4695  return false;
    4696  }
    4697 }
    4698 
    4699 VmaPool VmaAllocation_T::GetPool() const
    4700 {
    4701  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    4702  return m_BlockAllocation.m_hPool;
    4703 }
    4704 
    4705 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    4706 {
    4707  VMA_ASSERT(CanBecomeLost());
    4708 
    4709  /*
    4710  Warning: This is a carefully designed algorithm.
    4711  Do not modify unless you really know what you're doing :)
    4712  */
    4713  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    4714  for(;;)
    4715  {
    4716  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    4717  {
    4718  VMA_ASSERT(0);
    4719  return false;
    4720  }
    4721  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    4722  {
    4723  return false;
    4724  }
    4725  else // Last use time earlier than current time.
    4726  {
    4727  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    4728  {
    4729  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    4730  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    4731  return true;
    4732  }
    4733  }
    4734  }
    4735 }
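// Worked example for MakeLost() above: with currentFrameIndex = 110 and
// frameInUseCount = 2, an allocation last used in frame 100 satisfies
// 100 + 2 < 110 and can be made lost, while one last used in frame 108 gives
// 108 + 2 >= 110 and cannot - it may still be in use by the GPU. The
// compare_exchange_weak loop is needed because another thread may bump
// LastUseFrameIndex concurrently (e.g. vmaTouchAllocation) and the weak
// exchange may also fail spuriously; on failure, localLastUseFrameIndex is
// refreshed with the current value and the conditions are re-evaluated.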
    4736 
    4737 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    4738 {
    4739  VMA_ASSERT(IsUserDataString());
    4740  if(m_pUserData != VMA_NULL)
    4741  {
    4742  char* const oldStr = (char*)m_pUserData;
    4743  const size_t oldStrLen = strlen(oldStr);
    4744  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    4745  m_pUserData = VMA_NULL;
    4746  }
    4747 }
    4748 
    4749 void VmaAllocation_T::BlockAllocMap()
    4750 {
    4751  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    4752 
    4753  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    4754  {
    4755  ++m_MapCount;
    4756  }
    4757  else
    4758  {
    4759  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    4760  }
    4761 }
    4762 
    4763 void VmaAllocation_T::BlockAllocUnmap()
    4764 {
    4765  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    4766 
    4767  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    4768  {
    4769  --m_MapCount;
    4770  }
    4771  else
    4772  {
    4773  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    4774  }
    4775 }
    4776 
    4777 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    4778 {
    4779  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    4780 
    4781  if(m_MapCount != 0)
    4782  {
    4783  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    4784  {
    4785  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    4786  *ppData = m_DedicatedAllocation.m_pMappedData;
    4787  ++m_MapCount;
    4788  return VK_SUCCESS;
    4789  }
    4790  else
    4791  {
    4792  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    4793  return VK_ERROR_MEMORY_MAP_FAILED;
    4794  }
    4795  }
    4796  else
    4797  {
    4798  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    4799  hAllocator->m_hDevice,
    4800  m_DedicatedAllocation.m_hMemory,
    4801  0, // offset
    4802  VK_WHOLE_SIZE,
    4803  0, // flags
    4804  ppData);
    4805  if(result == VK_SUCCESS)
    4806  {
    4807  m_DedicatedAllocation.m_pMappedData = *ppData;
    4808  m_MapCount = 1;
    4809  }
    4810  return result;
    4811  }
    4812 }
    4813 
    4814 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    4815 {
    4816  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    4817 
    4818  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    4819  {
    4820  --m_MapCount;
    4821  if(m_MapCount == 0)
    4822  {
    4823  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    4824  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    4825  hAllocator->m_hDevice,
    4826  m_DedicatedAllocation.m_hMemory);
    4827  }
    4828  }
    4829  else
    4830  {
    4831  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    4832  }
    4833 }
    4834 
    4835 #if VMA_STATS_STRING_ENABLED
    4836 
    4837 // Correspond to values of enum VmaSuballocationType.
    4838 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    4839  "FREE",
    4840  "UNKNOWN",
    4841  "BUFFER",
    4842  "IMAGE_UNKNOWN",
    4843  "IMAGE_LINEAR",
    4844  "IMAGE_OPTIMAL",
    4845 };
    4846 
    4847 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    4848 {
    4849  json.BeginObject();
    4850 
    4851  json.WriteString("Blocks");
    4852  json.WriteNumber(stat.blockCount);
    4853 
    4854  json.WriteString("Allocations");
    4855  json.WriteNumber(stat.allocationCount);
    4856 
    4857  json.WriteString("UnusedRanges");
    4858  json.WriteNumber(stat.unusedRangeCount);
    4859 
    4860  json.WriteString("UsedBytes");
    4861  json.WriteNumber(stat.usedBytes);
    4862 
    4863  json.WriteString("UnusedBytes");
    4864  json.WriteNumber(stat.unusedBytes);
    4865 
    4866  if(stat.allocationCount > 1)
    4867  {
    4868  json.WriteString("AllocationSize");
    4869  json.BeginObject(true);
    4870  json.WriteString("Min");
    4871  json.WriteNumber(stat.allocationSizeMin);
    4872  json.WriteString("Avg");
    4873  json.WriteNumber(stat.allocationSizeAvg);
    4874  json.WriteString("Max");
    4875  json.WriteNumber(stat.allocationSizeMax);
    4876  json.EndObject();
    4877  }
    4878 
    4879  if(stat.unusedRangeCount > 1)
    4880  {
    4881  json.WriteString("UnusedRangeSize");
    4882  json.BeginObject(true);
    4883  json.WriteString("Min");
    4884  json.WriteNumber(stat.unusedRangeSizeMin);
    4885  json.WriteString("Avg");
    4886  json.WriteNumber(stat.unusedRangeSizeAvg);
    4887  json.WriteString("Max");
    4888  json.WriteNumber(stat.unusedRangeSizeMax);
    4889  json.EndObject();
    4890  }
    4891 
    4892  json.EndObject();
    4893 }
    4894 
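/*
NOTE (illustrative, not part of the original source): for reference,
VmaPrintStatInfo above emits an object of roughly this shape; the numbers are
invented for the example:
{
  "Blocks": 1, "Allocations": 3, "UnusedRanges": 2,
  "UsedBytes": 1048576, "UnusedBytes": 65536,
  "AllocationSize": { "Min": 256, "Avg": 349525, "Max": 786176 },
  "UnusedRangeSize": { "Min": 4096, "Avg": 32768, "Max": 61440 }
}
The "AllocationSize" and "UnusedRangeSize" sub-objects are written only when
the corresponding count is greater than 1.
*/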
    4895 #endif // #if VMA_STATS_STRING_ENABLED
    4896 
    4897 struct VmaSuballocationItemSizeLess
    4898 {
    4899  bool operator()(
    4900  const VmaSuballocationList::iterator lhs,
    4901  const VmaSuballocationList::iterator rhs) const
    4902  {
    4903  return lhs->size < rhs->size;
    4904  }
    4905  bool operator()(
    4906  const VmaSuballocationList::iterator lhs,
    4907  VkDeviceSize rhsSize) const
    4908  {
    4909  return lhs->size < rhsSize;
    4910  }
    4911 };
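// NOTE (illustrative sketch, not part of the original source): the second
// operator() overload above enables heterogeneous binary search -
// m_FreeSuballocationsBySize stores list iterators, but lookups are keyed by
// a plain VkDeviceSize. The sketch below mirrors the call made later in
// CreateAllocationRequest; `allocSize` is a placeholder here.
#if 0
VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    m_FreeSuballocationsBySize.data(),
    m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    allocSize, // Compared against each iterator's ->size.
    VmaSuballocationItemSizeLess());
#endif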
    4912 
     4913 ////////////////////////////////////////////////////////////////////////////////
     4914 // class VmaBlockMetadata
    4915 
    4916 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    4917  m_Size(0),
    4918  m_FreeCount(0),
    4919  m_SumFreeSize(0),
    4920  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    4921  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    4922 {
    4923 }
    4924 
    4925 VmaBlockMetadata::~VmaBlockMetadata()
    4926 {
    4927 }
    4928 
    4929 void VmaBlockMetadata::Init(VkDeviceSize size)
    4930 {
    4931  m_Size = size;
    4932  m_FreeCount = 1;
    4933  m_SumFreeSize = size;
    4934 
    4935  VmaSuballocation suballoc = {};
    4936  suballoc.offset = 0;
    4937  suballoc.size = size;
    4938  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4939  suballoc.hAllocation = VK_NULL_HANDLE;
    4940 
    4941  m_Suballocations.push_back(suballoc);
    4942  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    4943  --suballocItem;
    4944  m_FreeSuballocationsBySize.push_back(suballocItem);
    4945 }
    4946 
    4947 bool VmaBlockMetadata::Validate() const
    4948 {
    4949  if(m_Suballocations.empty())
    4950  {
    4951  return false;
    4952  }
    4953 
     4954  // Expected offset of new suballocation as calculated from previous ones.
    4955  VkDeviceSize calculatedOffset = 0;
    4956  // Expected number of free suballocations as calculated from traversing their list.
    4957  uint32_t calculatedFreeCount = 0;
    4958  // Expected sum size of free suballocations as calculated from traversing their list.
    4959  VkDeviceSize calculatedSumFreeSize = 0;
    4960  // Expected number of free suballocations that should be registered in
     4961  // m_FreeSuballocationsBySize, calculated from traversing their list.
    4962  size_t freeSuballocationsToRegister = 0;
     4963  // True if the previously visited suballocation was free.
    4964  bool prevFree = false;
    4965 
    4966  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4967  suballocItem != m_Suballocations.cend();
    4968  ++suballocItem)
    4969  {
    4970  const VmaSuballocation& subAlloc = *suballocItem;
    4971 
    4972  // Actual offset of this suballocation doesn't match expected one.
    4973  if(subAlloc.offset != calculatedOffset)
    4974  {
    4975  return false;
    4976  }
    4977 
    4978  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4979  // Two adjacent free suballocations are invalid. They should be merged.
    4980  if(prevFree && currFree)
    4981  {
    4982  return false;
    4983  }
    4984 
    4985  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
    4986  {
    4987  return false;
    4988  }
    4989 
    4990  if(currFree)
    4991  {
    4992  calculatedSumFreeSize += subAlloc.size;
    4993  ++calculatedFreeCount;
    4994  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    4995  {
    4996  ++freeSuballocationsToRegister;
    4997  }
    4998  }
    4999  else
    5000  {
    5001  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
    5002  {
    5003  return false;
    5004  }
    5005  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
    5006  {
    5007  return false;
    5008  }
    5009  }
    5010 
    5011  calculatedOffset += subAlloc.size;
    5012  prevFree = currFree;
    5013  }
    5014 
    5015  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    5016  // match expected one.
    5017  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    5018  {
    5019  return false;
    5020  }
    5021 
    5022  VkDeviceSize lastSize = 0;
    5023  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    5024  {
    5025  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    5026 
    5027  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    5028  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
    5029  {
    5030  return false;
    5031  }
    5032  // They must be sorted by size ascending.
    5033  if(suballocItem->size < lastSize)
    5034  {
    5035  return false;
    5036  }
    5037 
    5038  lastSize = suballocItem->size;
    5039  }
    5040 
     5041  // Check if totals match calculated values.
    5042  if(!ValidateFreeSuballocationList() ||
    5043  (calculatedOffset != m_Size) ||
    5044  (calculatedSumFreeSize != m_SumFreeSize) ||
    5045  (calculatedFreeCount != m_FreeCount))
    5046  {
    5047  return false;
    5048  }
    5049 
    5050  return true;
    5051 }
    5052 
    5053 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
    5054 {
    5055  if(!m_FreeSuballocationsBySize.empty())
    5056  {
    5057  return m_FreeSuballocationsBySize.back()->size;
    5058  }
    5059  else
    5060  {
    5061  return 0;
    5062  }
    5063 }
    5064 
    5065 bool VmaBlockMetadata::IsEmpty() const
    5066 {
    5067  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    5068 }
    5069 
    5070 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    5071 {
    5072  outInfo.blockCount = 1;
    5073 
    5074  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    5075  outInfo.allocationCount = rangeCount - m_FreeCount;
    5076  outInfo.unusedRangeCount = m_FreeCount;
    5077 
    5078  outInfo.unusedBytes = m_SumFreeSize;
    5079  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
    5080 
    5081  outInfo.allocationSizeMin = UINT64_MAX;
    5082  outInfo.allocationSizeMax = 0;
    5083  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5084  outInfo.unusedRangeSizeMax = 0;
    5085 
    5086  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    5087  suballocItem != m_Suballocations.cend();
    5088  ++suballocItem)
    5089  {
    5090  const VmaSuballocation& suballoc = *suballocItem;
    5091  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    5092  {
    5093  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    5094  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    5095  }
    5096  else
    5097  {
    5098  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    5099  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    5100  }
    5101  }
    5102 }
    5103 
    5104 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
    5105 {
    5106  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    5107 
    5108  inoutStats.size += m_Size;
    5109  inoutStats.unusedSize += m_SumFreeSize;
    5110  inoutStats.allocationCount += rangeCount - m_FreeCount;
    5111  inoutStats.unusedRangeCount += m_FreeCount;
    5112  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    5113 }
    5114 
    5115 #if VMA_STATS_STRING_ENABLED
    5116 
    5117 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
    5118 {
    5119  json.BeginObject();
    5120 
    5121  json.WriteString("TotalBytes");
    5122  json.WriteNumber(m_Size);
    5123 
    5124  json.WriteString("UnusedBytes");
    5125  json.WriteNumber(m_SumFreeSize);
    5126 
    5127  json.WriteString("Allocations");
    5128  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
    5129 
    5130  json.WriteString("UnusedRanges");
    5131  json.WriteNumber(m_FreeCount);
    5132 
    5133  json.WriteString("Suballocations");
    5134  json.BeginArray();
    5135  size_t i = 0;
    5136  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    5137  suballocItem != m_Suballocations.cend();
    5138  ++suballocItem, ++i)
    5139  {
    5140  json.BeginObject(true);
    5141 
    5142  json.WriteString("Type");
    5143  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
    5144 
    5145  json.WriteString("Size");
    5146  json.WriteNumber(suballocItem->size);
    5147 
    5148  json.WriteString("Offset");
    5149  json.WriteNumber(suballocItem->offset);
    5150 
    5151  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
    5152  {
    5153  const void* pUserData = suballocItem->hAllocation->GetUserData();
    5154  if(pUserData != VMA_NULL)
    5155  {
    5156  json.WriteString("UserData");
    5157  if(suballocItem->hAllocation->IsUserDataString())
    5158  {
    5159  json.WriteString((const char*)pUserData);
    5160  }
    5161  else
    5162  {
    5163  json.BeginString();
    5164  json.ContinueString_Pointer(pUserData);
    5165  json.EndString();
    5166  }
    5167  }
    5168  }
    5169 
    5170  json.EndObject();
    5171  }
    5172  json.EndArray();
    5173 
    5174  json.EndObject();
    5175 }
    5176 
    5177 #endif // #if VMA_STATS_STRING_ENABLED
    5178 
     5179 /*
     5180 How many suitable free suballocations to analyze before choosing the best one.
     5181 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation
     5182  will be chosen.
     5183 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
     5184  suballocations will be analyzed and the best one will be chosen.
     5185 - Any other value is also acceptable.
     5186 */
    5187 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
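// NOTE (worked example, not part of the original source): with registered
// free suballocations of sizes {16, 64, 256} (kept sorted ascending) and
// allocSize = 48:
// - The Best-Fit branch below binary-searches for the first size not less
//   than 48, so the 64-byte range is tried first, then 256 as fallback.
// - The other branch iterates from the biggest, so 256 is tried first.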
    5188 
    5189 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
    5190 {
    5191  VMA_ASSERT(IsEmpty());
    5192  pAllocationRequest->offset = 0;
    5193  pAllocationRequest->sumFreeSize = m_SumFreeSize;
    5194  pAllocationRequest->sumItemSize = 0;
    5195  pAllocationRequest->item = m_Suballocations.begin();
    5196  pAllocationRequest->itemsToMakeLostCount = 0;
    5197 }
    5198 
    5199 bool VmaBlockMetadata::CreateAllocationRequest(
    5200  uint32_t currentFrameIndex,
    5201  uint32_t frameInUseCount,
    5202  VkDeviceSize bufferImageGranularity,
    5203  VkDeviceSize allocSize,
    5204  VkDeviceSize allocAlignment,
    5205  VmaSuballocationType allocType,
    5206  bool canMakeOtherLost,
    5207  VmaAllocationRequest* pAllocationRequest)
    5208 {
    5209  VMA_ASSERT(allocSize > 0);
    5210  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    5211  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    5212  VMA_HEAVY_ASSERT(Validate());
    5213 
     5214  // There is not enough total free space in this block to fulfill the request: Early return.
    5215  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
    5216  {
    5217  return false;
    5218  }
    5219 
     5220  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
    5221  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    5222  if(freeSuballocCount > 0)
    5223  {
    5224  if(VMA_BEST_FIT)
    5225  {
    5226  // Find first free suballocation with size not less than allocSize.
    5227  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    5228  m_FreeSuballocationsBySize.data(),
    5229  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    5230  allocSize,
    5231  VmaSuballocationItemSizeLess());
    5232  size_t index = it - m_FreeSuballocationsBySize.data();
    5233  for(; index < freeSuballocCount; ++index)
    5234  {
    5235  if(CheckAllocation(
    5236  currentFrameIndex,
    5237  frameInUseCount,
    5238  bufferImageGranularity,
    5239  allocSize,
    5240  allocAlignment,
    5241  allocType,
    5242  m_FreeSuballocationsBySize[index],
    5243  false, // canMakeOtherLost
    5244  &pAllocationRequest->offset,
    5245  &pAllocationRequest->itemsToMakeLostCount,
    5246  &pAllocationRequest->sumFreeSize,
    5247  &pAllocationRequest->sumItemSize))
    5248  {
    5249  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    5250  return true;
    5251  }
    5252  }
    5253  }
    5254  else
    5255  {
     5256  // Search starting from biggest suballocations.
    5257  for(size_t index = freeSuballocCount; index--; )
    5258  {
    5259  if(CheckAllocation(
    5260  currentFrameIndex,
    5261  frameInUseCount,
    5262  bufferImageGranularity,
    5263  allocSize,
    5264  allocAlignment,
    5265  allocType,
    5266  m_FreeSuballocationsBySize[index],
    5267  false, // canMakeOtherLost
    5268  &pAllocationRequest->offset,
    5269  &pAllocationRequest->itemsToMakeLostCount,
    5270  &pAllocationRequest->sumFreeSize,
    5271  &pAllocationRequest->sumItemSize))
    5272  {
    5273  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    5274  return true;
    5275  }
    5276  }
    5277  }
    5278  }
    5279 
    5280  if(canMakeOtherLost)
    5281  {
    5282  // Brute-force algorithm. TODO: Come up with something better.
    5283 
    5284  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    5285  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    5286 
    5287  VmaAllocationRequest tmpAllocRequest = {};
    5288  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    5289  suballocIt != m_Suballocations.end();
    5290  ++suballocIt)
    5291  {
    5292  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    5293  suballocIt->hAllocation->CanBecomeLost())
    5294  {
    5295  if(CheckAllocation(
    5296  currentFrameIndex,
    5297  frameInUseCount,
    5298  bufferImageGranularity,
    5299  allocSize,
    5300  allocAlignment,
    5301  allocType,
    5302  suballocIt,
    5303  canMakeOtherLost,
    5304  &tmpAllocRequest.offset,
    5305  &tmpAllocRequest.itemsToMakeLostCount,
    5306  &tmpAllocRequest.sumFreeSize,
    5307  &tmpAllocRequest.sumItemSize))
    5308  {
    5309  tmpAllocRequest.item = suballocIt;
    5310 
    5311  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    5312  {
    5313  *pAllocationRequest = tmpAllocRequest;
    5314  }
    5315  }
    5316  }
    5317  }
    5318 
    5319  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    5320  {
    5321  return true;
    5322  }
    5323  }
    5324 
    5325  return false;
    5326 }
    5327 
    5328 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
    5329  uint32_t currentFrameIndex,
    5330  uint32_t frameInUseCount,
    5331  VmaAllocationRequest* pAllocationRequest)
    5332 {
    5333  while(pAllocationRequest->itemsToMakeLostCount > 0)
    5334  {
    5335  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    5336  {
    5337  ++pAllocationRequest->item;
    5338  }
    5339  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    5340  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    5341  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    5342  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    5343  {
    5344  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    5345  --pAllocationRequest->itemsToMakeLostCount;
    5346  }
    5347  else
    5348  {
    5349  return false;
    5350  }
    5351  }
    5352 
    5353  VMA_HEAVY_ASSERT(Validate());
    5354  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    5355  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5356 
    5357  return true;
    5358 }
    5359 
    5360 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    5361 {
    5362  uint32_t lostAllocationCount = 0;
    5363  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    5364  it != m_Suballocations.end();
    5365  ++it)
    5366  {
    5367  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    5368  it->hAllocation->CanBecomeLost() &&
    5369  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    5370  {
    5371  it = FreeSuballocation(it);
    5372  ++lostAllocationCount;
    5373  }
    5374  }
    5375  return lostAllocationCount;
    5376 }
    5377 
    5378 void VmaBlockMetadata::Alloc(
    5379  const VmaAllocationRequest& request,
    5380  VmaSuballocationType type,
    5381  VkDeviceSize allocSize,
    5382  VmaAllocation hAllocation)
    5383 {
    5384  VMA_ASSERT(request.item != m_Suballocations.end());
    5385  VmaSuballocation& suballoc = *request.item;
    5386  // Given suballocation is a free block.
    5387  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    5388  // Given offset is inside this suballocation.
    5389  VMA_ASSERT(request.offset >= suballoc.offset);
    5390  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    5391  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    5392  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    5393 
    5394  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    5395  // it to become used.
    5396  UnregisterFreeSuballocation(request.item);
    5397 
    5398  suballoc.offset = request.offset;
    5399  suballoc.size = allocSize;
    5400  suballoc.type = type;
    5401  suballoc.hAllocation = hAllocation;
    5402 
    5403  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    5404  if(paddingEnd)
    5405  {
    5406  VmaSuballocation paddingSuballoc = {};
    5407  paddingSuballoc.offset = request.offset + allocSize;
    5408  paddingSuballoc.size = paddingEnd;
    5409  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5410  VmaSuballocationList::iterator next = request.item;
    5411  ++next;
    5412  const VmaSuballocationList::iterator paddingEndItem =
    5413  m_Suballocations.insert(next, paddingSuballoc);
    5414  RegisterFreeSuballocation(paddingEndItem);
    5415  }
    5416 
    5417  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    5418  if(paddingBegin)
    5419  {
    5420  VmaSuballocation paddingSuballoc = {};
    5421  paddingSuballoc.offset = request.offset - paddingBegin;
    5422  paddingSuballoc.size = paddingBegin;
    5423  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5424  const VmaSuballocationList::iterator paddingBeginItem =
    5425  m_Suballocations.insert(request.item, paddingSuballoc);
    5426  RegisterFreeSuballocation(paddingBeginItem);
    5427  }
    5428 
    5429  // Update totals.
    5430  m_FreeCount = m_FreeCount - 1;
    5431  if(paddingBegin > 0)
    5432  {
    5433  ++m_FreeCount;
    5434  }
    5435  if(paddingEnd > 0)
    5436  {
    5437  ++m_FreeCount;
    5438  }
    5439  m_SumFreeSize -= allocSize;
    5440 }
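// NOTE (illustration, not part of the original source): Alloc() above splits
// the chosen free suballocation into up to three parts. E.g. for a free
// suballocation at offset 100 of size 100, with request.offset = 112 and
// allocSize = 64:
//
//   |<- paddingBegin = 12 ->|<-- allocation: 64 -->|<- paddingEnd = 24 ->|
//   100                    112                    176                  200
//
// Each non-empty padding becomes a new FREE suballocation registered in
// m_FreeSuballocationsBySize, and m_FreeCount is adjusted accordingly
// (-1 for the consumed range, +1 per padding).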
    5441 
    5442 void VmaBlockMetadata::Free(const VmaAllocation allocation)
    5443 {
    5444  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    5445  suballocItem != m_Suballocations.end();
    5446  ++suballocItem)
    5447  {
    5448  VmaSuballocation& suballoc = *suballocItem;
    5449  if(suballoc.hAllocation == allocation)
    5450  {
    5451  FreeSuballocation(suballocItem);
    5452  VMA_HEAVY_ASSERT(Validate());
    5453  return;
    5454  }
    5455  }
    5456  VMA_ASSERT(0 && "Not found!");
    5457 }
    5458 
    5459 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
    5460 {
    5461  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    5462  suballocItem != m_Suballocations.end();
    5463  ++suballocItem)
    5464  {
    5465  VmaSuballocation& suballoc = *suballocItem;
    5466  if(suballoc.offset == offset)
    5467  {
    5468  FreeSuballocation(suballocItem);
    5469  return;
    5470  }
    5471  }
    5472  VMA_ASSERT(0 && "Not found!");
    5473 }
    5474 
    5475 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
    5476 {
    5477  VkDeviceSize lastSize = 0;
    5478  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    5479  {
    5480  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    5481 
    5482  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    5483  {
    5484  VMA_ASSERT(0);
    5485  return false;
    5486  }
    5487  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5488  {
    5489  VMA_ASSERT(0);
    5490  return false;
    5491  }
    5492  if(it->size < lastSize)
    5493  {
    5494  VMA_ASSERT(0);
    5495  return false;
    5496  }
    5497 
    5498  lastSize = it->size;
    5499  }
    5500  return true;
    5501 }
    5502 
    5503 bool VmaBlockMetadata::CheckAllocation(
    5504  uint32_t currentFrameIndex,
    5505  uint32_t frameInUseCount,
    5506  VkDeviceSize bufferImageGranularity,
    5507  VkDeviceSize allocSize,
    5508  VkDeviceSize allocAlignment,
    5509  VmaSuballocationType allocType,
    5510  VmaSuballocationList::const_iterator suballocItem,
    5511  bool canMakeOtherLost,
    5512  VkDeviceSize* pOffset,
    5513  size_t* itemsToMakeLostCount,
    5514  VkDeviceSize* pSumFreeSize,
    5515  VkDeviceSize* pSumItemSize) const
    5516 {
    5517  VMA_ASSERT(allocSize > 0);
    5518  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    5519  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    5520  VMA_ASSERT(pOffset != VMA_NULL);
    5521 
    5522  *itemsToMakeLostCount = 0;
    5523  *pSumFreeSize = 0;
    5524  *pSumItemSize = 0;
    5525 
    5526  if(canMakeOtherLost)
    5527  {
    5528  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    5529  {
    5530  *pSumFreeSize = suballocItem->size;
    5531  }
    5532  else
    5533  {
    5534  if(suballocItem->hAllocation->CanBecomeLost() &&
    5535  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    5536  {
    5537  ++*itemsToMakeLostCount;
    5538  *pSumItemSize = suballocItem->size;
    5539  }
    5540  else
    5541  {
    5542  return false;
    5543  }
    5544  }
    5545 
    5546  // Remaining size is too small for this request: Early return.
    5547  if(m_Size - suballocItem->offset < allocSize)
    5548  {
    5549  return false;
    5550  }
    5551 
    5552  // Start from offset equal to beginning of this suballocation.
    5553  *pOffset = suballocItem->offset;
    5554 
    5555  // Apply VMA_DEBUG_MARGIN at the beginning.
    5556  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    5557  {
    5558  *pOffset += VMA_DEBUG_MARGIN;
    5559  }
    5560 
    5561  // Apply alignment.
    5562  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    5563  *pOffset = VmaAlignUp(*pOffset, alignment);
    5564 
    5565  // Check previous suballocations for BufferImageGranularity conflicts.
    5566  // Make bigger alignment if necessary.
    5567  if(bufferImageGranularity > 1)
    5568  {
    5569  bool bufferImageGranularityConflict = false;
    5570  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    5571  while(prevSuballocItem != m_Suballocations.cbegin())
    5572  {
    5573  --prevSuballocItem;
    5574  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    5575  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    5576  {
    5577  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    5578  {
    5579  bufferImageGranularityConflict = true;
    5580  break;
    5581  }
    5582  }
    5583  else
    5584  // Already on previous page.
    5585  break;
    5586  }
    5587  if(bufferImageGranularityConflict)
    5588  {
    5589  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    5590  }
    5591  }
    5592 
    5593  // Now that we have final *pOffset, check if we are past suballocItem.
    5594  // If yes, return false - this function should be called for another suballocItem as starting point.
    5595  if(*pOffset >= suballocItem->offset + suballocItem->size)
    5596  {
    5597  return false;
    5598  }
    5599 
    5600  // Calculate padding at the beginning based on current offset.
    5601  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    5602 
    5603  // Calculate required margin at the end if this is not last suballocation.
    5604  VmaSuballocationList::const_iterator next = suballocItem;
    5605  ++next;
    5606  const VkDeviceSize requiredEndMargin =
    5607  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    5608 
    5609  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    5610  // Another early return check.
    5611  if(suballocItem->offset + totalSize > m_Size)
    5612  {
    5613  return false;
    5614  }
    5615 
    5616  // Advance lastSuballocItem until desired size is reached.
    5617  // Update itemsToMakeLostCount.
    5618  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    5619  if(totalSize > suballocItem->size)
    5620  {
    5621  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    5622  while(remainingSize > 0)
    5623  {
    5624  ++lastSuballocItem;
    5625  if(lastSuballocItem == m_Suballocations.cend())
    5626  {
    5627  return false;
    5628  }
    5629  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    5630  {
    5631  *pSumFreeSize += lastSuballocItem->size;
    5632  }
    5633  else
    5634  {
    5635  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    5636  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    5637  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    5638  {
    5639  ++*itemsToMakeLostCount;
    5640  *pSumItemSize += lastSuballocItem->size;
    5641  }
    5642  else
    5643  {
    5644  return false;
    5645  }
    5646  }
    5647  remainingSize = (lastSuballocItem->size < remainingSize) ?
    5648  remainingSize - lastSuballocItem->size : 0;
    5649  }
    5650  }
    5651 
    5652  // Check next suballocations for BufferImageGranularity conflicts.
    5653  // If conflict exists, we must mark more allocations lost or fail.
    5654  if(bufferImageGranularity > 1)
    5655  {
    5656  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    5657  ++nextSuballocItem;
    5658  while(nextSuballocItem != m_Suballocations.cend())
    5659  {
    5660  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    5661  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    5662  {
    5663  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    5664  {
    5665  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    5666  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    5667  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    5668  {
    5669  ++*itemsToMakeLostCount;
    5670  }
    5671  else
    5672  {
    5673  return false;
    5674  }
    5675  }
    5676  }
    5677  else
    5678  {
    5679  // Already on next page.
    5680  break;
    5681  }
    5682  ++nextSuballocItem;
    5683  }
    5684  }
    5685  }
    5686  else
    5687  {
    5688  const VmaSuballocation& suballoc = *suballocItem;
    5689  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    5690 
    5691  *pSumFreeSize = suballoc.size;
    5692 
    5693  // Size of this suballocation is too small for this request: Early return.
    5694  if(suballoc.size < allocSize)
    5695  {
    5696  return false;
    5697  }
    5698 
    5699  // Start from offset equal to beginning of this suballocation.
    5700  *pOffset = suballoc.offset;
    5701 
    5702  // Apply VMA_DEBUG_MARGIN at the beginning.
    5703  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    5704  {
    5705  *pOffset += VMA_DEBUG_MARGIN;
    5706  }
    5707 
    5708  // Apply alignment.
    5709  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    5710  *pOffset = VmaAlignUp(*pOffset, alignment);
    5711 
    5712  // Check previous suballocations for BufferImageGranularity conflicts.
    5713  // Make bigger alignment if necessary.
    5714  if(bufferImageGranularity > 1)
    5715  {
    5716  bool bufferImageGranularityConflict = false;
    5717  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    5718  while(prevSuballocItem != m_Suballocations.cbegin())
    5719  {
    5720  --prevSuballocItem;
    5721  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    5722  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    5723  {
    5724  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    5725  {
    5726  bufferImageGranularityConflict = true;
    5727  break;
    5728  }
    5729  }
    5730  else
    5731  // Already on previous page.
    5732  break;
    5733  }
    5734  if(bufferImageGranularityConflict)
    5735  {
    5736  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    5737  }
    5738  }
    5739 
    5740  // Calculate padding at the beginning based on current offset.
    5741  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    5742 
    5743  // Calculate required margin at the end if this is not last suballocation.
    5744  VmaSuballocationList::const_iterator next = suballocItem;
    5745  ++next;
    5746  const VkDeviceSize requiredEndMargin =
    5747  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    5748 
    5749  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    5750  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    5751  {
    5752  return false;
    5753  }
    5754 
    5755  // Check next suballocations for BufferImageGranularity conflicts.
    5756  // If conflict exists, allocation cannot be made here.
    5757  if(bufferImageGranularity > 1)
    5758  {
    5759  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    5760  ++nextSuballocItem;
    5761  while(nextSuballocItem != m_Suballocations.cend())
    5762  {
    5763  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    5764  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    5765  {
    5766  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    5767  {
    5768  return false;
    5769  }
    5770  }
    5771  else
    5772  {
    5773  // Already on next page.
    5774  break;
    5775  }
    5776  ++nextSuballocItem;
    5777  }
    5778  }
    5779  }
    5780 
    5781  // All tests passed: Success. pOffset is already filled.
    5782  return true;
    5783 }
    5784 
    5785 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
    5786 {
    5787  VMA_ASSERT(item != m_Suballocations.end());
    5788  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5789 
    5790  VmaSuballocationList::iterator nextItem = item;
    5791  ++nextItem;
    5792  VMA_ASSERT(nextItem != m_Suballocations.end());
    5793  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    5794 
    5795  item->size += nextItem->size;
    5796  --m_FreeCount;
    5797  m_Suballocations.erase(nextItem);
    5798 }
    5799 
    5800 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    5801 {
    5802  // Change this suballocation to be marked as free.
    5803  VmaSuballocation& suballoc = *suballocItem;
    5804  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5805  suballoc.hAllocation = VK_NULL_HANDLE;
    5806 
    5807  // Update totals.
    5808  ++m_FreeCount;
    5809  m_SumFreeSize += suballoc.size;
    5810 
    5811  // Merge with previous and/or next suballocation if it's also free.
    5812  bool mergeWithNext = false;
    5813  bool mergeWithPrev = false;
    5814 
    5815  VmaSuballocationList::iterator nextItem = suballocItem;
    5816  ++nextItem;
    5817  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    5818  {
    5819  mergeWithNext = true;
    5820  }
    5821 
    5822  VmaSuballocationList::iterator prevItem = suballocItem;
    5823  if(suballocItem != m_Suballocations.begin())
    5824  {
    5825  --prevItem;
    5826  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    5827  {
    5828  mergeWithPrev = true;
    5829  }
    5830  }
    5831 
    5832  if(mergeWithNext)
    5833  {
    5834  UnregisterFreeSuballocation(nextItem);
    5835  MergeFreeWithNext(suballocItem);
    5836  }
    5837 
    5838  if(mergeWithPrev)
    5839  {
    5840  UnregisterFreeSuballocation(prevItem);
    5841  MergeFreeWithNext(prevItem);
    5842  RegisterFreeSuballocation(prevItem);
    5843  return prevItem;
    5844  }
    5845  else
    5846  {
    5847  RegisterFreeSuballocation(suballocItem);
    5848  return suballocItem;
    5849  }
    5850 }
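// NOTE (worked example, not part of the original source): given
// [FREE 32][USED 64][FREE 16], freeing the middle USED range first merges
// the following FREE 16 into it (MergeFreeWithNext on suballocItem), then
// merges the result into the preceding FREE 32 (MergeFreeWithNext on
// prevItem), yielding a single [FREE 112] suballocation that is then
// re-registered by size.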
    5851 
    5852 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    5853 {
    5854  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5855  VMA_ASSERT(item->size > 0);
    5856 
     5857  // You may want to enable this validation at the beginning or at the end of
     5858  // this function, depending on what you want to check.
    5859  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5860 
    5861  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5862  {
    5863  if(m_FreeSuballocationsBySize.empty())
    5864  {
    5865  m_FreeSuballocationsBySize.push_back(item);
    5866  }
    5867  else
    5868  {
    5869  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    5870  }
    5871  }
    5872 
    5873  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5874 }
    5875 
    5876 
    5877 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    5878 {
    5879  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5880  VMA_ASSERT(item->size > 0);
    5881 
     5882  // You may want to enable this validation at the beginning or at the end of
     5883  // this function, depending on what you want to check.
    5884  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5885 
    5886  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5887  {
    5888  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    5889  m_FreeSuballocationsBySize.data(),
    5890  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    5891  item,
    5892  VmaSuballocationItemSizeLess());
    5893  for(size_t index = it - m_FreeSuballocationsBySize.data();
    5894  index < m_FreeSuballocationsBySize.size();
    5895  ++index)
    5896  {
    5897  if(m_FreeSuballocationsBySize[index] == item)
    5898  {
    5899  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    5900  return;
    5901  }
    5902  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    5903  }
    5904  VMA_ASSERT(0 && "Not found.");
    5905  }
    5906 
    5907  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5908 }
    5909 
     5910 ////////////////////////////////////////////////////////////////////////////////
     5911 // class VmaDeviceMemoryMapping
    5912 
    5913 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
    5914  m_MapCount(0),
    5915  m_pMappedData(VMA_NULL)
    5916 {
    5917 }
    5918 
    5919 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
    5920 {
    5921  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    5922 }
    5923 
    5924 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData)
    5925 {
    5926  if(count == 0)
    5927  {
    5928  return VK_SUCCESS;
    5929  }
    5930 
    5931  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    5932  if(m_MapCount != 0)
    5933  {
    5934  m_MapCount += count;
    5935  VMA_ASSERT(m_pMappedData != VMA_NULL);
    5936  if(ppData != VMA_NULL)
    5937  {
    5938  *ppData = m_pMappedData;
    5939  }
    5940  return VK_SUCCESS;
    5941  }
    5942  else
    5943  {
    5944  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    5945  hAllocator->m_hDevice,
    5946  hMemory,
    5947  0, // offset
    5948  VK_WHOLE_SIZE,
    5949  0, // flags
    5950  &m_pMappedData);
    5951  if(result == VK_SUCCESS)
    5952  {
    5953  if(ppData != VMA_NULL)
    5954  {
    5955  *ppData = m_pMappedData;
    5956  }
    5957  m_MapCount = count;
    5958  }
    5959  return result;
    5960  }
    5961 }
    5962 
    5963 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count)
    5964 {
    5965  if(count == 0)
    5966  {
    5967  return;
    5968  }
    5969 
    5970  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    5971  if(m_MapCount >= count)
    5972  {
    5973  m_MapCount -= count;
    5974  if(m_MapCount == 0)
    5975  {
    5976  m_pMappedData = VMA_NULL;
    5977  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
    5978  }
    5979  }
    5980  else
    5981  {
    5982  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    5983  }
    5984 }
    5985 
     5986 ////////////////////////////////////////////////////////////////////////////////
     5987 // class VmaDeviceMemoryBlock
    5988 
    5989 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    5990  m_MemoryTypeIndex(UINT32_MAX),
    5991  m_hMemory(VK_NULL_HANDLE),
    5992  m_Metadata(hAllocator)
    5993 {
    5994 }
    5995 
    5996 void VmaDeviceMemoryBlock::Init(
    5997  uint32_t newMemoryTypeIndex,
    5998  VkDeviceMemory newMemory,
    5999  VkDeviceSize newSize)
    6000 {
    6001  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    6002 
    6003  m_MemoryTypeIndex = newMemoryTypeIndex;
    6004  m_hMemory = newMemory;
    6005 
    6006  m_Metadata.Init(newSize);
    6007 }
    6008 
    6009 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    6010 {
    6011  // This is the most important assert in the entire library.
     6012  // Hitting it means you have a memory leak - unreleased VmaAllocation objects.
    6013  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    6014 
    6015  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    6016  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
    6017  m_hMemory = VK_NULL_HANDLE;
    6018 }
    6019 
    6020 bool VmaDeviceMemoryBlock::Validate() const
    6021 {
    6022  if((m_hMemory == VK_NULL_HANDLE) ||
    6023  (m_Metadata.GetSize() == 0))
    6024  {
    6025  return false;
    6026  }
    6027 
    6028  return m_Metadata.Validate();
    6029 }
    6030 
    6031 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    6032 {
    6033  return m_Mapping.Map(hAllocator, m_hMemory, count, ppData);
    6034 }
    6035 
    6036 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    6037 {
    6038  m_Mapping.Unmap(hAllocator, m_hMemory, count);
    6039 }
    6040 
    6041 static void InitStatInfo(VmaStatInfo& outInfo)
    6042 {
    6043  memset(&outInfo, 0, sizeof(outInfo));
    6044  outInfo.allocationSizeMin = UINT64_MAX;
    6045  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6046 }
    6047 
    6048 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    6049 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    6050 {
    6051  inoutInfo.blockCount += srcInfo.blockCount;
    6052  inoutInfo.allocationCount += srcInfo.allocationCount;
    6053  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    6054  inoutInfo.usedBytes += srcInfo.usedBytes;
    6055  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    6056  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    6057  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    6058  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    6059  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    6060 }
    6061 
    6062 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    6063 {
    6064  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    6065  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    6066  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    6067  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    6068 }
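// NOTE (illustration, not part of the original source): assuming VmaRoundDiv
// divides with rounding to the nearest integer, usedBytes = 10 over
// allocationCount = 4 yields allocationSizeAvg = 3 (2.5 rounded up), where
// plain integer division would truncate to 2.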
    6069 
    6070 VmaPool_T::VmaPool_T(
    6071  VmaAllocator hAllocator,
    6072  const VmaPoolCreateInfo& createInfo) :
    6073  m_BlockVector(
    6074  hAllocator,
    6075  createInfo.memoryTypeIndex,
    6076  createInfo.blockSize,
    6077  createInfo.minBlockCount,
    6078  createInfo.maxBlockCount,
    6079  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    6080  createInfo.frameInUseCount,
    6081  true) // isCustomPool
    6082 {
    6083 }
    6084 
    6085 VmaPool_T::~VmaPool_T()
    6086 {
    6087 }
    6088 
    6089 #if VMA_STATS_STRING_ENABLED
    6090 
    6091 #endif // #if VMA_STATS_STRING_ENABLED
    6092 
    6093 VmaBlockVector::VmaBlockVector(
    6094  VmaAllocator hAllocator,
    6095  uint32_t memoryTypeIndex,
    6096  VkDeviceSize preferredBlockSize,
    6097  size_t minBlockCount,
    6098  size_t maxBlockCount,
    6099  VkDeviceSize bufferImageGranularity,
    6100  uint32_t frameInUseCount,
    6101  bool isCustomPool) :
    6102  m_hAllocator(hAllocator),
    6103  m_MemoryTypeIndex(memoryTypeIndex),
    6104  m_PreferredBlockSize(preferredBlockSize),
    6105  m_MinBlockCount(minBlockCount),
    6106  m_MaxBlockCount(maxBlockCount),
    6107  m_BufferImageGranularity(bufferImageGranularity),
    6108  m_FrameInUseCount(frameInUseCount),
    6109  m_IsCustomPool(isCustomPool),
    6110  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    6111  m_HasEmptyBlock(false),
    6112  m_pDefragmentator(VMA_NULL)
    6113 {
    6114 }
    6115 
    6116 VmaBlockVector::~VmaBlockVector()
    6117 {
    6118  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    6119 
    6120  for(size_t i = m_Blocks.size(); i--; )
    6121  {
    6122  m_Blocks[i]->Destroy(m_hAllocator);
    6123  vma_delete(m_hAllocator, m_Blocks[i]);
    6124  }
    6125 }
    6126 
    6127 VkResult VmaBlockVector::CreateMinBlocks()
    6128 {
    6129  for(size_t i = 0; i < m_MinBlockCount; ++i)
    6130  {
    6131  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    6132  if(res != VK_SUCCESS)
    6133  {
    6134  return res;
    6135  }
    6136  }
    6137  return VK_SUCCESS;
    6138 }
    6139 
    6140 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    6141 {
    6142  pStats->size = 0;
    6143  pStats->unusedSize = 0;
    6144  pStats->allocationCount = 0;
    6145  pStats->unusedRangeCount = 0;
    6146  pStats->unusedRangeSizeMax = 0;
    6147 
    6148  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6149 
    6150  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    6151  {
    6152  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    6153  VMA_ASSERT(pBlock);
    6154  VMA_HEAVY_ASSERT(pBlock->Validate());
    6155  pBlock->m_Metadata.AddPoolStats(*pStats);
    6156  }
    6157 }
    6158 
    6159 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    6160 
    6161 VkResult VmaBlockVector::Allocate(
    6162  VmaPool hCurrentPool,
    6163  uint32_t currentFrameIndex,
    6164  const VkMemoryRequirements& vkMemReq,
    6165  const VmaAllocationCreateInfo& createInfo,
    6166  VmaSuballocationType suballocType,
    6167  VmaAllocation* pAllocation)
    6168 {
    6169  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    6170  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    6171 
    6172  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6173 
    6174  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    6175  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    6176  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    6177  {
    6178  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    6179  VMA_ASSERT(pCurrBlock);
    6180  VmaAllocationRequest currRequest = {};
    6181  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    6182  currentFrameIndex,
    6183  m_FrameInUseCount,
    6184  m_BufferImageGranularity,
    6185  vkMemReq.size,
    6186  vkMemReq.alignment,
    6187  suballocType,
    6188  false, // canMakeOtherLost
    6189  &currRequest))
    6190  {
    6191  // Allocate from pCurrBlock.
    6192  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    6193 
    6194  if(mapped)
    6195  {
    6196  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
    6197  if(res != VK_SUCCESS)
    6198  {
    6199  return res;
    6200  }
    6201  }
    6202 
     6203  // We no longer have an empty block.
    6204  if(pCurrBlock->m_Metadata.IsEmpty())
    6205  {
    6206  m_HasEmptyBlock = false;
    6207  }
    6208 
    6209  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    6210  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
    6211  (*pAllocation)->InitBlockAllocation(
    6212  hCurrentPool,
    6213  pCurrBlock,
    6214  currRequest.offset,
    6215  vkMemReq.alignment,
    6216  vkMemReq.size,
    6217  suballocType,
    6218  mapped,
    6219  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    6220  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
     6221  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    6222  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    6223  return VK_SUCCESS;
    6224  }
    6225  }
    6226 
    6227  const bool canCreateNewBlock =
    6228  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    6229  (m_Blocks.size() < m_MaxBlockCount);
    6230 
    6231  // 2. Try to create new block.
    6232  if(canCreateNewBlock)
    6233  {
    6234  // Calculate optimal size for new block.
    6235  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    6236  uint32_t newBlockSizeShift = 0;
    6237  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    6238 
    6239  // Allocating blocks of other sizes is allowed only in default pools.
    6240  // In custom pools block size is fixed.
    6241  if(m_IsCustomPool == false)
    6242  {
    6243  // Allocate 1/8, 1/4, 1/2 as first blocks.
    6244  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    6245  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    6246  {
    6247  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    6248  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
    6249  {
    6250  newBlockSize = smallerNewBlockSize;
    6251  ++newBlockSizeShift;
    6252  }
    6253  else
    6254  {
    6255  break;
    6256  }
    6257  }
    6258  }
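 // NOTE (worked example, not part of the original source): with
 // m_PreferredBlockSize = 256 MiB, no existing blocks (maxExistingBlockSize
 // = 0) and vkMemReq.size = 1 MiB, the loop above halves the size three
 // times (NEW_BLOCK_SIZE_SHIFT_MAX): 256 -> 128 -> 64 -> 32, because each
 // half is still larger than any existing block and at least twice the
 // requested size, so the first block created is 32 MiB.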
    6259 
    6260  size_t newBlockIndex = 0;
    6261  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    6262  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    6263  if(m_IsCustomPool == false)
    6264  {
    6265  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    6266  {
    6267  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    6268  if(smallerNewBlockSize >= vkMemReq.size)
    6269  {
    6270  newBlockSize = smallerNewBlockSize;
    6271  ++newBlockSizeShift;
    6272  res = CreateBlock(newBlockSize, &newBlockIndex);
    6273  }
    6274  else
    6275  {
    6276  break;
    6277  }
    6278  }
    6279  }
    6280 
    6281  if(res == VK_SUCCESS)
    6282  {
    6283  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    6284  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
    6285 
    6286  if(mapped)
    6287  {
    6288  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    6289  if(res != VK_SUCCESS)
    6290  {
    6291  return res;
    6292  }
    6293  }
    6294 
     6295  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
    6296  VmaAllocationRequest allocRequest;
    6297  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
    6298  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    6299  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
    6300  (*pAllocation)->InitBlockAllocation(
    6301  hCurrentPool,
    6302  pBlock,
    6303  allocRequest.offset,
    6304  vkMemReq.alignment,
    6305  vkMemReq.size,
    6306  suballocType,
    6307  mapped,
    6308  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    6309  VMA_HEAVY_ASSERT(pBlock->Validate());
     6310  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    6311  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    6312  return VK_SUCCESS;
    6313  }
    6314  }
    6315 
    6316  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    6317 
    6318  // 3. Try to allocate from existing blocks with making other allocations lost.
    6319  if(canMakeOtherLost)
    6320  {
    6321  uint32_t tryIndex = 0;
    6322  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    6323  {
    6324  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    6325  VmaAllocationRequest bestRequest = {};
    6326  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    6327 
    6328  // 1. Search existing allocations.
    6329  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    6330  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    6331  {
    6332  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    6333  VMA_ASSERT(pCurrBlock);
    6334  VmaAllocationRequest currRequest = {};
    6335  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    6336  currentFrameIndex,
    6337  m_FrameInUseCount,
    6338  m_BufferImageGranularity,
    6339  vkMemReq.size,
    6340  vkMemReq.alignment,
    6341  suballocType,
    6342  canMakeOtherLost,
    6343  &currRequest))
    6344  {
    6345  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    6346  if(pBestRequestBlock == VMA_NULL ||
    6347  currRequestCost < bestRequestCost)
    6348  {
    6349  pBestRequestBlock = pCurrBlock;
    6350  bestRequest = currRequest;
    6351  bestRequestCost = currRequestCost;
    6352 
    6353  if(bestRequestCost == 0)
    6354  {
    6355  break;
    6356  }
    6357  }
    6358  }
    6359  }
    6360 
    6361  if(pBestRequestBlock != VMA_NULL)
    6362  {
    6363  if(mapped)
    6364  {
    6365  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    6366  if(res != VK_SUCCESS)
    6367  {
    6368  return res;
    6369  }
    6370  }
    6371 
    6372  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
    6373  currentFrameIndex,
    6374  m_FrameInUseCount,
    6375  &bestRequest))
    6376  {
     6377  // We no longer have an empty block.
    6378  if(pBestRequestBlock->m_Metadata.IsEmpty())
    6379  {
    6380  m_HasEmptyBlock = false;
    6381  }
    6382  // Allocate from this pBlock.
    6383  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    6384  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
    6385  (*pAllocation)->InitBlockAllocation(
    6386  hCurrentPool,
    6387  pBestRequestBlock,
    6388  bestRequest.offset,
    6389  vkMemReq.alignment,
    6390  vkMemReq.size,
    6391  suballocType,
    6392  mapped,
    6393  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    6394  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
     6395  VMA_DEBUG_LOG(" Returned from existing block");
    6396  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    6397  return VK_SUCCESS;
    6398  }
    6399  // else: Some allocations must have been touched while we are here. Next try.
    6400  }
    6401  else
    6402  {
    6403  // Could not find place in any of the blocks - break outer loop.
    6404  break;
    6405  }
    6406  }
    6407  /* Maximum number of tries exceeded - a very unlike event when many other
    6408  threads are simultaneously touching allocations making it impossible to make
    6409  lost at the same time as we try to allocate. */
    6410  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    6411  {
    6412  return VK_ERROR_TOO_MANY_OBJECTS;
    6413  }
    6414  }
    6415 
    6416  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6417 }
    6418 
    6419 void VmaBlockVector::Free(
    6420  VmaAllocation hAllocation)
    6421 {
    6422  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    6423 
    6424  // Scope for lock.
    6425  {
    6426  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6427 
    6428  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    6429 
    6430  if(hAllocation->IsPersistentMap())
    6431  {
    6432  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory, 1);
    6433  }
    6434 
    6435  pBlock->m_Metadata.Free(hAllocation);
    6436  VMA_HEAVY_ASSERT(pBlock->Validate());
    6437 
     6438  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
    6439 
    6440  // pBlock became empty after this deallocation.
    6441  if(pBlock->m_Metadata.IsEmpty())
    6442  {
     6443  // Already has an empty block. We don't want to have two, so delete this one.
    6444  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    6445  {
    6446  pBlockToDelete = pBlock;
    6447  Remove(pBlock);
    6448  }
     6449  // We now have our first empty block.
    6450  else
    6451  {
    6452  m_HasEmptyBlock = true;
    6453  }
    6454  }
     6455  // pBlock didn't become empty, but we already have another empty block - find and free that one.
     6456  // (This is optional, as a heuristic.)
    6457  else if(m_HasEmptyBlock)
    6458  {
    6459  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    6460  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    6461  {
    6462  pBlockToDelete = pLastBlock;
    6463  m_Blocks.pop_back();
    6464  m_HasEmptyBlock = false;
    6465  }
    6466  }
    6467 
    6468  IncrementallySortBlocks();
    6469  }
    6470 
     6471  // Destruction of an empty block. Deferred until this point, outside of the mutex
     6472  // lock, for performance reasons.
    6473  if(pBlockToDelete != VMA_NULL)
    6474  {
    6475  VMA_DEBUG_LOG(" Deleted empty allocation");
    6476  pBlockToDelete->Destroy(m_hAllocator);
    6477  vma_delete(m_hAllocator, pBlockToDelete);
    6478  }
    6479 }
    6480 
    6481 size_t VmaBlockVector::CalcMaxBlockSize() const
    6482 {
    6483  size_t result = 0;
    6484  for(size_t i = m_Blocks.size(); i--; )
    6485  {
    6486  result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
    6487  if(result >= m_PreferredBlockSize)
    6488  {
    6489  break;
    6490  }
    6491  }
    6492  return result;
    6493 }
    6494 
    6495 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    6496 {
    6497  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    6498  {
    6499  if(m_Blocks[blockIndex] == pBlock)
    6500  {
    6501  VmaVectorRemove(m_Blocks, blockIndex);
    6502  return;
    6503  }
    6504  }
    6505  VMA_ASSERT(0);
    6506 }
    6507 
    6508 void VmaBlockVector::IncrementallySortBlocks()
    6509 {
    6510  // Bubble sort only until first swap.
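    // (At most one swap per call keeps this O(n). Repeated calls from Free()
    // keep m_Blocks approximately sorted by ascending sum of free size, so
    // allocation tries the most-full blocks first.)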
    6511  for(size_t i = 1; i < m_Blocks.size(); ++i)
    6512  {
    6513  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
    6514  {
    6515  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    6516  return;
    6517  }
    6518  }
    6519 }
    6520 
    6521 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    6522 {
    6523  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    6524  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    6525  allocInfo.allocationSize = blockSize;
    6526  VkDeviceMemory mem = VK_NULL_HANDLE;
    6527  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    6528  if(res < 0)
    6529  {
    6530  return res;
    6531  }
    6532 
    6533  // New VkDeviceMemory successfully created.
    6534 
    6535  // Create new Allocation for it.
    6536  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    6537  pBlock->Init(
    6538  m_MemoryTypeIndex,
    6539  mem,
    6540  allocInfo.allocationSize);
    6541 
    6542  m_Blocks.push_back(pBlock);
    6543  if(pNewBlockIndex != VMA_NULL)
    6544  {
    6545  *pNewBlockIndex = m_Blocks.size() - 1;
    6546  }
    6547 
    6548  return VK_SUCCESS;
    6549 }
    6550 
    6551 #if VMA_STATS_STRING_ENABLED
    6552 
    6553 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    6554 {
    6555  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6556 
    6557  json.BeginObject();
    6558 
    6559  if(m_IsCustomPool)
    6560  {
    6561  json.WriteString("MemoryTypeIndex");
    6562  json.WriteNumber(m_MemoryTypeIndex);
    6563 
    6564  json.WriteString("BlockSize");
    6565  json.WriteNumber(m_PreferredBlockSize);
    6566 
    6567  json.WriteString("BlockCount");
    6568  json.BeginObject(true);
    6569  if(m_MinBlockCount > 0)
    6570  {
    6571  json.WriteString("Min");
    6572  json.WriteNumber((uint64_t)m_MinBlockCount);
    6573  }
    6574  if(m_MaxBlockCount < SIZE_MAX)
    6575  {
    6576  json.WriteString("Max");
    6577  json.WriteNumber((uint64_t)m_MaxBlockCount);
    6578  }
    6579  json.WriteString("Cur");
    6580  json.WriteNumber((uint64_t)m_Blocks.size());
    6581  json.EndObject();
    6582 
    6583  if(m_FrameInUseCount > 0)
    6584  {
    6585  json.WriteString("FrameInUseCount");
    6586  json.WriteNumber(m_FrameInUseCount);
    6587  }
    6588  }
    6589  else
    6590  {
    6591  json.WriteString("PreferredBlockSize");
    6592  json.WriteNumber(m_PreferredBlockSize);
    6593  }
    6594 
    6595  json.WriteString("Blocks");
    6596  json.BeginArray();
    6597  for(size_t i = 0; i < m_Blocks.size(); ++i)
    6598  {
    6599  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
    6600  }
    6601  json.EndArray();
    6602 
    6603  json.EndObject();
    6604 }
    6605 
    6606 #endif // #if VMA_STATS_STRING_ENABLED
    6607 
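// When VMA_STATS_STRING_ENABLED is nonzero, the map printed above reaches user
// code through vmaBuildStatsString(). A minimal sketch:
//
//   char* statsString = VMA_NULL;
//   vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
//   printf("%s\n", statsString);
//   vmaFreeStatsString(allocator, statsString);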
    6608 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    6609  VmaAllocator hAllocator,
    6610  uint32_t currentFrameIndex)
    6611 {
    6612  if(m_pDefragmentator == VMA_NULL)
    6613  {
    6614  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    6615  hAllocator,
    6616  this,
    6617  currentFrameIndex);
    6618  }
    6619 
    6620  return m_pDefragmentator;
    6621 }
    6622 
    6623 VkResult VmaBlockVector::Defragment(
    6624  VmaDefragmentationStats* pDefragmentationStats,
    6625  VkDeviceSize& maxBytesToMove,
    6626  uint32_t& maxAllocationsToMove)
    6627 {
    6628  if(m_pDefragmentator == VMA_NULL)
    6629  {
    6630  return VK_SUCCESS;
    6631  }
    6632 
    6633  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6634 
    6635  // Defragment.
    6636  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
    6637 
    6638  // Accumulate statistics.
    6639  if(pDefragmentationStats != VMA_NULL)
    6640  {
    6641  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
    6642  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
    6643  pDefragmentationStats->bytesMoved += bytesMoved;
    6644  pDefragmentationStats->allocationsMoved += allocationsMoved;
    6645  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    6646  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    6647  maxBytesToMove -= bytesMoved;
    6648  maxAllocationsToMove -= allocationsMoved;
    6649  }
    6650 
    6651  // Free empty blocks.
    6652  m_HasEmptyBlock = false;
    6653  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    6654  {
    6655  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    6656  if(pBlock->m_Metadata.IsEmpty())
    6657  {
    6658  if(m_Blocks.size() > m_MinBlockCount)
    6659  {
    6660  if(pDefragmentationStats != VMA_NULL)
    6661  {
    6662  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    6663  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
    6664  }
    6665 
    6666  VmaVectorRemove(m_Blocks, blockIndex);
    6667  pBlock->Destroy(m_hAllocator);
    6668  vma_delete(m_hAllocator, pBlock);
    6669  }
    6670  else
    6671  {
    6672  m_HasEmptyBlock = true;
    6673  }
    6674  }
    6675  }
    6676 
    6677  return result;
    6678 }
    6679 
    6680 void VmaBlockVector::DestroyDefragmentator()
    6681 {
    6682  if(m_pDefragmentator != VMA_NULL)
    6683  {
    6684  vma_delete(m_hAllocator, m_pDefragmentator);
    6685  m_pDefragmentator = VMA_NULL;
    6686  }
    6687 }
    6688 
    6689 void VmaBlockVector::MakePoolAllocationsLost(
    6690  uint32_t currentFrameIndex,
    6691  size_t* pLostAllocationCount)
    6692 {
    6693  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6694  size_t lostAllocationCount = 0;
    6695  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    6696  {
    6697  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    6698  VMA_ASSERT(pBlock);
    6699  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    6700  }
    6701  if(pLostAllocationCount != VMA_NULL)
    6702  {
    6703  *pLostAllocationCount = lostAllocationCount;
    6704  }
    6705 }
    6706 
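// Exposed publicly as vmaMakePoolAllocationsLost(). A minimal sketch:
//
//   size_t lostCount = 0;
//   vmaMakePoolAllocationsLost(allocator, pool, &lostCount);
//   // lostCount = number of allocations just marked as lost in this pool.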
    6707 void VmaBlockVector::AddStats(VmaStats* pStats)
    6708 {
    6709  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    6710  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    6711 
    6712  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6713 
    6714  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    6715  {
    6716  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    6717  VMA_ASSERT(pBlock);
    6718  VMA_HEAVY_ASSERT(pBlock->Validate());
    6719  VmaStatInfo allocationStatInfo;
    6720  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
    6721  VmaAddStatInfo(pStats->total, allocationStatInfo);
    6722  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    6723  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    6724  }
    6725 }
    6726 
    6727 ////////////////////////////////////////////////////////////////////////////////
    6728 // VmaDefragmentator members definition
    6729 
    6730 VmaDefragmentator::VmaDefragmentator(
    6731  VmaAllocator hAllocator,
    6732  VmaBlockVector* pBlockVector,
    6733  uint32_t currentFrameIndex) :
    6734  m_hAllocator(hAllocator),
    6735  m_pBlockVector(pBlockVector),
    6736  m_CurrentFrameIndex(currentFrameIndex),
    6737  m_BytesMoved(0),
    6738  m_AllocationsMoved(0),
    6739  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    6740  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    6741 {
    6742 }
    6743 
    6744 VmaDefragmentator::~VmaDefragmentator()
    6745 {
    6746  for(size_t i = m_Blocks.size(); i--; )
    6747  {
    6748  vma_delete(m_hAllocator, m_Blocks[i]);
    6749  }
    6750 }
    6751 
    6752 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    6753 {
    6754  AllocationInfo allocInfo;
    6755  allocInfo.m_hAllocation = hAlloc;
    6756  allocInfo.m_pChanged = pChanged;
    6757  m_Allocations.push_back(allocInfo);
    6758 }
    6759 
    6760 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    6761 {
    6762  // It has already been mapped for defragmentation.
    6763  if(m_pMappedDataForDefragmentation)
    6764  {
    6765  *ppMappedData = m_pMappedDataForDefragmentation;
    6766  return VK_SUCCESS;
    6767  }
    6768 
    6769  // It is originally mapped.
    6770  if(m_pBlock->m_Mapping.GetMappedData())
    6771  {
    6772  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
    6773  return VK_SUCCESS;
    6774  }
    6775 
    6776  // Map on first usage.
    6777  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    6778  *ppMappedData = m_pMappedDataForDefragmentation;
    6779  return res;
    6780 }
    6781 
    6782 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    6783 {
    6784  if(m_pMappedDataForDefragmentation != VMA_NULL)
    6785  {
    6786  m_pBlock->Unmap(hAllocator, 1);
    6787  }
    6788 }
    6789 
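// A single defragmentation round walks allocations from the most "source"
// block (back of m_Blocks) toward the most "destination" block (front),
// moving each allocation to the earliest block and offset where it fits,
// until the byte or allocation budget passed in is exhausted.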
    6790 VkResult VmaDefragmentator::DefragmentRound(
    6791  VkDeviceSize maxBytesToMove,
    6792  uint32_t maxAllocationsToMove)
    6793 {
    6794  if(m_Blocks.empty())
    6795  {
    6796  return VK_SUCCESS;
    6797  }
    6798 
    6799  size_t srcBlockIndex = m_Blocks.size() - 1;
    6800  size_t srcAllocIndex = SIZE_MAX;
    6801  for(;;)
    6802  {
    6803  // 1. Find next allocation to move.
    6804  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    6805  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
    6806  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    6807  {
    6808  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    6809  {
    6810  // Finished: no more allocations to process.
    6811  if(srcBlockIndex == 0)
    6812  {
    6813  return VK_SUCCESS;
    6814  }
    6815  else
    6816  {
    6817  --srcBlockIndex;
    6818  srcAllocIndex = SIZE_MAX;
    6819  }
    6820  }
    6821  else
    6822  {
    6823  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    6824  }
    6825  }
    6826 
    6827  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    6828  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    6829 
    6830  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    6831  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    6832  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    6833  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    6834 
    6835  // 2. Try to find new place for this allocation in preceding or current block.
    6836  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    6837  {
    6838  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    6839  VmaAllocationRequest dstAllocRequest;
    6840  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
    6841  m_CurrentFrameIndex,
    6842  m_pBlockVector->GetFrameInUseCount(),
    6843  m_pBlockVector->GetBufferImageGranularity(),
    6844  size,
    6845  alignment,
    6846  suballocType,
    6847  false, // canMakeOtherLost
    6848  &dstAllocRequest) &&
    6849  MoveMakesSense(
    6850  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    6851  {
    6852  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    6853 
    6854  // Reached limit on number of allocations or bytes to move.
    6855  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    6856  (m_BytesMoved + size > maxBytesToMove))
    6857  {
    6858  return VK_INCOMPLETE;
    6859  }
    6860 
    6861  void* pDstMappedData = VMA_NULL;
    6862  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    6863  if(res != VK_SUCCESS)
    6864  {
    6865  return res;
    6866  }
    6867 
    6868  void* pSrcMappedData = VMA_NULL;
    6869  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    6870  if(res != VK_SUCCESS)
    6871  {
    6872  return res;
    6873  }
    6874 
    6875  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    6876  memcpy(
    6877  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    6878  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    6879  static_cast<size_t>(size));
    6880 
    6881  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
    6882  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
    6883 
    6884  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    6885 
    6886  if(allocInfo.m_pChanged != VMA_NULL)
    6887  {
    6888  *allocInfo.m_pChanged = VK_TRUE;
    6889  }
    6890 
    6891  ++m_AllocationsMoved;
    6892  m_BytesMoved += size;
    6893 
    6894  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    6895 
    6896  break;
    6897  }
    6898  }
    6899 
    6900  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for next round.
    6901 
    6902  if(srcAllocIndex > 0)
    6903  {
    6904  --srcAllocIndex;
    6905  }
    6906  else
    6907  {
    6908  if(srcBlockIndex > 0)
    6909  {
    6910  --srcBlockIndex;
    6911  srcAllocIndex = SIZE_MAX;
    6912  }
    6913  else
    6914  {
    6915  return VK_SUCCESS;
    6916  }
    6917  }
    6918  }
    6919 }
    6920 
    6921 VkResult VmaDefragmentator::Defragment(
    6922  VkDeviceSize maxBytesToMove,
    6923  uint32_t maxAllocationsToMove)
    6924 {
    6925  if(m_Allocations.empty())
    6926  {
    6927  return VK_SUCCESS;
    6928  }
    6929 
    6930  // Create block info for each block.
    6931  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    6932  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6933  {
    6934  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    6935  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    6936  m_Blocks.push_back(pBlockInfo);
    6937  }
    6938 
    6939  // Sort them by m_pBlock pointer value.
    6940  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    6941 
    6942  // Move each allocation info from m_Allocations into the m_Allocations of its owning BlockInfo in m_Blocks.
    6943  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
    6944  {
    6945  AllocationInfo& allocInfo = m_Allocations[allocIndex];
    6946  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
    6947  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    6948  {
    6949  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
    6950  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    6951  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    6952  {
    6953  (*it)->m_Allocations.push_back(allocInfo);
    6954  }
    6955  else
    6956  {
    6957  VMA_ASSERT(0);
    6958  }
    6959  }
    6960  }
    6961  m_Allocations.clear();
    6962 
    6963  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6964  {
    6965  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    6966  pBlockInfo->CalcHasNonMovableAllocations();
    6967  pBlockInfo->SortAllocationsBySizeDescecnding();
    6968  }
    6969 
    6970  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    6971  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    6972 
    6973  // Execute defragmentation rounds (the main part).
    6974  VkResult result = VK_SUCCESS;
    6975  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    6976  {
    6977  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    6978  }
    6979 
    6980  // Unmap blocks that were mapped for defragmentation.
    6981  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6982  {
    6983  m_Blocks[blockIndex]->Unmap(m_hAllocator);
    6984  }
    6985 
    6986  return result;
    6987 }
    6988 
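// MoveMakesSense() defines a strict "forward" ordering: a move is accepted
// only into an earlier block, or to a lower offset within the same block,
// so defragmentation always compacts toward the front and cannot cycle.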
    6989 bool VmaDefragmentator::MoveMakesSense(
    6990  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6991  size_t srcBlockIndex, VkDeviceSize srcOffset)
    6992 {
    6993  if(dstBlockIndex < srcBlockIndex)
    6994  {
    6995  return true;
    6996  }
    6997  if(dstBlockIndex > srcBlockIndex)
    6998  {
    6999  return false;
    7000  }
    7001  if(dstOffset < srcOffset)
    7002  {
    7003  return true;
    7004  }
    7005  return false;
    7006 }
    7007 
    7008 ////////////////////////////////////////////////////////////////////////////////
    7009 // VmaAllocator_T
    7010 
    7011 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    7012  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    7013  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    7014  m_hDevice(pCreateInfo->device),
    7015  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    7016  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    7017  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    7018  m_PreferredLargeHeapBlockSize(0),
    7019  m_PhysicalDevice(pCreateInfo->physicalDevice),
    7020  m_CurrentFrameIndex(0),
    7021  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
    7022 {
    7023  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    7024 
    7025  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    7026  memset(&m_MemProps, 0, sizeof(m_MemProps));
    7027  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    7028 
    7029  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    7030  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    7031 
    7032  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    7033  {
    7034  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    7035  }
    7036 
    7037  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    7038  {
    7039  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    7040  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    7041  }
    7042 
    7043  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    7044 
    7045  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    7046  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    7047 
    7048  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    7049  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    7050 
    7051  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    7052  {
    7053  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    7054  {
    7055  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    7056  if(limit != VK_WHOLE_SIZE)
    7057  {
    7058  m_HeapSizeLimit[heapIndex] = limit;
    7059  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    7060  {
    7061  m_MemProps.memoryHeaps[heapIndex].size = limit;
    7062  }
    7063  }
    7064  }
    7065  }
    7066 
    7067  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7068  {
    7069  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    7070 
    7071  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    7072  this,
    7073  memTypeIndex,
    7074  preferredBlockSize,
    7075  0,
    7076  SIZE_MAX,
    7077  GetBufferImageGranularity(),
    7078  pCreateInfo->frameInUseCount,
    7079  false); // isCustomPool
    7080  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
    7081  // because minBlockCount is 0.
    7082  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    7083  }
    7084 }
    7085 
    7086 VmaAllocator_T::~VmaAllocator_T()
    7087 {
    7088  VMA_ASSERT(m_Pools.empty());
    7089 
    7090  for(size_t i = GetMemoryTypeCount(); i--; )
    7091  {
    7092  vma_delete(this, m_pDedicatedAllocations[i]);
    7093  vma_delete(this, m_pBlockVectors[i]);
    7094  }
    7095 }
    7096 
    7097 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    7098 {
    7099 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    7100  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    7101  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    7102  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    7103  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    7104  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    7105  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    7106  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    7107  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    7108  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    7109  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    7110  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    7111  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    7112  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    7113  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    7114  if(m_UseKhrDedicatedAllocation)
    7115  {
    7116  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
    7117  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
    7118  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
    7119  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    7120  }
    7121 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    7122 
    7123 #define VMA_COPY_IF_NOT_NULL(funcName) \
    7124  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    7125 
    7126  if(pVulkanFunctions != VMA_NULL)
    7127  {
    7128  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    7129  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    7130  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    7131  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    7132  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    7133  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    7134  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    7135  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    7136  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    7137  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    7138  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    7139  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    7140  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    7141  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    7142  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    7143  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    7144  }
    7145 
    7146 #undef VMA_COPY_IF_NOT_NULL
    7147 
    7148  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    7149  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    7150  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    7151  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    7152  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    7153  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    7154  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    7155  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    7156  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    7157  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    7158  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    7159  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    7160  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    7161  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    7162  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    7163  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    7164  if(m_UseKhrDedicatedAllocation)
    7165  {
    7166  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    7167  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    7168  }
    7169 }
    7170 
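// When VMA_STATIC_VULKAN_FUNCTIONS is 0, these pointers must be supplied at
// allocator creation. A minimal sketch (only two members shown; every member
// asserted above must be filled the same way; physicalDevice, device and
// allocator are placeholders):
//
//   VmaVulkanFunctions vulkanFunctions = {};
//   vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
//   vulkanFunctions.vkFreeMemory = &vkFreeMemory;
//   // ...and so on for the remaining members...
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.physicalDevice = physicalDevice;
//   allocatorInfo.device = device;
//   allocatorInfo.pVulkanFunctions = &vulkanFunctions;
//   vmaCreateAllocator(&allocatorInfo, &allocator);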
    7171 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    7172 {
    7173  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    7174  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    7175  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    7176  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    7177 }
    7178 
    7179 VkResult VmaAllocator_T::AllocateMemoryOfType(
    7180  const VkMemoryRequirements& vkMemReq,
    7181  bool dedicatedAllocation,
    7182  VkBuffer dedicatedBuffer,
    7183  VkImage dedicatedImage,
    7184  const VmaAllocationCreateInfo& createInfo,
    7185  uint32_t memTypeIndex,
    7186  VmaSuballocationType suballocType,
    7187  VmaAllocation* pAllocation)
    7188 {
    7189  VMA_ASSERT(pAllocation != VMA_NULL);
    7190  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    7191 
    7192  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    7193 
    7194  // If memory type is not HOST_VISIBLE, disable MAPPED.
    7195  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    7196  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    7197  {
    7198  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    7199  }
    7200 
    7201  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    7202  VMA_ASSERT(blockVector);
    7203 
    7204  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    7205  bool preferDedicatedMemory =
    7206  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    7207  dedicatedAllocation ||
    7208  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
    7209  vkMemReq.size > preferredBlockSize / 2;
    7210 
    7211  if(preferDedicatedMemory &&
    7212  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    7213  finalCreateInfo.pool == VK_NULL_HANDLE)
    7214  {
    7215  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    7216  }
    7217 
    7218  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    7219  {
    7220  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    7221  {
    7222  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7223  }
    7224  else
    7225  {
    7226  return AllocateDedicatedMemory(
    7227  vkMemReq.size,
    7228  suballocType,
    7229  memTypeIndex,
    7230  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    7231  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    7232  finalCreateInfo.pUserData,
    7233  dedicatedBuffer,
    7234  dedicatedImage,
    7235  pAllocation);
    7236  }
    7237  }
    7238  else
    7239  {
    7240  VkResult res = blockVector->Allocate(
    7241  VK_NULL_HANDLE, // hCurrentPool
    7242  m_CurrentFrameIndex.load(),
    7243  vkMemReq,
    7244  finalCreateInfo,
    7245  suballocType,
    7246  pAllocation);
    7247  if(res == VK_SUCCESS)
    7248  {
    7249  return res;
    7250  }
    7251 
    7252  // Block-vector allocation failed. Try dedicated memory as a fallback.
    7253  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    7254  {
    7255  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7256  }
    7257  else
    7258  {
    7259  res = AllocateDedicatedMemory(
    7260  vkMemReq.size,
    7261  suballocType,
    7262  memTypeIndex,
    7263  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    7264  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    7265  finalCreateInfo.pUserData,
    7266  dedicatedBuffer,
    7267  dedicatedImage,
    7268  pAllocation);
    7269  if(res == VK_SUCCESS)
    7270  {
    7271  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
    7272  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    7273  return VK_SUCCESS;
    7274  }
    7275  else
    7276  {
    7277  // Everything failed: Return error code.
    7278  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    7279  return res;
    7280  }
    7281  }
    7282  }
    7283 }
    7284 
    7285 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    7286  VkDeviceSize size,
    7287  VmaSuballocationType suballocType,
    7288  uint32_t memTypeIndex,
    7289  bool map,
    7290  bool isUserDataString,
    7291  void* pUserData,
    7292  VkBuffer dedicatedBuffer,
    7293  VkImage dedicatedImage,
    7294  VmaAllocation* pAllocation)
    7295 {
    7296  VMA_ASSERT(pAllocation);
    7297 
    7298  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    7299  allocInfo.memoryTypeIndex = memTypeIndex;
    7300  allocInfo.allocationSize = size;
    7301 
    7302  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    7303  if(m_UseKhrDedicatedAllocation)
    7304  {
    7305  if(dedicatedBuffer != VK_NULL_HANDLE)
    7306  {
    7307  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    7308  dedicatedAllocInfo.buffer = dedicatedBuffer;
    7309  allocInfo.pNext = &dedicatedAllocInfo;
    7310  }
    7311  else if(dedicatedImage != VK_NULL_HANDLE)
    7312  {
    7313  dedicatedAllocInfo.image = dedicatedImage;
    7314  allocInfo.pNext = &dedicatedAllocInfo;
    7315  }
    7316  }
    7317 
    7318  // Allocate VkDeviceMemory.
    7319  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    7320  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    7321  if(res < 0)
    7322  {
    7323  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    7324  return res;
    7325  }
    7326 
    7327  void* pMappedData = VMA_NULL;
    7328  if(map)
    7329  {
    7330  res = (*m_VulkanFunctions.vkMapMemory)(
    7331  m_hDevice,
    7332  hMemory,
    7333  0,
    7334  VK_WHOLE_SIZE,
    7335  0,
    7336  &pMappedData);
    7337  if(res < 0)
    7338  {
    7339  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    7340  FreeVulkanMemory(memTypeIndex, size, hMemory);
    7341  return res;
    7342  }
    7343  }
    7344 
    7345  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    7346  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    7347  (*pAllocation)->SetUserData(this, pUserData);
    7348 
    7349  // Register it in m_pDedicatedAllocations.
    7350  {
    7351  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7352  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    7353  VMA_ASSERT(pDedicatedAllocations);
    7354  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    7355  }
    7356 
    7357  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
    7358 
    7359  return VK_SUCCESS;
    7360 }
    7361 
    7362 void VmaAllocator_T::GetBufferMemoryRequirements(
    7363  VkBuffer hBuffer,
    7364  VkMemoryRequirements& memReq,
    7365  bool& requiresDedicatedAllocation,
    7366  bool& prefersDedicatedAllocation) const
    7367 {
    7368  if(m_UseKhrDedicatedAllocation)
    7369  {
    7370  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    7371  memReqInfo.buffer = hBuffer;
    7372 
    7373  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    7374 
    7375  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    7376  memReq2.pNext = &memDedicatedReq;
    7377 
    7378  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    7379 
    7380  memReq = memReq2.memoryRequirements;
    7381  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    7382  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    7383  }
    7384  else
    7385  {
    7386  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    7387  requiresDedicatedAllocation = false;
    7388  prefersDedicatedAllocation = false;
    7389  }
    7390 }
    7391 
    7392 void VmaAllocator_T::GetImageMemoryRequirements(
    7393  VkImage hImage,
    7394  VkMemoryRequirements& memReq,
    7395  bool& requiresDedicatedAllocation,
    7396  bool& prefersDedicatedAllocation) const
    7397 {
    7398  if(m_UseKhrDedicatedAllocation)
    7399  {
    7400  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    7401  memReqInfo.image = hImage;
    7402 
    7403  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    7404 
    7405  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    7406  memReq2.pNext = &memDedicatedReq;
    7407 
    7408  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    7409 
    7410  memReq = memReq2.memoryRequirements;
    7411  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    7412  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    7413  }
    7414  else
    7415  {
    7416  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    7417  requiresDedicatedAllocation = false;
    7418  prefersDedicatedAllocation = false;
    7419  }
    7420 }
    7421 
    7422 VkResult VmaAllocator_T::AllocateMemory(
    7423  const VkMemoryRequirements& vkMemReq,
    7424  bool requiresDedicatedAllocation,
    7425  bool prefersDedicatedAllocation,
    7426  VkBuffer dedicatedBuffer,
    7427  VkImage dedicatedImage,
    7428  const VmaAllocationCreateInfo& createInfo,
    7429  VmaSuballocationType suballocType,
    7430  VmaAllocation* pAllocation)
    7431 {
    7432  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    7433  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    7434  {
    7435  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    7436  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7437  }
    7438  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    7439  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
    7440  {
    7441  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    7442  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7443  }
    7444  if(requiresDedicatedAllocation)
    7445  {
    7446  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    7447  {
    7448  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    7449  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7450  }
    7451  if(createInfo.pool != VK_NULL_HANDLE)
    7452  {
    7453  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    7454  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7455  }
    7456  }
    7457  if((createInfo.pool != VK_NULL_HANDLE) &&
    7458  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    7459  {
    7460  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    7461  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7462  }
    7463 
    7464  if(createInfo.pool != VK_NULL_HANDLE)
    7465  {
    7466  return createInfo.pool->m_BlockVector.Allocate(
    7467  createInfo.pool,
    7468  m_CurrentFrameIndex.load(),
    7469  vkMemReq,
    7470  createInfo,
    7471  suballocType,
    7472  pAllocation);
    7473  }
    7474  else
    7475  {
    7476  // Bit mask of Vulkan memory types acceptable for this allocation.
    7477  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    7478  uint32_t memTypeIndex = UINT32_MAX;
    7479  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    7480  if(res == VK_SUCCESS)
    7481  {
    7482  res = AllocateMemoryOfType(
    7483  vkMemReq,
    7484  requiresDedicatedAllocation || prefersDedicatedAllocation,
    7485  dedicatedBuffer,
    7486  dedicatedImage,
    7487  createInfo,
    7488  memTypeIndex,
    7489  suballocType,
    7490  pAllocation);
    7491  // Succeeded on first try.
    7492  if(res == VK_SUCCESS)
    7493  {
    7494  return res;
    7495  }
    7496  // Allocation from this memory type failed. Try other compatible memory types.
    7497  else
    7498  {
    7499  for(;;)
    7500  {
    7501  // Remove old memTypeIndex from list of possibilities.
    7502  memoryTypeBits &= ~(1u << memTypeIndex);
    7503  // Find alternative memTypeIndex.
    7504  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    7505  if(res == VK_SUCCESS)
    7506  {
    7507  res = AllocateMemoryOfType(
    7508  vkMemReq,
    7509  requiresDedicatedAllocation || prefersDedicatedAllocation,
    7510  dedicatedBuffer,
    7511  dedicatedImage,
    7512  createInfo,
    7513  memTypeIndex,
    7514  suballocType,
    7515  pAllocation);
    7516  // Allocation from this alternative memory type succeeded.
    7517  if(res == VK_SUCCESS)
    7518  {
    7519  return res;
    7520  }
    7521  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    7522  }
    7523  // No other matching memory type index could be found.
    7524  else
    7525  {
    7526  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    7527  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7528  }
    7529  }
    7530  }
    7531  }
    7532  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    7533  else
    7534  return res;
    7535  }
    7536 }
    7537 
    7538 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    7539 {
    7540  VMA_ASSERT(allocation);
    7541 
    7542  if(allocation->CanBecomeLost() == false ||
    7543  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    7544  {
    7545  switch(allocation->GetType())
    7546  {
    7547  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    7548  {
    7549  VmaBlockVector* pBlockVector = VMA_NULL;
    7550  VmaPool hPool = allocation->GetPool();
    7551  if(hPool != VK_NULL_HANDLE)
    7552  {
    7553  pBlockVector = &hPool->m_BlockVector;
    7554  }
    7555  else
    7556  {
    7557  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    7558  pBlockVector = m_pBlockVectors[memTypeIndex];
    7559  }
    7560  pBlockVector->Free(allocation);
    7561  }
    7562  break;
    7563  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    7564  FreeDedicatedMemory(allocation);
    7565  break;
    7566  default:
    7567  VMA_ASSERT(0);
    7568  }
    7569  }
    7570 
    7571  allocation->SetUserData(this, VMA_NULL);
    7572  vma_delete(this, allocation);
    7573 }
    7574 
    7575 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    7576 {
    7577  // Initialize.
    7578  InitStatInfo(pStats->total);
    7579  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    7580  InitStatInfo(pStats->memoryType[i]);
    7581  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    7582  InitStatInfo(pStats->memoryHeap[i]);
    7583 
    7584  // Process default pools.
    7585  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7586  {
    7587  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    7588  VMA_ASSERT(pBlockVector);
    7589  pBlockVector->AddStats(pStats);
    7590  }
    7591 
    7592  // Process custom pools.
    7593  {
    7594  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7595  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    7596  {
    7597  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
    7598  }
    7599  }
    7600 
    7601  // Process dedicated allocations.
    7602  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7603  {
    7604  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    7605  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7606  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    7607  VMA_ASSERT(pDedicatedAllocVector);
    7608  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    7609  {
    7610  VmaStatInfo allocationStatInfo;
    7611  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    7612  VmaAddStatInfo(pStats->total, allocationStatInfo);
    7613  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    7614  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    7615  }
    7616  }
    7617 
    7618  // Postprocess.
    7619  VmaPostprocessCalcStatInfo(pStats->total);
    7620  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    7621  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    7622  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    7623  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    7624 }
    7625 
    7626 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    7627 
    7628 VkResult VmaAllocator_T::Defragment(
    7629  VmaAllocation* pAllocations,
    7630  size_t allocationCount,
    7631  VkBool32* pAllocationsChanged,
    7632  const VmaDefragmentationInfo* pDefragmentationInfo,
    7633  VmaDefragmentationStats* pDefragmentationStats)
    7634 {
    7635  if(pAllocationsChanged != VMA_NULL)
    7636  {
    7637  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
    7638  }
    7639  if(pDefragmentationStats != VMA_NULL)
    7640  {
    7641  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    7642  }
    7643 
    7644  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    7645 
    7646  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    7647 
    7648  const size_t poolCount = m_Pools.size();
    7649 
    7650  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    7651  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    7652  {
    7653  VmaAllocation hAlloc = pAllocations[allocIndex];
    7654  VMA_ASSERT(hAlloc);
    7655  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    7656  // DedicatedAlloc cannot be defragmented.
    7657  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    7658  // Only HOST_VISIBLE memory types can be defragmented.
    7659  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
    7660  // Lost allocation cannot be defragmented.
    7661  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    7662  {
    7663  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    7664 
    7665  const VmaPool hAllocPool = hAlloc->GetPool();
    7666  // This allocation belongs to custom pool.
    7667  if(hAllocPool != VK_NULL_HANDLE)
    7668  {
    7669  pAllocBlockVector = &hAllocPool->GetBlockVector();
    7670  }
    7671  // This allocation belongs to general pool.
    7672  else
    7673  {
    7674  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
    7675  }
    7676 
    7677  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    7678 
    7679  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    7680  &pAllocationsChanged[allocIndex] : VMA_NULL;
    7681  pDefragmentator->AddAllocation(hAlloc, pChanged);
    7682  }
    7683  }
    7684 
    7685  VkResult result = VK_SUCCESS;
    7686 
    7687  // ======== Main processing.
    7688 
    7689  VkDeviceSize maxBytesToMove = VK_WHOLE_SIZE;
    7690  uint32_t maxAllocationsToMove = UINT32_MAX;
    7691  if(pDefragmentationInfo != VMA_NULL)
    7692  {
    7693  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    7694  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    7695  }
    7696 
    7697  // Process standard memory.
    7698  for(uint32_t memTypeIndex = 0;
    7699  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    7700  ++memTypeIndex)
    7701  {
    7702  // Only HOST_VISIBLE memory types can be defragmented.
    7703  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7704  {
    7705  result = m_pBlockVectors[memTypeIndex]->Defragment(
    7706  pDefragmentationStats,
    7707  maxBytesToMove,
    7708  maxAllocationsToMove);
    7709  }
    7710  }
    7711 
    7712  // Process custom pools.
    7713  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    7714  {
    7715  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
    7716  pDefragmentationStats,
    7717  maxBytesToMove,
    7718  maxAllocationsToMove);
    7719  }
    7720 
    7721  // ======== Destroy defragmentators.
    7722 
    7723  // Process custom pools.
    7724  for(size_t poolIndex = poolCount; poolIndex--; )
    7725  {
    7726  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
    7727  }
    7728 
    7729  // Process standard memory.
    7730  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    7731  {
    7732  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7733  {
    7734  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
    7735  }
    7736  }
    7737 
    7738  return result;
    7739 }
    7740 
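// Exposed publicly as vmaDefragment(). A minimal sketch, assuming allocs is an
// array of allocCount allocations placed in HOST_VISIBLE memory:
//
//   VmaDefragmentationInfo defragInfo = {};
//   defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // no byte limit
//   defragInfo.maxAllocationsToMove = UINT32_MAX; // no count limit
//
//   VmaDefragmentationStats stats = {};
//   std::vector<VkBool32> changed(allocCount);
//   vmaDefragment(allocator, allocs, allocCount, changed.data(), &defragInfo, &stats);
//
//   // Wherever changed[i] == VK_TRUE the allocation's memory/offset moved, so
//   // buffers/images bound to it must be recreated and rebound.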
    7741 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    7742 {
    7743  if(hAllocation->CanBecomeLost())
    7744  {
    7745  /*
    7746  Warning: This is a carefully designed algorithm.
    7747  Do not modify unless you really know what you're doing :)
    7748  */
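    // Outline: read the allocation's last-use frame index. If it is
    // VMA_FRAME_INDEX_LOST, report the lost state. If it already equals the
    // current frame, report normally. Otherwise compare-exchange it forward
    // to the current frame and re-check, so a concurrent MakeLost() between
    // the read and the update is never silently missed.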
    7749  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    7750  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    7751  for(;;)
    7752  {
    7753  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    7754  {
    7755  pAllocationInfo->memoryType = UINT32_MAX;
    7756  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    7757  pAllocationInfo->offset = 0;
    7758  pAllocationInfo->size = hAllocation->GetSize();
    7759  pAllocationInfo->pMappedData = VMA_NULL;
    7760  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7761  return;
    7762  }
    7763  else if(localLastUseFrameIndex == localCurrFrameIndex)
    7764  {
    7765  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    7766  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    7767  pAllocationInfo->offset = hAllocation->GetOffset();
    7768  pAllocationInfo->size = hAllocation->GetSize();
    7769  pAllocationInfo->pMappedData = VMA_NULL;
    7770  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7771  return;
    7772  }
    7773  else // Last use time earlier than current time.
    7774  {
    7775  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    7776  {
    7777  localLastUseFrameIndex = localCurrFrameIndex;
    7778  }
    7779  }
    7780  }
    7781  }
    7782  else
    7783  {
    7784  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    7785  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    7786  pAllocationInfo->offset = hAllocation->GetOffset();
    7787  pAllocationInfo->size = hAllocation->GetSize();
    7788  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    7789  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7790  }
    7791 }
    7792 
    7793 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
    7794 {
    7795  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    7796  if(hAllocation->CanBecomeLost())
    7797  {
    7798  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    7799  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    7800  for(;;)
    7801  {
    7802  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    7803  {
    7804  return false;
    7805  }
    7806  else if(localLastUseFrameIndex == localCurrFrameIndex)
    7807  {
    7808  return true;
    7809  }
    7810  else // Last use time earlier than current time.
    7811  {
    7812  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    7813  {
    7814  localLastUseFrameIndex = localCurrFrameIndex;
    7815  }
    7816  }
    7817  }
    7818  }
    7819  else
    7820  {
    7821  return true;
    7822  }
    7823 }
    7824 
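// Exposed publicly as vmaTouchAllocation(), returning VkBool32. A minimal
// per-frame sketch:
//
//   vmaSetCurrentFrameIndex(allocator, frameIndex);
//   if(vmaTouchAllocation(allocator, alloc) == VK_TRUE)
//   {
//       // Safe to use this frame; the last-use frame index was refreshed.
//   }
//   else
//   {
//       // The allocation became lost - free it and recreate the resource.
//   }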
    7825 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    7826 {
    7827  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
    7828 
    7829  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    7830 
    7831  if(newCreateInfo.maxBlockCount == 0)
    7832  {
    7833  newCreateInfo.maxBlockCount = SIZE_MAX;
    7834  }
    7835  if(newCreateInfo.blockSize == 0)
    7836  {
    7837  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    7838  }
    7839 
    7840  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
    7841 
    7842  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    7843  if(res != VK_SUCCESS)
    7844  {
    7845  vma_delete(this, *pPool);
    7846  *pPool = VMA_NULL;
    7847  return res;
    7848  }
    7849 
    7850  // Add to m_Pools.
    7851  {
    7852  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7853  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    7854  }
    7855 
    7856  return VK_SUCCESS;
    7857 }
    7858 
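// Exposed publicly as vmaCreatePool(). A minimal sketch, assuming memTypeIndex
// came from vmaFindMemoryTypeIndex():
//
//   VmaPoolCreateInfo poolInfo = {};
//   poolInfo.memoryTypeIndex = memTypeIndex;
//   poolInfo.blockSize = 0;     // 0 = CalcPreferredBlockSize() is used
//   poolInfo.maxBlockCount = 0; // 0 = unlimited (SIZE_MAX)
//
//   VmaPool pool = VK_NULL_HANDLE;
//   VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
//   // ...allocate via VmaAllocationCreateInfo::pool = pool...
//   vmaDestroyPool(allocator, pool);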
    7859 void VmaAllocator_T::DestroyPool(VmaPool pool)
    7860 {
    7861  // Remove from m_Pools.
    7862  {
    7863  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7864  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    7865  VMA_ASSERT(success && "Pool not found in Allocator.");
    7866  }
    7867 
    7868  vma_delete(this, pool);
    7869 }
    7870 
    7871 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    7872 {
    7873  pool->m_BlockVector.GetPoolStats(pPoolStats);
    7874 }
    7875 
    7876 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    7877 {
    7878  m_CurrentFrameIndex.store(frameIndex);
    7879 }
    7880 
    7881 void VmaAllocator_T::MakePoolAllocationsLost(
    7882  VmaPool hPool,
    7883  size_t* pLostAllocationCount)
    7884 {
    7885  hPool->m_BlockVector.MakePoolAllocationsLost(
    7886  m_CurrentFrameIndex.load(),
    7887  pLostAllocationCount);
    7888 }
    7889 
    7890 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    7891 {
    7892  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    7893  (*pAllocation)->InitLost();
    7894 }
    7895 
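// Exposed publicly as vmaCreateLostAllocation(): returns a valid VmaAllocation
// handle that is permanently in the lost state, usable as a placeholder
// wherever a real allocation handle is expected.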
    7896 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    7897 {
    7898  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    7899 
    7900  VkResult res;
    7901  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7902  {
    7903  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7904  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    7905  {
    7906  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7907  if(res == VK_SUCCESS)
    7908  {
    7909  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    7910  }
    7911  }
    7912  else
    7913  {
    7914  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7915  }
    7916  }
    7917  else
    7918  {
    7919  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7920  }
    7921 
    7922  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    7923  {
    7924  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    7925  }
    7926 
    7927  return res;
    7928 }
    7929 
    7930 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    7931 {
    7932  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    7933  {
    7934  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    7935  }
    7936 
    7937  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    7938 
    7939  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    7940  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7941  {
    7942  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7943  m_HeapSizeLimit[heapIndex] += size;
    7944  }
    7945 }
    7946 
    7947 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    7948 {
    7949  if(hAllocation->CanBecomeLost())
    7950  {
    7951  return VK_ERROR_MEMORY_MAP_FAILED;
    7952  }
    7953 
    7954  switch(hAllocation->GetType())
    7955  {
    7956  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    7957  {
    7958  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    7959  char *pBytes = VMA_NULL;
    7960  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    7961  if(res == VK_SUCCESS)
    7962  {
    7963  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    7964  hAllocation->BlockAllocMap();
    7965  }
    7966  return res;
    7967  }
    7968  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    7969  return hAllocation->DedicatedAllocMap(this, ppData);
    7970  default:
    7971  VMA_ASSERT(0);
    7972  return VK_ERROR_MEMORY_MAP_FAILED;
    7973  }
    7974 }
    7975 
    7976 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    7977 {
    7978  switch(hAllocation->GetType())
    7979  {
    7980  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    7981  {
    7982  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    7983  hAllocation->BlockAllocUnmap();
    7984  pBlock->Unmap(this, 1);
    7985  }
    7986  break;
    7987  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    7988  hAllocation->DedicatedAllocUnmap(this);
    7989  break;
    7990  default:
    7991  VMA_ASSERT(0);
    7992  }
    7993 }
    7994 
    7995 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    7996 {
    7997  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    7998 
    7999  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    8000  {
    8001  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    8002  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    8003  VMA_ASSERT(pDedicatedAllocations);
    8004  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    8005  VMA_ASSERT(success);
    8006  }
    8007 
    8008  VkDeviceMemory hMemory = allocation->GetMemory();
    8009 
    8010  if(allocation->GetMappedData() != VMA_NULL)
    8011  {
    8012  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    8013  }
    8014 
    8015  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    8016 
    8017  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    8018 }
    8019 
    8020 #if VMA_STATS_STRING_ENABLED
    8021 
    8022 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    8023 {
    8024  bool dedicatedAllocationsStarted = false;
    8025  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    8026  {
    8027  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    8028  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    8029  VMA_ASSERT(pDedicatedAllocVector);
    8030  if(pDedicatedAllocVector->empty() == false)
    8031  {
    8032  if(dedicatedAllocationsStarted == false)
    8033  {
    8034  dedicatedAllocationsStarted = true;
    8035  json.WriteString("DedicatedAllocations");
    8036  json.BeginObject();
    8037  }
    8038 
    8039  json.BeginString("Type ");
    8040  json.ContinueString(memTypeIndex);
    8041  json.EndString();
    8042 
    8043  json.BeginArray();
    8044 
    8045  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    8046  {
    8047  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    8048  json.BeginObject(true);
    8049 
    8050  json.WriteString("Type");
    8051  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
    8052 
    8053  json.WriteString("Size");
    8054  json.WriteNumber(hAlloc->GetSize());
    8055 
    8056  const void* pUserData = hAlloc->GetUserData();
    8057  if(pUserData != VMA_NULL)
    8058  {
    8059  json.WriteString("UserData");
    8060  if(hAlloc->IsUserDataString())
    8061  {
    8062  json.WriteString((const char*)pUserData);
    8063  }
    8064  else
    8065  {
    8066  json.BeginString();
    8067  json.ContinueString_Pointer(pUserData);
    8068  json.EndString();
    8069  }
    8070  }
    8071 
    8072  json.EndObject();
    8073  }
    8074 
    8075  json.EndArray();
    8076  }
    8077  }
    8078  if(dedicatedAllocationsStarted)
    8079  {
    8080  json.EndObject();
    8081  }
    8082 
    8083  {
    8084  bool allocationsStarted = false;
    8085  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    8086  {
    8087  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    8088  {
    8089  if(allocationsStarted == false)
    8090  {
    8091  allocationsStarted = true;
    8092  json.WriteString("DefaultPools");
    8093  json.BeginObject();
    8094  }
    8095 
    8096  json.BeginString("Type ");
    8097  json.ContinueString(memTypeIndex);
    8098  json.EndString();
    8099 
    8100  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    8101  }
    8102  }
    8103  if(allocationsStarted)
    8104  {
    8105  json.EndObject();
    8106  }
    8107  }
    8108 
    8109  {
    8110  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    8111  const size_t poolCount = m_Pools.size();
    8112  if(poolCount > 0)
    8113  {
    8114  json.WriteString("Pools");
    8115  json.BeginArray();
    8116  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    8117  {
    8118  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    8119  }
    8120  json.EndArray();
    8121  }
    8122  }
    8123 }
    8124 
    8125 #endif // #if VMA_STATS_STRING_ENABLED
    8126 
    8127 static VkResult AllocateMemoryForImage(
    8128  VmaAllocator allocator,
    8129  VkImage image,
    8130  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8131  VmaSuballocationType suballocType,
    8132  VmaAllocation* pAllocation)
    8133 {
    8134  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
    8135 
    8136  VkMemoryRequirements vkMemReq = {};
    8137  bool requiresDedicatedAllocation = false;
    8138  bool prefersDedicatedAllocation = false;
    8139  allocator->GetImageMemoryRequirements(image, vkMemReq,
    8140  requiresDedicatedAllocation, prefersDedicatedAllocation);
    8141 
    8142  return allocator->AllocateMemory(
    8143  vkMemReq,
    8144  requiresDedicatedAllocation,
    8145  prefersDedicatedAllocation,
    8146  VK_NULL_HANDLE, // dedicatedBuffer
    8147  image, // dedicatedImage
    8148  *pAllocationCreateInfo,
    8149  suballocType,
    8150  pAllocation);
    8151 }
    8152 
    8154 // Public interface
    8155 
    8156 VkResult vmaCreateAllocator(
    8157  const VmaAllocatorCreateInfo* pCreateInfo,
    8158  VmaAllocator* pAllocator)
    8159 {
    8160  VMA_ASSERT(pCreateInfo && pAllocator);
    8161  VMA_DEBUG_LOG("vmaCreateAllocator");
    8162  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    8163  return VK_SUCCESS;
    8164 }
    8165 
    8166 void vmaDestroyAllocator(
    8167  VmaAllocator allocator)
    8168 {
    8169  if(allocator != VK_NULL_HANDLE)
    8170  {
    8171  VMA_DEBUG_LOG("vmaDestroyAllocator");
    8172  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    8173  vma_delete(&allocationCallbacks, allocator);
    8174  }
    8175 }
    8176 
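/*
Editorial usage sketch (not part of the original source) for the two functions
above, assuming physicalDevice and device were created by the application:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator = VK_NULL_HANDLE;
    if(vmaCreateAllocator(&allocatorInfo, &allocator) == VK_SUCCESS)
    {
        // ... use the allocator ...
        vmaDestroyAllocator(allocator); // frees via a local copy of the callbacks
    }
*/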
8177 void vmaGetPhysicalDeviceProperties(
8178  VmaAllocator allocator,
    8179  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    8180 {
    8181  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    8182  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    8183 }
    8184 
8185 void vmaGetMemoryProperties(
8186  VmaAllocator allocator,
    8187  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    8188 {
    8189  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    8190  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    8191 }
    8192 
8193 void vmaGetMemoryTypeProperties(
8194  VmaAllocator allocator,
    8195  uint32_t memoryTypeIndex,
    8196  VkMemoryPropertyFlags* pFlags)
    8197 {
    8198  VMA_ASSERT(allocator && pFlags);
    8199  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    8200  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    8201 }
    8202 
8203 void vmaSetCurrentFrameIndex(
8204  VmaAllocator allocator,
    8205  uint32_t frameIndex)
    8206 {
    8207  VMA_ASSERT(allocator);
    8208  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    8209 
    8210  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8211 
    8212  allocator->SetCurrentFrameIndex(frameIndex);
    8213 }
    8214 
    8215 void vmaCalculateStats(
    8216  VmaAllocator allocator,
    8217  VmaStats* pStats)
    8218 {
    8219  VMA_ASSERT(allocator && pStats);
    8220  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8221  allocator->CalculateStats(pStats);
    8222 }
    8223 
    8224 #if VMA_STATS_STRING_ENABLED
    8225 
    8226 void vmaBuildStatsString(
    8227  VmaAllocator allocator,
    8228  char** ppStatsString,
    8229  VkBool32 detailedMap)
    8230 {
    8231  VMA_ASSERT(allocator && ppStatsString);
    8232  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8233 
    8234  VmaStringBuilder sb(allocator);
    8235  {
    8236  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    8237  json.BeginObject();
    8238 
    8239  VmaStats stats;
    8240  allocator->CalculateStats(&stats);
    8241 
    8242  json.WriteString("Total");
    8243  VmaPrintStatInfo(json, stats.total);
    8244 
    8245  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    8246  {
    8247  json.BeginString("Heap ");
    8248  json.ContinueString(heapIndex);
    8249  json.EndString();
    8250  json.BeginObject();
    8251 
    8252  json.WriteString("Size");
    8253  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    8254 
    8255  json.WriteString("Flags");
    8256  json.BeginArray(true);
    8257  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    8258  {
    8259  json.WriteString("DEVICE_LOCAL");
    8260  }
    8261  json.EndArray();
    8262 
    8263  if(stats.memoryHeap[heapIndex].blockCount > 0)
    8264  {
    8265  json.WriteString("Stats");
    8266  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    8267  }
    8268 
    8269  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    8270  {
    8271  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    8272  {
    8273  json.BeginString("Type ");
    8274  json.ContinueString(typeIndex);
    8275  json.EndString();
    8276 
    8277  json.BeginObject();
    8278 
    8279  json.WriteString("Flags");
    8280  json.BeginArray(true);
    8281  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    8282  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    8283  {
    8284  json.WriteString("DEVICE_LOCAL");
    8285  }
    8286  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    8287  {
    8288  json.WriteString("HOST_VISIBLE");
    8289  }
    8290  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    8291  {
    8292  json.WriteString("HOST_COHERENT");
    8293  }
    8294  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    8295  {
    8296  json.WriteString("HOST_CACHED");
    8297  }
    8298  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    8299  {
    8300  json.WriteString("LAZILY_ALLOCATED");
    8301  }
    8302  json.EndArray();
    8303 
    8304  if(stats.memoryType[typeIndex].blockCount > 0)
    8305  {
    8306  json.WriteString("Stats");
    8307  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    8308  }
    8309 
    8310  json.EndObject();
    8311  }
    8312  }
    8313 
    8314  json.EndObject();
    8315  }
    8316  if(detailedMap == VK_TRUE)
    8317  {
    8318  allocator->PrintDetailedMap(json);
    8319  }
    8320 
    8321  json.EndObject();
    8322  }
    8323 
    8324  const size_t len = sb.GetLength();
    8325  char* const pChars = vma_new_array(allocator, char, len + 1);
    8326  if(len > 0)
    8327  {
    8328  memcpy(pChars, sb.GetData(), len);
    8329  }
    8330  pChars[len] = '\0';
    8331  *ppStatsString = pChars;
    8332 }
    8333 
    8334 void vmaFreeStatsString(
    8335  VmaAllocator allocator,
    8336  char* pStatsString)
    8337 {
    8338  if(pStatsString != VMA_NULL)
    8339  {
    8340  VMA_ASSERT(allocator);
    8341  size_t len = strlen(pStatsString);
    8342  vma_delete_array(allocator, pStatsString, len + 1);
    8343  }
    8344 }
    8345 
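/*
Editorial usage sketch (not part of the original source): the string returned
by vmaBuildStatsString() is JSON and must be released with vmaFreeStatsString().
detailedMap = VK_TRUE additionally emits the PrintDetailedMap() section above.

    char* statsJson = nullptr;
    vmaBuildStatsString(allocator, &statsJson, VK_TRUE);
    printf("%s\n", statsJson);   // or dump to a file for offline inspection
    vmaFreeStatsString(allocator, statsJson);
*/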
    8346 #endif // #if VMA_STATS_STRING_ENABLED
    8347 
    8348 /*
    8349 This function is not protected by any mutex because it just reads immutable data.
    8350 */
    8351 VkResult vmaFindMemoryTypeIndex(
    8352  VmaAllocator allocator,
    8353  uint32_t memoryTypeBits,
    8354  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8355  uint32_t* pMemoryTypeIndex)
    8356 {
    8357  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    8358  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    8359  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    8360 
    8361  if(pAllocationCreateInfo->memoryTypeBits != 0)
    8362  {
    8363  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    8364  }
    8365 
    8366  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    8367  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    8368 
    8369  // Convert usage to requiredFlags and preferredFlags.
    8370  switch(pAllocationCreateInfo->usage)
    8371  {
8372  case VMA_MEMORY_USAGE_UNKNOWN:
8373  break;
8374  case VMA_MEMORY_USAGE_GPU_ONLY:
8375  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8376  break;
8377  case VMA_MEMORY_USAGE_CPU_ONLY:
8378  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8379  break;
8380  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8381  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8382  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8383  break;
8384  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8385  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8386  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8387  break;
    8388  default:
    8389  break;
    8390  }
    8391 
    8392  *pMemoryTypeIndex = UINT32_MAX;
    8393  uint32_t minCost = UINT32_MAX;
    8394  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    8395  memTypeIndex < allocator->GetMemoryTypeCount();
    8396  ++memTypeIndex, memTypeBit <<= 1)
    8397  {
    8398  // This memory type is acceptable according to memoryTypeBits bitmask.
    8399  if((memTypeBit & memoryTypeBits) != 0)
    8400  {
    8401  const VkMemoryPropertyFlags currFlags =
    8402  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    8403  // This memory type contains requiredFlags.
    8404  if((requiredFlags & ~currFlags) == 0)
    8405  {
    8406  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    8407  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    8408  // Remember memory type with lowest cost.
    8409  if(currCost < minCost)
    8410  {
    8411  *pMemoryTypeIndex = memTypeIndex;
    8412  if(currCost == 0)
    8413  {
    8414  return VK_SUCCESS;
    8415  }
    8416  minCost = currCost;
    8417  }
    8418  }
    8419  }
    8420  }
    8421  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    8422 }
    8423 
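/*
Editorial usage sketch (not part of the original source): driving the
cost-based search above. memReq is assumed to come from
vkGetBufferMemoryRequirements(); VMA_MEMORY_USAGE_GPU_ONLY is converted by the
switch above into a preferred VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
    // res == VK_ERROR_FEATURE_NOT_PRESENT if no type satisfies requiredFlags.
*/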
8424 VkResult vmaFindMemoryTypeIndexForBufferInfo(
8425  VmaAllocator allocator,
    8426  const VkBufferCreateInfo* pBufferCreateInfo,
    8427  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8428  uint32_t* pMemoryTypeIndex)
    8429 {
    8430  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    8431  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    8432  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    8433  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    8434 
    8435  const VkDevice hDev = allocator->m_hDevice;
    8436  VkBuffer hBuffer = VK_NULL_HANDLE;
    8437  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    8438  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    8439  if(res == VK_SUCCESS)
    8440  {
    8441  VkMemoryRequirements memReq = {};
    8442  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    8443  hDev, hBuffer, &memReq);
    8444 
    8445  res = vmaFindMemoryTypeIndex(
    8446  allocator,
    8447  memReq.memoryTypeBits,
    8448  pAllocationCreateInfo,
    8449  pMemoryTypeIndex);
    8450 
    8451  allocator->GetVulkanFunctions().vkDestroyBuffer(
    8452  hDev, hBuffer, allocator->GetAllocationCallbacks());
    8453  }
    8454  return res;
    8455 }
    8456 
8457 VkResult vmaFindMemoryTypeIndexForImageInfo(
8458  VmaAllocator allocator,
    8459  const VkImageCreateInfo* pImageCreateInfo,
    8460  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8461  uint32_t* pMemoryTypeIndex)
    8462 {
    8463  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    8464  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    8465  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    8466  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    8467 
    8468  const VkDevice hDev = allocator->m_hDevice;
    8469  VkImage hImage = VK_NULL_HANDLE;
    8470  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    8471  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    8472  if(res == VK_SUCCESS)
    8473  {
    8474  VkMemoryRequirements memReq = {};
    8475  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    8476  hDev, hImage, &memReq);
    8477 
    8478  res = vmaFindMemoryTypeIndex(
    8479  allocator,
    8480  memReq.memoryTypeBits,
    8481  pAllocationCreateInfo,
    8482  pMemoryTypeIndex);
    8483 
    8484  allocator->GetVulkanFunctions().vkDestroyImage(
    8485  hDev, hImage, allocator->GetAllocationCallbacks());
    8486  }
    8487  return res;
    8488 }
    8489 
    8490 VkResult vmaCreatePool(
    8491  VmaAllocator allocator,
    8492  const VmaPoolCreateInfo* pCreateInfo,
    8493  VmaPool* pPool)
    8494 {
    8495  VMA_ASSERT(allocator && pCreateInfo && pPool);
    8496 
    8497  VMA_DEBUG_LOG("vmaCreatePool");
    8498 
    8499  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8500 
    8501  return allocator->CreatePool(pCreateInfo, pPool);
    8502 }
    8503 
    8504 void vmaDestroyPool(
    8505  VmaAllocator allocator,
    8506  VmaPool pool)
    8507 {
    8508  VMA_ASSERT(allocator);
    8509 
    8510  if(pool == VK_NULL_HANDLE)
    8511  {
    8512  return;
    8513  }
    8514 
    8515  VMA_DEBUG_LOG("vmaDestroyPool");
    8516 
    8517  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8518 
    8519  allocator->DestroyPool(pool);
    8520 }
    8521 
    8522 void vmaGetPoolStats(
    8523  VmaAllocator allocator,
    8524  VmaPool pool,
    8525  VmaPoolStats* pPoolStats)
    8526 {
    8527  VMA_ASSERT(allocator && pool && pPoolStats);
    8528 
    8529  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8530 
    8531  allocator->GetPoolStats(pool, pPoolStats);
    8532 }
    8533 
8534 void vmaMakePoolAllocationsLost(
8535  VmaAllocator allocator,
    8536  VmaPool pool,
    8537  size_t* pLostAllocationCount)
    8538 {
    8539  VMA_ASSERT(allocator && pool);
    8540 
    8541  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8542 
    8543  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    8544 }
    8545 
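/*
Editorial usage sketch (not part of the original source): creating a custom
pool and counting allocations that became lost. memTypeIndex is assumed to come
from vmaFindMemoryTypeIndex(); the 64 MB block size is illustrative.

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 64ull * 1024 * 1024;

    VmaPool pool = VK_NULL_HANDLE;
    if(vmaCreatePool(allocator, &poolInfo, &pool) == VK_SUCCESS)
    {
        size_t lostCount = 0;
        vmaMakePoolAllocationsLost(allocator, pool, &lostCount);
        vmaDestroyPool(allocator, pool);
    }
*/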
    8546 VkResult vmaAllocateMemory(
    8547  VmaAllocator allocator,
    8548  const VkMemoryRequirements* pVkMemoryRequirements,
    8549  const VmaAllocationCreateInfo* pCreateInfo,
    8550  VmaAllocation* pAllocation,
    8551  VmaAllocationInfo* pAllocationInfo)
    8552 {
    8553  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    8554 
    8555  VMA_DEBUG_LOG("vmaAllocateMemory");
    8556 
    8557  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8558 
    8559  VkResult result = allocator->AllocateMemory(
    8560  *pVkMemoryRequirements,
    8561  false, // requiresDedicatedAllocation
    8562  false, // prefersDedicatedAllocation
    8563  VK_NULL_HANDLE, // dedicatedBuffer
    8564  VK_NULL_HANDLE, // dedicatedImage
    8565  *pCreateInfo,
    8566  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    8567  pAllocation);
    8568 
    8569  if(pAllocationInfo && result == VK_SUCCESS)
    8570  {
    8571  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8572  }
    8573 
    8574  return result;
    8575 }
    8576 
8577 VkResult vmaAllocateMemoryForBuffer(
8578  VmaAllocator allocator,
    8579  VkBuffer buffer,
    8580  const VmaAllocationCreateInfo* pCreateInfo,
    8581  VmaAllocation* pAllocation,
    8582  VmaAllocationInfo* pAllocationInfo)
    8583 {
    8584  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    8585 
    8586  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    8587 
    8588  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8589 
    8590  VkMemoryRequirements vkMemReq = {};
    8591  bool requiresDedicatedAllocation = false;
    8592  bool prefersDedicatedAllocation = false;
    8593  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    8594  requiresDedicatedAllocation,
    8595  prefersDedicatedAllocation);
    8596 
    8597  VkResult result = allocator->AllocateMemory(
    8598  vkMemReq,
    8599  requiresDedicatedAllocation,
    8600  prefersDedicatedAllocation,
    8601  buffer, // dedicatedBuffer
    8602  VK_NULL_HANDLE, // dedicatedImage
    8603  *pCreateInfo,
    8604  VMA_SUBALLOCATION_TYPE_BUFFER,
    8605  pAllocation);
    8606 
    8607  if(pAllocationInfo && result == VK_SUCCESS)
    8608  {
    8609  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8610  }
    8611 
    8612  return result;
    8613 }
    8614 
    8615 VkResult vmaAllocateMemoryForImage(
    8616  VmaAllocator allocator,
    8617  VkImage image,
    8618  const VmaAllocationCreateInfo* pCreateInfo,
    8619  VmaAllocation* pAllocation,
    8620  VmaAllocationInfo* pAllocationInfo)
    8621 {
    8622  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    8623 
    8624  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    8625 
    8626  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8627 
    8628  VkResult result = AllocateMemoryForImage(
    8629  allocator,
    8630  image,
    8631  pCreateInfo,
    8632  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    8633  pAllocation);
    8634 
    8635  if(pAllocationInfo && result == VK_SUCCESS)
    8636  {
    8637  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8638  }
    8639 
    8640  return result;
    8641 }
    8642 
    8643 void vmaFreeMemory(
    8644  VmaAllocator allocator,
    8645  VmaAllocation allocation)
    8646 {
    8647  VMA_ASSERT(allocator && allocation);
    8648 
    8649  VMA_DEBUG_LOG("vmaFreeMemory");
    8650 
    8651  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8652 
    8653  allocator->FreeMemory(allocation);
    8654 }
    8655 
8656 void vmaGetAllocationInfo(
8657  VmaAllocator allocator,
    8658  VmaAllocation allocation,
    8659  VmaAllocationInfo* pAllocationInfo)
    8660 {
    8661  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    8662 
    8663  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8664 
    8665  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    8666 }
    8667 
8668 VkBool32 vmaTouchAllocation(
    8669  VmaAllocator allocator,
    8670  VmaAllocation allocation)
    8671 {
    8672  VMA_ASSERT(allocator && allocation);
    8673 
    8674  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8675 
    8676  return allocator->TouchAllocation(allocation);
    8677 }
    8678 
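/*
Editorial usage sketch (not part of the original source): the per-frame pattern
for allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
vmaTouchAllocation() returns VK_TRUE and refreshes the allocation's last-use
frame index when it is still alive; VK_FALSE means it was lost.

    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // Lost: destroy the stale resource and allocate it again.
    }
*/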
8679 void vmaSetAllocationUserData(
8680  VmaAllocator allocator,
    8681  VmaAllocation allocation,
    8682  void* pUserData)
    8683 {
    8684  VMA_ASSERT(allocator && allocation);
    8685 
    8686  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8687 
    8688  allocation->SetUserData(allocator, pUserData);
    8689 }
    8690 
8691 void vmaCreateLostAllocation(
8692  VmaAllocator allocator,
    8693  VmaAllocation* pAllocation)
    8694 {
    8695  VMA_ASSERT(allocator && pAllocation);
    8696 
    8697  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    8698 
    8699  allocator->CreateLostAllocation(pAllocation);
    8700 }
    8701 
    8702 VkResult vmaMapMemory(
    8703  VmaAllocator allocator,
    8704  VmaAllocation allocation,
    8705  void** ppData)
    8706 {
    8707  VMA_ASSERT(allocator && allocation && ppData);
    8708 
    8709  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8710 
    8711  return allocator->Map(allocation, ppData);
    8712 }
    8713 
    8714 void vmaUnmapMemory(
    8715  VmaAllocator allocator,
    8716  VmaAllocation allocation)
    8717 {
    8718  VMA_ASSERT(allocator && allocation);
    8719 
    8720  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8721 
    8722  allocator->Unmap(allocation);
    8723 }
    8724 
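/*
Editorial usage sketch (not part of the original source): Map()/Unmap() above
are reference-counted for block allocations, so nested mapping is legal. The
memory type must be HOST_VISIBLE, and allocations that can become lost are
rejected with VK_ERROR_MEMORY_MAP_FAILED. srcData/srcSize are assumptions.

    void* pData = nullptr;
    if(vmaMapMemory(allocator, alloc, &pData) == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcSize);
        vmaUnmapMemory(allocator, alloc);
    }
*/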
    8725 VkResult vmaDefragment(
    8726  VmaAllocator allocator,
    8727  VmaAllocation* pAllocations,
    8728  size_t allocationCount,
    8729  VkBool32* pAllocationsChanged,
    8730  const VmaDefragmentationInfo *pDefragmentationInfo,
    8731  VmaDefragmentationStats* pDefragmentationStats)
    8732 {
    8733  VMA_ASSERT(allocator && pAllocations);
    8734 
    8735  VMA_DEBUG_LOG("vmaDefragment");
    8736 
    8737  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8738 
    8739  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    8740 }
    8741 
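/*
Editorial usage sketch (not part of the original source): defragmenting an
array of allocations. pAllocationsChanged reports which resources must be
re-created and re-bound afterwards; a null pDefragmentationInfo means default
(unlimited) move budgets. allocations is assumed to be a
std::vector<VmaAllocation> of host-visible, unmapped allocations.

    std::vector<VkBool32> changed(allocations.size());
    VmaDefragmentationStats defragStats = {};
    VkResult res = vmaDefragment(
        allocator,
        allocations.data(), allocations.size(),
        changed.data(),
        nullptr,           // default VmaDefragmentationInfo
        &defragStats);
*/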
    8742 VkResult vmaCreateBuffer(
    8743  VmaAllocator allocator,
    8744  const VkBufferCreateInfo* pBufferCreateInfo,
    8745  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8746  VkBuffer* pBuffer,
    8747  VmaAllocation* pAllocation,
    8748  VmaAllocationInfo* pAllocationInfo)
    8749 {
    8750  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    8751 
    8752  VMA_DEBUG_LOG("vmaCreateBuffer");
    8753 
    8754  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8755 
    8756  *pBuffer = VK_NULL_HANDLE;
    8757  *pAllocation = VK_NULL_HANDLE;
    8758 
    8759  // 1. Create VkBuffer.
    8760  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    8761  allocator->m_hDevice,
    8762  pBufferCreateInfo,
    8763  allocator->GetAllocationCallbacks(),
    8764  pBuffer);
    8765  if(res >= 0)
    8766  {
    8767  // 2. vkGetBufferMemoryRequirements.
    8768  VkMemoryRequirements vkMemReq = {};
    8769  bool requiresDedicatedAllocation = false;
    8770  bool prefersDedicatedAllocation = false;
    8771  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    8772  requiresDedicatedAllocation, prefersDedicatedAllocation);
    8773 
    8774  // Make sure alignment requirements for specific buffer usages reported
    8775  // in Physical Device Properties are included in alignment reported by memory requirements.
    8776  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    8777  {
    8778  VMA_ASSERT(vkMemReq.alignment %
    8779  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    8780  }
    8781  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    8782  {
    8783  VMA_ASSERT(vkMemReq.alignment %
    8784  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    8785  }
    8786  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    8787  {
    8788  VMA_ASSERT(vkMemReq.alignment %
    8789  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    8790  }
    8791 
    8792  // 3. Allocate memory using allocator.
    8793  res = allocator->AllocateMemory(
    8794  vkMemReq,
    8795  requiresDedicatedAllocation,
    8796  prefersDedicatedAllocation,
    8797  *pBuffer, // dedicatedBuffer
    8798  VK_NULL_HANDLE, // dedicatedImage
    8799  *pAllocationCreateInfo,
    8800  VMA_SUBALLOCATION_TYPE_BUFFER,
    8801  pAllocation);
    8802  if(res >= 0)
    8803  {
8804  // 4. Bind buffer with memory.
    8805  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
    8806  allocator->m_hDevice,
    8807  *pBuffer,
    8808  (*pAllocation)->GetMemory(),
    8809  (*pAllocation)->GetOffset());
    8810  if(res >= 0)
    8811  {
    8812  // All steps succeeded.
    8813  if(pAllocationInfo != VMA_NULL)
    8814  {
    8815  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8816  }
    8817  return VK_SUCCESS;
    8818  }
    8819  allocator->FreeMemory(*pAllocation);
    8820  *pAllocation = VK_NULL_HANDLE;
    8821  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    8822  *pBuffer = VK_NULL_HANDLE;
    8823  return res;
    8824  }
    8825  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    8826  *pBuffer = VK_NULL_HANDLE;
    8827  return res;
    8828  }
    8829  return res;
    8830 }
    8831 
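/*
Editorial usage sketch (not part of the original source): the one-call path
through vmaCreateBuffer() above, which creates, allocates, and binds, and
unwinds on failure. The staging-buffer parameters are illustrative.

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 65536;
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    if(vmaCreateBuffer(allocator, &bufInfo, &allocInfo, &buf, &alloc, nullptr) == VK_SUCCESS)
    {
        // ... use buf ...
        vmaDestroyBuffer(allocator, buf, alloc);
    }
*/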
    8832 void vmaDestroyBuffer(
    8833  VmaAllocator allocator,
    8834  VkBuffer buffer,
    8835  VmaAllocation allocation)
    8836 {
    8837  if(buffer != VK_NULL_HANDLE)
    8838  {
    8839  VMA_ASSERT(allocator);
    8840 
    8841  VMA_DEBUG_LOG("vmaDestroyBuffer");
    8842 
    8843  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8844 
    8845  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    8846 
    8847  allocator->FreeMemory(allocation);
    8848  }
    8849 }
    8850 
    8851 VkResult vmaCreateImage(
    8852  VmaAllocator allocator,
    8853  const VkImageCreateInfo* pImageCreateInfo,
    8854  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8855  VkImage* pImage,
    8856  VmaAllocation* pAllocation,
    8857  VmaAllocationInfo* pAllocationInfo)
    8858 {
    8859  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    8860 
    8861  VMA_DEBUG_LOG("vmaCreateImage");
    8862 
    8863  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8864 
    8865  *pImage = VK_NULL_HANDLE;
    8866  *pAllocation = VK_NULL_HANDLE;
    8867 
    8868  // 1. Create VkImage.
    8869  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    8870  allocator->m_hDevice,
    8871  pImageCreateInfo,
    8872  allocator->GetAllocationCallbacks(),
    8873  pImage);
    8874  if(res >= 0)
    8875  {
    8876  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    8877  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    8878  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    8879 
    8880  // 2. Allocate memory using allocator.
    8881  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
    8882  if(res >= 0)
    8883  {
    8884  // 3. Bind image with memory.
    8885  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
    8886  allocator->m_hDevice,
    8887  *pImage,
    8888  (*pAllocation)->GetMemory(),
    8889  (*pAllocation)->GetOffset());
    8890  if(res >= 0)
    8891  {
    8892  // All steps succeeded.
    8893  if(pAllocationInfo != VMA_NULL)
    8894  {
    8895  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8896  }
    8897  return VK_SUCCESS;
    8898  }
    8899  allocator->FreeMemory(*pAllocation);
    8900  *pAllocation = VK_NULL_HANDLE;
    8901  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    8902  *pImage = VK_NULL_HANDLE;
    8903  return res;
    8904  }
    8905  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    8906  *pImage = VK_NULL_HANDLE;
    8907  return res;
    8908  }
    8909  return res;
    8910 }
    8911 
    8912 void vmaDestroyImage(
    8913  VmaAllocator allocator,
    8914  VkImage image,
    8915  VmaAllocation allocation)
    8916 {
    8917  if(image != VK_NULL_HANDLE)
    8918  {
    8919  VMA_ASSERT(allocator);
    8920 
    8921  VMA_DEBUG_LOG("vmaDestroyImage");
    8922 
    8923  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8924 
    8925  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    8926 
    8927  allocator->FreeMemory(allocation);
    8928  }
    8929 }
    8930 
    8931 #endif // #ifdef VMA_IMPLEMENTATION
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    826 #include <vulkan/vulkan.h>
    827 
    828 VK_DEFINE_HANDLE(VmaAllocator)
    829 
    830 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    832  VmaAllocator allocator,
    833  uint32_t memoryType,
    834  VkDeviceMemory memory,
    835  VkDeviceSize size);
    837 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    838  VmaAllocator allocator,
    839  uint32_t memoryType,
    840  VkDeviceMemory memory,
    841  VkDeviceSize size);
    842 
850 typedef struct VmaDeviceMemoryCallbacks {
852  PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
854  PFN_vmaFreeDeviceMemoryFunction pfnFree;
855 } VmaDeviceMemoryCallbacks;
856 
886 
    889 typedef VkFlags VmaAllocatorCreateFlags;
    890 
    895 typedef struct VmaVulkanFunctions {
    896  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    897  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    898  PFN_vkAllocateMemory vkAllocateMemory;
    899  PFN_vkFreeMemory vkFreeMemory;
    900  PFN_vkMapMemory vkMapMemory;
    901  PFN_vkUnmapMemory vkUnmapMemory;
    902  PFN_vkBindBufferMemory vkBindBufferMemory;
    903  PFN_vkBindImageMemory vkBindImageMemory;
    904  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    905  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    906  PFN_vkCreateBuffer vkCreateBuffer;
    907  PFN_vkDestroyBuffer vkDestroyBuffer;
    908  PFN_vkCreateImage vkCreateImage;
    909  PFN_vkDestroyImage vkDestroyImage;
    910  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    911  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
912 } VmaVulkanFunctions;
913 
915 typedef struct VmaAllocatorCreateInfo
916 {
    918  VmaAllocatorCreateFlags flags;
    920 
    921  VkPhysicalDevice physicalDevice;
    923 
    924  VkDevice device;
    926 
    929 
    930  const VkAllocationCallbacks* pAllocationCallbacks;
    932 
    947  uint32_t frameInUseCount;
    971  const VkDeviceSize* pHeapSizeLimit;
984 } VmaAllocatorCreateInfo;
985 
    987 VkResult vmaCreateAllocator(
    988  const VmaAllocatorCreateInfo* pCreateInfo,
    989  VmaAllocator* pAllocator);
    990 
992 void vmaDestroyAllocator(
993  VmaAllocator allocator);
    994 
999 void vmaGetPhysicalDeviceProperties(
1000  VmaAllocator allocator,
    1001  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1002 
1007 void vmaGetMemoryProperties(
1008  VmaAllocator allocator,
    1009  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1010 
1017 void vmaGetMemoryTypeProperties(
1018  VmaAllocator allocator,
    1019  uint32_t memoryTypeIndex,
    1020  VkMemoryPropertyFlags* pFlags);
    1021 
1030 void vmaSetCurrentFrameIndex(
1031  VmaAllocator allocator,
    1032  uint32_t frameIndex);
    1033 
    1036 typedef struct VmaStatInfo
    1037 {
1039  uint32_t blockCount;
1041  uint32_t allocationCount;
1043  uint32_t unusedRangeCount;
1045  VkDeviceSize usedBytes;
    1047  VkDeviceSize unusedBytes;
    1048  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    1049  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
    1050 } VmaStatInfo;
    1051 
    1053 typedef struct VmaStats
    1054 {
    1055  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1056  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1057  VmaStatInfo total;
1058 } VmaStats;
    1059 
    1061 void vmaCalculateStats(
    1062  VmaAllocator allocator,
    1063  VmaStats* pStats);
    1064 
    1065 #define VMA_STATS_STRING_ENABLED 1
    1066 
    1067 #if VMA_STATS_STRING_ENABLED
    1068 
    1070 
    1072 void vmaBuildStatsString(
    1073  VmaAllocator allocator,
    1074  char** ppStatsString,
    1075  VkBool32 detailedMap);
    1076 
    1077 void vmaFreeStatsString(
    1078  VmaAllocator allocator,
    1079  char* pStatsString);
    1080 
    1081 #endif // #if VMA_STATS_STRING_ENABLED
    1082 
    1083 VK_DEFINE_HANDLE(VmaPool)
    1084 
    1085 typedef enum VmaMemoryUsage
    1086 {
    1135 } VmaMemoryUsage;
    1136 
    1151 
    1201 
1204 typedef VkFlags VmaAllocationCreateFlags;
1205 
1206 typedef struct VmaAllocationCreateInfo
1207 {
    1209  VmaAllocationCreateFlags flags;
    1220  VkMemoryPropertyFlags requiredFlags;
    1225  VkMemoryPropertyFlags preferredFlags;
    1233  uint32_t memoryTypeBits;
    1239  VmaPool pool;
1246  void* pUserData;
1247 } VmaAllocationCreateInfo;
1248 
    1265 VkResult vmaFindMemoryTypeIndex(
    1266  VmaAllocator allocator,
    1267  uint32_t memoryTypeBits,
    1268  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1269  uint32_t* pMemoryTypeIndex);
    1270 
1283 VkResult vmaFindMemoryTypeIndexForBufferInfo(
1284  VmaAllocator allocator,
    1285  const VkBufferCreateInfo* pBufferCreateInfo,
    1286  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1287  uint32_t* pMemoryTypeIndex);
    1288 
1301 VkResult vmaFindMemoryTypeIndexForImageInfo(
1302  VmaAllocator allocator,
    1303  const VkImageCreateInfo* pImageCreateInfo,
    1304  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1305  uint32_t* pMemoryTypeIndex);
    1306 
    1327 
    1330 typedef VkFlags VmaPoolCreateFlags;
    1331 
    1334 typedef struct VmaPoolCreateInfo {
    1340  VmaPoolCreateFlags flags;
    1345  VkDeviceSize blockSize;
1373 } VmaPoolCreateInfo;
1374 
    1377 typedef struct VmaPoolStats {
    1380  VkDeviceSize size;
    1383  VkDeviceSize unusedSize;
    1396  VkDeviceSize unusedRangeSizeMax;
    1397 } VmaPoolStats;
    1398 
    1405 VkResult vmaCreatePool(
    1406  VmaAllocator allocator,
    1407  const VmaPoolCreateInfo* pCreateInfo,
    1408  VmaPool* pPool);
    1409 
    1412 void vmaDestroyPool(
    1413  VmaAllocator allocator,
    1414  VmaPool pool);
    1415 
    1422 void vmaGetPoolStats(
    1423  VmaAllocator allocator,
    1424  VmaPool pool,
    1425  VmaPoolStats* pPoolStats);
    1426 
1433 void vmaMakePoolAllocationsLost(
1434  VmaAllocator allocator,
    1435  VmaPool pool,
    1436  size_t* pLostAllocationCount);
    1437 
    1438 VK_DEFINE_HANDLE(VmaAllocation)
    1439 
    1440 
    1442 typedef struct VmaAllocationInfo {
    1447  uint32_t memoryType;
    1456  VkDeviceMemory deviceMemory;
    1461  VkDeviceSize offset;
    1466  VkDeviceSize size;
    1480  void* pUserData;
1481 } VmaAllocationInfo;
1482 
    1493 VkResult vmaAllocateMemory(
    1494  VmaAllocator allocator,
    1495  const VkMemoryRequirements* pVkMemoryRequirements,
    1496  const VmaAllocationCreateInfo* pCreateInfo,
    1497  VmaAllocation* pAllocation,
    1498  VmaAllocationInfo* pAllocationInfo);
    1499 
1506 VkResult vmaAllocateMemoryForBuffer(
1507  VmaAllocator allocator,
    1508  VkBuffer buffer,
    1509  const VmaAllocationCreateInfo* pCreateInfo,
    1510  VmaAllocation* pAllocation,
    1511  VmaAllocationInfo* pAllocationInfo);
    1512 
    1514 VkResult vmaAllocateMemoryForImage(
    1515  VmaAllocator allocator,
    1516  VkImage image,
    1517  const VmaAllocationCreateInfo* pCreateInfo,
    1518  VmaAllocation* pAllocation,
    1519  VmaAllocationInfo* pAllocationInfo);
    1520 
    1522 void vmaFreeMemory(
    1523  VmaAllocator allocator,
    1524  VmaAllocation allocation);
    1525 
1530 void vmaGetAllocationInfo(
1531  VmaAllocator allocator,
    1532  VmaAllocation allocation,
    1533  VmaAllocationInfo* pAllocationInfo);
    1534 
    1537 VkBool32 vmaTouchAllocation(
    1538  VmaAllocator allocator,
    1539  VmaAllocation allocation);
    1540 
1554 void vmaSetAllocationUserData(
1555  VmaAllocator allocator,
    1556  VmaAllocation allocation,
    1557  void* pUserData);
    1558 
1569 void vmaCreateLostAllocation(
1570  VmaAllocator allocator,
    1571  VmaAllocation* pAllocation);
    1572 
    1607 VkResult vmaMapMemory(
    1608  VmaAllocator allocator,
    1609  VmaAllocation allocation,
    1610  void** ppData);
    1611 
    1616 void vmaUnmapMemory(
    1617  VmaAllocator allocator,
    1618  VmaAllocation allocation);
    1619 
    1621 typedef struct VmaDefragmentationInfo {
1626  VkDeviceSize maxBytesToMove;
1631  uint32_t maxAllocationsToMove;
1632 } VmaDefragmentationInfo;
1633 
    1635 typedef struct VmaDefragmentationStats {
    1637  VkDeviceSize bytesMoved;
1639  VkDeviceSize bytesFreed;
1641  uint32_t allocationsMoved;
1643  uint32_t deviceMemoryBlocksFreed;
1644 } VmaDefragmentationStats;
1645 
    1728 VkResult vmaDefragment(
    1729  VmaAllocator allocator,
    1730  VmaAllocation* pAllocations,
    1731  size_t allocationCount,
    1732  VkBool32* pAllocationsChanged,
    1733  const VmaDefragmentationInfo *pDefragmentationInfo,
    1734  VmaDefragmentationStats* pDefragmentationStats);
    1735 
    1762 VkResult vmaCreateBuffer(
    1763  VmaAllocator allocator,
    1764  const VkBufferCreateInfo* pBufferCreateInfo,
    1765  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1766  VkBuffer* pBuffer,
    1767  VmaAllocation* pAllocation,
    1768  VmaAllocationInfo* pAllocationInfo);
    1769 
    1781 void vmaDestroyBuffer(
    1782  VmaAllocator allocator,
    1783  VkBuffer buffer,
    1784  VmaAllocation allocation);
    1785 
    1787 VkResult vmaCreateImage(
    1788  VmaAllocator allocator,
    1789  const VkImageCreateInfo* pImageCreateInfo,
    1790  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    1791  VkImage* pImage,
    1792  VmaAllocation* pAllocation,
    1793  VmaAllocationInfo* pAllocationInfo);
    1794 
    1806 void vmaDestroyImage(
    1807  VmaAllocator allocator,
    1808  VkImage image,
    1809  VmaAllocation allocation);
    1810 
    1811 #ifdef __cplusplus
    1812 }
    1813 #endif
    1814 
    1815 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    1816 
    1817 // For Visual Studio IntelliSense.
    1818 #ifdef __INTELLISENSE__
    1819 #define VMA_IMPLEMENTATION
    1820 #endif
    1821 
    1822 #ifdef VMA_IMPLEMENTATION
    1823 #undef VMA_IMPLEMENTATION
    1824 
    1825 #include <cstdint>
    1826 #include <cstdlib>
1827 #include <cstring>
1828 #include <cstdio> // for snprintf used by the VMA_STATS_STRING_ENABLED helpers (editorial addition)
    1829 /*******************************************************************************
    1830 CONFIGURATION SECTION
    1831 
    1832 Define some of these macros before each #include of this header or change them
1833 here if you need behavior other than the default, depending on your environment.
    1834 */
    1835 
    1836 /*
    1837 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    1838 internally, like:
    1839 
    1840  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    1841 
1842 Define it to 0 if you are going to provide your own pointers to Vulkan functions via
    1843 VmaAllocatorCreateInfo::pVulkanFunctions.
    1844 */
    1845 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    1846 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    1847 #endif
    1848 
    1849 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    1850 //#define VMA_USE_STL_CONTAINERS 1
    1851 
1852 /* Set this macro to 1 to make the library include and use STL containers:
1853 std::pair, std::vector, std::list, std::unordered_map.
1854 
1855 Set it to 0 or leave it undefined to make the library use its own
1856 implementation of the containers.
    1857 */
    1858 #if VMA_USE_STL_CONTAINERS
    1859  #define VMA_USE_STL_VECTOR 1
    1860  #define VMA_USE_STL_UNORDERED_MAP 1
    1861  #define VMA_USE_STL_LIST 1
    1862 #endif
    1863 
    1864 #if VMA_USE_STL_VECTOR
    1865  #include <vector>
    1866 #endif
    1867 
    1868 #if VMA_USE_STL_UNORDERED_MAP
    1869  #include <unordered_map>
    1870 #endif
    1871 
    1872 #if VMA_USE_STL_LIST
    1873  #include <list>
    1874 #endif
    1875 
    1876 /*
1877 The following headers are used in this CONFIGURATION section only, so feel free
1878 to remove them if they are not needed.
    1879 */
    1880 #include <cassert> // for assert
    1881 #include <algorithm> // for min, max
    1882 #include <mutex> // for std::mutex
    1883 #include <atomic> // for std::atomic
    1884 
    1885 #if !defined(_WIN32) && !defined(__APPLE__)
    1886  #include <malloc.h> // for aligned_alloc()
    1887 #endif
    1888 
    1889 #ifndef VMA_NULL
    1890  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    1891  #define VMA_NULL nullptr
    1892 #endif
    1893 
    1894 #if defined(__APPLE__)
    1895 #include <cstdlib>
    1896 void *aligned_alloc(size_t alignment, size_t size)
    1897 {
    1898  // alignment must be >= sizeof(void*)
    1899  if(alignment < sizeof(void*))
    1900  {
    1901  alignment = sizeof(void*);
    1902  }
    1903 
    1904  void *pointer;
    1905  if(posix_memalign(&pointer, alignment, size) == 0)
    1906  return pointer;
    1907  return VMA_NULL;
    1908 }
    1909 #endif
    1910 
    1911 // Normal assert to check for programmer's errors, especially in Debug configuration.
    1912 #ifndef VMA_ASSERT
    1913  #ifdef _DEBUG
    1914  #define VMA_ASSERT(expr) assert(expr)
    1915  #else
    1916  #define VMA_ASSERT(expr)
    1917  #endif
    1918 #endif
    1919 
    1920 // Assert that will be called very often, like inside data structures e.g. operator[].
1921 // Making it non-empty can make the program slow.
    1922 #ifndef VMA_HEAVY_ASSERT
    1923  #ifdef _DEBUG
    1924  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    1925  #else
    1926  #define VMA_HEAVY_ASSERT(expr)
    1927  #endif
    1928 #endif
    1929 
    1930 #ifndef VMA_ALIGN_OF
    1931  #define VMA_ALIGN_OF(type) (__alignof(type))
    1932 #endif
    1933 
    1934 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    1935  #if defined(_WIN32)
    1936  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    1937  #else
    1938  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    1939  #endif
    1940 #endif
    1941 
    1942 #ifndef VMA_SYSTEM_FREE
    1943  #if defined(_WIN32)
    1944  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    1945  #else
    1946  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    1947  #endif
    1948 #endif
    1949 
    1950 #ifndef VMA_MIN
    1951  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    1952 #endif
    1953 
    1954 #ifndef VMA_MAX
    1955  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    1956 #endif
    1957 
    1958 #ifndef VMA_SWAP
    1959  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    1960 #endif
    1961 
    1962 #ifndef VMA_SORT
    1963  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    1964 #endif
    1965 
    1966 #ifndef VMA_DEBUG_LOG
    1967  #define VMA_DEBUG_LOG(format, ...)
    1968  /*
    1969  #define VMA_DEBUG_LOG(format, ...) do { \
    1970  printf(format, __VA_ARGS__); \
    1971  printf("\n"); \
    1972  } while(false)
    1973  */
    1974 #endif
    1975 
    1976 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    1977 #if VMA_STATS_STRING_ENABLED
    1978  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    1979  {
    1980  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    1981  }
    1982  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    1983  {
    1984  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    1985  }
    1986  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    1987  {
    1988  snprintf(outStr, strLen, "%p", ptr);
    1989  }
    1990 #endif
    1991 
    1992 #ifndef VMA_MUTEX
    1993  class VmaMutex
    1994  {
    1995  public:
    1996  VmaMutex() { }
    1997  ~VmaMutex() { }
    1998  void Lock() { m_Mutex.lock(); }
    1999  void Unlock() { m_Mutex.unlock(); }
    2000  private:
    2001  std::mutex m_Mutex;
    2002  };
    2003  #define VMA_MUTEX VmaMutex
    2004 #endif
    2005 
    2006 /*
    2007 If providing your own implementation, you need to implement a subset of std::atomic:
    2008 
    2009 - Constructor(uint32_t desired)
    2010 - uint32_t load() const
    2011 - void store(uint32_t desired)
    2012 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2013 */
    2014 #ifndef VMA_ATOMIC_UINT32
    2015  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2016 #endif
    2017 
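/*
Editorial sketch (not part of the original source): a custom VMA_ATOMIC_UINT32
implementing exactly the subset listed above. It wraps std::atomic only to
illustrate the required surface; a real replacement would use platform
primitives instead.

    class MyAtomicUint32
    {
    public:
        MyAtomicUint32(uint32_t desired) : m_Value(desired) { }
        uint32_t load() const { return m_Value.load(); }
        void store(uint32_t desired) { m_Value.store(desired); }
        bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
            { return m_Value.compare_exchange_weak(expected, desired); }
    private:
        std::atomic<uint32_t> m_Value;
    };
    // #define VMA_ATOMIC_UINT32 MyAtomicUint32 // before including this file
*/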
    2018 #ifndef VMA_BEST_FIT
    2019 
    2031  #define VMA_BEST_FIT (1)
    2032 #endif
    2033 
    2034 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2035 
    2039  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2040 #endif
    2041 
    2042 #ifndef VMA_DEBUG_ALIGNMENT
    2043 
    2047  #define VMA_DEBUG_ALIGNMENT (1)
    2048 #endif
    2049 
    2050 #ifndef VMA_DEBUG_MARGIN
    2051 
    2055  #define VMA_DEBUG_MARGIN (0)
    2056 #endif
    2057 
    2058 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    2059 
    2063  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    2064 #endif
    2065 
    2066 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    2067 
    2071  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    2072 #endif
    2073 
    2074 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    2075  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    2077 #endif
    2078 
    2079 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    2080  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    2082 #endif
    2083 
    2084 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    2085 
    2086 /*******************************************************************************
    2087 END OF CONFIGURATION
    2088 */
    2089 
    2090 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    2091  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    2092 
    2093 // Returns number of bits set to 1 in (v).
    2094 static inline uint32_t VmaCountBitsSet(uint32_t v)
    2095 {
    2096  uint32_t c = v - ((v >> 1) & 0x55555555);
    2097  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    2098  c = ((c >> 4) + c) & 0x0F0F0F0F;
    2099  c = ((c >> 8) + c) & 0x00FF00FF;
    2100  c = ((c >> 16) + c) & 0x0000FFFF;
    2101  return c;
    2102 }
    2103 
2104 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
    2105 // Use types like uint32_t, uint64_t as T.
    2106 template <typename T>
    2107 static inline T VmaAlignUp(T val, T align)
    2108 {
    2109  return (val + align - 1) / align * align;
    2110 }
    2111 
2112 // Integer division with mathematical rounding to the nearest number.
    2113 template <typename T>
    2114 inline T VmaRoundDiv(T x, T y)
    2115 {
    2116  return (x + (y / (T)2)) / y;
    2117 }
    2118 
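// Editorial examples (not part of the original source): with y = 3, y/2 = 1,
// so VmaRoundDiv(10, 3) = (10 + 1) / 3 = 3 and VmaRoundDiv(11, 3) = (11 + 1) / 3 = 4.
// The added y/2 term is what turns truncating integer division into
// round-to-nearest.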
    2119 #ifndef VMA_SORT
    2120 
    2121 template<typename Iterator, typename Compare>
    2122 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    2123 {
    2124  Iterator centerValue = end; --centerValue;
    2125  Iterator insertIndex = beg;
    2126  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    2127  {
    2128  if(cmp(*memTypeIndex, *centerValue))
    2129  {
    2130  if(insertIndex != memTypeIndex)
    2131  {
    2132  VMA_SWAP(*memTypeIndex, *insertIndex);
    2133  }
    2134  ++insertIndex;
    2135  }
    2136  }
    2137  if(insertIndex != centerValue)
    2138  {
    2139  VMA_SWAP(*insertIndex, *centerValue);
    2140  }
    2141  return insertIndex;
    2142 }
    2143 
    2144 template<typename Iterator, typename Compare>
    2145 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    2146 {
    2147  if(beg < end)
    2148  {
    2149  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    2150  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    2151  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    2152  }
    2153 }
    2154 
    2155 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    2156 
    2157 #endif // #ifndef VMA_SORT
    2158 
    2159 /*
    2160 Returns true if two memory blocks occupy overlapping pages.
2161 ResourceA must be at a lower memory offset than ResourceB.
    2162 
    2163 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    2164 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    2165 */
    2166 static inline bool VmaBlocksOnSamePage(
    2167  VkDeviceSize resourceAOffset,
    2168  VkDeviceSize resourceASize,
    2169  VkDeviceSize resourceBOffset,
    2170  VkDeviceSize pageSize)
    2171 {
    2172  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    2173  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    2174  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    2175  VkDeviceSize resourceBStart = resourceBOffset;
    2176  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    2177  return resourceAEndPage == resourceBStartPage;
    2178 }
    2179 
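// Editorial example (not part of the original source): with pageSize = 4096,
// a resource at offset 0 with size 100 ends on byte 99 (page 0). A second
// resource starting at offset 2048 is also on page 0, so the function returns
// true and bufferImageGranularity padding may be required; if it started at
// offset 4096 instead, the pages would differ and the result would be false.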
    2180 enum VmaSuballocationType
    2181 {
    2182  VMA_SUBALLOCATION_TYPE_FREE = 0,
    2183  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    2184  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    2185  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    2186  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    2187  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    2188  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    2189 };
    2190 
    2191 /*
2192 Returns true if given suballocation types could conflict and must respect
2193 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
2194 or linear image and the other is an optimal image. If a type is unknown, the
2195 function behaves conservatively.
    2196 */
    2197 static inline bool VmaIsBufferImageGranularityConflict(
    2198  VmaSuballocationType suballocType1,
    2199  VmaSuballocationType suballocType2)
    2200 {
    2201  if(suballocType1 > suballocType2)
    2202  {
    2203  VMA_SWAP(suballocType1, suballocType2);
    2204  }
    2205 
    2206  switch(suballocType1)
    2207  {
    2208  case VMA_SUBALLOCATION_TYPE_FREE:
    2209  return false;
    2210  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    2211  return true;
    2212  case VMA_SUBALLOCATION_TYPE_BUFFER:
    2213  return
    2214  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    2215  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    2216  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    2217  return
    2218  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    2219  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    2220  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    2221  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    2222  return
    2223  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    2224  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    2225  return false;
    2226  default:
    2227  VMA_ASSERT(0);
    2228  return true;
    2229  }
    2230 }
    2231 
    2232 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    2233 struct VmaMutexLock
    2234 {
    2235 public:
    2236  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    2237  m_pMutex(useMutex ? &mutex : VMA_NULL)
    2238  {
    2239  if(m_pMutex)
    2240  {
    2241  m_pMutex->Lock();
    2242  }
    2243  }
    2244 
    2245  ~VmaMutexLock()
    2246  {
    2247  if(m_pMutex)
    2248  {
    2249  m_pMutex->Unlock();
    2250  }
    2251  }
    2252 
    2253 private:
    2254  VMA_MUTEX* m_pMutex;
    2255 };
    2256 
    2257 #if VMA_DEBUG_GLOBAL_MUTEX
    2258  static VMA_MUTEX gDebugGlobalMutex;
    2259  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    2260 #else
    2261  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    2262 #endif
    2263 
    2264 // Minimum size of a free suballocation to register it in the free suballocation collection.
    2265 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    2266 
    2267 /*
2268 Performs binary search and returns an iterator to the first element that is
2269 greater than or equal to (key), according to comparison (cmp).
2270 
2271 Cmp should return true if its first argument is less than its second argument.
2272 
2273 The returned value is the found element, if present in the collection, or the
2274 place where a new element with value (key) should be inserted.
    2275 */
    2276 template <typename IterT, typename KeyT, typename CmpT>
    2277 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
    2278 {
    2279  size_t down = 0, up = (end - beg);
    2280  while(down < up)
    2281  {
    2282  const size_t mid = (down + up) / 2;
    2283  if(cmp(*(beg+mid), key))
    2284  {
    2285  down = mid + 1;
    2286  }
    2287  else
    2288  {
    2289  up = mid;
    2290  }
    2291  }
    2292  return beg + down;
    2293 }
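// Illustrative sketch (not part of the library): lower-bound search over a
// sorted plain array using the function above. The comparator must implement
// a strict "less than".
#if 0
struct ExampleUint32Less
{
    bool operator()(uint32_t lhs, uint32_t rhs) const { return lhs < rhs; }
};

static void ExampleBinaryFind()
{
    const uint32_t sorted[] = { 1, 3, 3, 7, 9 };
    // Returns a pointer to the first 3 (sorted + 1): the position where the key
    // is found, or where it would have to be inserted to keep the array sorted.
    const uint32_t* it = VmaBinaryFindFirstNotLess(
        sorted, sorted + 5, 3u, ExampleUint32Less());
    (void)it;
}
#endif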
    2294 
    2295 ////////////////////////////////////////////////////////////////////////////////
    2296 // Memory allocation
    2297 
    2298 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    2299 {
    2300  if((pAllocationCallbacks != VMA_NULL) &&
    2301  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    2302  {
    2303  return (*pAllocationCallbacks->pfnAllocation)(
    2304  pAllocationCallbacks->pUserData,
    2305  size,
    2306  alignment,
    2307  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    2308  }
    2309  else
    2310  {
    2311  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    2312  }
    2313 }
    2314 
    2315 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    2316 {
    2317  if((pAllocationCallbacks != VMA_NULL) &&
    2318  (pAllocationCallbacks->pfnFree != VMA_NULL))
    2319  {
    2320  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    2321  }
    2322  else
    2323  {
    2324  VMA_SYSTEM_FREE(ptr);
    2325  }
    2326 }
    2327 
    2328 template<typename T>
    2329 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    2330 {
    2331  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    2332 }
    2333 
    2334 template<typename T>
    2335 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    2336 {
    2337  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    2338 }
    2339 
    2340 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    2341 
    2342 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    2343 
    2344 template<typename T>
    2345 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    2346 {
    2347  ptr->~T();
    2348  VmaFree(pAllocationCallbacks, ptr);
    2349 }
    2350 
    2351 template<typename T>
    2352 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    2353 {
    2354  if(ptr != VMA_NULL)
    2355  {
    2356  for(size_t i = count; i--; )
    2357  {
    2358  ptr[i].~T();
    2359  }
    2360  VmaFree(pAllocationCallbacks, ptr);
    2361  }
    2362 }
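// Illustrative sketch (not part of the library): vma_new/vma_delete must be
// paired, like operator new/delete, so that destructors run and memory is
// returned through the same VkAllocationCallbacks it came from. Note that
// vma_new_array placement-constructs only the first element, so it is meant
// for trivially constructible types, while vma_delete_array does destroy
// every element.
#if 0
struct ExampleItem { uint32_t a; uint32_t b; };

static void ExampleVmaNewDelete(const VkAllocationCallbacks* pCallbacks)
{
    // Allocates sizeof(ExampleItem) with proper alignment, constructs in place.
    ExampleItem* pItem = vma_new(pCallbacks, ExampleItem)();
    vma_delete(pCallbacks, pItem);

    ExampleItem* pArray = vma_new_array(pCallbacks, ExampleItem, 16);
    vma_delete_array(pCallbacks, pArray, 16);
}
#endif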
    2363 
    2364 // STL-compatible allocator.
    2365 template<typename T>
    2366 class VmaStlAllocator
    2367 {
    2368 public:
    2369  const VkAllocationCallbacks* const m_pCallbacks;
    2370  typedef T value_type;
    2371 
    2372  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    2373  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    2374 
    2375  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    2376  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    2377 
    2378  template<typename U>
    2379  bool operator==(const VmaStlAllocator<U>& rhs) const
    2380  {
    2381  return m_pCallbacks == rhs.m_pCallbacks;
    2382  }
    2383  template<typename U>
    2384  bool operator!=(const VmaStlAllocator<U>& rhs) const
    2385  {
    2386  return m_pCallbacks != rhs.m_pCallbacks;
    2387  }
    2388 
    2389  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    2390 };
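// Illustrative sketch (not part of the library, and assuming <vector> has been
// included): VmaStlAllocator routes container allocations through the
// user-provided VkAllocationCallbacks. It works with any C++11 allocator-aware
// container, which is what makes the VMA_USE_STL_VECTOR / VMA_USE_STL_LIST
// switches below possible.
#if 0
static void ExampleStlAllocator(const VkAllocationCallbacks* pCallbacks)
{
    std::vector< uint32_t, VmaStlAllocator<uint32_t> > v(
        VmaStlAllocator<uint32_t>(pCallbacks));
    v.push_back(42); // Goes through pCallbacks->pfnAllocation if provided.
}
#endif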
    2391 
    2392 #if VMA_USE_STL_VECTOR
    2393 
    2394 #define VmaVector std::vector
    2395 
    2396 template<typename T, typename allocatorT>
    2397 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    2398 {
    2399  vec.insert(vec.begin() + index, item);
    2400 }
    2401 
    2402 template<typename T, typename allocatorT>
    2403 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    2404 {
    2405  vec.erase(vec.begin() + index);
    2406 }
    2407 
    2408 #else // #if VMA_USE_STL_VECTOR
    2409 
    2410 /* Class with an interface compatible with a subset of std::vector.
    2411 T must be POD, because constructors and destructors are not called and memcpy
    2412 is used to move these objects. */
    2413 template<typename T, typename AllocatorT>
    2414 class VmaVector
    2415 {
    2416 public:
    2417  typedef T value_type;
    2418 
    2419  VmaVector(const AllocatorT& allocator) :
    2420  m_Allocator(allocator),
    2421  m_pArray(VMA_NULL),
    2422  m_Count(0),
    2423  m_Capacity(0)
    2424  {
    2425  }
    2426 
    2427  VmaVector(size_t count, const AllocatorT& allocator) :
    2428  m_Allocator(allocator),
    2429  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    2430  m_Count(count),
    2431  m_Capacity(count)
    2432  {
    2433  }
    2434 
    2435  VmaVector(const VmaVector<T, AllocatorT>& src) :
    2436  m_Allocator(src.m_Allocator),
    2437  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    2438  m_Count(src.m_Count),
    2439  m_Capacity(src.m_Count)
    2440  {
    2441  if(m_Count != 0)
    2442  {
    2443  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    2444  }
    2445  }
    2446 
    2447  ~VmaVector()
    2448  {
    2449  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2450  }
    2451 
    2452  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    2453  {
    2454  if(&rhs != this)
    2455  {
    2456  resize(rhs.m_Count);
    2457  if(m_Count != 0)
    2458  {
    2459  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    2460  }
    2461  }
    2462  return *this;
    2463  }
    2464 
    2465  bool empty() const { return m_Count == 0; }
    2466  size_t size() const { return m_Count; }
    2467  T* data() { return m_pArray; }
    2468  const T* data() const { return m_pArray; }
    2469 
    2470  T& operator[](size_t index)
    2471  {
    2472  VMA_HEAVY_ASSERT(index < m_Count);
    2473  return m_pArray[index];
    2474  }
    2475  const T& operator[](size_t index) const
    2476  {
    2477  VMA_HEAVY_ASSERT(index < m_Count);
    2478  return m_pArray[index];
    2479  }
    2480 
    2481  T& front()
    2482  {
    2483  VMA_HEAVY_ASSERT(m_Count > 0);
    2484  return m_pArray[0];
    2485  }
    2486  const T& front() const
    2487  {
    2488  VMA_HEAVY_ASSERT(m_Count > 0);
    2489  return m_pArray[0];
    2490  }
    2491  T& back()
    2492  {
    2493  VMA_HEAVY_ASSERT(m_Count > 0);
    2494  return m_pArray[m_Count - 1];
    2495  }
    2496  const T& back() const
    2497  {
    2498  VMA_HEAVY_ASSERT(m_Count > 0);
    2499  return m_pArray[m_Count - 1];
    2500  }
    2501 
    2502  void reserve(size_t newCapacity, bool freeMemory = false)
    2503  {
    2504  newCapacity = VMA_MAX(newCapacity, m_Count);
    2505 
    2506  if((newCapacity < m_Capacity) && !freeMemory)
    2507  {
    2508  newCapacity = m_Capacity;
    2509  }
    2510 
    2511  if(newCapacity != m_Capacity)
    2512  {
    2513  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    2514  if(m_Count != 0)
    2515  {
    2516  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    2517  }
    2518  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2519  m_Capacity = newCapacity;
    2520  m_pArray = newArray;
    2521  }
    2522  }
    2523 
    2524  void resize(size_t newCount, bool freeMemory = false)
    2525  {
    2526  size_t newCapacity = m_Capacity;
    2527  if(newCount > m_Capacity)
    2528  {
    2529  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    2530  }
    2531  else if(freeMemory)
    2532  {
    2533  newCapacity = newCount;
    2534  }
    2535 
    2536  if(newCapacity != m_Capacity)
    2537  {
    2538  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    2539  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    2540  if(elementsToCopy != 0)
    2541  {
    2542  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    2543  }
    2544  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    2545  m_Capacity = newCapacity;
    2546  m_pArray = newArray;
    2547  }
    2548 
    2549  m_Count = newCount;
    2550  }
    2551 
    2552  void clear(bool freeMemory = false)
    2553  {
    2554  resize(0, freeMemory);
    2555  }
    2556 
    2557  void insert(size_t index, const T& src)
    2558  {
    2559  VMA_HEAVY_ASSERT(index <= m_Count);
    2560  const size_t oldCount = size();
    2561  resize(oldCount + 1);
    2562  if(index < oldCount)
    2563  {
    2564  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    2565  }
    2566  m_pArray[index] = src;
    2567  }
    2568 
    2569  void remove(size_t index)
    2570  {
    2571  VMA_HEAVY_ASSERT(index < m_Count);
    2572  const size_t oldCount = size();
    2573  if(index < oldCount - 1)
    2574  {
    2575  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    2576  }
    2577  resize(oldCount - 1);
    2578  }
    2579 
    2580  void push_back(const T& src)
    2581  {
    2582  const size_t newIndex = size();
    2583  resize(newIndex + 1);
    2584  m_pArray[newIndex] = src;
    2585  }
    2586 
    2587  void pop_back()
    2588  {
    2589  VMA_HEAVY_ASSERT(m_Count > 0);
    2590  resize(size() - 1);
    2591  }
    2592 
    2593  void push_front(const T& src)
    2594  {
    2595  insert(0, src);
    2596  }
    2597 
    2598  void pop_front()
    2599  {
    2600  VMA_HEAVY_ASSERT(m_Count > 0);
    2601  remove(0);
    2602  }
    2603 
    2604  typedef T* iterator;
    2605 
    2606  iterator begin() { return m_pArray; }
    2607  iterator end() { return m_pArray + m_Count; }
    2608 
    2609 private:
    2610  AllocatorT m_Allocator;
    2611  T* m_pArray;
    2612  size_t m_Count;
    2613  size_t m_Capacity;
    2614 };
    2615 
    2616 template<typename T, typename allocatorT>
    2617 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    2618 {
    2619  vec.insert(index, item);
    2620 }
    2621 
    2622 template<typename T, typename allocatorT>
    2623 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    2624 {
    2625  vec.remove(index);
    2626 }
    2627 
    2628 #endif // #if VMA_USE_STL_VECTOR
    2629 
    2630 template<typename CmpLess, typename VectorT>
    2631 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    2632 {
    2633  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    2634  vector.data(),
    2635  vector.data() + vector.size(),
    2636  value,
    2637  CmpLess()) - vector.data();
    2638  VmaVectorInsert(vector, indexToInsert, value);
    2639  return indexToInsert;
    2640 }
    2641 
    2642 template<typename CmpLess, typename VectorT>
    2643 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    2644 {
    2645  CmpLess comparator;
    2646  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    2647  vector.begin(),
    2648  vector.end(),
    2649  value,
    2650  comparator);
    2651  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    2652  {
    2653  size_t indexToRemove = it - vector.begin();
    2654  VmaVectorRemove(vector, indexToRemove);
    2655  return true;
    2656  }
    2657  return false;
    2658 }
    2659 
    2660 template<typename CmpLess, typename VectorT>
    2661 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
    2662 {
    2663  CmpLess comparator;
    2664  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
    2665  vector.data(),
    2666  vector.data() + vector.size(),
    2667  value,
    2668  comparator);
    2669  if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
    2670  {
    2671  return size_t(it - vector.data());
    2672  }
    2673  else
    2674  {
    2675  return vector.size();
    2676  }
    2677 }
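// Illustrative sketch (not part of the library): keeping a VmaVector sorted
// with the helpers above. The same CmpLess functor must be used for every
// operation on a given vector, otherwise the binary search results are
// meaningless.
#if 0
struct ExampleLess
{
    bool operator()(uint32_t lhs, uint32_t rhs) const { return lhs < rhs; }
};

static void ExampleSortedVector(const VkAllocationCallbacks* pCallbacks)
{
    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v(
        VmaStlAllocator<uint32_t>(pCallbacks));
    VmaVectorInsertSorted<ExampleLess>(v, 7u);
    VmaVectorInsertSorted<ExampleLess>(v, 3u); // Lands in front of 7.
    const bool removed = VmaVectorRemoveSorted<ExampleLess>(v, 7u); // true
    (void)removed;
}
#endif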
    2678 
    2679 ////////////////////////////////////////////////////////////////////////////////
    2680 // class VmaPoolAllocator
    2681 
    2682 /*
    2683 Allocator for objects of type T, using a list of arrays (pools) to speed up
    2684 allocation. The number of elements that can be allocated is not bounded,
    2685 because the allocator can create multiple blocks.
    2686 */
    2687 template<typename T>
    2688 class VmaPoolAllocator
    2689 {
    2690 public:
    2691  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    2692  ~VmaPoolAllocator();
    2693  void Clear();
    2694  T* Alloc();
    2695  void Free(T* ptr);
    2696 
    2697 private:
    2698  union Item
    2699  {
    2700  uint32_t NextFreeIndex;
    2701  T Value;
    2702  };
    2703 
    2704  struct ItemBlock
    2705  {
    2706  Item* pItems;
    2707  uint32_t FirstFreeIndex;
    2708  };
    2709 
    2710  const VkAllocationCallbacks* m_pAllocationCallbacks;
    2711  size_t m_ItemsPerBlock;
    2712  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    2713 
    2714  ItemBlock& CreateNewBlock();
    2715 };
    2716 
    2717 template<typename T>
    2718 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    2719  m_pAllocationCallbacks(pAllocationCallbacks),
    2720  m_ItemsPerBlock(itemsPerBlock),
    2721  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    2722 {
    2723  VMA_ASSERT(itemsPerBlock > 0);
    2724 }
    2725 
    2726 template<typename T>
    2727 VmaPoolAllocator<T>::~VmaPoolAllocator()
    2728 {
    2729  Clear();
    2730 }
    2731 
    2732 template<typename T>
    2733 void VmaPoolAllocator<T>::Clear()
    2734 {
    2735  for(size_t i = m_ItemBlocks.size(); i--; )
    2736  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    2737  m_ItemBlocks.clear();
    2738 }
    2739 
    2740 template<typename T>
    2741 T* VmaPoolAllocator<T>::Alloc()
    2742 {
    2743  for(size_t i = m_ItemBlocks.size(); i--; )
    2744  {
    2745  ItemBlock& block = m_ItemBlocks[i];
    2746  // This block has some free items: Use first one.
    2747  if(block.FirstFreeIndex != UINT32_MAX)
    2748  {
    2749  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    2750  block.FirstFreeIndex = pItem->NextFreeIndex;
    2751  return &pItem->Value;
    2752  }
    2753  }
    2754 
    2755  // No block has free item: Create new one and use it.
    2756  ItemBlock& newBlock = CreateNewBlock();
    2757  Item* const pItem = &newBlock.pItems[0];
    2758  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    2759  return &pItem->Value;
    2760 }
    2761 
    2762 template<typename T>
    2763 void VmaPoolAllocator<T>::Free(T* ptr)
    2764 {
    2765  // Search all memory blocks to find ptr.
    2766  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    2767  {
    2768  ItemBlock& block = m_ItemBlocks[i];
    2769 
    2770  // Casting to union.
    2771  Item* pItemPtr;
    2772  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    2773 
    2774  // Check if pItemPtr is in address range of this block.
    2775  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    2776  {
    2777  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    2778  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    2779  block.FirstFreeIndex = index;
    2780  return;
    2781  }
    2782  }
    2783  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    2784 }
    2785 
    2786 template<typename T>
    2787 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    2788 {
    2789  ItemBlock newBlock = {
    2790  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    2791 
    2792  m_ItemBlocks.push_back(newBlock);
    2793 
    2794  // Setup singly-linked list of all free items in this block.
    2795  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    2796  newBlock.pItems[i].NextFreeIndex = i + 1;
    2797  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    2798  return m_ItemBlocks.back();
    2799 }
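// Illustrative sketch (not part of the library): the pool hands out fixed-size
// items from per-block free lists in O(1) and allocates a new block only when
// every existing block is full.
#if 0
static void ExamplePoolAllocator(const VkAllocationCallbacks* pCallbacks)
{
    VmaPoolAllocator<uint64_t> pool(pCallbacks, 32); // 32 items per block.
    uint64_t* a = pool.Alloc(); // First block is created lazily here.
    uint64_t* b = pool.Alloc();
    pool.Free(a); // a's slot becomes the head of its block's free list.
    uint64_t* c = pool.Alloc(); // Reuses the slot just freed: c == a.
    pool.Free(b);
    pool.Free(c);
}
#endif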
    2800 
    2801 ////////////////////////////////////////////////////////////////////////////////
    2802 // class VmaRawList, VmaList
    2803 
    2804 #if VMA_USE_STL_LIST
    2805 
    2806 #define VmaList std::list
    2807 
    2808 #else // #if VMA_USE_STL_LIST
    2809 
    2810 template<typename T>
    2811 struct VmaListItem
    2812 {
    2813  VmaListItem* pPrev;
    2814  VmaListItem* pNext;
    2815  T Value;
    2816 };
    2817 
    2818 // Doubly linked list.
    2819 template<typename T>
    2820 class VmaRawList
    2821 {
    2822 public:
    2823  typedef VmaListItem<T> ItemType;
    2824 
    2825  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    2826  ~VmaRawList();
    2827  void Clear();
    2828 
    2829  size_t GetCount() const { return m_Count; }
    2830  bool IsEmpty() const { return m_Count == 0; }
    2831 
    2832  ItemType* Front() { return m_pFront; }
    2833  const ItemType* Front() const { return m_pFront; }
    2834  ItemType* Back() { return m_pBack; }
    2835  const ItemType* Back() const { return m_pBack; }
    2836 
    2837  ItemType* PushBack();
    2838  ItemType* PushFront();
    2839  ItemType* PushBack(const T& value);
    2840  ItemType* PushFront(const T& value);
    2841  void PopBack();
    2842  void PopFront();
    2843 
    2844  // Item can be null - it means PushBack.
    2845  ItemType* InsertBefore(ItemType* pItem);
    2846  // Item can be null - it means PushFront.
    2847  ItemType* InsertAfter(ItemType* pItem);
    2848 
    2849  ItemType* InsertBefore(ItemType* pItem, const T& value);
    2850  ItemType* InsertAfter(ItemType* pItem, const T& value);
    2851 
    2852  void Remove(ItemType* pItem);
    2853 
    2854 private:
    2855  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    2856  VmaPoolAllocator<ItemType> m_ItemAllocator;
    2857  ItemType* m_pFront;
    2858  ItemType* m_pBack;
    2859  size_t m_Count;
    2860 
    2861  // Declared but not defined, to block the copy constructor and assignment operator.
    2862  VmaRawList(const VmaRawList<T>& src);
    2863  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
    2864 };
    2865 
    2866 template<typename T>
    2867 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    2868  m_pAllocationCallbacks(pAllocationCallbacks),
    2869  m_ItemAllocator(pAllocationCallbacks, 128),
    2870  m_pFront(VMA_NULL),
    2871  m_pBack(VMA_NULL),
    2872  m_Count(0)
    2873 {
    2874 }
    2875 
    2876 template<typename T>
    2877 VmaRawList<T>::~VmaRawList()
    2878 {
    2879  // Intentionally not calling Clear, because returning every item to
    2880  // m_ItemAllocator as free would be unnecessary work: the allocator is destroyed anyway.
    2881 }
    2882 
    2883 template<typename T>
    2884 void VmaRawList<T>::Clear()
    2885 {
    2886  if(IsEmpty() == false)
    2887  {
    2888  ItemType* pItem = m_pBack;
    2889  while(pItem != VMA_NULL)
    2890  {
    2891  ItemType* const pPrevItem = pItem->pPrev;
    2892  m_ItemAllocator.Free(pItem);
    2893  pItem = pPrevItem;
    2894  }
    2895  m_pFront = VMA_NULL;
    2896  m_pBack = VMA_NULL;
    2897  m_Count = 0;
    2898  }
    2899 }
    2900 
    2901 template<typename T>
    2902 VmaListItem<T>* VmaRawList<T>::PushBack()
    2903 {
    2904  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2905  pNewItem->pNext = VMA_NULL;
    2906  if(IsEmpty())
    2907  {
    2908  pNewItem->pPrev = VMA_NULL;
    2909  m_pFront = pNewItem;
    2910  m_pBack = pNewItem;
    2911  m_Count = 1;
    2912  }
    2913  else
    2914  {
    2915  pNewItem->pPrev = m_pBack;
    2916  m_pBack->pNext = pNewItem;
    2917  m_pBack = pNewItem;
    2918  ++m_Count;
    2919  }
    2920  return pNewItem;
    2921 }
    2922 
    2923 template<typename T>
    2924 VmaListItem<T>* VmaRawList<T>::PushFront()
    2925 {
    2926  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    2927  pNewItem->pPrev = VMA_NULL;
    2928  if(IsEmpty())
    2929  {
    2930  pNewItem->pNext = VMA_NULL;
    2931  m_pFront = pNewItem;
    2932  m_pBack = pNewItem;
    2933  m_Count = 1;
    2934  }
    2935  else
    2936  {
    2937  pNewItem->pNext = m_pFront;
    2938  m_pFront->pPrev = pNewItem;
    2939  m_pFront = pNewItem;
    2940  ++m_Count;
    2941  }
    2942  return pNewItem;
    2943 }
    2944 
    2945 template<typename T>
    2946 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    2947 {
    2948  ItemType* const pNewItem = PushBack();
    2949  pNewItem->Value = value;
    2950  return pNewItem;
    2951 }
    2952 
    2953 template<typename T>
    2954 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    2955 {
    2956  ItemType* const pNewItem = PushFront();
    2957  pNewItem->Value = value;
    2958  return pNewItem;
    2959 }
    2960 
    2961 template<typename T>
    2962 void VmaRawList<T>::PopBack()
    2963 {
    2964  VMA_HEAVY_ASSERT(m_Count > 0);
    2965  ItemType* const pBackItem = m_pBack;
    2966  ItemType* const pPrevItem = pBackItem->pPrev;
    2967  if(pPrevItem != VMA_NULL)
    2968  {
    2969  pPrevItem->pNext = VMA_NULL;
    2970  }
    2971  m_pBack = pPrevItem;
    2972  m_ItemAllocator.Free(pBackItem);
    2973  --m_Count;
    2974 }
    2975 
    2976 template<typename T>
    2977 void VmaRawList<T>::PopFront()
    2978 {
    2979  VMA_HEAVY_ASSERT(m_Count > 0);
    2980  ItemType* const pFrontItem = m_pFront;
    2981  ItemType* const pNextItem = pFrontItem->pNext;
    2982  if(pNextItem != VMA_NULL)
    2983  {
    2984  pNextItem->pPrev = VMA_NULL;
    2985  }
    2986  m_pFront = pNextItem;
    2987  m_ItemAllocator.Free(pFrontItem);
    2988  --m_Count;
    2989 }
    2990 
    2991 template<typename T>
    2992 void VmaRawList<T>::Remove(ItemType* pItem)
    2993 {
    2994  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    2995  VMA_HEAVY_ASSERT(m_Count > 0);
    2996 
    2997  if(pItem->pPrev != VMA_NULL)
    2998  {
    2999  pItem->pPrev->pNext = pItem->pNext;
    3000  }
    3001  else
    3002  {
    3003  VMA_HEAVY_ASSERT(m_pFront == pItem);
    3004  m_pFront = pItem->pNext;
    3005  }
    3006 
    3007  if(pItem->pNext != VMA_NULL)
    3008  {
    3009  pItem->pNext->pPrev = pItem->pPrev;
    3010  }
    3011  else
    3012  {
    3013  VMA_HEAVY_ASSERT(m_pBack == pItem);
    3014  m_pBack = pItem->pPrev;
    3015  }
    3016 
    3017  m_ItemAllocator.Free(pItem);
    3018  --m_Count;
    3019 }
    3020 
    3021 template<typename T>
    3022 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    3023 {
    3024  if(pItem != VMA_NULL)
    3025  {
    3026  ItemType* const prevItem = pItem->pPrev;
    3027  ItemType* const newItem = m_ItemAllocator.Alloc();
    3028  newItem->pPrev = prevItem;
    3029  newItem->pNext = pItem;
    3030  pItem->pPrev = newItem;
    3031  if(prevItem != VMA_NULL)
    3032  {
    3033  prevItem->pNext = newItem;
    3034  }
    3035  else
    3036  {
    3037  VMA_HEAVY_ASSERT(m_pFront == pItem);
    3038  m_pFront = newItem;
    3039  }
    3040  ++m_Count;
    3041  return newItem;
    3042  }
    3043  else
    3044  return PushBack();
    3045 }
    3046 
    3047 template<typename T>
    3048 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    3049 {
    3050  if(pItem != VMA_NULL)
    3051  {
    3052  ItemType* const nextItem = pItem->pNext;
    3053  ItemType* const newItem = m_ItemAllocator.Alloc();
    3054  newItem->pNext = nextItem;
    3055  newItem->pPrev = pItem;
    3056  pItem->pNext = newItem;
    3057  if(nextItem != VMA_NULL)
    3058  {
    3059  nextItem->pPrev = newItem;
    3060  }
    3061  else
    3062  {
    3063  VMA_HEAVY_ASSERT(m_pBack == pItem);
    3064  m_pBack = newItem;
    3065  }
    3066  ++m_Count;
    3067  return newItem;
    3068  }
    3069  else
    3070  return PushFront();
    3071 }
    3072 
    3073 template<typename T>
    3074 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    3075 {
    3076  ItemType* const newItem = InsertBefore(pItem);
    3077  newItem->Value = value;
    3078  return newItem;
    3079 }
    3080 
    3081 template<typename T>
    3082 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    3083 {
    3084  ItemType* const newItem = InsertAfter(pItem);
    3085  newItem->Value = value;
    3086  return newItem;
    3087 }
    3088 
    3089 template<typename T, typename AllocatorT>
    3090 class VmaList
    3091 {
    3092 public:
    3093  class iterator
    3094  {
    3095  public:
    3096  iterator() :
    3097  m_pList(VMA_NULL),
    3098  m_pItem(VMA_NULL)
    3099  {
    3100  }
    3101 
    3102  T& operator*() const
    3103  {
    3104  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3105  return m_pItem->Value;
    3106  }
    3107  T* operator->() const
    3108  {
    3109  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3110  return &m_pItem->Value;
    3111  }
    3112 
    3113  iterator& operator++()
    3114  {
    3115  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3116  m_pItem = m_pItem->pNext;
    3117  return *this;
    3118  }
    3119  iterator& operator--()
    3120  {
    3121  if(m_pItem != VMA_NULL)
    3122  {
    3123  m_pItem = m_pItem->pPrev;
    3124  }
    3125  else
    3126  {
    3127  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    3128  m_pItem = m_pList->Back();
    3129  }
    3130  return *this;
    3131  }
    3132 
    3133  iterator operator++(int)
    3134  {
    3135  iterator result = *this;
    3136  ++*this;
    3137  return result;
    3138  }
    3139  iterator operator--(int)
    3140  {
    3141  iterator result = *this;
    3142  --*this;
    3143  return result;
    3144  }
    3145 
    3146  bool operator==(const iterator& rhs) const
    3147  {
    3148  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    3149  return m_pItem == rhs.m_pItem;
    3150  }
    3151  bool operator!=(const iterator& rhs) const
    3152  {
    3153  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    3154  return m_pItem != rhs.m_pItem;
    3155  }
    3156 
    3157  private:
    3158  VmaRawList<T>* m_pList;
    3159  VmaListItem<T>* m_pItem;
    3160 
    3161  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    3162  m_pList(pList),
    3163  m_pItem(pItem)
    3164  {
    3165  }
    3166 
    3167  friend class VmaList<T, AllocatorT>;
    3168  };
    3169 
    3170  class const_iterator
    3171  {
    3172  public:
    3173  const_iterator() :
    3174  m_pList(VMA_NULL),
    3175  m_pItem(VMA_NULL)
    3176  {
    3177  }
    3178 
    3179  const_iterator(const iterator& src) :
    3180  m_pList(src.m_pList),
    3181  m_pItem(src.m_pItem)
    3182  {
    3183  }
    3184 
    3185  const T& operator*() const
    3186  {
    3187  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3188  return m_pItem->Value;
    3189  }
    3190  const T* operator->() const
    3191  {
    3192  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3193  return &m_pItem->Value;
    3194  }
    3195 
    3196  const_iterator& operator++()
    3197  {
    3198  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    3199  m_pItem = m_pItem->pNext;
    3200  return *this;
    3201  }
    3202  const_iterator& operator--()
    3203  {
    3204  if(m_pItem != VMA_NULL)
    3205  {
    3206  m_pItem = m_pItem->pPrev;
    3207  }
    3208  else
    3209  {
    3210  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    3211  m_pItem = m_pList->Back();
    3212  }
    3213  return *this;
    3214  }
    3215 
    3216  const_iterator operator++(int)
    3217  {
    3218  const_iterator result = *this;
    3219  ++*this;
    3220  return result;
    3221  }
    3222  const_iterator operator--(int)
    3223  {
    3224  const_iterator result = *this;
    3225  --*this;
    3226  return result;
    3227  }
    3228 
    3229  bool operator==(const const_iterator& rhs) const
    3230  {
    3231  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    3232  return m_pItem == rhs.m_pItem;
    3233  }
    3234  bool operator!=(const const_iterator& rhs) const
    3235  {
    3236  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    3237  return m_pItem != rhs.m_pItem;
    3238  }
    3239 
    3240  private:
    3241  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    3242  m_pList(pList),
    3243  m_pItem(pItem)
    3244  {
    3245  }
    3246 
    3247  const VmaRawList<T>* m_pList;
    3248  const VmaListItem<T>* m_pItem;
    3249 
    3250  friend class VmaList<T, AllocatorT>;
    3251  };
    3252 
    3253  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    3254 
    3255  bool empty() const { return m_RawList.IsEmpty(); }
    3256  size_t size() const { return m_RawList.GetCount(); }
    3257 
    3258  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    3259  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    3260 
    3261  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    3262  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    3263 
    3264  void clear() { m_RawList.Clear(); }
    3265  void push_back(const T& value) { m_RawList.PushBack(value); }
    3266  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    3267  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    3268 
    3269 private:
    3270  VmaRawList<T> m_RawList;
    3271 };
    3272 
    3273 #endif // #if VMA_USE_STL_LIST
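// Illustrative sketch (not part of the library): VmaList mirrors exactly the
// std::list subset the allocator needs - iteration, insert-before, erase -
// while drawing its nodes from a VmaPoolAllocator instead of the heap.
#if 0
static void ExampleList(const VkAllocationCallbacks* pCallbacks)
{
    typedef VmaList< uint32_t, VmaStlAllocator<uint32_t> > ListT;
    ListT list(VmaStlAllocator<uint32_t>(pCallbacks));
    list.push_back(10);
    list.push_back(30);
    ListT::iterator it = list.begin();
    ++it;                // Points to 30.
    list.insert(it, 20); // Inserts before it: 10, 20, 30.
    list.erase(it);      // Removes 30, leaving 10, 20.
}
#endif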
    3274 
    3275 ////////////////////////////////////////////////////////////////////////////////
    3276 // class VmaMap
    3277 
    3278 // Unused in this version.
    3279 #if 0
    3280 
    3281 #if VMA_USE_STL_UNORDERED_MAP
    3282 
    3283 #define VmaPair std::pair
    3284 
    3285 #define VMA_MAP_TYPE(KeyT, ValueT) \
    3286  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    3287 
    3288 #else // #if VMA_USE_STL_UNORDERED_MAP
    3289 
    3290 template<typename T1, typename T2>
    3291 struct VmaPair
    3292 {
    3293  T1 first;
    3294  T2 second;
    3295 
    3296  VmaPair() : first(), second() { }
    3297  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    3298 };
    3299 
    3300 /* Class compatible with a subset of the interface of std::unordered_map.
    3301 KeyT and ValueT must be POD because they will be stored in VmaVector.
    3302 */
    3303 template<typename KeyT, typename ValueT>
    3304 class VmaMap
    3305 {
    3306 public:
    3307  typedef VmaPair<KeyT, ValueT> PairType;
    3308  typedef PairType* iterator;
    3309 
    3310  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    3311 
    3312  iterator begin() { return m_Vector.begin(); }
    3313  iterator end() { return m_Vector.end(); }
    3314 
    3315  void insert(const PairType& pair);
    3316  iterator find(const KeyT& key);
    3317  void erase(iterator it);
    3318 
    3319 private:
    3320  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    3321 };
    3322 
    3323 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    3324 
    3325 template<typename FirstT, typename SecondT>
    3326 struct VmaPairFirstLess
    3327 {
    3328  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    3329  {
    3330  return lhs.first < rhs.first;
    3331  }
    3332  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    3333  {
    3334  return lhs.first < rhsFirst;
    3335  }
    3336 };
    3337 
    3338 template<typename KeyT, typename ValueT>
    3339 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    3340 {
    3341  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3342  m_Vector.data(),
    3343  m_Vector.data() + m_Vector.size(),
    3344  pair,
    3345  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    3346  VmaVectorInsert(m_Vector, indexToInsert, pair);
    3347 }
    3348 
    3349 template<typename KeyT, typename ValueT>
    3350 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    3351 {
    3352  PairType* it = VmaBinaryFindFirstNotLess(
    3353  m_Vector.data(),
    3354  m_Vector.data() + m_Vector.size(),
    3355  key,
    3356  VmaPairFirstLess<KeyT, ValueT>());
    3357  if((it != m_Vector.end()) && (it->first == key))
    3358  {
    3359  return it;
    3360  }
    3361  else
    3362  {
    3363  return m_Vector.end();
    3364  }
    3365 }
    3366 
    3367 template<typename KeyT, typename ValueT>
    3368 void VmaMap<KeyT, ValueT>::erase(iterator it)
    3369 {
    3370  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    3371 }
    3372 
    3373 #endif // #if VMA_USE_STL_UNORDERED_MAP
    3374 
    3375 #endif // #if 0
    3376 
    3377 ////////////////////////////////////////////////////////////////////////////////
    3378 
    3379 class VmaDeviceMemoryBlock;
    3380 
    3381 struct VmaAllocation_T
    3382 {
    3383 private:
    3384  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    3385 
    3386  enum FLAGS
    3387  {
    3388  FLAG_USER_DATA_STRING = 0x01,
    3389  };
    3390 
    3391 public:
    3392  enum ALLOCATION_TYPE
    3393  {
    3394  ALLOCATION_TYPE_NONE,
    3395  ALLOCATION_TYPE_BLOCK,
    3396  ALLOCATION_TYPE_DEDICATED,
    3397  };
    3398 
    3399  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
    3400  m_Alignment(1),
    3401  m_Size(0),
    3402  m_pUserData(VMA_NULL),
    3403  m_LastUseFrameIndex(currentFrameIndex),
    3404  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
    3405  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
    3406  m_MapCount(0),
    3407  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    3408  {
    3409  }
    3410 
    3411  ~VmaAllocation_T()
    3412  {
    3413  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
    3414 
    3415  // Check if owned string was freed.
    3416  VMA_ASSERT(m_pUserData == VMA_NULL);
    3417  }
    3418 
    3419  void InitBlockAllocation(
    3420  VmaPool hPool,
    3421  VmaDeviceMemoryBlock* block,
    3422  VkDeviceSize offset,
    3423  VkDeviceSize alignment,
    3424  VkDeviceSize size,
    3425  VmaSuballocationType suballocationType,
    3426  bool mapped,
    3427  bool canBecomeLost)
    3428  {
    3429  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    3430  VMA_ASSERT(block != VMA_NULL);
    3431  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    3432  m_Alignment = alignment;
    3433  m_Size = size;
    3434  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    3435  m_SuballocationType = (uint8_t)suballocationType;
    3436  m_BlockAllocation.m_hPool = hPool;
    3437  m_BlockAllocation.m_Block = block;
    3438  m_BlockAllocation.m_Offset = offset;
    3439  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    3440  }
    3441 
    3442  void InitLost()
    3443  {
    3444  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    3445  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    3446  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    3447  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
    3448  m_BlockAllocation.m_Block = VMA_NULL;
    3449  m_BlockAllocation.m_Offset = 0;
    3450  m_BlockAllocation.m_CanBecomeLost = true;
    3451  }
    3452 
    3453  void ChangeBlockAllocation(
    3454  VmaAllocator hAllocator,
    3455  VmaDeviceMemoryBlock* block,
    3456  VkDeviceSize offset);
    3457 
    3458  // pMappedData not null means allocation is created with MAPPED flag.
    3459  void InitDedicatedAllocation(
    3460  uint32_t memoryTypeIndex,
    3461  VkDeviceMemory hMemory,
    3462  VmaSuballocationType suballocationType,
    3463  void* pMappedData,
    3464  VkDeviceSize size)
    3465  {
    3466  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    3467  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    3468  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
    3469  m_Alignment = 0;
    3470  m_Size = size;
    3471  m_SuballocationType = (uint8_t)suballocationType;
    3472  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    3473  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    3474  m_DedicatedAllocation.m_hMemory = hMemory;
    3475  m_DedicatedAllocation.m_pMappedData = pMappedData;
    3476  }
    3477 
    3478  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    3479  VkDeviceSize GetAlignment() const { return m_Alignment; }
    3480  VkDeviceSize GetSize() const { return m_Size; }
    3481  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    3482  void* GetUserData() const { return m_pUserData; }
    3483  void SetUserData(VmaAllocator hAllocator, void* pUserData);
    3484  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
    3485 
    3486  VmaDeviceMemoryBlock* GetBlock() const
    3487  {
    3488  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    3489  return m_BlockAllocation.m_Block;
    3490  }
    3491  VkDeviceSize GetOffset() const;
    3492  VkDeviceMemory GetMemory() const;
    3493  uint32_t GetMemoryTypeIndex() const;
    3494  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    3495  void* GetMappedData() const;
    3496  bool CanBecomeLost() const;
    3497  VmaPool GetPool() const;
    3498 
    3499  uint32_t GetLastUseFrameIndex() const
    3500  {
    3501  return m_LastUseFrameIndex.load();
    3502  }
    3503  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    3504  {
    3505  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    3506  }
    3507  /*
    3508  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    3509  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    3510  - Else, returns false.
    3511 
    3512  If hAllocation is already lost, assert - you should not call it then.
    3513  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    3514  */
    3515  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    3516 
    3517  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    3518  {
    3519  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    3520  outInfo.blockCount = 1;
    3521  outInfo.allocationCount = 1;
    3522  outInfo.unusedRangeCount = 0;
    3523  outInfo.usedBytes = m_Size;
    3524  outInfo.unusedBytes = 0;
    3525  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    3526  outInfo.unusedRangeSizeMin = UINT64_MAX;
    3527  outInfo.unusedRangeSizeMax = 0;
    3528  }
    3529 
    3530  void BlockAllocMap();
    3531  void BlockAllocUnmap();
    3532  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    3533  void DedicatedAllocUnmap(VmaAllocator hAllocator);
    3534 
    3535 private:
    3536  VkDeviceSize m_Alignment;
    3537  VkDeviceSize m_Size;
    3538  void* m_pUserData;
    3539  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    3540  uint8_t m_Type; // ALLOCATION_TYPE
    3541  uint8_t m_SuballocationType; // VmaSuballocationType
    3542  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    3543  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    3544  uint8_t m_MapCount;
    3545  uint8_t m_Flags; // enum FLAGS
    3546 
    3547  // Allocation out of VmaDeviceMemoryBlock.
    3548  struct BlockAllocation
    3549  {
    3550  VmaPool m_hPool; // Null if belongs to general memory.
    3551  VmaDeviceMemoryBlock* m_Block;
    3552  VkDeviceSize m_Offset;
    3553  bool m_CanBecomeLost;
    3554  };
    3555 
    3556  // Allocation for an object that has its own private VkDeviceMemory.
    3557  struct DedicatedAllocation
    3558  {
    3559  uint32_t m_MemoryTypeIndex;
    3560  VkDeviceMemory m_hMemory;
    3561  void* m_pMappedData; // Not null means memory is mapped.
    3562  };
    3563 
    3564  union
    3565  {
    3566  // Allocation out of VmaDeviceMemoryBlock.
    3567  BlockAllocation m_BlockAllocation;
    3568  // Allocation for an object that has its own private VkDeviceMemory.
    3569  DedicatedAllocation m_DedicatedAllocation;
    3570  };
    3571 
    3572  void FreeUserDataString(VmaAllocator hAllocator);
    3573 };
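// Illustrative sketch (an assumption about the implementation, which lives
// further down in this file): MakeLost() can be written as a compare-exchange
// loop over m_LastUseFrameIndex, so it races safely against concurrent
// vmaTouchAllocation() / vmaGetAllocationInfo() calls updating the same atomic.
#if 0
static bool ExampleMakeLost(VmaAllocation_T& alloc, uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t localLastUseFrameIndex = alloc.GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            VMA_ASSERT(0); // Must not be called on an already-lost allocation.
            return false;
        }
        if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            return false; // Still considered in use: cannot be made lost yet.
        }
        if(alloc.CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
        {
            return true; // Successfully marked as lost.
        }
        // CAS failed and reloaded localLastUseFrameIndex: retry.
    }
}
#endif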
    3574 
    3575 /*
    3576 Represents a region of a VmaDeviceMemoryBlock that is either assigned to an
    3577 allocation and returned as allocated memory, or free.
    3578 */
    3579 struct VmaSuballocation
    3580 {
    3581  VkDeviceSize offset;
    3582  VkDeviceSize size;
    3583  VmaAllocation hAllocation;
    3584  VmaSuballocationType type;
    3585 };
    3586 
    3587 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    3588 
    3589 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
    3590 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    3591 
    3592 /*
    3593 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    3594 
    3595 If canMakeOtherLost was false:
    3596 - item points to a FREE suballocation.
    3597 - itemsToMakeLostCount is 0.
    3598 
    3599 If canMakeOtherLost was true:
    3600 - item points to first of sequence of suballocations, which are either FREE,
    3601  or point to VmaAllocations that can become lost.
    3602 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    3603  the requested allocation to succeed.
    3604 */
    3605 struct VmaAllocationRequest
    3606 {
    3607  VkDeviceSize offset;
    3608  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    3609  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    3610  VmaSuballocationList::iterator item;
    3611  size_t itemsToMakeLostCount;
    3612 
    3613  VkDeviceSize CalcCost() const
    3614  {
    3615  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    3616  }
    3617 };
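// Illustrative arithmetic (not part of the library): with
// VMA_LOST_ALLOCATION_COST = 1048576, a request that would make 2 allocations
// lost, together occupying 262144 bytes, costs
// 262144 + 2 * 1048576 = 2359296, so a competing request that fits into plain
// free space (sumItemSize == 0, itemsToMakeLostCount == 0, cost 0) always wins.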
    3618 
    3619 /*
    3620 Data structure used for bookkeeping of allocations and unused ranges of memory
    3621 in a single VkDeviceMemory block.
    3622 */
    3623 class VmaBlockMetadata
    3624 {
    3625 public:
    3626  VmaBlockMetadata(VmaAllocator hAllocator);
    3627  ~VmaBlockMetadata();
    3628  void Init(VkDeviceSize size);
    3629 
    3630  // Validates all data structures inside this object. If not valid, returns false.
    3631  bool Validate() const;
    3632  VkDeviceSize GetSize() const { return m_Size; }
    3633  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    3634  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    3635  VkDeviceSize GetUnusedRangeSizeMax() const;
    3636  // Returns true if this block is empty - contains only a single free suballocation.
    3637  bool IsEmpty() const;
    3638 
    3639  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    3640  void AddPoolStats(VmaPoolStats& inoutStats) const;
    3641 
    3642 #if VMA_STATS_STRING_ENABLED
    3643  void PrintDetailedMap(class VmaJsonWriter& json) const;
    3644 #endif
    3645 
    3646  // Creates a trivial request for the case when the block is empty.
    3647  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
    3648 
    3649  // Tries to find a place for a suballocation with the given parameters inside this block.
    3650  // If it succeeds, fills pAllocationRequest and returns true.
    3651  // If it fails, returns false.
    3652  bool CreateAllocationRequest(
    3653  uint32_t currentFrameIndex,
    3654  uint32_t frameInUseCount,
    3655  VkDeviceSize bufferImageGranularity,
    3656  VkDeviceSize allocSize,
    3657  VkDeviceSize allocAlignment,
    3658  VmaSuballocationType allocType,
    3659  bool canMakeOtherLost,
    3660  VmaAllocationRequest* pAllocationRequest);
    3661 
    3662  bool MakeRequestedAllocationsLost(
    3663  uint32_t currentFrameIndex,
    3664  uint32_t frameInUseCount,
    3665  VmaAllocationRequest* pAllocationRequest);
    3666 
    3667  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    3668 
    3669  // Makes actual allocation based on request. Request must already be checked and valid.
    3670  void Alloc(
    3671  const VmaAllocationRequest& request,
    3672  VmaSuballocationType type,
    3673  VkDeviceSize allocSize,
    3674  VmaAllocation hAllocation);
    3675 
    3676  // Frees suballocation assigned to given memory region.
    3677  void Free(const VmaAllocation allocation);
    3678  void FreeAtOffset(VkDeviceSize offset);
    3679 
    3680 private:
    3681  VkDeviceSize m_Size;
    3682  uint32_t m_FreeCount;
    3683  VkDeviceSize m_SumFreeSize;
    3684  VmaSuballocationList m_Suballocations;
    3685  // Suballocations that are free and have size greater than a certain threshold.
    3686  // Sorted by size, ascending.
    3687  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    3688 
    3689  bool ValidateFreeSuballocationList() const;
    3690 
    3691  // Checks if a requested suballocation with the given parameters can be placed in the given suballocItem.
    3692  // If yes, fills pOffset and returns true. If no, returns false.
    3693  bool CheckAllocation(
    3694  uint32_t currentFrameIndex,
    3695  uint32_t frameInUseCount,
    3696  VkDeviceSize bufferImageGranularity,
    3697  VkDeviceSize allocSize,
    3698  VkDeviceSize allocAlignment,
    3699  VmaSuballocationType allocType,
    3700  VmaSuballocationList::const_iterator suballocItem,
    3701  bool canMakeOtherLost,
    3702  VkDeviceSize* pOffset,
    3703  size_t* itemsToMakeLostCount,
    3704  VkDeviceSize* pSumFreeSize,
    3705  VkDeviceSize* pSumItemSize) const;
    3706  // Given a free suballocation, merges it with the following one, which must also be free.
    3707  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    3708  // Releases the given suballocation, making it free.
    3709  // Merges it with adjacent free suballocations if applicable.
    3710  // Returns iterator to the new free suballocation at this place.
    3711  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    3712  // Given a free suballocation, inserts it into the sorted list
    3713  // m_FreeSuballocationsBySize if it is large enough to qualify.
    3714  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    3715  // Given a free suballocation, removes it from the sorted list
    3716  // m_FreeSuballocationsBySize if it was registered there.
    3717  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    3718 };
    3719 
    3720 // Helper class that represents mapped memory. Synchronized internally.
    3721 class VmaDeviceMemoryMapping
    3722 {
    3723 public:
    3724  VmaDeviceMemoryMapping();
    3725  ~VmaDeviceMemoryMapping();
    3726 
    3727  void* GetMappedData() const { return m_pMappedData; }
    3728 
    3729  // ppData can be null.
    3730  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData);
    3731  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count);
    3732 
    3733 private:
    3734  VMA_MUTEX m_Mutex;
    3735  uint32_t m_MapCount;
    3736  void* m_pMappedData;
    3737 };
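// Illustrative sketch (an assumption about the implementation defined later in
// this file): Map()/Unmap() reference-count the mapping under m_Mutex, so
// vkMapMemory() runs only on the 0 -> 1 transition and vkUnmapMemory() only on
// the 1 -> 0 transition. ExampleMapping and ExampleMap are hypothetical
// stand-ins; the count parameter of the real API and the VmaVulkanFunctions
// dispatch are omitted for brevity.
#if 0
struct ExampleMapping
{
    VMA_MUTEX Mutex;
    uint32_t MapCount;
    void* pMappedData;
};

static VkResult ExampleMap(ExampleMapping& m, VkDevice device, VkDeviceMemory hMemory, void** ppData)
{
    VmaMutexLock lock(m.Mutex, true);
    if(m.MapCount == 0)
    {
        // First mapping: map the whole VkDeviceMemory once.
        VkResult res = vkMapMemory(device, hMemory, 0, VK_WHOLE_SIZE, 0, &m.pMappedData);
        if(res != VK_SUCCESS)
        {
            return res;
        }
    }
    ++m.MapCount;
    if(ppData != VMA_NULL)
    {
        *ppData = m.pMappedData;
    }
    return VK_SUCCESS;
}
#endif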
    3738 
    3739 /*
    3740 Represents a single block of device memory (`VkDeviceMemory`) with all the
    3741 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
    3742 
    3743 Thread-safety: This class must be externally synchronized.
    3744 */
    3745 class VmaDeviceMemoryBlock
    3746 {
    3747 public:
    3748  uint32_t m_MemoryTypeIndex;
    3749  VkDeviceMemory m_hMemory;
    3750  VmaDeviceMemoryMapping m_Mapping;
    3751  VmaBlockMetadata m_Metadata;
    3752 
    3753  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    3754 
    3755  ~VmaDeviceMemoryBlock()
    3756  {
    3757  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    3758  }
    3759 
    3760  // Always call after construction.
    3761  void Init(
    3762  uint32_t newMemoryTypeIndex,
    3763  VkDeviceMemory newMemory,
    3764  VkDeviceSize newSize);
    3765  // Always call before destruction.
    3766  void Destroy(VmaAllocator allocator);
    3767 
    3768  // Validates all data structures inside this object. If not valid, returns false.
    3769  bool Validate() const;
    3770 
    3771  // ppData can be null.
    3772  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    3773  void Unmap(VmaAllocator hAllocator, uint32_t count);
    3774 };
    3775 
    3776 struct VmaPointerLess
    3777 {
    3778  bool operator()(const void* lhs, const void* rhs) const
    3779  {
    3780  return lhs < rhs;
    3781  }
    3782 };
    3783 
    3784 class VmaDefragmentator;
    3785 
    3786 /*
    3787 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    3788 Vulkan memory type.
    3789 
    3790 Synchronized internally with a mutex.
    3791 */
    3792 struct VmaBlockVector
    3793 {
    3794  VmaBlockVector(
    3795  VmaAllocator hAllocator,
    3796  uint32_t memoryTypeIndex,
    3797  VkDeviceSize preferredBlockSize,
    3798  size_t minBlockCount,
    3799  size_t maxBlockCount,
    3800  VkDeviceSize bufferImageGranularity,
    3801  uint32_t frameInUseCount,
    3802  bool isCustomPool);
    3803  ~VmaBlockVector();
    3804 
    3805  VkResult CreateMinBlocks();
    3806 
    3807  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    3808  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    3809  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    3810  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    3811 
    3812  void GetPoolStats(VmaPoolStats* pStats);
    3813 
    3814  bool IsEmpty() const { return m_Blocks.empty(); }
    3815 
    3816  VkResult Allocate(
    3817  VmaPool hCurrentPool,
    3818  uint32_t currentFrameIndex,
    3819  const VkMemoryRequirements& vkMemReq,
    3820  const VmaAllocationCreateInfo& createInfo,
    3821  VmaSuballocationType suballocType,
    3822  VmaAllocation* pAllocation);
    3823 
    3824  void Free(
    3825  VmaAllocation hAllocation);
    3826 
    3827  // Adds statistics of this BlockVector to pStats.
    3828  void AddStats(VmaStats* pStats);
    3829 
    3830 #if VMA_STATS_STRING_ENABLED
    3831  void PrintDetailedMap(class VmaJsonWriter& json);
    3832 #endif
    3833 
    3834  void MakePoolAllocationsLost(
    3835  uint32_t currentFrameIndex,
    3836  size_t* pLostAllocationCount);
    3837 
    3838  VmaDefragmentator* EnsureDefragmentator(
    3839  VmaAllocator hAllocator,
    3840  uint32_t currentFrameIndex);
    3841 
    3842  VkResult Defragment(
    3843  VmaDefragmentationStats* pDefragmentationStats,
    3844  VkDeviceSize& maxBytesToMove,
    3845  uint32_t& maxAllocationsToMove);
    3846 
    3847  void DestroyDefragmentator();
    3848 
    3849 private:
    3850  friend class VmaDefragmentator;
    3851 
    3852  const VmaAllocator m_hAllocator;
    3853  const uint32_t m_MemoryTypeIndex;
    3854  const VkDeviceSize m_PreferredBlockSize;
    3855  const size_t m_MinBlockCount;
    3856  const size_t m_MaxBlockCount;
    3857  const VkDeviceSize m_BufferImageGranularity;
    3858  const uint32_t m_FrameInUseCount;
    3859  const bool m_IsCustomPool;
    3860  VMA_MUTEX m_Mutex;
    3861  // Incrementally sorted by sumFreeSize, ascending.
    3862  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    3863  /* There can be at most one block that is completely empty - a
    3864  hysteresis to avoid the pessimistic case of alternating creation and
    3865  destruction of a VkDeviceMemory. */
    3866  bool m_HasEmptyBlock;
    3867  VmaDefragmentator* m_pDefragmentator;
    3868 
    3869  size_t CalcMaxBlockSize() const;
    3870 
    3871  // Finds and removes given block from vector.
    3872  void Remove(VmaDeviceMemoryBlock* pBlock);
    3873 
    3874  // Performs single step in sorting m_Blocks. They may not be fully sorted
    3875  // after this call.
    3876  void IncrementallySortBlocks();
    3877 
    3878  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    3879 };
    3880 
    3881 struct VmaPool_T
    3882 {
    3883 public:
    3884  VmaBlockVector m_BlockVector;
    3885 
    3886  // Takes ownership.
    3887  VmaPool_T(
    3888  VmaAllocator hAllocator,
    3889  const VmaPoolCreateInfo& createInfo);
    3890  ~VmaPool_T();
    3891 
    3892  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
    3893 
    3894 #if VMA_STATS_STRING_ENABLED
    3895  //void PrintDetailedMap(class VmaStringBuilder& sb);
    3896 #endif
    3897 };
    3898 
    3899 class VmaDefragmentator
    3900 {
    3901  const VmaAllocator m_hAllocator;
    3902  VmaBlockVector* const m_pBlockVector;
    3903  uint32_t m_CurrentFrameIndex;
    3904  VkDeviceSize m_BytesMoved;
    3905  uint32_t m_AllocationsMoved;
    3906 
    3907  struct AllocationInfo
    3908  {
    3909  VmaAllocation m_hAllocation;
    3910  VkBool32* m_pChanged;
    3911 
    3912  AllocationInfo() :
    3913  m_hAllocation(VK_NULL_HANDLE),
    3914  m_pChanged(VMA_NULL)
    3915  {
    3916  }
    3917  };
    3918 
    3919  struct AllocationInfoSizeGreater
    3920  {
    3921  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    3922  {
    3923  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    3924  }
    3925  };
    3926 
    3927  // Used between AddAllocation and Defragment.
    3928  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    3929 
    3930  struct BlockInfo
    3931  {
    3932  VmaDeviceMemoryBlock* m_pBlock;
    3933  bool m_HasNonMovableAllocations;
    3934  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    3935 
    3936  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    3937  m_pBlock(VMA_NULL),
    3938  m_HasNonMovableAllocations(true),
    3939  m_Allocations(pAllocationCallbacks),
    3940  m_pMappedDataForDefragmentation(VMA_NULL)
    3941  {
    3942  }
    3943 
    3944  void CalcHasNonMovableAllocations()
    3945  {
    3946  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
    3947  const size_t defragmentAllocCount = m_Allocations.size();
    3948  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    3949  }
    3950 
    3951  void SortAllocationsBySizeDescecnding()
    3952  {
    3953  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    3954  }
    3955 
    3956  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
    3957  void Unmap(VmaAllocator hAllocator);
    3958 
    3959  private:
    3960  // Not null if mapped for defragmentation only, not originally mapped.
    3961  void* m_pMappedDataForDefragmentation;
    3962  };
    3963 
    3964  struct BlockPointerLess
    3965  {
    3966  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    3967  {
    3968  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    3969  }
    3970  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    3971  {
    3972  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    3973  }
    3974  };
    3975 
    3976  // 1. Blocks with some non-movable allocations go first.
    3977  // 2. Blocks with smaller sumFreeSize go first.
    3978  struct BlockInfoCompareMoveDestination
    3979  {
    3980  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    3981  {
    3982  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    3983  {
    3984  return true;
    3985  }
    3986  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    3987  {
    3988  return false;
    3989  }
    3990  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
    3991  {
    3992  return true;
    3993  }
    3994  return false;
    3995  }
    3996  };
    3997 
    3998  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    3999  BlockInfoVector m_Blocks;
    4000 
    4001  VkResult DefragmentRound(
    4002  VkDeviceSize maxBytesToMove,
    4003  uint32_t maxAllocationsToMove);
    4004 
    4005  static bool MoveMakesSense(
    4006  size_t dstBlockIndex, VkDeviceSize dstOffset,
    4007  size_t srcBlockIndex, VkDeviceSize srcOffset);
    4008 
    4009 public:
    4010  VmaDefragmentator(
    4011  VmaAllocator hAllocator,
    4012  VmaBlockVector* pBlockVector,
    4013  uint32_t currentFrameIndex);
    4014 
    4015  ~VmaDefragmentator();
    4016 
    4017  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    4018  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    4019 
    4020  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    4021 
    4022  VkResult Defragment(
    4023  VkDeviceSize maxBytesToMove,
    4024  uint32_t maxAllocationsToMove);
    4025 };
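// Illustrative usage sketch (hypothetical variable names; the actual driver of
// this class is VmaBlockVector's defragmentation path): gather candidate
// allocations, then run rounds until the byte/move budget is spent.
//
//   VmaDefragmentator defragmentator(hAllocator, &blockVector, currentFrameIndex);
//   for(size_t i = 0; i < allocationCount; ++i)
//   {
//       defragmentator.AddAllocation(pAllocations[i], pAllocationsChanged + i);
//   }
//   VkResult res = defragmentator.Defragment(maxBytesToMove, maxAllocationsToMove);
//   // GetBytesMoved() and GetAllocationsMoved() then feed VmaDefragmentationStats.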
    4026 
    4027 // Main allocator object.
    4028 struct VmaAllocator_T
    4029 {
    4030  bool m_UseMutex;
    4031  bool m_UseKhrDedicatedAllocation;
    4032  VkDevice m_hDevice;
    4033  bool m_AllocationCallbacksSpecified;
    4034  VkAllocationCallbacks m_AllocationCallbacks;
    4035  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    4036 
    4037  // Number of bytes free out of limit, or VK_WHOLE_SIZE if there is no limit for that heap.
    4038  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    4039  VMA_MUTEX m_HeapSizeLimitMutex;
    4040 
    4041  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    4042  VkPhysicalDeviceMemoryProperties m_MemProps;
    4043 
    4044  // Default pools.
    4045  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    4046 
    4047  // Each vector is sorted by memory (handle value).
    4048  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    4049  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    4050  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    4051 
    4052  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    4053  ~VmaAllocator_T();
    4054 
    4055  const VkAllocationCallbacks* GetAllocationCallbacks() const
    4056  {
    4057  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    4058  }
    4059  const VmaVulkanFunctions& GetVulkanFunctions() const
    4060  {
    4061  return m_VulkanFunctions;
    4062  }
    4063 
    4064  VkDeviceSize GetBufferImageGranularity() const
    4065  {
    4066  return VMA_MAX(
    4067  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    4068  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    4069  }
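 // Example: with VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY at its default of 1,
 // this returns the device limit unchanged (often 1, 1024, or 4096 depending
 // on the GPU); raising the debug value forces stricter placement checks
 // between adjacent buffers and images.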
    4070 
    4071  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    4072  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    4073 
    4074  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    4075  {
    4076  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    4077  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    4078  }
    4079 
    4080  void GetBufferMemoryRequirements(
    4081  VkBuffer hBuffer,
    4082  VkMemoryRequirements& memReq,
    4083  bool& requiresDedicatedAllocation,
    4084  bool& prefersDedicatedAllocation) const;
    4085  void GetImageMemoryRequirements(
    4086  VkImage hImage,
    4087  VkMemoryRequirements& memReq,
    4088  bool& requiresDedicatedAllocation,
    4089  bool& prefersDedicatedAllocation) const;
    4090 
    4091  // Main allocation function.
    4092  VkResult AllocateMemory(
    4093  const VkMemoryRequirements& vkMemReq,
    4094  bool requiresDedicatedAllocation,
    4095  bool prefersDedicatedAllocation,
    4096  VkBuffer dedicatedBuffer,
    4097  VkImage dedicatedImage,
    4098  const VmaAllocationCreateInfo& createInfo,
    4099  VmaSuballocationType suballocType,
    4100  VmaAllocation* pAllocation);
    4101 
    4102  // Main deallocation function.
    4103  void FreeMemory(const VmaAllocation allocation);
    4104 
    4105  void CalculateStats(VmaStats* pStats);
    4106 
    4107 #if VMA_STATS_STRING_ENABLED
    4108  void PrintDetailedMap(class VmaJsonWriter& json);
    4109 #endif
    4110 
    4111  VkResult Defragment(
    4112  VmaAllocation* pAllocations,
    4113  size_t allocationCount,
    4114  VkBool32* pAllocationsChanged,
    4115  const VmaDefragmentationInfo* pDefragmentationInfo,
    4116  VmaDefragmentationStats* pDefragmentationStats);
    4117 
    4118  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    4119  bool TouchAllocation(VmaAllocation hAllocation);
    4120 
    4121  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    4122  void DestroyPool(VmaPool pool);
    4123  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    4124 
    4125  void SetCurrentFrameIndex(uint32_t frameIndex);
    4126 
    4127  void MakePoolAllocationsLost(
    4128  VmaPool hPool,
    4129  size_t* pLostAllocationCount);
    4130 
    4131  void CreateLostAllocation(VmaAllocation* pAllocation);
    4132 
    4133  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    4134  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    4135 
    4136  VkResult Map(VmaAllocation hAllocation, void** ppData);
    4137  void Unmap(VmaAllocation hAllocation);
    4138 
    4139 private:
    4140  VkDeviceSize m_PreferredLargeHeapBlockSize;
    4141 
    4142  VkPhysicalDevice m_PhysicalDevice;
    4143  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    4144 
    4145  VMA_MUTEX m_PoolsMutex;
    4146  // Protected by m_PoolsMutex. Sorted by pointer value.
    4147  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    4148 
    4149  VmaVulkanFunctions m_VulkanFunctions;
    4150 
    4151  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    4152 
    4153  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    4154 
    4155  VkResult AllocateMemoryOfType(
    4156  const VkMemoryRequirements& vkMemReq,
    4157  bool dedicatedAllocation,
    4158  VkBuffer dedicatedBuffer,
    4159  VkImage dedicatedImage,
    4160  const VmaAllocationCreateInfo& createInfo,
    4161  uint32_t memTypeIndex,
    4162  VmaSuballocationType suballocType,
    4163  VmaAllocation* pAllocation);
    4164 
    4165  // Allocates and registers new VkDeviceMemory specifically for single allocation.
    4166  VkResult AllocateDedicatedMemory(
    4167  VkDeviceSize size,
    4168  VmaSuballocationType suballocType,
    4169  uint32_t memTypeIndex,
    4170  bool map,
    4171  bool isUserDataString,
    4172  void* pUserData,
    4173  VkBuffer dedicatedBuffer,
    4174  VkImage dedicatedImage,
    4175  VmaAllocation* pAllocation);
    4176 
    4177  // Frees given allocation as dedicated memory: unregisters it and releases its VkDeviceMemory.
    4178  void FreeDedicatedMemory(VmaAllocation allocation);
    4179 };
    4180 
    4181 ////////////////////////////////////////////////////////////////////////////////
    4182 // Memory allocation #2 after VmaAllocator_T definition
    4183 
    4184 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    4185 {
    4186  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    4187 }
    4188 
    4189 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    4190 {
    4191  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    4192 }
    4193 
    4194 template<typename T>
    4195 static T* VmaAllocate(VmaAllocator hAllocator)
    4196 {
    4197  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    4198 }
    4199 
    4200 template<typename T>
    4201 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    4202 {
    4203  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    4204 }
    4205 
    4206 template<typename T>
    4207 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    4208 {
    4209  if(ptr != VMA_NULL)
    4210  {
    4211  ptr->~T();
    4212  VmaFree(hAllocator, ptr);
    4213  }
    4214 }
    4215 
    4216 template<typename T>
    4217 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    4218 {
    4219  if(ptr != VMA_NULL)
    4220  {
    4221  for(size_t i = count; i--; )
    4222  ptr[i].~T();
    4223  VmaFree(hAllocator, ptr);
    4224  }
    4225 }
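// Sketch of how these helpers pair with the vma_new_array macro defined
// earlier, mirroring SetUserData()/FreeUserDataString() later in this file
// (variable names are illustrative):
//
//   char* const str = vma_new_array(hAllocator, char, strLen + 1);
//   memcpy(str, srcStr, strLen + 1);
//   vma_delete_array(hAllocator, str, strLen + 1); // runs destructors, then VmaFree().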
    4226 
    4227 ////////////////////////////////////////////////////////////////////////////////
    4228 // VmaStringBuilder
    4229 
    4230 #if VMA_STATS_STRING_ENABLED
    4231 
    4232 class VmaStringBuilder
    4233 {
    4234 public:
    4235  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    4236  size_t GetLength() const { return m_Data.size(); }
    4237  const char* GetData() const { return m_Data.data(); }
    4238 
    4239  void Add(char ch) { m_Data.push_back(ch); }
    4240  void Add(const char* pStr);
    4241  void AddNewLine() { Add('\n'); }
    4242  void AddNumber(uint32_t num);
    4243  void AddNumber(uint64_t num);
    4244  void AddPointer(const void* ptr);
    4245 
    4246 private:
    4247  VmaVector< char, VmaStlAllocator<char> > m_Data;
    4248 };
    4249 
    4250 void VmaStringBuilder::Add(const char* pStr)
    4251 {
    4252  const size_t strLen = strlen(pStr);
    4253  if(strLen > 0)
    4254  {
    4255  const size_t oldCount = m_Data.size();
    4256  m_Data.resize(oldCount + strLen);
    4257  memcpy(m_Data.data() + oldCount, pStr, strLen);
    4258  }
    4259 }
    4260 
    4261 void VmaStringBuilder::AddNumber(uint32_t num)
    4262 {
    4263  char buf[11];
    4264  VmaUint32ToStr(buf, sizeof(buf), num);
    4265  Add(buf);
    4266 }
    4267 
    4268 void VmaStringBuilder::AddNumber(uint64_t num)
    4269 {
    4270  char buf[21];
    4271  VmaUint64ToStr(buf, sizeof(buf), num);
    4272  Add(buf);
    4273 }
    4274 
    4275 void VmaStringBuilder::AddPointer(const void* ptr)
    4276 {
    4277  char buf[21];
    4278  VmaPtrToStr(buf, sizeof(buf), ptr);
    4279  Add(buf);
    4280 }
    4281 
    4282 #endif // #if VMA_STATS_STRING_ENABLED
    4283 
    4284 ////////////////////////////////////////////////////////////////////////////////
    4285 // VmaJsonWriter
    4286 
    4287 #if VMA_STATS_STRING_ENABLED
    4288 
    4289 class VmaJsonWriter
    4290 {
    4291 public:
    4292  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    4293  ~VmaJsonWriter();
    4294 
    4295  void BeginObject(bool singleLine = false);
    4296  void EndObject();
    4297 
    4298  void BeginArray(bool singleLine = false);
    4299  void EndArray();
    4300 
    4301  void WriteString(const char* pStr);
    4302  void BeginString(const char* pStr = VMA_NULL);
    4303  void ContinueString(const char* pStr);
    4304  void ContinueString(uint32_t n);
    4305  void ContinueString(uint64_t n);
    4306  void ContinueString_Pointer(const void* ptr);
    4307  void EndString(const char* pStr = VMA_NULL);
    4308 
    4309  void WriteNumber(uint32_t n);
    4310  void WriteNumber(uint64_t n);
    4311  void WriteBool(bool b);
    4312  void WriteNull();
    4313 
    4314 private:
    4315  static const char* const INDENT;
    4316 
    4317  enum COLLECTION_TYPE
    4318  {
    4319  COLLECTION_TYPE_OBJECT,
    4320  COLLECTION_TYPE_ARRAY,
    4321  };
    4322  struct StackItem
    4323  {
    4324  COLLECTION_TYPE type;
    4325  uint32_t valueCount;
    4326  bool singleLineMode;
    4327  };
    4328 
    4329  VmaStringBuilder& m_SB;
    4330  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    4331  bool m_InsideString;
    4332 
    4333  void BeginValue(bool isString);
    4334  void WriteIndent(bool oneLess = false);
    4335 };
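// Minimal usage sketch (illustrative only). Inside an object, keys and values
// must alternate; BeginValue() below asserts that every even-indexed value
// (i.e. every key) is a string:
//
//   VmaStringBuilder sb(allocator); // given some VmaAllocator allocator
//   {
//       VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
//       json.BeginObject();
//       json.WriteString("Size"); // key
//       json.WriteNumber(256u);   // value
//       json.EndObject();
//   }
//   // sb now holds: {\n "Size": 256\n}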
    4336 
    4337 const char* const VmaJsonWriter::INDENT = " ";
    4338 
    4339 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    4340  m_SB(sb),
    4341  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    4342  m_InsideString(false)
    4343 {
    4344 }
    4345 
    4346 VmaJsonWriter::~VmaJsonWriter()
    4347 {
    4348  VMA_ASSERT(!m_InsideString);
    4349  VMA_ASSERT(m_Stack.empty());
    4350 }
    4351 
    4352 void VmaJsonWriter::BeginObject(bool singleLine)
    4353 {
    4354  VMA_ASSERT(!m_InsideString);
    4355 
    4356  BeginValue(false);
    4357  m_SB.Add('{');
    4358 
    4359  StackItem item;
    4360  item.type = COLLECTION_TYPE_OBJECT;
    4361  item.valueCount = 0;
    4362  item.singleLineMode = singleLine;
    4363  m_Stack.push_back(item);
    4364 }
    4365 
    4366 void VmaJsonWriter::EndObject()
    4367 {
    4368  VMA_ASSERT(!m_InsideString);
    4369 
    4370  WriteIndent(true);
    4371  m_SB.Add('}');
    4372 
    4373  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    4374  m_Stack.pop_back();
    4375 }
    4376 
    4377 void VmaJsonWriter::BeginArray(bool singleLine)
    4378 {
    4379  VMA_ASSERT(!m_InsideString);
    4380 
    4381  BeginValue(false);
    4382  m_SB.Add('[');
    4383 
    4384  StackItem item;
    4385  item.type = COLLECTION_TYPE_ARRAY;
    4386  item.valueCount = 0;
    4387  item.singleLineMode = singleLine;
    4388  m_Stack.push_back(item);
    4389 }
    4390 
    4391 void VmaJsonWriter::EndArray()
    4392 {
    4393  VMA_ASSERT(!m_InsideString);
    4394 
    4395  WriteIndent(true);
    4396  m_SB.Add(']');
    4397 
    4398  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    4399  m_Stack.pop_back();
    4400 }
    4401 
    4402 void VmaJsonWriter::WriteString(const char* pStr)
    4403 {
    4404  BeginString(pStr);
    4405  EndString();
    4406 }
    4407 
    4408 void VmaJsonWriter::BeginString(const char* pStr)
    4409 {
    4410  VMA_ASSERT(!m_InsideString);
    4411 
    4412  BeginValue(true);
    4413  m_SB.Add('"');
    4414  m_InsideString = true;
    4415  if(pStr != VMA_NULL && pStr[0] != '\0')
    4416  {
    4417  ContinueString(pStr);
    4418  }
    4419 }
    4420 
    4421 void VmaJsonWriter::ContinueString(const char* pStr)
    4422 {
    4423  VMA_ASSERT(m_InsideString);
    4424 
    4425  const size_t strLen = strlen(pStr);
    4426  for(size_t i = 0; i < strLen; ++i)
    4427  {
    4428  char ch = pStr[i];
    4429  if(ch == '\\')
    4430  {
    4431  m_SB.Add("\\\\");
    4432  }
    4433  else if(ch == '"')
    4434  {
    4435  m_SB.Add("\\\"");
    4436  }
    4437  else if(ch >= 32)
    4438  {
    4439  m_SB.Add(ch);
    4440  }
    4441  else switch(ch)
    4442  {
    4443  case '\b':
    4444  m_SB.Add("\\b");
    4445  break;
    4446  case '\f':
    4447  m_SB.Add("\\f");
    4448  break;
    4449  case '\n':
    4450  m_SB.Add("\\n");
    4451  break;
    4452  case '\r':
    4453  m_SB.Add("\\r");
    4454  break;
    4455  case '\t':
    4456  m_SB.Add("\\t");
    4457  break;
    4458  default:
    4459  VMA_ASSERT(0 && "Character not currently supported.");
    4460  break;
    4461  }
    4462  }
    4463 }
    4464 
    4465 void VmaJsonWriter::ContinueString(uint32_t n)
    4466 {
    4467  VMA_ASSERT(m_InsideString);
    4468  m_SB.AddNumber(n);
    4469 }
    4470 
    4471 void VmaJsonWriter::ContinueString(uint64_t n)
    4472 {
    4473  VMA_ASSERT(m_InsideString);
    4474  m_SB.AddNumber(n);
    4475 }
    4476 
    4477 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    4478 {
    4479  VMA_ASSERT(m_InsideString);
    4480  m_SB.AddPointer(ptr);
    4481 }
    4482 
    4483 void VmaJsonWriter::EndString(const char* pStr)
    4484 {
    4485  VMA_ASSERT(m_InsideString);
    4486  if(pStr != VMA_NULL && pStr[0] != '\0')
    4487  {
    4488  ContinueString(pStr);
    4489  }
    4490  m_SB.Add('"');
    4491  m_InsideString = false;
    4492 }
    4493 
    4494 void VmaJsonWriter::WriteNumber(uint32_t n)
    4495 {
    4496  VMA_ASSERT(!m_InsideString);
    4497  BeginValue(false);
    4498  m_SB.AddNumber(n);
    4499 }
    4500 
    4501 void VmaJsonWriter::WriteNumber(uint64_t n)
    4502 {
    4503  VMA_ASSERT(!m_InsideString);
    4504  BeginValue(false);
    4505  m_SB.AddNumber(n);
    4506 }
    4507 
    4508 void VmaJsonWriter::WriteBool(bool b)
    4509 {
    4510  VMA_ASSERT(!m_InsideString);
    4511  BeginValue(false);
    4512  m_SB.Add(b ? "true" : "false");
    4513 }
    4514 
    4515 void VmaJsonWriter::WriteNull()
    4516 {
    4517  VMA_ASSERT(!m_InsideString);
    4518  BeginValue(false);
    4519  m_SB.Add("null");
    4520 }
    4521 
    4522 void VmaJsonWriter::BeginValue(bool isString)
    4523 {
    4524  if(!m_Stack.empty())
    4525  {
    4526  StackItem& currItem = m_Stack.back();
    4527  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    4528  currItem.valueCount % 2 == 0)
    4529  {
    4530  VMA_ASSERT(isString);
    4531  }
    4532 
    4533  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    4534  currItem.valueCount % 2 != 0)
    4535  {
    4536  m_SB.Add(": ");
    4537  }
    4538  else if(currItem.valueCount > 0)
    4539  {
    4540  m_SB.Add(", ");
    4541  WriteIndent();
    4542  }
    4543  else
    4544  {
    4545  WriteIndent();
    4546  }
    4547  ++currItem.valueCount;
    4548  }
    4549 }
    4550 
    4551 void VmaJsonWriter::WriteIndent(bool oneLess)
    4552 {
    4553  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    4554  {
    4555  m_SB.AddNewLine();
    4556 
    4557  size_t count = m_Stack.size();
    4558  if(count > 0 && oneLess)
    4559  {
    4560  --count;
    4561  }
    4562  for(size_t i = 0; i < count; ++i)
    4563  {
    4564  m_SB.Add(INDENT);
    4565  }
    4566  }
    4567 }
    4568 
    4569 #endif // #if VMA_STATS_STRING_ENABLED
    4570 
    4571 ////////////////////////////////////////////////////////////////////////////////
    4572 
    4573 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    4574 {
    4575  if(IsUserDataString())
    4576  {
    4577  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    4578 
    4579  FreeUserDataString(hAllocator);
    4580 
    4581  if(pUserData != VMA_NULL)
    4582  {
    4583  const char* const newStrSrc = (char*)pUserData;
    4584  const size_t newStrLen = strlen(newStrSrc);
    4585  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    4586  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    4587  m_pUserData = newStrDst;
    4588  }
    4589  }
    4590  else
    4591  {
    4592  m_pUserData = pUserData;
    4593  }
    4594 }
    4595 
    4596 void VmaAllocation_T::ChangeBlockAllocation(
    4597  VmaAllocator hAllocator,
    4598  VmaDeviceMemoryBlock* block,
    4599  VkDeviceSize offset)
    4600 {
    4601  VMA_ASSERT(block != VMA_NULL);
    4602  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    4603 
    4604  // Move mapping reference counter from old block to new block.
    4605  if(block != m_BlockAllocation.m_Block)
    4606  {
    4607  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    4608  if(IsPersistentMap())
    4609  ++mapRefCount;
    4610  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    4611  block->Map(hAllocator, mapRefCount, VMA_NULL);
    4612  }
    4613 
    4614  m_BlockAllocation.m_Block = block;
    4615  m_BlockAllocation.m_Offset = offset;
    4616 }
    4617 
    4618 VkDeviceSize VmaAllocation_T::GetOffset() const
    4619 {
    4620  switch(m_Type)
    4621  {
    4622  case ALLOCATION_TYPE_BLOCK:
    4623  return m_BlockAllocation.m_Offset;
    4624  case ALLOCATION_TYPE_DEDICATED:
    4625  return 0;
    4626  default:
    4627  VMA_ASSERT(0);
    4628  return 0;
    4629  }
    4630 }
    4631 
    4632 VkDeviceMemory VmaAllocation_T::GetMemory() const
    4633 {
    4634  switch(m_Type)
    4635  {
    4636  case ALLOCATION_TYPE_BLOCK:
    4637  return m_BlockAllocation.m_Block->m_hMemory;
    4638  case ALLOCATION_TYPE_DEDICATED:
    4639  return m_DedicatedAllocation.m_hMemory;
    4640  default:
    4641  VMA_ASSERT(0);
    4642  return VK_NULL_HANDLE;
    4643  }
    4644 }
    4645 
    4646 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    4647 {
    4648  switch(m_Type)
    4649  {
    4650  case ALLOCATION_TYPE_BLOCK:
    4651  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
    4652  case ALLOCATION_TYPE_DEDICATED:
    4653  return m_DedicatedAllocation.m_MemoryTypeIndex;
    4654  default:
    4655  VMA_ASSERT(0);
    4656  return UINT32_MAX;
    4657  }
    4658 }
    4659 
    4660 void* VmaAllocation_T::GetMappedData() const
    4661 {
    4662  switch(m_Type)
    4663  {
    4664  case ALLOCATION_TYPE_BLOCK:
    4665  if(m_MapCount != 0)
    4666  {
    4667  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
    4668  VMA_ASSERT(pBlockData != VMA_NULL);
    4669  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    4670  }
    4671  else
    4672  {
    4673  return VMA_NULL;
    4674  }
    4675  break;
    4676  case ALLOCATION_TYPE_DEDICATED:
    4677  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    4678  return m_DedicatedAllocation.m_pMappedData;
    4679  default:
    4680  VMA_ASSERT(0);
    4681  return VMA_NULL;
    4682  }
    4683 }
    4684 
    4685 bool VmaAllocation_T::CanBecomeLost() const
    4686 {
    4687  switch(m_Type)
    4688  {
    4689  case ALLOCATION_TYPE_BLOCK:
    4690  return m_BlockAllocation.m_CanBecomeLost;
    4691  case ALLOCATION_TYPE_DEDICATED:
    4692  return false;
    4693  default:
    4694  VMA_ASSERT(0);
    4695  return false;
    4696  }
    4697 }
    4698 
    4699 VmaPool VmaAllocation_T::GetPool() const
    4700 {
    4701  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    4702  return m_BlockAllocation.m_hPool;
    4703 }
    4704 
    4705 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    4706 {
    4707  VMA_ASSERT(CanBecomeLost());
    4708 
    4709  /*
    4710  Warning: This is a carefully designed algorithm.
    4711  Do not modify unless you really know what you're doing :)
    4712  */
    4713  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    4714  for(;;)
    4715  {
    4716  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    4717  {
    4718  VMA_ASSERT(0);
    4719  return false;
    4720  }
    4721  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    4722  {
    4723  return false;
    4724  }
    4725  else // Last use time earlier than current time.
    4726  {
    4727  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    4728  {
    4729  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    4730  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    4731  return true;
    4732  }
    4733  }
    4734  }
    4735 }
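// Worked example of the logic above: with frameInUseCount = 2,
// lastUseFrameIndex = 10 and currentFrameIndex = 13, we have 10 + 2 = 12 < 13,
// so the compare-exchange retires the allocation to VMA_FRAME_INDEX_LOST.
// With currentFrameIndex = 12 the condition 10 + 2 >= 12 holds, meaning the
// GPU may still be using the allocation, and MakeLost() returns false.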
    4736 
    4737 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    4738 {
    4739  VMA_ASSERT(IsUserDataString());
    4740  if(m_pUserData != VMA_NULL)
    4741  {
    4742  char* const oldStr = (char*)m_pUserData;
    4743  const size_t oldStrLen = strlen(oldStr);
    4744  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    4745  m_pUserData = VMA_NULL;
    4746  }
    4747 }
    4748 
    4749 void VmaAllocation_T::BlockAllocMap()
    4750 {
    4751  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    4752 
    4753  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    4754  {
    4755  ++m_MapCount;
    4756  }
    4757  else
    4758  {
    4759  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    4760  }
    4761 }
    4762 
    4763 void VmaAllocation_T::BlockAllocUnmap()
    4764 {
    4765  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    4766 
    4767  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    4768  {
    4769  --m_MapCount;
    4770  }
    4771  else
    4772  {
    4773  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    4774  }
    4775 }
    4776 
    4777 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    4778 {
    4779  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    4780 
    4781  if(m_MapCount != 0)
    4782  {
    4783  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    4784  {
    4785  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    4786  *ppData = m_DedicatedAllocation.m_pMappedData;
    4787  ++m_MapCount;
    4788  return VK_SUCCESS;
    4789  }
    4790  else
    4791  {
    4792  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    4793  return VK_ERROR_MEMORY_MAP_FAILED;
    4794  }
    4795  }
    4796  else
    4797  {
    4798  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    4799  hAllocator->m_hDevice,
    4800  m_DedicatedAllocation.m_hMemory,
    4801  0, // offset
    4802  VK_WHOLE_SIZE,
    4803  0, // flags
    4804  ppData);
    4805  if(result == VK_SUCCESS)
    4806  {
    4807  m_DedicatedAllocation.m_pMappedData = *ppData;
    4808  m_MapCount = 1;
    4809  }
    4810  return result;
    4811  }
    4812 }
    4813 
    4814 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    4815 {
    4816  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    4817 
    4818  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    4819  {
    4820  --m_MapCount;
    4821  if(m_MapCount == 0)
    4822  {
    4823  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    4824  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    4825  hAllocator->m_hDevice,
    4826  m_DedicatedAllocation.m_hMemory);
    4827  }
    4828  }
    4829  else
    4830  {
    4831  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    4832  }
    4833 }
    4834 
    4835 #if VMA_STATS_STRING_ENABLED
    4836 
    4837 // Names correspond to values of enum VmaSuballocationType.
    4838 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    4839  "FREE",
    4840  "UNKNOWN",
    4841  "BUFFER",
    4842  "IMAGE_UNKNOWN",
    4843  "IMAGE_LINEAR",
    4844  "IMAGE_OPTIMAL",
    4845 };
    4846 
    4847 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    4848 {
    4849  json.BeginObject();
    4850 
    4851  json.WriteString("Blocks");
    4852  json.WriteNumber(stat.blockCount);
    4853 
    4854  json.WriteString("Allocations");
    4855  json.WriteNumber(stat.allocationCount);
    4856 
    4857  json.WriteString("UnusedRanges");
    4858  json.WriteNumber(stat.unusedRangeCount);
    4859 
    4860  json.WriteString("UsedBytes");
    4861  json.WriteNumber(stat.usedBytes);
    4862 
    4863  json.WriteString("UnusedBytes");
    4864  json.WriteNumber(stat.unusedBytes);
    4865 
    4866  if(stat.allocationCount > 1)
    4867  {
    4868  json.WriteString("AllocationSize");
    4869  json.BeginObject(true);
    4870  json.WriteString("Min");
    4871  json.WriteNumber(stat.allocationSizeMin);
    4872  json.WriteString("Avg");
    4873  json.WriteNumber(stat.allocationSizeAvg);
    4874  json.WriteString("Max");
    4875  json.WriteNumber(stat.allocationSizeMax);
    4876  json.EndObject();
    4877  }
    4878 
    4879  if(stat.unusedRangeCount > 1)
    4880  {
    4881  json.WriteString("UnusedRangeSize");
    4882  json.BeginObject(true);
    4883  json.WriteString("Min");
    4884  json.WriteNumber(stat.unusedRangeSizeMin);
    4885  json.WriteString("Avg");
    4886  json.WriteNumber(stat.unusedRangeSizeAvg);
    4887  json.WriteString("Max");
    4888  json.WriteNumber(stat.unusedRangeSizeMax);
    4889  json.EndObject();
    4890  }
    4891 
    4892  json.EndObject();
    4893 }
    4894 
    4895 #endif // #if VMA_STATS_STRING_ENABLED
    4896 
    4897 struct VmaSuballocationItemSizeLess
    4898 {
    4899  bool operator()(
    4900  const VmaSuballocationList::iterator lhs,
    4901  const VmaSuballocationList::iterator rhs) const
    4902  {
    4903  return lhs->size < rhs->size;
    4904  }
    4905  bool operator()(
    4906  const VmaSuballocationList::iterator lhs,
    4907  VkDeviceSize rhsSize) const
    4908  {
    4909  return lhs->size < rhsSize;
    4910  }
    4911 };
    4912 
    4912 
    4913 ////////////////////////////////////////////////////////////////////////////////
    4914 // class VmaBlockMetadata
    4915 
    4916 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    4917  m_Size(0),
    4918  m_FreeCount(0),
    4919  m_SumFreeSize(0),
    4920  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    4921  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    4922 {
    4923 }
    4924 
    4925 VmaBlockMetadata::~VmaBlockMetadata()
    4926 {
    4927 }
    4928 
    4929 void VmaBlockMetadata::Init(VkDeviceSize size)
    4930 {
    4931  m_Size = size;
    4932  m_FreeCount = 1;
    4933  m_SumFreeSize = size;
    4934 
    4935  VmaSuballocation suballoc = {};
    4936  suballoc.offset = 0;
    4937  suballoc.size = size;
    4938  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    4939  suballoc.hAllocation = VK_NULL_HANDLE;
    4940 
    4941  m_Suballocations.push_back(suballoc);
    4942  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    4943  --suballocItem;
    4944  m_FreeSuballocationsBySize.push_back(suballocItem);
    4945 }
    4946 
    4947 bool VmaBlockMetadata::Validate() const
    4948 {
    4949  if(m_Suballocations.empty())
    4950  {
    4951  return false;
    4952  }
    4953 
    4954  // Expected offset of new suballocation as calculated from previous ones.
    4955  VkDeviceSize calculatedOffset = 0;
    4956  // Expected number of free suballocations as calculated from traversing their list.
    4957  uint32_t calculatedFreeCount = 0;
    4958  // Expected sum size of free suballocations as calculated from traversing their list.
    4959  VkDeviceSize calculatedSumFreeSize = 0;
    4960  // Expected number of free suballocations that should be registered in
    4961  // m_FreeSuballocationsBySize, as calculated from traversing the list.
    4962  size_t freeSuballocationsToRegister = 0;
    4963  // True if previously visited suballocation was free.
    4964  bool prevFree = false;
    4965 
    4966  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    4967  suballocItem != m_Suballocations.cend();
    4968  ++suballocItem)
    4969  {
    4970  const VmaSuballocation& subAlloc = *suballocItem;
    4971 
    4972  // Actual offset of this suballocation doesn't match expected one.
    4973  if(subAlloc.offset != calculatedOffset)
    4974  {
    4975  return false;
    4976  }
    4977 
    4978  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    4979  // Two adjacent free suballocations are invalid. They should be merged.
    4980  if(prevFree && currFree)
    4981  {
    4982  return false;
    4983  }
    4984 
    4985  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
    4986  {
    4987  return false;
    4988  }
    4989 
    4990  if(currFree)
    4991  {
    4992  calculatedSumFreeSize += subAlloc.size;
    4993  ++calculatedFreeCount;
    4994  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    4995  {
    4996  ++freeSuballocationsToRegister;
    4997  }
    4998  }
    4999  else
    5000  {
    5001  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
    5002  {
    5003  return false;
    5004  }
    5005  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
    5006  {
    5007  return false;
    5008  }
    5009  }
    5010 
    5011  calculatedOffset += subAlloc.size;
    5012  prevFree = currFree;
    5013  }
    5014 
    5015  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    5016  // match expected one.
    5017  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    5018  {
    5019  return false;
    5020  }
    5021 
    5022  VkDeviceSize lastSize = 0;
    5023  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    5024  {
    5025  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    5026 
    5027  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    5028  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
    5029  {
    5030  return false;
    5031  }
    5032  // They must be sorted by size ascending.
    5033  if(suballocItem->size < lastSize)
    5034  {
    5035  return false;
    5036  }
    5037 
    5038  lastSize = suballocItem->size;
    5039  }
    5040 
    5041  // Check if totals match calculated values.
    5042  if(!ValidateFreeSuballocationList() ||
    5043  (calculatedOffset != m_Size) ||
    5044  (calculatedSumFreeSize != m_SumFreeSize) ||
    5045  (calculatedFreeCount != m_FreeCount))
    5046  {
    5047  return false;
    5048  }
    5049 
    5050  return true;
    5051 }
    5052 
    5053 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
    5054 {
    5055  if(!m_FreeSuballocationsBySize.empty())
    5056  {
    5057  return m_FreeSuballocationsBySize.back()->size;
    5058  }
    5059  else
    5060  {
    5061  return 0;
    5062  }
    5063 }
    5064 
    5065 bool VmaBlockMetadata::IsEmpty() const
    5066 {
    5067  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    5068 }
    5069 
    5070 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    5071 {
    5072  outInfo.blockCount = 1;
    5073 
    5074  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    5075  outInfo.allocationCount = rangeCount - m_FreeCount;
    5076  outInfo.unusedRangeCount = m_FreeCount;
    5077 
    5078  outInfo.unusedBytes = m_SumFreeSize;
    5079  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
    5080 
    5081  outInfo.allocationSizeMin = UINT64_MAX;
    5082  outInfo.allocationSizeMax = 0;
    5083  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5084  outInfo.unusedRangeSizeMax = 0;
    5085 
    5086  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    5087  suballocItem != m_Suballocations.cend();
    5088  ++suballocItem)
    5089  {
    5090  const VmaSuballocation& suballoc = *suballocItem;
    5091  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    5092  {
    5093  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    5094  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    5095  }
    5096  else
    5097  {
    5098  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    5099  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    5100  }
    5101  }
    5102 }
    5103 
    5104 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
    5105 {
    5106  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    5107 
    5108  inoutStats.size += m_Size;
    5109  inoutStats.unusedSize += m_SumFreeSize;
    5110  inoutStats.allocationCount += rangeCount - m_FreeCount;
    5111  inoutStats.unusedRangeCount += m_FreeCount;
    5112  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    5113 }
    5114 
    5115 #if VMA_STATS_STRING_ENABLED
    5116 
    5117 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
    5118 {
    5119  json.BeginObject();
    5120 
    5121  json.WriteString("TotalBytes");
    5122  json.WriteNumber(m_Size);
    5123 
    5124  json.WriteString("UnusedBytes");
    5125  json.WriteNumber(m_SumFreeSize);
    5126 
    5127  json.WriteString("Allocations");
    5128  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
    5129 
    5130  json.WriteString("UnusedRanges");
    5131  json.WriteNumber(m_FreeCount);
    5132 
    5133  json.WriteString("Suballocations");
    5134  json.BeginArray();
    5135  size_t i = 0;
    5136  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    5137  suballocItem != m_Suballocations.cend();
    5138  ++suballocItem, ++i)
    5139  {
    5140  json.BeginObject(true);
    5141 
    5142  json.WriteString("Type");
    5143  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
    5144 
    5145  json.WriteString("Size");
    5146  json.WriteNumber(suballocItem->size);
    5147 
    5148  json.WriteString("Offset");
    5149  json.WriteNumber(suballocItem->offset);
    5150 
    5151  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
    5152  {
    5153  const void* pUserData = suballocItem->hAllocation->GetUserData();
    5154  if(pUserData != VMA_NULL)
    5155  {
    5156  json.WriteString("UserData");
    5157  if(suballocItem->hAllocation->IsUserDataString())
    5158  {
    5159  json.WriteString((const char*)pUserData);
    5160  }
    5161  else
    5162  {
    5163  json.BeginString();
    5164  json.ContinueString_Pointer(pUserData);
    5165  json.EndString();
    5166  }
    5167  }
    5168  }
    5169 
    5170  json.EndObject();
    5171  }
    5172  json.EndArray();
    5173 
    5174  json.EndObject();
    5175 }
    5176 
    5177 #endif // #if VMA_STATS_STRING_ENABLED
    5178 
    5179 /*
    5180 How many suitable free suballocations to analyze before choosing best one.
    5181 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
    5182  be chosen.
    5183 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
    5184  suballocations will be analyzed and the best one will be chosen.
    5185 - Any other value is also acceptable.
    5186 */
    5187 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
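// Example, assuming VMA_BEST_FIT is enabled: with registered free sizes
// [64, 128, 512, 2048] and allocSize = 200, VmaBinaryFindFirstNotLess() in
// CreateAllocationRequest() below returns the iterator at 512, the smallest
// free range that can hold the request, and the loop advances toward 2048
// only if alignment or bufferImageGranularity checks reject 512.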
    5188 
    5189 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
    5190 {
    5191  VMA_ASSERT(IsEmpty());
    5192  pAllocationRequest->offset = 0;
    5193  pAllocationRequest->sumFreeSize = m_SumFreeSize;
    5194  pAllocationRequest->sumItemSize = 0;
    5195  pAllocationRequest->item = m_Suballocations.begin();
    5196  pAllocationRequest->itemsToMakeLostCount = 0;
    5197 }
    5198 
    5199 bool VmaBlockMetadata::CreateAllocationRequest(
    5200  uint32_t currentFrameIndex,
    5201  uint32_t frameInUseCount,
    5202  VkDeviceSize bufferImageGranularity,
    5203  VkDeviceSize allocSize,
    5204  VkDeviceSize allocAlignment,
    5205  VmaSuballocationType allocType,
    5206  bool canMakeOtherLost,
    5207  VmaAllocationRequest* pAllocationRequest)
    5208 {
    5209  VMA_ASSERT(allocSize > 0);
    5210  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    5211  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    5212  VMA_HEAVY_ASSERT(Validate());
    5213 
    5214  // There is not enough total free space in this block to fulfill the request: Early return.
    5215  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
    5216  {
    5217  return false;
    5218  }
    5219 
    5220  // Efficiently search m_FreeSuballocationsBySize, which is sorted by size ascending.
    5221  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    5222  if(freeSuballocCount > 0)
    5223  {
    5224  if(VMA_BEST_FIT)
    5225  {
    5226  // Find first free suballocation with size not less than allocSize.
    5227  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    5228  m_FreeSuballocationsBySize.data(),
    5229  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    5230  allocSize,
    5231  VmaSuballocationItemSizeLess());
    5232  size_t index = it - m_FreeSuballocationsBySize.data();
    5233  for(; index < freeSuballocCount; ++index)
    5234  {
    5235  if(CheckAllocation(
    5236  currentFrameIndex,
    5237  frameInUseCount,
    5238  bufferImageGranularity,
    5239  allocSize,
    5240  allocAlignment,
    5241  allocType,
    5242  m_FreeSuballocationsBySize[index],
    5243  false, // canMakeOtherLost
    5244  &pAllocationRequest->offset,
    5245  &pAllocationRequest->itemsToMakeLostCount,
    5246  &pAllocationRequest->sumFreeSize,
    5247  &pAllocationRequest->sumItemSize))
    5248  {
    5249  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    5250  return true;
    5251  }
    5252  }
    5253  }
    5254  else
    5255  {
    5256  // Search starting from the biggest suballocations.
    5257  for(size_t index = freeSuballocCount; index--; )
    5258  {
    5259  if(CheckAllocation(
    5260  currentFrameIndex,
    5261  frameInUseCount,
    5262  bufferImageGranularity,
    5263  allocSize,
    5264  allocAlignment,
    5265  allocType,
    5266  m_FreeSuballocationsBySize[index],
    5267  false, // canMakeOtherLost
    5268  &pAllocationRequest->offset,
    5269  &pAllocationRequest->itemsToMakeLostCount,
    5270  &pAllocationRequest->sumFreeSize,
    5271  &pAllocationRequest->sumItemSize))
    5272  {
    5273  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    5274  return true;
    5275  }
    5276  }
    5277  }
    5278  }
    5279 
    5280  if(canMakeOtherLost)
    5281  {
    5282  // Brute-force algorithm. TODO: Come up with something better.
    5283 
    5284  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    5285  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    5286 
    5287  VmaAllocationRequest tmpAllocRequest = {};
    5288  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    5289  suballocIt != m_Suballocations.end();
    5290  ++suballocIt)
    5291  {
    5292  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    5293  suballocIt->hAllocation->CanBecomeLost())
    5294  {
    5295  if(CheckAllocation(
    5296  currentFrameIndex,
    5297  frameInUseCount,
    5298  bufferImageGranularity,
    5299  allocSize,
    5300  allocAlignment,
    5301  allocType,
    5302  suballocIt,
    5303  canMakeOtherLost,
    5304  &tmpAllocRequest.offset,
    5305  &tmpAllocRequest.itemsToMakeLostCount,
    5306  &tmpAllocRequest.sumFreeSize,
    5307  &tmpAllocRequest.sumItemSize))
    5308  {
    5309  tmpAllocRequest.item = suballocIt;
    5310 
    5311  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    5312  {
    5313  *pAllocationRequest = tmpAllocRequest;
    5314  }
    5315  }
    5316  }
    5317  }
    5318 
    5319  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    5320  {
    5321  return true;
    5322  }
    5323  }
    5324 
    5325  return false;
    5326 }
    5327 
    5328 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
    5329  uint32_t currentFrameIndex,
    5330  uint32_t frameInUseCount,
    5331  VmaAllocationRequest* pAllocationRequest)
    5332 {
    5333  while(pAllocationRequest->itemsToMakeLostCount > 0)
    5334  {
    5335  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    5336  {
    5337  ++pAllocationRequest->item;
    5338  }
    5339  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    5340  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    5341  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    5342  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    5343  {
    5344  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    5345  --pAllocationRequest->itemsToMakeLostCount;
    5346  }
    5347  else
    5348  {
    5349  return false;
    5350  }
    5351  }
    5352 
    5353  VMA_HEAVY_ASSERT(Validate());
    5354  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    5355  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5356 
    5357  return true;
    5358 }
    5359 
    5360 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    5361 {
    5362  uint32_t lostAllocationCount = 0;
    5363  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    5364  it != m_Suballocations.end();
    5365  ++it)
    5366  {
    5367  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    5368  it->hAllocation->CanBecomeLost() &&
    5369  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    5370  {
    5371  it = FreeSuballocation(it);
    5372  ++lostAllocationCount;
    5373  }
    5374  }
    5375  return lostAllocationCount;
    5376 }
    5377 
    5378 void VmaBlockMetadata::Alloc(
    5379  const VmaAllocationRequest& request,
    5380  VmaSuballocationType type,
    5381  VkDeviceSize allocSize,
    5382  VmaAllocation hAllocation)
    5383 {
    5384  VMA_ASSERT(request.item != m_Suballocations.end());
    5385  VmaSuballocation& suballoc = *request.item;
    5386  // Given suballocation is a free block.
    5387  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    5388  // Given offset is inside this suballocation.
    5389  VMA_ASSERT(request.offset >= suballoc.offset);
    5390  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    5391  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    5392  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    5393 
    5394  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    5395  // it to become used.
    5396  UnregisterFreeSuballocation(request.item);
    5397 
    5398  suballoc.offset = request.offset;
    5399  suballoc.size = allocSize;
    5400  suballoc.type = type;
    5401  suballoc.hAllocation = hAllocation;
    5402 
    5403  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    5404  if(paddingEnd)
    5405  {
    5406  VmaSuballocation paddingSuballoc = {};
    5407  paddingSuballoc.offset = request.offset + allocSize;
    5408  paddingSuballoc.size = paddingEnd;
    5409  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5410  VmaSuballocationList::iterator next = request.item;
    5411  ++next;
    5412  const VmaSuballocationList::iterator paddingEndItem =
    5413  m_Suballocations.insert(next, paddingSuballoc);
    5414  RegisterFreeSuballocation(paddingEndItem);
    5415  }
    5416 
    5417  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    5418  if(paddingBegin)
    5419  {
    5420  VmaSuballocation paddingSuballoc = {};
    5421  paddingSuballoc.offset = request.offset - paddingBegin;
    5422  paddingSuballoc.size = paddingBegin;
    5423  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5424  const VmaSuballocationList::iterator paddingBeginItem =
    5425  m_Suballocations.insert(request.item, paddingSuballoc);
    5426  RegisterFreeSuballocation(paddingBeginItem);
    5427  }
    5428 
    5429  // Update totals.
    5430  m_FreeCount = m_FreeCount - 1;
    5431  if(paddingBegin > 0)
    5432  {
    5433  ++m_FreeCount;
    5434  }
    5435  if(paddingEnd > 0)
    5436  {
    5437  ++m_FreeCount;
    5438  }
    5439  m_SumFreeSize -= allocSize;
    5440 }
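// Worked example for Alloc() above: a free suballocation [offset = 0,
// size = 1024] receiving request.offset = 256 with allocSize = 512 gives
// paddingBegin = 256 and paddingEnd = 256. Both paddings become new free
// suballocations, so m_FreeCount changes by -1 (consumed range) +2 (paddings)
// = net +1, while m_SumFreeSize drops by exactly allocSize = 512.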
    5441 
    5442 void VmaBlockMetadata::Free(const VmaAllocation allocation)
    5443 {
    5444  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    5445  suballocItem != m_Suballocations.end();
    5446  ++suballocItem)
    5447  {
    5448  VmaSuballocation& suballoc = *suballocItem;
    5449  if(suballoc.hAllocation == allocation)
    5450  {
    5451  FreeSuballocation(suballocItem);
    5452  VMA_HEAVY_ASSERT(Validate());
    5453  return;
    5454  }
    5455  }
    5456  VMA_ASSERT(0 && "Not found!");
    5457 }
    5458 
    5459 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
    5460 {
    5461  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    5462  suballocItem != m_Suballocations.end();
    5463  ++suballocItem)
    5464  {
    5465  VmaSuballocation& suballoc = *suballocItem;
    5466  if(suballoc.offset == offset)
    5467  {
    5468  FreeSuballocation(suballocItem);
    5469  return;
    5470  }
    5471  }
    5472  VMA_ASSERT(0 && "Not found!");
    5473 }
    5474 
    5475 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
    5476 {
    5477  VkDeviceSize lastSize = 0;
    5478  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    5479  {
    5480  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    5481 
    5482  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    5483  {
    5484  VMA_ASSERT(0);
    5485  return false;
    5486  }
    5487  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5488  {
    5489  VMA_ASSERT(0);
    5490  return false;
    5491  }
    5492  if(it->size < lastSize)
    5493  {
    5494  VMA_ASSERT(0);
    5495  return false;
    5496  }
    5497 
    5498  lastSize = it->size;
    5499  }
    5500  return true;
    5501 }
    5502 
    5503 bool VmaBlockMetadata::CheckAllocation(
    5504  uint32_t currentFrameIndex,
    5505  uint32_t frameInUseCount,
    5506  VkDeviceSize bufferImageGranularity,
    5507  VkDeviceSize allocSize,
    5508  VkDeviceSize allocAlignment,
    5509  VmaSuballocationType allocType,
    5510  VmaSuballocationList::const_iterator suballocItem,
    5511  bool canMakeOtherLost,
    5512  VkDeviceSize* pOffset,
    5513  size_t* itemsToMakeLostCount,
    5514  VkDeviceSize* pSumFreeSize,
    5515  VkDeviceSize* pSumItemSize) const
    5516 {
    5517  VMA_ASSERT(allocSize > 0);
    5518  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    5519  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    5520  VMA_ASSERT(pOffset != VMA_NULL);
    5521 
    5522  *itemsToMakeLostCount = 0;
    5523  *pSumFreeSize = 0;
    5524  *pSumItemSize = 0;
    5525 
    5526  if(canMakeOtherLost)
    5527  {
    5528  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    5529  {
    5530  *pSumFreeSize = suballocItem->size;
    5531  }
    5532  else
    5533  {
    5534  if(suballocItem->hAllocation->CanBecomeLost() &&
    5535  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    5536  {
    5537  ++*itemsToMakeLostCount;
    5538  *pSumItemSize = suballocItem->size;
    5539  }
    5540  else
    5541  {
    5542  return false;
    5543  }
    5544  }
    5545 
    5546  // Remaining size is too small for this request: Early return.
    5547  if(m_Size - suballocItem->offset < allocSize)
    5548  {
    5549  return false;
    5550  }
    5551 
    5552  // Start from offset equal to beginning of this suballocation.
    5553  *pOffset = suballocItem->offset;
    5554 
    5555  // Apply VMA_DEBUG_MARGIN at the beginning.
    5556  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    5557  {
    5558  *pOffset += VMA_DEBUG_MARGIN;
    5559  }
    5560 
    5561  // Apply alignment.
    5562  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    5563  *pOffset = VmaAlignUp(*pOffset, alignment);
    5564 
    5565  // Check previous suballocations for BufferImageGranularity conflicts.
    5566  // Make bigger alignment if necessary.
    5567  if(bufferImageGranularity > 1)
    5568  {
    5569  bool bufferImageGranularityConflict = false;
    5570  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    5571  while(prevSuballocItem != m_Suballocations.cbegin())
    5572  {
    5573  --prevSuballocItem;
    5574  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    5575  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    5576  {
    5577  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    5578  {
    5579  bufferImageGranularityConflict = true;
    5580  break;
    5581  }
    5582  }
    5583  else
    5584  // Already on previous page.
    5585  break;
    5586  }
    5587  if(bufferImageGranularityConflict)
    5588  {
    5589  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    5590  }
    5591  }
    5592 
    5593  // Now that we have final *pOffset, check if we are past suballocItem.
    5594  // If yes, return false - this function should be called for another suballocItem as starting point.
    5595  if(*pOffset >= suballocItem->offset + suballocItem->size)
    5596  {
    5597  return false;
    5598  }
    5599 
    5600  // Calculate padding at the beginning based on current offset.
    5601  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    5602 
    5603  // Calculate required margin at the end if this is not last suballocation.
    5604  VmaSuballocationList::const_iterator next = suballocItem;
    5605  ++next;
    5606  const VkDeviceSize requiredEndMargin =
    5607  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    5608 
    5609  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    5610  // Another early return check.
    5611  if(suballocItem->offset + totalSize > m_Size)
    5612  {
    5613  return false;
    5614  }
    5615 
    5616  // Advance lastSuballocItem until desired size is reached.
    5617  // Update itemsToMakeLostCount.
    5618  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    5619  if(totalSize > suballocItem->size)
    5620  {
    5621  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    5622  while(remainingSize > 0)
    5623  {
    5624  ++lastSuballocItem;
    5625  if(lastSuballocItem == m_Suballocations.cend())
    5626  {
    5627  return false;
    5628  }
    5629  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    5630  {
    5631  *pSumFreeSize += lastSuballocItem->size;
    5632  }
    5633  else
    5634  {
    5635  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    5636  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    5637  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    5638  {
    5639  ++*itemsToMakeLostCount;
    5640  *pSumItemSize += lastSuballocItem->size;
    5641  }
    5642  else
    5643  {
    5644  return false;
    5645  }
    5646  }
    5647  remainingSize = (lastSuballocItem->size < remainingSize) ?
    5648  remainingSize - lastSuballocItem->size : 0;
    5649  }
    5650  }
    5651 
    5652  // Check next suballocations for BufferImageGranularity conflicts.
    5653  // If conflict exists, we must mark more allocations lost or fail.
    5654  if(bufferImageGranularity > 1)
    5655  {
    5656  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    5657  ++nextSuballocItem;
    5658  while(nextSuballocItem != m_Suballocations.cend())
    5659  {
    5660  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    5661  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    5662  {
    5663  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    5664  {
    5665  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    5666  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    5667  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    5668  {
    5669  ++*itemsToMakeLostCount;
    5670  }
    5671  else
    5672  {
    5673  return false;
    5674  }
    5675  }
    5676  }
    5677  else
    5678  {
    5679  // Already on next page.
    5680  break;
    5681  }
    5682  ++nextSuballocItem;
    5683  }
    5684  }
    5685  }
    5686  else
    5687  {
    5688  const VmaSuballocation& suballoc = *suballocItem;
    5689  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    5690 
    5691  *pSumFreeSize = suballoc.size;
    5692 
    5693  // Size of this suballocation is too small for this request: Early return.
    5694  if(suballoc.size < allocSize)
    5695  {
    5696  return false;
    5697  }
    5698 
    5699  // Start from offset equal to beginning of this suballocation.
    5700  *pOffset = suballoc.offset;
    5701 
    5702  // Apply VMA_DEBUG_MARGIN at the beginning.
    5703  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
    5704  {
    5705  *pOffset += VMA_DEBUG_MARGIN;
    5706  }
    5707 
    5708  // Apply alignment.
    5709  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    5710  *pOffset = VmaAlignUp(*pOffset, alignment);
    5711 
    5712  // Check previous suballocations for BufferImageGranularity conflicts.
    5713  // Make bigger alignment if necessary.
    5714  if(bufferImageGranularity > 1)
    5715  {
    5716  bool bufferImageGranularityConflict = false;
    5717  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    5718  while(prevSuballocItem != m_Suballocations.cbegin())
    5719  {
    5720  --prevSuballocItem;
    5721  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    5722  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    5723  {
    5724  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    5725  {
    5726  bufferImageGranularityConflict = true;
    5727  break;
    5728  }
    5729  }
    5730  else
    5731  // Already on previous page.
    5732  break;
    5733  }
    5734  if(bufferImageGranularityConflict)
    5735  {
    5736  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    5737  }
    5738  }
    5739 
    5740  // Calculate padding at the beginning based on current offset.
    5741  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    5742 
    5743  // Calculate required margin at the end if this is not last suballocation.
    5744  VmaSuballocationList::const_iterator next = suballocItem;
    5745  ++next;
    5746  const VkDeviceSize requiredEndMargin =
    5747  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
    5748 
    5749  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    5750  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    5751  {
    5752  return false;
    5753  }
    5754 
    5755  // Check next suballocations for BufferImageGranularity conflicts.
    5756  // If conflict exists, allocation cannot be made here.
    5757  if(bufferImageGranularity > 1)
    5758  {
    5759  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    5760  ++nextSuballocItem;
    5761  while(nextSuballocItem != m_Suballocations.cend())
    5762  {
    5763  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    5764  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    5765  {
    5766  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    5767  {
    5768  return false;
    5769  }
    5770  }
    5771  else
    5772  {
    5773  // Already on next page.
    5774  break;
    5775  }
    5776  ++nextSuballocItem;
    5777  }
    5778  }
    5779  }
    5780 
    5781  // All tests passed: Success. pOffset is already filled.
    5782  return true;
    5783 }
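    // Note on the check above: both branches compute the same feasibility test.
    // With illustrative values suballoc.offset = 100 and alignment = 64,
    // VmaAlignUp(100, 64) = 128, so paddingBegin = 28; the request fits only if
    // paddingBegin + allocSize + requiredEndMargin <= suballoc.size. The
    // canMakeOtherLost branch additionally walks forward through following
    // suballocations, counting those that may be made lost to satisfy the
    // same inequality.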
    5784 
    5785 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
    5786 {
    5787  VMA_ASSERT(item != m_Suballocations.end());
    5788  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5789 
    5790  VmaSuballocationList::iterator nextItem = item;
    5791  ++nextItem;
    5792  VMA_ASSERT(nextItem != m_Suballocations.end());
    5793  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    5794 
    5795  item->size += nextItem->size;
    5796  --m_FreeCount;
    5797  m_Suballocations.erase(nextItem);
    5798 }
    5799 
    5800 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    5801 {
    5802  // Change this suballocation to be marked as free.
    5803  VmaSuballocation& suballoc = *suballocItem;
    5804  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    5805  suballoc.hAllocation = VK_NULL_HANDLE;
    5806 
    5807  // Update totals.
    5808  ++m_FreeCount;
    5809  m_SumFreeSize += suballoc.size;
    5810 
    5811  // Merge with previous and/or next suballocation if it's also free.
    5812  bool mergeWithNext = false;
    5813  bool mergeWithPrev = false;
    5814 
    5815  VmaSuballocationList::iterator nextItem = suballocItem;
    5816  ++nextItem;
    5817  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    5818  {
    5819  mergeWithNext = true;
    5820  }
    5821 
    5822  VmaSuballocationList::iterator prevItem = suballocItem;
    5823  if(suballocItem != m_Suballocations.begin())
    5824  {
    5825  --prevItem;
    5826  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    5827  {
    5828  mergeWithPrev = true;
    5829  }
    5830  }
    5831 
    5832  if(mergeWithNext)
    5833  {
    5834  UnregisterFreeSuballocation(nextItem);
    5835  MergeFreeWithNext(suballocItem);
    5836  }
    5837 
    5838  if(mergeWithPrev)
    5839  {
    5840  UnregisterFreeSuballocation(prevItem);
    5841  MergeFreeWithNext(prevItem);
    5842  RegisterFreeSuballocation(prevItem);
    5843  return prevItem;
    5844  }
    5845  else
    5846  {
    5847  RegisterFreeSuballocation(suballocItem);
    5848  return suballocItem;
    5849  }
    5850 }
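    // FreeSuballocation coalesces adjacent free ranges eagerly, so the list never
    // contains two neighboring VMA_SUBALLOCATION_TYPE_FREE entries. When merging
    // with the previous item, MergeFreeWithNext(prevItem) erases the just-freed
    // node, so prevItem (not suballocItem) is the surviving iterator that gets
    // re-registered and returned.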
    5851 
    5852 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    5853 {
    5854  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5855  VMA_ASSERT(item->size > 0);
    5856 
    5857  // You may want to enable this validation at the beginning or at the end of
    5858  // this function, depending on what you want to check.
    5859  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5860 
    5861  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5862  {
    5863  if(m_FreeSuballocationsBySize.empty())
    5864  {
    5865  m_FreeSuballocationsBySize.push_back(item);
    5866  }
    5867  else
    5868  {
    5869  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    5870  }
    5871  }
    5872 
    5873  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5874 }
    5875 
    5876 
    5877 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    5878 {
    5879  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    5880  VMA_ASSERT(item->size > 0);
    5881 
    5882  // You may want to enable this validation at the beginning or at the end of
    5883  // this function, depending on what you want to check.
    5884  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5885 
    5886  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    5887  {
    5888  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    5889  m_FreeSuballocationsBySize.data(),
    5890  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    5891  item,
    5892  VmaSuballocationItemSizeLess());
    5893  for(size_t index = it - m_FreeSuballocationsBySize.data();
    5894  index < m_FreeSuballocationsBySize.size();
    5895  ++index)
    5896  {
    5897  if(m_FreeSuballocationsBySize[index] == item)
    5898  {
    5899  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    5900  return;
    5901  }
    5902  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    5903  }
    5904  VMA_ASSERT(0 && "Not found.");
    5905  }
    5906 
    5907  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    5908 }
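    // m_FreeSuballocationsBySize is kept sorted ascending by size and only holds
    // free ranges of at least VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER bytes.
    // Unregistering therefore binary-searches for the first entry of equal size,
    // then scans linearly through the run of equal-sized entries to find the
    // exact iterator - equal sizes are ties under VmaSuballocationItemSizeLess.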
    5909 
    5910 ////////////////////////////////////////////////////////////////////////////////
    5911 // class VmaDeviceMemoryMapping
    5912 
    5913 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
    5914  m_MapCount(0),
    5915  m_pMappedData(VMA_NULL)
    5916 {
    5917 }
    5918 
    5919 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
    5920 {
    5921  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    5922 }
    5923 
    5924 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData)
    5925 {
    5926  if(count == 0)
    5927  {
    5928  return VK_SUCCESS;
    5929  }
    5930 
    5931  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    5932  if(m_MapCount != 0)
    5933  {
    5934  m_MapCount += count;
    5935  VMA_ASSERT(m_pMappedData != VMA_NULL);
    5936  if(ppData != VMA_NULL)
    5937  {
    5938  *ppData = m_pMappedData;
    5939  }
    5940  return VK_SUCCESS;
    5941  }
    5942  else
    5943  {
    5944  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    5945  hAllocator->m_hDevice,
    5946  hMemory,
    5947  0, // offset
    5948  VK_WHOLE_SIZE,
    5949  0, // flags
    5950  &m_pMappedData);
    5951  if(result == VK_SUCCESS)
    5952  {
    5953  if(ppData != VMA_NULL)
    5954  {
    5955  *ppData = m_pMappedData;
    5956  }
    5957  m_MapCount = count;
    5958  }
    5959  return result;
    5960  }
    5961 }
    5962 
    5963 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count)
    5964 {
    5965  if(count == 0)
    5966  {
    5967  return;
    5968  }
    5969 
    5970  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    5971  if(m_MapCount >= count)
    5972  {
    5973  m_MapCount -= count;
    5974  if(m_MapCount == 0)
    5975  {
    5976  m_pMappedData = VMA_NULL;
    5977  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
    5978  }
    5979  }
    5980  else
    5981  {
    5982  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    5983  }
    5984 }
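    // Map/Unmap implement a reference count over a single persistent vkMapMemory
    // of the whole block, so many allocations can "map" the same VkDeviceMemory
    // concurrently. A sketch of the intended pairing (hypothetical caller code):
    //   mapping.Map(allocator, mem, 1, &p1);   // actually calls vkMapMemory
    //   mapping.Map(allocator, mem, 1, &p2);   // only bumps m_MapCount; p2 == p1
    //   mapping.Unmap(allocator, mem, 1);      // still mapped
    //   mapping.Unmap(allocator, mem, 1);      // m_MapCount hits 0 -> vkUnmapMemory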
    5985 
    5986 ////////////////////////////////////////////////////////////////////////////////
    5987 // class VmaDeviceMemoryBlock
    5988 
    5989 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    5990  m_MemoryTypeIndex(UINT32_MAX),
    5991  m_hMemory(VK_NULL_HANDLE),
    5992  m_Metadata(hAllocator)
    5993 {
    5994 }
    5995 
    5996 void VmaDeviceMemoryBlock::Init(
    5997  uint32_t newMemoryTypeIndex,
    5998  VkDeviceMemory newMemory,
    5999  VkDeviceSize newSize)
    6000 {
    6001  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    6002 
    6003  m_MemoryTypeIndex = newMemoryTypeIndex;
    6004  m_hMemory = newMemory;
    6005 
    6006  m_Metadata.Init(newSize);
    6007 }
    6008 
    6009 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    6010 {
    6011  // This is the most important assert in the entire library.
    6012  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    6013  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    6014 
    6015  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    6016  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
    6017  m_hMemory = VK_NULL_HANDLE;
    6018 }
    6019 
    6020 bool VmaDeviceMemoryBlock::Validate() const
    6021 {
    6022  if((m_hMemory == VK_NULL_HANDLE) ||
    6023  (m_Metadata.GetSize() == 0))
    6024  {
    6025  return false;
    6026  }
    6027 
    6028  return m_Metadata.Validate();
    6029 }
    6030 
    6031 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    6032 {
    6033  return m_Mapping.Map(hAllocator, m_hMemory, count, ppData);
    6034 }
    6035 
    6036 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    6037 {
    6038  m_Mapping.Unmap(hAllocator, m_hMemory, count);
    6039 }
    6040 
    6041 static void InitStatInfo(VmaStatInfo& outInfo)
    6042 {
    6043  memset(&outInfo, 0, sizeof(outInfo));
    6044  outInfo.allocationSizeMin = UINT64_MAX;
    6045  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6046 }
    6047 
    6048 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    6049 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    6050 {
    6051  inoutInfo.blockCount += srcInfo.blockCount;
    6052  inoutInfo.allocationCount += srcInfo.allocationCount;
    6053  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    6054  inoutInfo.usedBytes += srcInfo.usedBytes;
    6055  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    6056  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    6057  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    6058  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    6059  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    6060 }
    6061 
    6062 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    6063 {
    6064  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    6065  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    6066  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    6067  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    6068 }
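    // InitStatInfo seeds the *Min fields with UINT64_MAX so VmaAddStatInfo's
    // VMA_MIN folds work before any real sample is merged in. Averages are
    // deliberately left out of the fold and computed once at the end: for example,
    // merging two infos with usedBytes 100/300 and allocationCount 1/3 yields
    // allocationSizeAvg = VmaRoundDiv(400, 4) = 100 in the postprocess step.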
    6069 
    6070 VmaPool_T::VmaPool_T(
    6071  VmaAllocator hAllocator,
    6072  const VmaPoolCreateInfo& createInfo) :
    6073  m_BlockVector(
    6074  hAllocator,
    6075  createInfo.memoryTypeIndex,
    6076  createInfo.blockSize,
    6077  createInfo.minBlockCount,
    6078  createInfo.maxBlockCount,
    6079  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    6080  createInfo.frameInUseCount,
    6081  true) // isCustomPool
    6082 {
    6083 }
    6084 
    6085 VmaPool_T::~VmaPool_T()
    6086 {
    6087 }
    6088 
    6089 #if VMA_STATS_STRING_ENABLED
    6090 
    6091 #endif // #if VMA_STATS_STRING_ENABLED
    6092 
    6093 VmaBlockVector::VmaBlockVector(
    6094  VmaAllocator hAllocator,
    6095  uint32_t memoryTypeIndex,
    6096  VkDeviceSize preferredBlockSize,
    6097  size_t minBlockCount,
    6098  size_t maxBlockCount,
    6099  VkDeviceSize bufferImageGranularity,
    6100  uint32_t frameInUseCount,
    6101  bool isCustomPool) :
    6102  m_hAllocator(hAllocator),
    6103  m_MemoryTypeIndex(memoryTypeIndex),
    6104  m_PreferredBlockSize(preferredBlockSize),
    6105  m_MinBlockCount(minBlockCount),
    6106  m_MaxBlockCount(maxBlockCount),
    6107  m_BufferImageGranularity(bufferImageGranularity),
    6108  m_FrameInUseCount(frameInUseCount),
    6109  m_IsCustomPool(isCustomPool),
    6110  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    6111  m_HasEmptyBlock(false),
    6112  m_pDefragmentator(VMA_NULL)
    6113 {
    6114 }
    6115 
    6116 VmaBlockVector::~VmaBlockVector()
    6117 {
    6118  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    6119 
    6120  for(size_t i = m_Blocks.size(); i--; )
    6121  {
    6122  m_Blocks[i]->Destroy(m_hAllocator);
    6123  vma_delete(m_hAllocator, m_Blocks[i]);
    6124  }
    6125 }
    6126 
    6127 VkResult VmaBlockVector::CreateMinBlocks()
    6128 {
    6129  for(size_t i = 0; i < m_MinBlockCount; ++i)
    6130  {
    6131  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    6132  if(res != VK_SUCCESS)
    6133  {
    6134  return res;
    6135  }
    6136  }
    6137  return VK_SUCCESS;
    6138 }
    6139 
    6140 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    6141 {
    6142  pStats->size = 0;
    6143  pStats->unusedSize = 0;
    6144  pStats->allocationCount = 0;
    6145  pStats->unusedRangeCount = 0;
    6146  pStats->unusedRangeSizeMax = 0;
    6147 
    6148  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6149 
    6150  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    6151  {
    6152  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    6153  VMA_ASSERT(pBlock);
    6154  VMA_HEAVY_ASSERT(pBlock->Validate());
    6155  pBlock->m_Metadata.AddPoolStats(*pStats);
    6156  }
    6157 }
    6158 
    6159 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    6160 
    6161 VkResult VmaBlockVector::Allocate(
    6162  VmaPool hCurrentPool,
    6163  uint32_t currentFrameIndex,
    6164  const VkMemoryRequirements& vkMemReq,
    6165  const VmaAllocationCreateInfo& createInfo,
    6166  VmaSuballocationType suballocType,
    6167  VmaAllocation* pAllocation)
    6168 {
    6169  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    6170  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    6171 
    6172  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6173 
    6174  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    6175  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    6176  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    6177  {
    6178  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    6179  VMA_ASSERT(pCurrBlock);
    6180  VmaAllocationRequest currRequest = {};
    6181  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    6182  currentFrameIndex,
    6183  m_FrameInUseCount,
    6184  m_BufferImageGranularity,
    6185  vkMemReq.size,
    6186  vkMemReq.alignment,
    6187  suballocType,
    6188  false, // canMakeOtherLost
    6189  &currRequest))
    6190  {
    6191  // Allocate from pCurrBlock.
    6192  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    6193 
    6194  if(mapped)
    6195  {
    6196  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
    6197  if(res != VK_SUCCESS)
    6198  {
    6199  return res;
    6200  }
    6201  }
    6202 
    6203  // We no longer have an empty block.
    6204  if(pCurrBlock->m_Metadata.IsEmpty())
    6205  {
    6206  m_HasEmptyBlock = false;
    6207  }
    6208 
    6209  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    6210  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
    6211  (*pAllocation)->InitBlockAllocation(
    6212  hCurrentPool,
    6213  pCurrBlock,
    6214  currRequest.offset,
    6215  vkMemReq.alignment,
    6216  vkMemReq.size,
    6217  suballocType,
    6218  mapped,
    6219  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    6220  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
    6221  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    6222  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    6223  return VK_SUCCESS;
    6224  }
    6225  }
    6226 
    6227  const bool canCreateNewBlock =
    6228  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    6229  (m_Blocks.size() < m_MaxBlockCount);
    6230 
    6231  // 2. Try to create new block.
    6232  if(canCreateNewBlock)
    6233  {
    6234  // Calculate optimal size for new block.
    6235  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    6236  uint32_t newBlockSizeShift = 0;
    6237  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    6238 
    6239  // Allocating blocks of other sizes is allowed only in default pools.
    6240  // In custom pools block size is fixed.
    6241  if(m_IsCustomPool == false)
    6242  {
    6243  // Allocate 1/8, 1/4, 1/2 as first blocks.
    6244  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    6245  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    6246  {
    6247  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    6248  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
    6249  {
    6250  newBlockSize = smallerNewBlockSize;
    6251  ++newBlockSizeShift;
    6252  }
    6253  else
    6254  {
    6255  break;
    6256  }
    6257  }
    6258  }
    6259 
    6260  size_t newBlockIndex = 0;
    6261  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    6262  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    6263  if(m_IsCustomPool == false)
    6264  {
    6265  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    6266  {
    6267  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    6268  if(smallerNewBlockSize >= vkMemReq.size)
    6269  {
    6270  newBlockSize = smallerNewBlockSize;
    6271  ++newBlockSizeShift;
    6272  res = CreateBlock(newBlockSize, &newBlockIndex);
    6273  }
    6274  else
    6275  {
    6276  break;
    6277  }
    6278  }
    6279  }
    6280 
    6281  if(res == VK_SUCCESS)
    6282  {
    6283  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    6284  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
    6285 
    6286  if(mapped)
    6287  {
    6288  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    6289  if(res != VK_SUCCESS)
    6290  {
    6291  return res;
    6292  }
    6293  }
    6294 
    6295  // Allocate from pBlock. Because it is empty, allocRequest can be filled trivially.
    6296  VmaAllocationRequest allocRequest;
    6297  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
    6298  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    6299  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
    6300  (*pAllocation)->InitBlockAllocation(
    6301  hCurrentPool,
    6302  pBlock,
    6303  allocRequest.offset,
    6304  vkMemReq.alignment,
    6305  vkMemReq.size,
    6306  suballocType,
    6307  mapped,
    6308  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    6309  VMA_HEAVY_ASSERT(pBlock->Validate());
    6310  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    6311  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    6312  return VK_SUCCESS;
    6313  }
    6314  }
    6315 
    6316  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    6317 
    6318  // 3. Try to allocate from existing blocks with making other allocations lost.
    6319  if(canMakeOtherLost)
    6320  {
    6321  uint32_t tryIndex = 0;
    6322  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    6323  {
    6324  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    6325  VmaAllocationRequest bestRequest = {};
    6326  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    6327 
    6328  // 1. Search existing allocations.
    6329  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    6330  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    6331  {
    6332  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    6333  VMA_ASSERT(pCurrBlock);
    6334  VmaAllocationRequest currRequest = {};
    6335  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
    6336  currentFrameIndex,
    6337  m_FrameInUseCount,
    6338  m_BufferImageGranularity,
    6339  vkMemReq.size,
    6340  vkMemReq.alignment,
    6341  suballocType,
    6342  canMakeOtherLost,
    6343  &currRequest))
    6344  {
    6345  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    6346  if(pBestRequestBlock == VMA_NULL ||
    6347  currRequestCost < bestRequestCost)
    6348  {
    6349  pBestRequestBlock = pCurrBlock;
    6350  bestRequest = currRequest;
    6351  bestRequestCost = currRequestCost;
    6352 
    6353  if(bestRequestCost == 0)
    6354  {
    6355  break;
    6356  }
    6357  }
    6358  }
    6359  }
    6360 
    6361  if(pBestRequestBlock != VMA_NULL)
    6362  {
    6363  if(mapped)
    6364  {
    6365  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    6366  if(res != VK_SUCCESS)
    6367  {
    6368  return res;
    6369  }
    6370  }
    6371 
    6372  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
    6373  currentFrameIndex,
    6374  m_FrameInUseCount,
    6375  &bestRequest))
    6376  {
    6377  // We no longer have an empty block.
    6378  if(pBestRequestBlock->m_Metadata.IsEmpty())
    6379  {
    6380  m_HasEmptyBlock = false;
    6381  }
    6382  // Allocate from pBestRequestBlock.
    6383  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    6384  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
    6385  (*pAllocation)->InitBlockAllocation(
    6386  hCurrentPool,
    6387  pBestRequestBlock,
    6388  bestRequest.offset,
    6389  vkMemReq.alignment,
    6390  vkMemReq.size,
    6391  suballocType,
    6392  mapped,
    6393  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    6394  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    6395  VMA_DEBUG_LOG(" Returned from existing block after making other allocations lost");
    6396  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    6397  return VK_SUCCESS;
    6398  }
    6399  // else: Some allocations must have been touched while we are here. Next try.
    6400  }
    6401  else
    6402  {
    6403  // Could not find place in any of the blocks - break outer loop.
    6404  break;
    6405  }
    6406  }
    6407  /* Maximum number of tries exceeded - a very unlikely event when many other
    6408  threads are simultaneously touching allocations, making it impossible to make
    6409  them lost at the same time as we try to allocate. */
    6410  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    6411  {
    6412  return VK_ERROR_TOO_MANY_OBJECTS;
    6413  }
    6414  }
    6415 
    6416  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    6417 }
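    // Summary of the allocation strategy above:
    // 1. Try to place the request in an existing block without disturbing anyone.
    // 2. Otherwise create a new block; default pools allocate 1/8, 1/4, 1/2 of the
    //    preferred size as first blocks, and shrink the block size on failure.
    // 3. With CAN_MAKE_OTHER_LOST, pick the cheapest candidate (fewest bytes made
    //    lost), retrying up to VMA_ALLOCATION_TRY_COUNT times because other
    //    threads may touch the victim allocations between selection and eviction.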
    6418 
    6419 void VmaBlockVector::Free(
    6420  VmaAllocation hAllocation)
    6421 {
    6422  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    6423 
    6424  // Scope for lock.
    6425  {
    6426  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6427 
    6428  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    6429 
    6430  if(hAllocation->IsPersistentMap())
    6431  {
    6432  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory, 1);
    6433  }
    6434 
    6435  pBlock->m_Metadata.Free(hAllocation);
    6436  VMA_HEAVY_ASSERT(pBlock->Validate());
    6437 
    6438  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
    6439 
    6440  // pBlock became empty after this deallocation.
    6441  if(pBlock->m_Metadata.IsEmpty())
    6442  {
    6443  // We already have an empty block. We don't want two, so delete this one.
    6444  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    6445  {
    6446  pBlockToDelete = pBlock;
    6447  Remove(pBlock);
    6448  }
    6449  // This is now the first empty block.
    6450  else
    6451  {
    6452  m_HasEmptyBlock = true;
    6453  }
    6454  }
    6455  // pBlock didn't become empty, but we have another empty block - find and free that one.
    6456  // (This is optional, heuristics.)
    6457  else if(m_HasEmptyBlock)
    6458  {
    6459  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    6460  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    6461  {
    6462  pBlockToDelete = pLastBlock;
    6463  m_Blocks.pop_back();
    6464  m_HasEmptyBlock = false;
    6465  }
    6466  }
    6467 
    6468  IncrementallySortBlocks();
    6469  }
    6470 
    6471  // Destruction of the freed, empty block. Deferred until this point, outside of
    6472  // the mutex lock, for performance reasons.
    6473  if(pBlockToDelete != VMA_NULL)
    6474  {
    6475  VMA_DEBUG_LOG(" Deleted empty block");
    6476  pBlockToDelete->Destroy(m_hAllocator);
    6477  vma_delete(m_hAllocator, pBlockToDelete);
    6478  }
    6479 }
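    // Heuristic: the vector keeps at most one empty block alive (m_HasEmptyBlock)
    // as a cushion against alloc/free ping-pong, but never drops the block count
    // below m_MinBlockCount. A second block becoming empty is destroyed
    // immediately, outside the mutex.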
    6480 
    6481 size_t VmaBlockVector::CalcMaxBlockSize() const
    6482 {
    6483  size_t result = 0;
    6484  for(size_t i = m_Blocks.size(); i--; )
    6485  {
    6486  result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
    6487  if(result >= m_PreferredBlockSize)
    6488  {
    6489  break;
    6490  }
    6491  }
    6492  return result;
    6493 }
    6494 
    6495 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    6496 {
    6497  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    6498  {
    6499  if(m_Blocks[blockIndex] == pBlock)
    6500  {
    6501  VmaVectorRemove(m_Blocks, blockIndex);
    6502  return;
    6503  }
    6504  }
    6505  VMA_ASSERT(0);
    6506 }
    6507 
    6508 void VmaBlockVector::IncrementallySortBlocks()
    6509 {
    6510  // Bubble sort only until first swap.
    6511  for(size_t i = 1; i < m_Blocks.size(); ++i)
    6512  {
    6513  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
    6514  {
    6515  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    6516  return;
    6517  }
    6518  }
    6519 }
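    // One bubble-sort pass per call, stopping at the first swap, is enough here:
    // block order only drifts slightly between calls, so repeated invocations keep
    // m_Blocks approximately sorted by free space, ascending, without ever paying
    // a full O(n log n) sort while holding the mutex.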
    6520 
    6521 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    6522 {
    6523  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    6524  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    6525  allocInfo.allocationSize = blockSize;
    6526  VkDeviceMemory mem = VK_NULL_HANDLE;
    6527  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    6528  if(res < 0)
    6529  {
    6530  return res;
    6531  }
    6532 
    6533  // New VkDeviceMemory successfully created.
    6534 
    6535  // Create a new VmaDeviceMemoryBlock object for it.
    6536  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    6537  pBlock->Init(
    6538  m_MemoryTypeIndex,
    6539  mem,
    6540  allocInfo.allocationSize);
    6541 
    6542  m_Blocks.push_back(pBlock);
    6543  if(pNewBlockIndex != VMA_NULL)
    6544  {
    6545  *pNewBlockIndex = m_Blocks.size() - 1;
    6546  }
    6547 
    6548  return VK_SUCCESS;
    6549 }
    6550 
    6551 #if VMA_STATS_STRING_ENABLED
    6552 
    6553 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    6554 {
    6555  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6556 
    6557  json.BeginObject();
    6558 
    6559  if(m_IsCustomPool)
    6560  {
    6561  json.WriteString("MemoryTypeIndex");
    6562  json.WriteNumber(m_MemoryTypeIndex);
    6563 
    6564  json.WriteString("BlockSize");
    6565  json.WriteNumber(m_PreferredBlockSize);
    6566 
    6567  json.WriteString("BlockCount");
    6568  json.BeginObject(true);
    6569  if(m_MinBlockCount > 0)
    6570  {
    6571  json.WriteString("Min");
    6572  json.WriteNumber((uint64_t)m_MinBlockCount);
    6573  }
    6574  if(m_MaxBlockCount < SIZE_MAX)
    6575  {
    6576  json.WriteString("Max");
    6577  json.WriteNumber((uint64_t)m_MaxBlockCount);
    6578  }
    6579  json.WriteString("Cur");
    6580  json.WriteNumber((uint64_t)m_Blocks.size());
    6581  json.EndObject();
    6582 
    6583  if(m_FrameInUseCount > 0)
    6584  {
    6585  json.WriteString("FrameInUseCount");
    6586  json.WriteNumber(m_FrameInUseCount);
    6587  }
    6588  }
    6589  else
    6590  {
    6591  json.WriteString("PreferredBlockSize");
    6592  json.WriteNumber(m_PreferredBlockSize);
    6593  }
    6594 
    6595  json.WriteString("Blocks");
    6596  json.BeginArray();
    6597  for(size_t i = 0; i < m_Blocks.size(); ++i)
    6598  {
    6599  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
    6600  }
    6601  json.EndArray();
    6602 
    6603  json.EndObject();
    6604 }
    6605 
    6606 #endif // #if VMA_STATS_STRING_ENABLED
    6607 
    6608 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    6609  VmaAllocator hAllocator,
    6610  uint32_t currentFrameIndex)
    6611 {
    6612  if(m_pDefragmentator == VMA_NULL)
    6613  {
    6614  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    6615  hAllocator,
    6616  this,
    6617  currentFrameIndex);
    6618  }
    6619 
    6620  return m_pDefragmentator;
    6621 }
    6622 
    6623 VkResult VmaBlockVector::Defragment(
    6624  VmaDefragmentationStats* pDefragmentationStats,
    6625  VkDeviceSize& maxBytesToMove,
    6626  uint32_t& maxAllocationsToMove)
    6627 {
    6628  if(m_pDefragmentator == VMA_NULL)
    6629  {
    6630  return VK_SUCCESS;
    6631  }
    6632 
    6633  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6634 
    6635  // Defragment.
    6636  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
    6637 
    6638  // Accumulate statistics.
    6639  if(pDefragmentationStats != VMA_NULL)
    6640  {
    6641  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
    6642  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
    6643  pDefragmentationStats->bytesMoved += bytesMoved;
    6644  pDefragmentationStats->allocationsMoved += allocationsMoved;
    6645  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    6646  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    6647  maxBytesToMove -= bytesMoved;
    6648  maxAllocationsToMove -= allocationsMoved;
    6649  }
    6650 
    6651  // Free empty blocks.
    6652  m_HasEmptyBlock = false;
    6653  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    6654  {
    6655  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    6656  if(pBlock->m_Metadata.IsEmpty())
    6657  {
    6658  if(m_Blocks.size() > m_MinBlockCount)
    6659  {
    6660  if(pDefragmentationStats != VMA_NULL)
    6661  {
    6662  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    6663  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
    6664  }
    6665 
    6666  VmaVectorRemove(m_Blocks, blockIndex);
    6667  pBlock->Destroy(m_hAllocator);
    6668  vma_delete(m_hAllocator, pBlock);
    6669  }
    6670  else
    6671  {
    6672  m_HasEmptyBlock = true;
    6673  }
    6674  }
    6675  }
    6676 
    6677  return result;
    6678 }
    6679 
    6680 void VmaBlockVector::DestroyDefragmentator()
    6681 {
    6682  if(m_pDefragmentator != VMA_NULL)
    6683  {
    6684  vma_delete(m_hAllocator, m_pDefragmentator);
    6685  m_pDefragmentator = VMA_NULL;
    6686  }
    6687 }
    6688 
    6689 void VmaBlockVector::MakePoolAllocationsLost(
    6690  uint32_t currentFrameIndex,
    6691  size_t* pLostAllocationCount)
    6692 {
    6693  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6694  size_t lostAllocationCount = 0;
    6695  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    6696  {
    6697  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    6698  VMA_ASSERT(pBlock);
    6699  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    6700  }
    6701  if(pLostAllocationCount != VMA_NULL)
    6702  {
    6703  *pLostAllocationCount = lostAllocationCount;
    6704  }
    6705 }
    6706 
    6707 void VmaBlockVector::AddStats(VmaStats* pStats)
    6708 {
    6709  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    6710  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    6711 
    6712  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    6713 
    6714  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    6715  {
    6716  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    6717  VMA_ASSERT(pBlock);
    6718  VMA_HEAVY_ASSERT(pBlock->Validate());
    6719  VmaStatInfo allocationStatInfo;
    6720  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
    6721  VmaAddStatInfo(pStats->total, allocationStatInfo);
    6722  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    6723  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    6724  }
    6725 }
    6726 
    6727 ////////////////////////////////////////////////////////////////////////////////
    6728 // VmaDefragmentator members definition
    6729 
    6730 VmaDefragmentator::VmaDefragmentator(
    6731  VmaAllocator hAllocator,
    6732  VmaBlockVector* pBlockVector,
    6733  uint32_t currentFrameIndex) :
    6734  m_hAllocator(hAllocator),
    6735  m_pBlockVector(pBlockVector),
    6736  m_CurrentFrameIndex(currentFrameIndex),
    6737  m_BytesMoved(0),
    6738  m_AllocationsMoved(0),
    6739  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    6740  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    6741 {
    6742 }
    6743 
    6744 VmaDefragmentator::~VmaDefragmentator()
    6745 {
    6746  for(size_t i = m_Blocks.size(); i--; )
    6747  {
    6748  vma_delete(m_hAllocator, m_Blocks[i]);
    6749  }
    6750 }
    6751 
    6752 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    6753 {
    6754  AllocationInfo allocInfo;
    6755  allocInfo.m_hAllocation = hAlloc;
    6756  allocInfo.m_pChanged = pChanged;
    6757  m_Allocations.push_back(allocInfo);
    6758 }
    6759 
    6760 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    6761 {
    6762  // It has already been mapped for defragmentation.
    6763  if(m_pMappedDataForDefragmentation)
    6764  {
    6765  *ppMappedData = m_pMappedDataForDefragmentation;
    6766  return VK_SUCCESS;
    6767  }
    6768 
    6769  // It is originally mapped.
    6770  if(m_pBlock->m_Mapping.GetMappedData())
    6771  {
    6772  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
    6773  return VK_SUCCESS;
    6774  }
    6775 
    6776  // Map on first usage.
    6777  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    6778  *ppMappedData = m_pMappedDataForDefragmentation;
    6779  return res;
    6780 }
    6781 
    6782 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    6783 {
    6784  if(m_pMappedDataForDefragmentation != VMA_NULL)
    6785  {
    6786  m_pBlock->Unmap(hAllocator, 1);
    6787  }
    6788 }
    6789 
    6790 VkResult VmaDefragmentator::DefragmentRound(
    6791  VkDeviceSize maxBytesToMove,
    6792  uint32_t maxAllocationsToMove)
    6793 {
    6794  if(m_Blocks.empty())
    6795  {
    6796  return VK_SUCCESS;
    6797  }
    6798 
    6799  size_t srcBlockIndex = m_Blocks.size() - 1;
    6800  size_t srcAllocIndex = SIZE_MAX;
    6801  for(;;)
    6802  {
    6803  // 1. Find next allocation to move.
    6804  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    6805  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
    6806  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    6807  {
    6808  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    6809  {
    6810  // Finished: no more allocations to process.
    6811  if(srcBlockIndex == 0)
    6812  {
    6813  return VK_SUCCESS;
    6814  }
    6815  else
    6816  {
    6817  --srcBlockIndex;
    6818  srcAllocIndex = SIZE_MAX;
    6819  }
    6820  }
    6821  else
    6822  {
    6823  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    6824  }
    6825  }
    6826 
    6827  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    6828  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    6829 
    6830  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    6831  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    6832  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    6833  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    6834 
    6835  // 2. Try to find new place for this allocation in preceding or current block.
    6836  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    6837  {
    6838  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    6839  VmaAllocationRequest dstAllocRequest;
    6840  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
    6841  m_CurrentFrameIndex,
    6842  m_pBlockVector->GetFrameInUseCount(),
    6843  m_pBlockVector->GetBufferImageGranularity(),
    6844  size,
    6845  alignment,
    6846  suballocType,
    6847  false, // canMakeOtherLost
    6848  &dstAllocRequest) &&
    6849  MoveMakesSense(
    6850  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    6851  {
    6852  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    6853 
    6854  // Reached limit on number of allocations or bytes to move.
    6855  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    6856  (m_BytesMoved + size > maxBytesToMove))
    6857  {
    6858  return VK_INCOMPLETE;
    6859  }
    6860 
    6861  void* pDstMappedData = VMA_NULL;
    6862  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    6863  if(res != VK_SUCCESS)
    6864  {
    6865  return res;
    6866  }
    6867 
    6868  void* pSrcMappedData = VMA_NULL;
    6869  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    6870  if(res != VK_SUCCESS)
    6871  {
    6872  return res;
    6873  }
    6874 
    6875  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    6876  memcpy(
    6877  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    6878  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    6879  static_cast<size_t>(size));
    6880 
    6881  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
    6882  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
    6883 
    6884  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    6885 
    6886  if(allocInfo.m_pChanged != VMA_NULL)
    6887  {
    6888  *allocInfo.m_pChanged = VK_TRUE;
    6889  }
    6890 
    6891  ++m_AllocationsMoved;
    6892  m_BytesMoved += size;
    6893 
    6894  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    6895 
    6896  break;
    6897  }
    6898  }
    6899 
    6900  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
    6901 
    6902  if(srcAllocIndex > 0)
    6903  {
    6904  --srcAllocIndex;
    6905  }
    6906  else
    6907  {
    6908  if(srcBlockIndex > 0)
    6909  {
    6910  --srcBlockIndex;
    6911  srcAllocIndex = SIZE_MAX;
    6912  }
    6913  else
    6914  {
    6915  return VK_SUCCESS;
    6916  }
    6917  }
    6918  }
    6919 }
    6920 
    6921 VkResult VmaDefragmentator::Defragment(
    6922  VkDeviceSize maxBytesToMove,
    6923  uint32_t maxAllocationsToMove)
    6924 {
    6925  if(m_Allocations.empty())
    6926  {
    6927  return VK_SUCCESS;
    6928  }
    6929 
    6930  // Create block info for each block.
    6931  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    6932  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6933  {
    6934  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    6935  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    6936  m_Blocks.push_back(pBlockInfo);
    6937  }
    6938 
    6939  // Sort them by m_pBlock pointer value.
    6940  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    6941 
    6942  // Move allocation infos from m_Allocations to the appropriate m_Blocks[i].m_Allocations.
    6943  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    6944  {
    6945  AllocationInfo& allocInfo = m_Allocations[blockIndex];
    6946  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
    6947  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    6948  {
    6949  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
    6950  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    6951  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    6952  {
    6953  (*it)->m_Allocations.push_back(allocInfo);
    6954  }
    6955  else
    6956  {
    6957  VMA_ASSERT(0);
    6958  }
    6959  }
    6960  }
    6961  m_Allocations.clear();
    6962 
    6963  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6964  {
    6965  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    6966  pBlockInfo->CalcHasNonMovableAllocations();
    6967  pBlockInfo->SortAllocationsBySizeDescecnding();
    6968  }
    6969 
    6970  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    6971  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    6972 
    6973  // Execute defragmentation rounds (the main part).
    6974  VkResult result = VK_SUCCESS;
    6975  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    6976  {
    6977  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    6978  }
    6979 
    6980  // Unmap blocks that were mapped for defragmentation.
    6981  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    6982  {
    6983  m_Blocks[blockIndex]->Unmap(m_hAllocator);
    6984  }
    6985 
    6986  return result;
    6987 }
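    // Defragment() phases: (1) wrap every block in a BlockInfo, (2) sort by block
    // pointer so allocations can be bucketed via binary search, (3) skip
    // allocations that became lost in the meantime, (4) sort each block's
    // allocations by size descending and re-sort blocks from most "destination"
    // to most "source", (5) run up to two DefragmentRound passes within the
    // byte/allocation budget, (6) unmap anything mapped only for the copy.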
    6988 
    6989 bool VmaDefragmentator::MoveMakesSense(
    6990  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6991  size_t srcBlockIndex, VkDeviceSize srcOffset)
    6992 {
    6993  if(dstBlockIndex < srcBlockIndex)
    6994  {
    6995  return true;
    6996  }
    6997  if(dstBlockIndex > srcBlockIndex)
    6998  {
    6999  return false;
    7000  }
    7001  if(dstOffset < srcOffset)
    7002  {
    7003  return true;
    7004  }
    7005  return false;
    7006 }
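    // MoveMakesSense() imposes a strict "move down" order: data only travels to an
    // earlier block, or to a lower offset within the same block. For example, a
    // move from (block 2, offset 0) to (block 1, offset 4096) is accepted, while
    // the reverse is rejected - this guarantees each round makes monotone progress.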
    7007 
    7008 ////////////////////////////////////////////////////////////////////////////////
    7009 // VmaAllocator_T
    7010 
    7011 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    7012  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    7013  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    7014  m_hDevice(pCreateInfo->device),
    7015  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    7016  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    7017  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    7018  m_PreferredLargeHeapBlockSize(0),
    7019  m_PhysicalDevice(pCreateInfo->physicalDevice),
    7020  m_CurrentFrameIndex(0),
    7021  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
    7022 {
    7023  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    7024 
    7025  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    7026  memset(&m_MemProps, 0, sizeof(m_MemProps));
    7027  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    7028 
    7029  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    7030  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    7031 
    7032  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    7033  {
    7034  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    7035  }
    7036 
    7037  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    7038  {
    7039  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    7040  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    7041  }
    7042 
    7043  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    7044 
    7045  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    7046  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    7047 
    7048  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    7049  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    7050 
    7051  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    7052  {
    7053  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    7054  {
    7055  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    7056  if(limit != VK_WHOLE_SIZE)
    7057  {
    7058  m_HeapSizeLimit[heapIndex] = limit;
    7059  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    7060  {
    7061  m_MemProps.memoryHeaps[heapIndex].size = limit;
    7062  }
    7063  }
    7064  }
    7065  }
    7066 
    7067  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7068  {
    7069  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    7070 
    7071  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    7072  this,
    7073  memTypeIndex,
    7074  preferredBlockSize,
    7075  0,
    7076  SIZE_MAX,
    7077  GetBufferImageGranularity(),
    7078  pCreateInfo->frameInUseCount,
    7079  false); // isCustomPool
    7080  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
    7081  // because minBlockCount is 0.
    7082  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    7083  }
    7084 }
    7085 
    7086 VmaAllocator_T::~VmaAllocator_T()
    7087 {
    7088  VMA_ASSERT(m_Pools.empty());
    7089 
    7090  for(size_t i = GetMemoryTypeCount(); i--; )
    7091  {
    7092  vma_delete(this, m_pDedicatedAllocations[i]);
    7093  vma_delete(this, m_pBlockVectors[i]);
    7094  }
    7095 }
    7096 
    7097 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    7098 {
    7099 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    7100  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    7101  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    7102  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    7103  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    7104  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    7105  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    7106  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    7107  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    7108  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    7109  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    7110  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    7111  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    7112  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    7113  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    7114  if(m_UseKhrDedicatedAllocation)
    7115  {
    7116  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
    7117  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
    7118  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
    7119  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    7120  }
    7121 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    7122 
    7123 #define VMA_COPY_IF_NOT_NULL(funcName) \
    7124  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    7125 
    7126  if(pVulkanFunctions != VMA_NULL)
    7127  {
    7128  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    7129  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    7130  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    7131  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    7132  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    7133  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    7134  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    7135  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    7136  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    7137  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    7138  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    7139  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    7140  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    7141  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    7142  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    7143  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    7144  }
    7145 
    7146 #undef VMA_COPY_IF_NOT_NULL
    7147 
    7148  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    7149  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    7150  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    7151  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    7152  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    7153  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    7154  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    7155  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    7156  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    7157  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    7158  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    7159  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    7160  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    7161  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    7162  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    7163  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    7164  if(m_UseKhrDedicatedAllocation)
    7165  {
    7166  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    7167  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    7168  }
    7169 }
    7170 
    7171 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    7172 {
    7173  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    7174  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    7175  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    7176  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    7177 }
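    // Heuristic: on heaps no larger than VMA_SMALL_HEAP_MAX_SIZE (e.g. on
    // integrated GPUs) a block is 1/8 of the heap, so at most 8 full blocks fit;
    // larger heaps use the fixed m_PreferredLargeHeapBlockSize, taken from
    // VmaAllocatorCreateInfo or falling back to the library default.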
    7178 
    7179 VkResult VmaAllocator_T::AllocateMemoryOfType(
    7180  const VkMemoryRequirements& vkMemReq,
    7181  bool dedicatedAllocation,
    7182  VkBuffer dedicatedBuffer,
    7183  VkImage dedicatedImage,
    7184  const VmaAllocationCreateInfo& createInfo,
    7185  uint32_t memTypeIndex,
    7186  VmaSuballocationType suballocType,
    7187  VmaAllocation* pAllocation)
    7188 {
    7189  VMA_ASSERT(pAllocation != VMA_NULL);
    7190  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    7191 
    7192  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    7193 
    7194  // If memory type is not HOST_VISIBLE, disable MAPPED.
    7195  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    7196  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    7197  {
    7198  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    7199  }
    7200 
    7201  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    7202  VMA_ASSERT(blockVector);
    7203 
    7204  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    7205  bool preferDedicatedMemory =
    7206  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    7207  dedicatedAllocation ||
    7208  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
    7209  vkMemReq.size > preferredBlockSize / 2;
    7210 
    7211  if(preferDedicatedMemory &&
    7212  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    7213  finalCreateInfo.pool == VK_NULL_HANDLE)
    7214  {
    7215  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    7216  }
    7217 
    7218  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    7219  {
    7220  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    7221  {
    7222  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7223  }
    7224  else
    7225  {
    7226  return AllocateDedicatedMemory(
    7227  vkMemReq.size,
    7228  suballocType,
    7229  memTypeIndex,
    7230  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    7231  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    7232  finalCreateInfo.pUserData,
    7233  dedicatedBuffer,
    7234  dedicatedImage,
    7235  pAllocation);
    7236  }
    7237  }
    7238  else
    7239  {
    7240  VkResult res = blockVector->Allocate(
    7241  VK_NULL_HANDLE, // hCurrentPool
    7242  m_CurrentFrameIndex.load(),
    7243  vkMemReq,
    7244  finalCreateInfo,
    7245  suballocType,
    7246  pAllocation);
    7247  if(res == VK_SUCCESS)
    7248  {
    7249  return res;
    7250  }
    7251 
 7252  // Try dedicated memory.
    7253  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    7254  {
    7255  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7256  }
    7257  else
    7258  {
    7259  res = AllocateDedicatedMemory(
    7260  vkMemReq.size,
    7261  suballocType,
    7262  memTypeIndex,
    7263  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    7264  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    7265  finalCreateInfo.pUserData,
    7266  dedicatedBuffer,
    7267  dedicatedImage,
    7268  pAllocation);
    7269  if(res == VK_SUCCESS)
    7270  {
 7271  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
    7272  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    7273  return VK_SUCCESS;
    7274  }
    7275  else
    7276  {
    7277  // Everything failed: Return error code.
    7278  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    7279  return res;
    7280  }
    7281  }
    7282  }
    7283 }
    7284 
    7285 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    7286  VkDeviceSize size,
    7287  VmaSuballocationType suballocType,
    7288  uint32_t memTypeIndex,
    7289  bool map,
    7290  bool isUserDataString,
    7291  void* pUserData,
    7292  VkBuffer dedicatedBuffer,
    7293  VkImage dedicatedImage,
    7294  VmaAllocation* pAllocation)
    7295 {
    7296  VMA_ASSERT(pAllocation);
    7297 
    7298  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    7299  allocInfo.memoryTypeIndex = memTypeIndex;
    7300  allocInfo.allocationSize = size;
    7301 
    7302  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    7303  if(m_UseKhrDedicatedAllocation)
    7304  {
    7305  if(dedicatedBuffer != VK_NULL_HANDLE)
    7306  {
    7307  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    7308  dedicatedAllocInfo.buffer = dedicatedBuffer;
    7309  allocInfo.pNext = &dedicatedAllocInfo;
    7310  }
    7311  else if(dedicatedImage != VK_NULL_HANDLE)
    7312  {
    7313  dedicatedAllocInfo.image = dedicatedImage;
    7314  allocInfo.pNext = &dedicatedAllocInfo;
    7315  }
    7316  }
    7317 
    7318  // Allocate VkDeviceMemory.
    7319  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    7320  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    7321  if(res < 0)
    7322  {
    7323  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    7324  return res;
    7325  }
    7326 
    7327  void* pMappedData = VMA_NULL;
    7328  if(map)
    7329  {
    7330  res = (*m_VulkanFunctions.vkMapMemory)(
    7331  m_hDevice,
    7332  hMemory,
    7333  0,
    7334  VK_WHOLE_SIZE,
    7335  0,
    7336  &pMappedData);
    7337  if(res < 0)
    7338  {
    7339  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    7340  FreeVulkanMemory(memTypeIndex, size, hMemory);
    7341  return res;
    7342  }
    7343  }
    7344 
    7345  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    7346  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    7347  (*pAllocation)->SetUserData(this, pUserData);
    7348 
    7349  // Register it in m_pDedicatedAllocations.
    7350  {
    7351  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7352  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    7353  VMA_ASSERT(pDedicatedAllocations);
    7354  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    7355  }
    7356 
    7357  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
    7358 
    7359  return VK_SUCCESS;
    7360 }
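 // Note: a dedicated allocation created with map == true stays persistently
 // mapped for its whole lifetime; FreeDedicatedMemory() calls vkUnmapMemory
 // before releasing the VkDeviceMemory.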
    7361 
    7362 void VmaAllocator_T::GetBufferMemoryRequirements(
    7363  VkBuffer hBuffer,
    7364  VkMemoryRequirements& memReq,
    7365  bool& requiresDedicatedAllocation,
    7366  bool& prefersDedicatedAllocation) const
    7367 {
    7368  if(m_UseKhrDedicatedAllocation)
    7369  {
    7370  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    7371  memReqInfo.buffer = hBuffer;
    7372 
    7373  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    7374 
    7375  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    7376  memReq2.pNext = &memDedicatedReq;
    7377 
    7378  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    7379 
    7380  memReq = memReq2.memoryRequirements;
    7381  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    7382  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    7383  }
    7384  else
    7385  {
    7386  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    7387  requiresDedicatedAllocation = false;
    7388  prefersDedicatedAllocation = false;
    7389  }
    7390 }
    7391 
    7392 void VmaAllocator_T::GetImageMemoryRequirements(
    7393  VkImage hImage,
    7394  VkMemoryRequirements& memReq,
    7395  bool& requiresDedicatedAllocation,
    7396  bool& prefersDedicatedAllocation) const
    7397 {
    7398  if(m_UseKhrDedicatedAllocation)
    7399  {
    7400  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    7401  memReqInfo.image = hImage;
    7402 
    7403  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    7404 
    7405  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    7406  memReq2.pNext = &memDedicatedReq;
    7407 
    7408  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    7409 
    7410  memReq = memReq2.memoryRequirements;
    7411  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    7412  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    7413  }
    7414  else
    7415  {
    7416  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    7417  requiresDedicatedAllocation = false;
    7418  prefersDedicatedAllocation = false;
    7419  }
    7420 }
    7421 
    7422 VkResult VmaAllocator_T::AllocateMemory(
    7423  const VkMemoryRequirements& vkMemReq,
    7424  bool requiresDedicatedAllocation,
    7425  bool prefersDedicatedAllocation,
    7426  VkBuffer dedicatedBuffer,
    7427  VkImage dedicatedImage,
    7428  const VmaAllocationCreateInfo& createInfo,
    7429  VmaSuballocationType suballocType,
    7430  VmaAllocation* pAllocation)
    7431 {
    7432  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    7433  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    7434  {
    7435  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    7436  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7437  }
    7438  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
 7439  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
 7440  {
    7441  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    7442  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7443  }
    7444  if(requiresDedicatedAllocation)
    7445  {
    7446  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    7447  {
    7448  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    7449  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7450  }
    7451  if(createInfo.pool != VK_NULL_HANDLE)
    7452  {
    7453  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    7454  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7455  }
    7456  }
    7457  if((createInfo.pool != VK_NULL_HANDLE) &&
    7458  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    7459  {
    7460  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    7461  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7462  }
    7463 
    7464  if(createInfo.pool != VK_NULL_HANDLE)
    7465  {
    7466  return createInfo.pool->m_BlockVector.Allocate(
    7467  createInfo.pool,
    7468  m_CurrentFrameIndex.load(),
    7469  vkMemReq,
    7470  createInfo,
    7471  suballocType,
    7472  pAllocation);
    7473  }
    7474  else
    7475  {
 7476  // Bit mask of Vulkan memory types acceptable for this allocation.
    7477  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    7478  uint32_t memTypeIndex = UINT32_MAX;
    7479  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    7480  if(res == VK_SUCCESS)
    7481  {
    7482  res = AllocateMemoryOfType(
    7483  vkMemReq,
    7484  requiresDedicatedAllocation || prefersDedicatedAllocation,
    7485  dedicatedBuffer,
    7486  dedicatedImage,
    7487  createInfo,
    7488  memTypeIndex,
    7489  suballocType,
    7490  pAllocation);
    7491  // Succeeded on first try.
    7492  if(res == VK_SUCCESS)
    7493  {
    7494  return res;
    7495  }
    7496  // Allocation from this memory type failed. Try other compatible memory types.
    7497  else
    7498  {
    7499  for(;;)
    7500  {
    7501  // Remove old memTypeIndex from list of possibilities.
    7502  memoryTypeBits &= ~(1u << memTypeIndex);
    7503  // Find alternative memTypeIndex.
    7504  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    7505  if(res == VK_SUCCESS)
    7506  {
    7507  res = AllocateMemoryOfType(
    7508  vkMemReq,
    7509  requiresDedicatedAllocation || prefersDedicatedAllocation,
    7510  dedicatedBuffer,
    7511  dedicatedImage,
    7512  createInfo,
    7513  memTypeIndex,
    7514  suballocType,
    7515  pAllocation);
    7516  // Allocation from this alternative memory type succeeded.
    7517  if(res == VK_SUCCESS)
    7518  {
    7519  return res;
    7520  }
    7521  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    7522  }
    7523  // No other matching memory type index could be found.
    7524  else
    7525  {
    7526  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    7527  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7528  }
    7529  }
    7530  }
    7531  }
 7532  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    7533  else
    7534  return res;
    7535  }
    7536 }
    7537 
    7538 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    7539 {
    7540  VMA_ASSERT(allocation);
    7541 
    7542  if(allocation->CanBecomeLost() == false ||
    7543  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    7544  {
    7545  switch(allocation->GetType())
    7546  {
    7547  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    7548  {
    7549  VmaBlockVector* pBlockVector = VMA_NULL;
    7550  VmaPool hPool = allocation->GetPool();
    7551  if(hPool != VK_NULL_HANDLE)
    7552  {
    7553  pBlockVector = &hPool->m_BlockVector;
    7554  }
    7555  else
    7556  {
    7557  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    7558  pBlockVector = m_pBlockVectors[memTypeIndex];
    7559  }
    7560  pBlockVector->Free(allocation);
    7561  }
    7562  break;
    7563  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    7564  FreeDedicatedMemory(allocation);
    7565  break;
    7566  default:
    7567  VMA_ASSERT(0);
    7568  }
    7569  }
    7570 
    7571  allocation->SetUserData(this, VMA_NULL);
    7572  vma_delete(this, allocation);
    7573 }
    7574 
    7575 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    7576 {
    7577  // Initialize.
    7578  InitStatInfo(pStats->total);
    7579  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    7580  InitStatInfo(pStats->memoryType[i]);
    7581  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    7582  InitStatInfo(pStats->memoryHeap[i]);
    7583 
    7584  // Process default pools.
    7585  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7586  {
    7587  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    7588  VMA_ASSERT(pBlockVector);
    7589  pBlockVector->AddStats(pStats);
    7590  }
    7591 
    7592  // Process custom pools.
    7593  {
    7594  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7595  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    7596  {
    7597  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
    7598  }
    7599  }
    7600 
    7601  // Process dedicated allocations.
    7602  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    7603  {
    7604  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    7605  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    7606  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    7607  VMA_ASSERT(pDedicatedAllocVector);
    7608  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    7609  {
    7610  VmaStatInfo allocationStatInfo;
    7611  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    7612  VmaAddStatInfo(pStats->total, allocationStatInfo);
    7613  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    7614  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    7615  }
    7616  }
    7617 
    7618  // Postprocess.
    7619  VmaPostprocessCalcStatInfo(pStats->total);
    7620  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    7621  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    7622  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    7623  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    7624 }
    7625 
    7626 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    7627 
    7628 VkResult VmaAllocator_T::Defragment(
    7629  VmaAllocation* pAllocations,
    7630  size_t allocationCount,
    7631  VkBool32* pAllocationsChanged,
    7632  const VmaDefragmentationInfo* pDefragmentationInfo,
    7633  VmaDefragmentationStats* pDefragmentationStats)
    7634 {
    7635  if(pAllocationsChanged != VMA_NULL)
    7636  {
 7637  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    7638  }
    7639  if(pDefragmentationStats != VMA_NULL)
    7640  {
    7641  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    7642  }
    7643 
    7644  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    7645 
    7646  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    7647 
    7648  const size_t poolCount = m_Pools.size();
    7649 
    7650  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    7651  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    7652  {
    7653  VmaAllocation hAlloc = pAllocations[allocIndex];
    7654  VMA_ASSERT(hAlloc);
    7655  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    7656  // DedicatedAlloc cannot be defragmented.
    7657  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    7658  // Only HOST_VISIBLE memory types can be defragmented.
    7659  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
    7660  // Lost allocation cannot be defragmented.
    7661  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    7662  {
    7663  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    7664 
    7665  const VmaPool hAllocPool = hAlloc->GetPool();
 7666  // This allocation belongs to a custom pool.
    7667  if(hAllocPool != VK_NULL_HANDLE)
    7668  {
    7669  pAllocBlockVector = &hAllocPool->GetBlockVector();
    7670  }
 7671  // This allocation belongs to the general pool.
    7672  else
    7673  {
    7674  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
    7675  }
    7676 
    7677  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    7678 
    7679  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    7680  &pAllocationsChanged[allocIndex] : VMA_NULL;
    7681  pDefragmentator->AddAllocation(hAlloc, pChanged);
    7682  }
    7683  }
    7684 
    7685  VkResult result = VK_SUCCESS;
    7686 
    7687  // ======== Main processing.
    7688 
    7689  VkDeviceSize maxBytesToMove = SIZE_MAX;
    7690  uint32_t maxAllocationsToMove = UINT32_MAX;
    7691  if(pDefragmentationInfo != VMA_NULL)
    7692  {
    7693  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    7694  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    7695  }
    7696 
    7697  // Process standard memory.
    7698  for(uint32_t memTypeIndex = 0;
    7699  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    7700  ++memTypeIndex)
    7701  {
    7702  // Only HOST_VISIBLE memory types can be defragmented.
    7703  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7704  {
    7705  result = m_pBlockVectors[memTypeIndex]->Defragment(
    7706  pDefragmentationStats,
    7707  maxBytesToMove,
    7708  maxAllocationsToMove);
    7709  }
    7710  }
    7711 
    7712  // Process custom pools.
    7713  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    7714  {
    7715  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
    7716  pDefragmentationStats,
    7717  maxBytesToMove,
    7718  maxAllocationsToMove);
    7719  }
    7720 
    7721  // ======== Destroy defragmentators.
    7722 
    7723  // Process custom pools.
    7724  for(size_t poolIndex = poolCount; poolIndex--; )
    7725  {
    7726  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
    7727  }
    7728 
    7729  // Process standard memory.
    7730  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    7731  {
    7732  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    7733  {
    7734  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
    7735  }
    7736  }
    7737 
    7738  return result;
    7739 }
    7740 
    7741 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    7742 {
    7743  if(hAllocation->CanBecomeLost())
    7744  {
    7745  /*
    7746  Warning: This is a carefully designed algorithm.
    7747  Do not modify unless you really know what you're doing :)
    7748  */
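 // Sketch of why the loop below terminates: it publishes localCurrFrameIndex
 // into the allocation via compare-exchange. Assuming
 // CompareExchangeLastUseFrameIndex follows the std::atomic convention of
 // refreshing the expected value on failure, a racing update just causes one
 // more iteration, and the loop can only exit through one of its two return
 // paths: the allocation was already lost, or its last-use frame index now
 // equals the current frame.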
    7749  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    7750  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    7751  for(;;)
    7752  {
    7753  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    7754  {
    7755  pAllocationInfo->memoryType = UINT32_MAX;
    7756  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    7757  pAllocationInfo->offset = 0;
    7758  pAllocationInfo->size = hAllocation->GetSize();
    7759  pAllocationInfo->pMappedData = VMA_NULL;
    7760  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7761  return;
    7762  }
    7763  else if(localLastUseFrameIndex == localCurrFrameIndex)
    7764  {
    7765  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    7766  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    7767  pAllocationInfo->offset = hAllocation->GetOffset();
    7768  pAllocationInfo->size = hAllocation->GetSize();
    7769  pAllocationInfo->pMappedData = VMA_NULL;
    7770  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7771  return;
    7772  }
    7773  else // Last use time earlier than current time.
    7774  {
    7775  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    7776  {
    7777  localLastUseFrameIndex = localCurrFrameIndex;
    7778  }
    7779  }
    7780  }
    7781  }
    7782  else
    7783  {
    7784  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    7785  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    7786  pAllocationInfo->offset = hAllocation->GetOffset();
    7787  pAllocationInfo->size = hAllocation->GetSize();
    7788  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    7789  pAllocationInfo->pUserData = hAllocation->GetUserData();
    7790  }
    7791 }
    7792 
    7793 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
    7794 {
    7795  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    7796  if(hAllocation->CanBecomeLost())
    7797  {
    7798  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    7799  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    7800  for(;;)
    7801  {
    7802  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    7803  {
    7804  return false;
    7805  }
    7806  else if(localLastUseFrameIndex == localCurrFrameIndex)
    7807  {
    7808  return true;
    7809  }
    7810  else // Last use time earlier than current time.
    7811  {
    7812  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    7813  {
    7814  localLastUseFrameIndex = localCurrFrameIndex;
    7815  }
    7816  }
    7817  }
    7818  }
    7819  else
    7820  {
    7821  return true;
    7822  }
    7823 }
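 // Illustrative usage of the public wrapper (a sketch; allocator and alloc are
 // placeholders): for an allocation created with
 // VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, calling vmaTouchAllocation()
 // once per frame both marks it as used in the current frame and reports
 // whether it is still alive:
 //
 //     if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
 //     {
 //         // Lost: destroy dependent resources and recreate the allocation.
 //     }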
    7824 
    7825 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    7826 {
    7827  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
    7828 
    7829  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    7830 
    7831  if(newCreateInfo.maxBlockCount == 0)
    7832  {
    7833  newCreateInfo.maxBlockCount = SIZE_MAX;
    7834  }
    7835  if(newCreateInfo.blockSize == 0)
    7836  {
    7837  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    7838  }
    7839 
    7840  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
    7841 
    7842  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    7843  if(res != VK_SUCCESS)
    7844  {
    7845  vma_delete(this, *pPool);
    7846  *pPool = VMA_NULL;
    7847  return res;
    7848  }
    7849 
    7850  // Add to m_Pools.
    7851  {
    7852  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7853  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    7854  }
    7855 
    7856  return VK_SUCCESS;
    7857 }
    7858 
    7859 void VmaAllocator_T::DestroyPool(VmaPool pool)
    7860 {
    7861  // Remove from m_Pools.
    7862  {
    7863  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    7864  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    7865  VMA_ASSERT(success && "Pool not found in Allocator.");
    7866  }
    7867 
    7868  vma_delete(this, pool);
    7869 }
    7870 
    7871 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    7872 {
    7873  pool->m_BlockVector.GetPoolStats(pPoolStats);
    7874 }
    7875 
    7876 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    7877 {
    7878  m_CurrentFrameIndex.store(frameIndex);
    7879 }
    7880 
    7881 void VmaAllocator_T::MakePoolAllocationsLost(
    7882  VmaPool hPool,
    7883  size_t* pLostAllocationCount)
    7884 {
    7885  hPool->m_BlockVector.MakePoolAllocationsLost(
    7886  m_CurrentFrameIndex.load(),
    7887  pLostAllocationCount);
    7888 }
    7889 
    7890 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    7891 {
    7892  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    7893  (*pAllocation)->InitLost();
    7894 }
    7895 
    7896 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    7897 {
    7898  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    7899 
    7900  VkResult res;
    7901  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7902  {
    7903  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7904  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    7905  {
    7906  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7907  if(res == VK_SUCCESS)
    7908  {
    7909  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    7910  }
    7911  }
    7912  else
    7913  {
    7914  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    7915  }
    7916  }
    7917  else
    7918  {
    7919  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    7920  }
    7921 
    7922  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    7923  {
    7924  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    7925  }
    7926 
    7927  return res;
    7928 }
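 // Note: when a heap size limit is configured (m_HeapSizeLimit[heapIndex] !=
 // VK_WHOLE_SIZE), the budget is debited here under m_HeapSizeLimitMutex and
 // credited back in FreeVulkanMemory(), so live allocations in that heap never
 // exceed the configured limit.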
    7929 
    7930 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    7931 {
    7932  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    7933  {
    7934  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    7935  }
    7936 
    7937  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    7938 
    7939  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    7940  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    7941  {
    7942  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    7943  m_HeapSizeLimit[heapIndex] += size;
    7944  }
    7945 }
    7946 
    7947 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    7948 {
    7949  if(hAllocation->CanBecomeLost())
    7950  {
    7951  return VK_ERROR_MEMORY_MAP_FAILED;
    7952  }
    7953 
    7954  switch(hAllocation->GetType())
    7955  {
    7956  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    7957  {
    7958  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    7959  char *pBytes = VMA_NULL;
    7960  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    7961  if(res == VK_SUCCESS)
    7962  {
    7963  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    7964  hAllocation->BlockAllocMap();
    7965  }
    7966  return res;
    7967  }
    7968  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    7969  return hAllocation->DedicatedAllocMap(this, ppData);
    7970  default:
    7971  VMA_ASSERT(0);
    7972  return VK_ERROR_MEMORY_MAP_FAILED;
    7973  }
    7974 }
    7975 
    7976 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    7977 {
    7978  switch(hAllocation->GetType())
    7979  {
    7980  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    7981  {
    7982  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    7983  hAllocation->BlockAllocUnmap();
    7984  pBlock->Unmap(this, 1);
    7985  }
    7986  break;
    7987  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    7988  hAllocation->DedicatedAllocUnmap(this);
    7989  break;
    7990  default:
    7991  VMA_ASSERT(0);
    7992  }
    7993 }
    7994 
    7995 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    7996 {
    7997  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    7998 
    7999  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    8000  {
    8001  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    8002  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    8003  VMA_ASSERT(pDedicatedAllocations);
    8004  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    8005  VMA_ASSERT(success);
    8006  }
    8007 
    8008  VkDeviceMemory hMemory = allocation->GetMemory();
    8009 
    8010  if(allocation->GetMappedData() != VMA_NULL)
    8011  {
    8012  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    8013  }
    8014 
    8015  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    8016 
    8017  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    8018 }
    8019 
    8020 #if VMA_STATS_STRING_ENABLED
    8021 
    8022 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    8023 {
    8024  bool dedicatedAllocationsStarted = false;
    8025  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    8026  {
    8027  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    8028  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    8029  VMA_ASSERT(pDedicatedAllocVector);
    8030  if(pDedicatedAllocVector->empty() == false)
    8031  {
    8032  if(dedicatedAllocationsStarted == false)
    8033  {
    8034  dedicatedAllocationsStarted = true;
    8035  json.WriteString("DedicatedAllocations");
    8036  json.BeginObject();
    8037  }
    8038 
    8039  json.BeginString("Type ");
    8040  json.ContinueString(memTypeIndex);
    8041  json.EndString();
    8042 
    8043  json.BeginArray();
    8044 
    8045  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    8046  {
    8047  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    8048  json.BeginObject(true);
    8049 
    8050  json.WriteString("Type");
    8051  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
    8052 
    8053  json.WriteString("Size");
    8054  json.WriteNumber(hAlloc->GetSize());
    8055 
    8056  const void* pUserData = hAlloc->GetUserData();
    8057  if(pUserData != VMA_NULL)
    8058  {
    8059  json.WriteString("UserData");
    8060  if(hAlloc->IsUserDataString())
    8061  {
    8062  json.WriteString((const char*)pUserData);
    8063  }
    8064  else
    8065  {
    8066  json.BeginString();
    8067  json.ContinueString_Pointer(pUserData);
    8068  json.EndString();
    8069  }
    8070  }
    8071 
    8072  json.EndObject();
    8073  }
    8074 
    8075  json.EndArray();
    8076  }
    8077  }
    8078  if(dedicatedAllocationsStarted)
    8079  {
    8080  json.EndObject();
    8081  }
    8082 
    8083  {
    8084  bool allocationsStarted = false;
    8085  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    8086  {
    8087  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    8088  {
    8089  if(allocationsStarted == false)
    8090  {
    8091  allocationsStarted = true;
    8092  json.WriteString("DefaultPools");
    8093  json.BeginObject();
    8094  }
    8095 
    8096  json.BeginString("Type ");
    8097  json.ContinueString(memTypeIndex);
    8098  json.EndString();
    8099 
    8100  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    8101  }
    8102  }
    8103  if(allocationsStarted)
    8104  {
    8105  json.EndObject();
    8106  }
    8107  }
    8108 
    8109  {
    8110  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    8111  const size_t poolCount = m_Pools.size();
    8112  if(poolCount > 0)
    8113  {
    8114  json.WriteString("Pools");
    8115  json.BeginArray();
    8116  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    8117  {
    8118  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    8119  }
    8120  json.EndArray();
    8121  }
    8122  }
    8123 }
    8124 
    8125 #endif // #if VMA_STATS_STRING_ENABLED
    8126 
    8127 static VkResult AllocateMemoryForImage(
    8128  VmaAllocator allocator,
    8129  VkImage image,
    8130  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8131  VmaSuballocationType suballocType,
    8132  VmaAllocation* pAllocation)
    8133 {
    8134  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
    8135 
    8136  VkMemoryRequirements vkMemReq = {};
    8137  bool requiresDedicatedAllocation = false;
    8138  bool prefersDedicatedAllocation = false;
    8139  allocator->GetImageMemoryRequirements(image, vkMemReq,
    8140  requiresDedicatedAllocation, prefersDedicatedAllocation);
    8141 
    8142  return allocator->AllocateMemory(
    8143  vkMemReq,
    8144  requiresDedicatedAllocation,
    8145  prefersDedicatedAllocation,
    8146  VK_NULL_HANDLE, // dedicatedBuffer
    8147  image, // dedicatedImage
    8148  *pAllocationCreateInfo,
    8149  suballocType,
    8150  pAllocation);
    8151 }
    8152 
 8153 ////////////////////////////////////////////////////////////////////////////////
 8154 // Public interface
    8155 
    8156 VkResult vmaCreateAllocator(
    8157  const VmaAllocatorCreateInfo* pCreateInfo,
    8158  VmaAllocator* pAllocator)
    8159 {
    8160  VMA_ASSERT(pCreateInfo && pAllocator);
    8161  VMA_DEBUG_LOG("vmaCreateAllocator");
    8162  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    8163  return VK_SUCCESS;
    8164 }
    8165 
    8166 void vmaDestroyAllocator(
    8167  VmaAllocator allocator)
    8168 {
    8169  if(allocator != VK_NULL_HANDLE)
    8170  {
    8171  VMA_DEBUG_LOG("vmaDestroyAllocator");
    8172  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    8173  vma_delete(&allocationCallbacks, allocator);
    8174  }
    8175 }
    8176 
 8177 void vmaGetPhysicalDeviceProperties(
 8178  VmaAllocator allocator,
    8179  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    8180 {
    8181  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    8182  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    8183 }
    8184 
 8185 void vmaGetMemoryProperties(
 8186  VmaAllocator allocator,
    8187  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    8188 {
    8189  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    8190  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    8191 }
    8192 
 8193 void vmaGetMemoryTypeProperties(
 8194  VmaAllocator allocator,
    8195  uint32_t memoryTypeIndex,
    8196  VkMemoryPropertyFlags* pFlags)
    8197 {
    8198  VMA_ASSERT(allocator && pFlags);
    8199  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    8200  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    8201 }
    8202 
 8203 void vmaSetCurrentFrameIndex(
 8204  VmaAllocator allocator,
    8205  uint32_t frameIndex)
    8206 {
    8207  VMA_ASSERT(allocator);
    8208  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    8209 
    8210  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8211 
    8212  allocator->SetCurrentFrameIndex(frameIndex);
    8213 }
    8214 
    8215 void vmaCalculateStats(
    8216  VmaAllocator allocator,
    8217  VmaStats* pStats)
    8218 {
    8219  VMA_ASSERT(allocator && pStats);
    8220  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8221  allocator->CalculateStats(pStats);
    8222 }
    8223 
    8224 #if VMA_STATS_STRING_ENABLED
    8225 
    8226 void vmaBuildStatsString(
    8227  VmaAllocator allocator,
    8228  char** ppStatsString,
    8229  VkBool32 detailedMap)
    8230 {
    8231  VMA_ASSERT(allocator && ppStatsString);
    8232  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8233 
    8234  VmaStringBuilder sb(allocator);
    8235  {
    8236  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    8237  json.BeginObject();
    8238 
    8239  VmaStats stats;
    8240  allocator->CalculateStats(&stats);
    8241 
    8242  json.WriteString("Total");
    8243  VmaPrintStatInfo(json, stats.total);
    8244 
    8245  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    8246  {
    8247  json.BeginString("Heap ");
    8248  json.ContinueString(heapIndex);
    8249  json.EndString();
    8250  json.BeginObject();
    8251 
    8252  json.WriteString("Size");
    8253  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    8254 
    8255  json.WriteString("Flags");
    8256  json.BeginArray(true);
    8257  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    8258  {
    8259  json.WriteString("DEVICE_LOCAL");
    8260  }
    8261  json.EndArray();
    8262 
    8263  if(stats.memoryHeap[heapIndex].blockCount > 0)
    8264  {
    8265  json.WriteString("Stats");
    8266  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    8267  }
    8268 
    8269  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    8270  {
    8271  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    8272  {
    8273  json.BeginString("Type ");
    8274  json.ContinueString(typeIndex);
    8275  json.EndString();
    8276 
    8277  json.BeginObject();
    8278 
    8279  json.WriteString("Flags");
    8280  json.BeginArray(true);
    8281  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    8282  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    8283  {
    8284  json.WriteString("DEVICE_LOCAL");
    8285  }
    8286  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    8287  {
    8288  json.WriteString("HOST_VISIBLE");
    8289  }
    8290  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    8291  {
    8292  json.WriteString("HOST_COHERENT");
    8293  }
    8294  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    8295  {
    8296  json.WriteString("HOST_CACHED");
    8297  }
    8298  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    8299  {
    8300  json.WriteString("LAZILY_ALLOCATED");
    8301  }
    8302  json.EndArray();
    8303 
    8304  if(stats.memoryType[typeIndex].blockCount > 0)
    8305  {
    8306  json.WriteString("Stats");
    8307  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    8308  }
    8309 
    8310  json.EndObject();
    8311  }
    8312  }
    8313 
    8314  json.EndObject();
    8315  }
    8316  if(detailedMap == VK_TRUE)
    8317  {
    8318  allocator->PrintDetailedMap(json);
    8319  }
    8320 
    8321  json.EndObject();
    8322  }
    8323 
    8324  const size_t len = sb.GetLength();
    8325  char* const pChars = vma_new_array(allocator, char, len + 1);
    8326  if(len > 0)
    8327  {
    8328  memcpy(pChars, sb.GetData(), len);
    8329  }
    8330  pChars[len] = '\0';
    8331  *ppStatsString = pChars;
    8332 }
    8333 
    8334 void vmaFreeStatsString(
    8335  VmaAllocator allocator,
    8336  char* pStatsString)
    8337 {
    8338  if(pStatsString != VMA_NULL)
    8339  {
    8340  VMA_ASSERT(allocator);
    8341  size_t len = strlen(pStatsString);
    8342  vma_delete_array(allocator, pStatsString, len + 1);
    8343  }
    8344 }
    8345 
    8346 #endif // #if VMA_STATS_STRING_ENABLED
    8347 
    8348 /*
    8349 This function is not protected by any mutex because it just reads immutable data.
    8350 */
    8351 VkResult vmaFindMemoryTypeIndex(
    8352  VmaAllocator allocator,
    8353  uint32_t memoryTypeBits,
    8354  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8355  uint32_t* pMemoryTypeIndex)
    8356 {
    8357  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    8358  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    8359  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    8360 
    8361  if(pAllocationCreateInfo->memoryTypeBits != 0)
    8362  {
    8363  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    8364  }
    8365 
    8366  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    8367  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    8368 
    8369  // Convert usage to requiredFlags and preferredFlags.
    8370  switch(pAllocationCreateInfo->usage)
    8371  {
 8372  case VMA_MEMORY_USAGE_UNKNOWN:
 8373  break;
 8374  case VMA_MEMORY_USAGE_GPU_ONLY:
 8375  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
 8376  break;
 8377  case VMA_MEMORY_USAGE_CPU_ONLY:
 8378  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
 8379  break;
 8380  case VMA_MEMORY_USAGE_CPU_TO_GPU:
 8381  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
 8382  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
 8383  break;
 8384  case VMA_MEMORY_USAGE_GPU_TO_CPU:
 8385  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
 8386  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
 8387  break;
    8388  default:
    8389  break;
    8390  }
    8391 
    8392  *pMemoryTypeIndex = UINT32_MAX;
    8393  uint32_t minCost = UINT32_MAX;
    8394  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    8395  memTypeIndex < allocator->GetMemoryTypeCount();
    8396  ++memTypeIndex, memTypeBit <<= 1)
    8397  {
    8398  // This memory type is acceptable according to memoryTypeBits bitmask.
    8399  if((memTypeBit & memoryTypeBits) != 0)
    8400  {
    8401  const VkMemoryPropertyFlags currFlags =
    8402  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    8403  // This memory type contains requiredFlags.
    8404  if((requiredFlags & ~currFlags) == 0)
    8405  {
    8406  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    8407  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    8408  // Remember memory type with lowest cost.
    8409  if(currCost < minCost)
    8410  {
    8411  *pMemoryTypeIndex = memTypeIndex;
    8412  if(currCost == 0)
    8413  {
    8414  return VK_SUCCESS;
    8415  }
    8416  minCost = currCost;
    8417  }
    8418  }
    8419  }
    8420  }
    8421  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    8422 }
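 // Illustrative call (a sketch; memoryTypeBits would normally come from
 // vkGetBufferMemoryRequirements or similar): to pick a memory type for a
 // staging buffer:
 //
 //     VmaAllocationCreateInfo allocCreateInfo = {};
 //     allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
 //     uint32_t memTypeIndex;
 //     VkResult res = vmaFindMemoryTypeIndex(
 //         allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
 //
 // CPU_ONLY translates to required HOST_VISIBLE | HOST_COHERENT, and among the
 // acceptable types the one missing the fewest preferredFlags bits wins.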
    8423 
 8424 VkResult vmaFindMemoryTypeIndexForBufferInfo(
 8425  VmaAllocator allocator,
    8426  const VkBufferCreateInfo* pBufferCreateInfo,
    8427  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8428  uint32_t* pMemoryTypeIndex)
    8429 {
    8430  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    8431  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    8432  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    8433  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    8434 
    8435  const VkDevice hDev = allocator->m_hDevice;
    8436  VkBuffer hBuffer = VK_NULL_HANDLE;
    8437  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    8438  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    8439  if(res == VK_SUCCESS)
    8440  {
    8441  VkMemoryRequirements memReq = {};
    8442  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    8443  hDev, hBuffer, &memReq);
    8444 
    8445  res = vmaFindMemoryTypeIndex(
    8446  allocator,
    8447  memReq.memoryTypeBits,
    8448  pAllocationCreateInfo,
    8449  pMemoryTypeIndex);
    8450 
    8451  allocator->GetVulkanFunctions().vkDestroyBuffer(
    8452  hDev, hBuffer, allocator->GetAllocationCallbacks());
    8453  }
    8454  return res;
    8455 }
    8456 
 8457 VkResult vmaFindMemoryTypeIndexForImageInfo(
 8458  VmaAllocator allocator,
    8459  const VkImageCreateInfo* pImageCreateInfo,
    8460  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8461  uint32_t* pMemoryTypeIndex)
    8462 {
    8463  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    8464  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    8465  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    8466  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    8467 
    8468  const VkDevice hDev = allocator->m_hDevice;
    8469  VkImage hImage = VK_NULL_HANDLE;
    8470  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    8471  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    8472  if(res == VK_SUCCESS)
    8473  {
    8474  VkMemoryRequirements memReq = {};
    8475  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    8476  hDev, hImage, &memReq);
    8477 
    8478  res = vmaFindMemoryTypeIndex(
    8479  allocator,
    8480  memReq.memoryTypeBits,
    8481  pAllocationCreateInfo,
    8482  pMemoryTypeIndex);
    8483 
    8484  allocator->GetVulkanFunctions().vkDestroyImage(
    8485  hDev, hImage, allocator->GetAllocationCallbacks());
    8486  }
    8487  return res;
    8488 }
    8489 
    8490 VkResult vmaCreatePool(
    8491  VmaAllocator allocator,
    8492  const VmaPoolCreateInfo* pCreateInfo,
    8493  VmaPool* pPool)
    8494 {
    8495  VMA_ASSERT(allocator && pCreateInfo && pPool);
    8496 
    8497  VMA_DEBUG_LOG("vmaCreatePool");
    8498 
    8499  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8500 
    8501  return allocator->CreatePool(pCreateInfo, pPool);
    8502 }
    8503 
    8504 void vmaDestroyPool(
    8505  VmaAllocator allocator,
    8506  VmaPool pool)
    8507 {
    8508  VMA_ASSERT(allocator);
    8509 
    8510  if(pool == VK_NULL_HANDLE)
    8511  {
    8512  return;
    8513  }
    8514 
    8515  VMA_DEBUG_LOG("vmaDestroyPool");
    8516 
    8517  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8518 
    8519  allocator->DestroyPool(pool);
    8520 }
    8521 
    8522 void vmaGetPoolStats(
    8523  VmaAllocator allocator,
    8524  VmaPool pool,
    8525  VmaPoolStats* pPoolStats)
    8526 {
    8527  VMA_ASSERT(allocator && pool && pPoolStats);
    8528 
    8529  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8530 
    8531  allocator->GetPoolStats(pool, pPoolStats);
    8532 }
    8533 
 8534 void vmaMakePoolAllocationsLost(
 8535  VmaAllocator allocator,
    8536  VmaPool pool,
    8537  size_t* pLostAllocationCount)
    8538 {
    8539  VMA_ASSERT(allocator && pool);
    8540 
    8541  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8542 
    8543  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    8544 }
    8545 
    8546 VkResult vmaAllocateMemory(
    8547  VmaAllocator allocator,
    8548  const VkMemoryRequirements* pVkMemoryRequirements,
    8549  const VmaAllocationCreateInfo* pCreateInfo,
    8550  VmaAllocation* pAllocation,
    8551  VmaAllocationInfo* pAllocationInfo)
    8552 {
    8553  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    8554 
    8555  VMA_DEBUG_LOG("vmaAllocateMemory");
    8556 
    8557  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8558 
    8559  VkResult result = allocator->AllocateMemory(
    8560  *pVkMemoryRequirements,
    8561  false, // requiresDedicatedAllocation
    8562  false, // prefersDedicatedAllocation
    8563  VK_NULL_HANDLE, // dedicatedBuffer
    8564  VK_NULL_HANDLE, // dedicatedImage
    8565  *pCreateInfo,
    8566  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    8567  pAllocation);
    8568 
    8569  if(pAllocationInfo && result == VK_SUCCESS)
    8570  {
    8571  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8572  }
    8573 
    8574  return result;
    8575 }
    8576 
 8577 VkResult vmaAllocateMemoryForBuffer(
 8578  VmaAllocator allocator,
    8579  VkBuffer buffer,
    8580  const VmaAllocationCreateInfo* pCreateInfo,
    8581  VmaAllocation* pAllocation,
    8582  VmaAllocationInfo* pAllocationInfo)
    8583 {
    8584  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    8585 
    8586  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    8587 
    8588  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8589 
    8590  VkMemoryRequirements vkMemReq = {};
    8591  bool requiresDedicatedAllocation = false;
    8592  bool prefersDedicatedAllocation = false;
    8593  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    8594  requiresDedicatedAllocation,
    8595  prefersDedicatedAllocation);
    8596 
    8597  VkResult result = allocator->AllocateMemory(
    8598  vkMemReq,
    8599  requiresDedicatedAllocation,
    8600  prefersDedicatedAllocation,
    8601  buffer, // dedicatedBuffer
    8602  VK_NULL_HANDLE, // dedicatedImage
    8603  *pCreateInfo,
    8604  VMA_SUBALLOCATION_TYPE_BUFFER,
    8605  pAllocation);
    8606 
    8607  if(pAllocationInfo && result == VK_SUCCESS)
    8608  {
    8609  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8610  }
    8611 
    8612  return result;
    8613 }
    8614 
    8615 VkResult vmaAllocateMemoryForImage(
    8616  VmaAllocator allocator,
    8617  VkImage image,
    8618  const VmaAllocationCreateInfo* pCreateInfo,
    8619  VmaAllocation* pAllocation,
    8620  VmaAllocationInfo* pAllocationInfo)
    8621 {
    8622  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    8623 
    8624  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    8625 
    8626  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8627 
    8628  VkResult result = AllocateMemoryForImage(
    8629  allocator,
    8630  image,
    8631  pCreateInfo,
    8632  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    8633  pAllocation);
    8634 
    8635  if(pAllocationInfo && result == VK_SUCCESS)
    8636  {
    8637  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8638  }
    8639 
    8640  return result;
    8641 }
    8642 
    8643 void vmaFreeMemory(
    8644  VmaAllocator allocator,
    8645  VmaAllocation allocation)
    8646 {
    8647  VMA_ASSERT(allocator && allocation);
    8648 
    8649  VMA_DEBUG_LOG("vmaFreeMemory");
    8650 
    8651  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8652 
    8653  allocator->FreeMemory(allocation);
    8654 }
    8655 
 8656 void vmaGetAllocationInfo(
 8657  VmaAllocator allocator,
    8658  VmaAllocation allocation,
    8659  VmaAllocationInfo* pAllocationInfo)
    8660 {
    8661  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    8662 
    8663  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8664 
    8665  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    8666 }
    8667 
    8668 VkBool32 vmaTouchAllocation(
    8669  VmaAllocator allocator,
    8670  VmaAllocation allocation)
    8671 {
    8672  VMA_ASSERT(allocator && allocation);
    8673 
    8674  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8675 
    8676  return allocator->TouchAllocation(allocation);
    8677 }
    8678 
 8679 void vmaSetAllocationUserData(
 8680  VmaAllocator allocator,
    8681  VmaAllocation allocation,
    8682  void* pUserData)
    8683 {
    8684  VMA_ASSERT(allocator && allocation);
    8685 
    8686  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8687 
    8688  allocation->SetUserData(allocator, pUserData);
    8689 }
    8690 
 8691 void vmaCreateLostAllocation(
 8692  VmaAllocator allocator,
    8693  VmaAllocation* pAllocation)
    8694 {
    8695  VMA_ASSERT(allocator && pAllocation);
    8696 
    8697  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    8698 
    8699  allocator->CreateLostAllocation(pAllocation);
    8700 }
    8701 
    8702 VkResult vmaMapMemory(
    8703  VmaAllocator allocator,
    8704  VmaAllocation allocation,
    8705  void** ppData)
    8706 {
    8707  VMA_ASSERT(allocator && allocation && ppData);
    8708 
    8709  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8710 
    8711  return allocator->Map(allocation, ppData);
    8712 }
    8713 
    8714 void vmaUnmapMemory(
    8715  VmaAllocator allocator,
    8716  VmaAllocation allocation)
    8717 {
    8718  VMA_ASSERT(allocator && allocation);
    8719 
    8720  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8721 
    8722  allocator->Unmap(allocation);
    8723 }
    8724 
    8725 VkResult vmaDefragment(
    8726  VmaAllocator allocator,
    8727  VmaAllocation* pAllocations,
    8728  size_t allocationCount,
    8729  VkBool32* pAllocationsChanged,
    8730  const VmaDefragmentationInfo *pDefragmentationInfo,
    8731  VmaDefragmentationStats* pDefragmentationStats)
    8732 {
    8733  VMA_ASSERT(allocator && pAllocations);
    8734 
    8735  VMA_DEBUG_LOG("vmaDefragment");
    8736 
    8737  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8738 
    8739  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    8740 }
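 // Note: entries of pAllocationsChanged set to VK_TRUE refer to allocations
 // that were moved to a different VkDeviceMemory and/or offset, so any buffers
 // or images bound to them must be recreated and bound again by the caller.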
    8741 
    8742 VkResult vmaCreateBuffer(
    8743  VmaAllocator allocator,
    8744  const VkBufferCreateInfo* pBufferCreateInfo,
    8745  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8746  VkBuffer* pBuffer,
    8747  VmaAllocation* pAllocation,
    8748  VmaAllocationInfo* pAllocationInfo)
    8749 {
    8750  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    8751 
    8752  VMA_DEBUG_LOG("vmaCreateBuffer");
    8753 
    8754  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8755 
    8756  *pBuffer = VK_NULL_HANDLE;
    8757  *pAllocation = VK_NULL_HANDLE;
    8758 
    8759  // 1. Create VkBuffer.
    8760  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    8761  allocator->m_hDevice,
    8762  pBufferCreateInfo,
    8763  allocator->GetAllocationCallbacks(),
    8764  pBuffer);
    8765  if(res >= 0)
    8766  {
    8767  // 2. vkGetBufferMemoryRequirements.
    8768  VkMemoryRequirements vkMemReq = {};
    8769  bool requiresDedicatedAllocation = false;
    8770  bool prefersDedicatedAllocation = false;
    8771  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    8772  requiresDedicatedAllocation, prefersDedicatedAllocation);
    8773 
    8774  // Make sure alignment requirements for specific buffer usages reported
    8775  // in Physical Device Properties are included in alignment reported by memory requirements.
    8776  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    8777  {
    8778  VMA_ASSERT(vkMemReq.alignment %
    8779  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    8780  }
    8781  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    8782  {
    8783  VMA_ASSERT(vkMemReq.alignment %
    8784  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    8785  }
    8786  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    8787  {
    8788  VMA_ASSERT(vkMemReq.alignment %
    8789  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    8790  }
    8791 
    8792  // 3. Allocate memory using allocator.
    8793  res = allocator->AllocateMemory(
    8794  vkMemReq,
    8795  requiresDedicatedAllocation,
    8796  prefersDedicatedAllocation,
    8797  *pBuffer, // dedicatedBuffer
    8798  VK_NULL_HANDLE, // dedicatedImage
    8799  *pAllocationCreateInfo,
    8800  VMA_SUBALLOCATION_TYPE_BUFFER,
    8801  pAllocation);
    8802  if(res >= 0)
    8803  {
 8804  // 4. Bind buffer with memory.
    8805  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
    8806  allocator->m_hDevice,
    8807  *pBuffer,
    8808  (*pAllocation)->GetMemory(),
    8809  (*pAllocation)->GetOffset());
    8810  if(res >= 0)
    8811  {
    8812  // All steps succeeded.
    8813  if(pAllocationInfo != VMA_NULL)
    8814  {
    8815  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8816  }
    8817  return VK_SUCCESS;
    8818  }
    8819  allocator->FreeMemory(*pAllocation);
    8820  *pAllocation = VK_NULL_HANDLE;
    8821  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    8822  *pBuffer = VK_NULL_HANDLE;
    8823  return res;
    8824  }
    8825  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    8826  *pBuffer = VK_NULL_HANDLE;
    8827  return res;
    8828  }
    8829  return res;
    8830 }
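 // Illustrative call (a sketch; size and usage flags are placeholders):
 //
 //     VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
 //     bufCreateInfo.size = 65536;
 //     bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
 //         VK_BUFFER_USAGE_TRANSFER_DST_BIT;
 //     VmaAllocationCreateInfo allocCreateInfo = {};
 //     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
 //     VkBuffer buf;
 //     VmaAllocation alloc;
 //     VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo,
 //         &allocCreateInfo, &buf, &alloc, nullptr);
 //
 // On failure the function cleans up after itself: *pBuffer and *pAllocation
 // are left as VK_NULL_HANDLE unless VK_SUCCESS is returned.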
    8831 
    8832 void vmaDestroyBuffer(
    8833  VmaAllocator allocator,
    8834  VkBuffer buffer,
    8835  VmaAllocation allocation)
    8836 {
    8837  if(buffer != VK_NULL_HANDLE)
    8838  {
    8839  VMA_ASSERT(allocator);
    8840 
    8841  VMA_DEBUG_LOG("vmaDestroyBuffer");
    8842 
    8843  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8844 
    8845  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    8846 
    8847  allocator->FreeMemory(allocation);
    8848  }
    8849 }
    8850 
    8851 VkResult vmaCreateImage(
    8852  VmaAllocator allocator,
    8853  const VkImageCreateInfo* pImageCreateInfo,
    8854  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    8855  VkImage* pImage,
    8856  VmaAllocation* pAllocation,
    8857  VmaAllocationInfo* pAllocationInfo)
    8858 {
    8859  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    8860 
    8861  VMA_DEBUG_LOG("vmaCreateImage");
    8862 
    8863  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8864 
    8865  *pImage = VK_NULL_HANDLE;
    8866  *pAllocation = VK_NULL_HANDLE;
    8867 
    8868  // 1. Create VkImage.
    8869  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    8870  allocator->m_hDevice,
    8871  pImageCreateInfo,
    8872  allocator->GetAllocationCallbacks(),
    8873  pImage);
    8874  if(res >= 0)
    8875  {
    8876  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    8877  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    8878  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    8879 
    8880  // 2. Allocate memory using allocator.
    8881  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
    8882  if(res >= 0)
    8883  {
    8884  // 3. Bind image with memory.
    8885  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
    8886  allocator->m_hDevice,
    8887  *pImage,
    8888  (*pAllocation)->GetMemory(),
    8889  (*pAllocation)->GetOffset());
    8890  if(res >= 0)
    8891  {
    8892  // All steps succeeded.
    8893  if(pAllocationInfo != VMA_NULL)
    8894  {
    8895  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    8896  }
    8897  return VK_SUCCESS;
    8898  }
    8899  allocator->FreeMemory(*pAllocation);
    8900  *pAllocation = VK_NULL_HANDLE;
    8901  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    8902  *pImage = VK_NULL_HANDLE;
    8903  return res;
    8904  }
    8905  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    8906  *pImage = VK_NULL_HANDLE;
    8907  return res;
    8908  }
    8909  return res;
    8910 }
    8911 
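vmaCreateImage follows the same three-step pattern, with the suballocation type derived from the image tiling. A hedged call-site sketch under the same assumptions as the buffer example above (an existing allocator; arbitrary image parameters):

VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.extent = { 1024, 1024, 1 };
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // selects VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL above
imgCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // device-local memory

VkImage image = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
// Passing null for pAllocationInfo is fine; that out-parameter is optional.
if(vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &image, &alloc, nullptr) == VK_SUCCESS)
{
    // ... use the image ...
    vmaDestroyImage(allocator, image, alloc);
}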
    8912 void vmaDestroyImage(
    8913  VmaAllocator allocator,
    8914  VkImage image,
    8915  VmaAllocation allocation)
    8916 {
 8917  if(image != VK_NULL_HANDLE || allocation != VK_NULL_HANDLE) // proceed if either handle is non-null so the allocation cannot leak; vkDestroyImage is a valid no-op on VK_NULL_HANDLE
    8918  {
    8919  VMA_ASSERT(allocator);
    8920 
    8921  VMA_DEBUG_LOG("vmaDestroyImage");
    8922 
    8923  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    8924 
    8925  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    8926 
 8927  if(allocation != VK_NULL_HANDLE) allocator->FreeMemory(allocation); // skip when only the image was provided
    8928  }
    8929 }
    8930 
    8931 #endif // #ifdef VMA_IMPLEMENTATION
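This #endif closes the implementation section: the function bodies above are compiled only in the single translation unit that defines VMA_IMPLEMENTATION before including the header; every other file sees declarations only. The canonical single-header setup:

// In exactly one .cpp file, to instantiate the implementation:
#define VMA_IMPLEMENTATION
#include "vk_mem_alloc.h"

// In all other files, a plain include provides the declarations:
#include "vk_mem_alloc.h"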
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:896
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1150
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:921
    @@ -146,7 +146,6 @@ $(function() {
    Definition: vk_mem_alloc.h:1194
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1049
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:854
-bool vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
-TODO finish documentation...
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:1340
    Definition: vk_mem_alloc.h:1133
    struct VmaPoolStats VmaPoolStats
    Describes parameter of existing VmaPool.
    @@ -207,6 +206,7 @@ $(function() {
    VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:1308
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1049
+VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
+TODO finish documentation...
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1056
    void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
    Destroys Vulkan buffer and frees allocated memory.
diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h
index 9c4d9a6..86f153f 100644
--- a/src/vk_mem_alloc.h
+++ b/src/vk_mem_alloc.h
@@ -1534,7 +1534,7 @@ void vmaGetAllocationInfo(
 
 /** \brief TODO finish documentation...
 */
-bool vmaTouchAllocation(
+VkBool32 vmaTouchAllocation(
     VmaAllocator allocator,
     VmaAllocation allocation);
 
@@ -8665,7 +8665,7 @@ void vmaGetAllocationInfo(
     allocator->GetAllocationInfo(allocation, pAllocationInfo);
 }
 
-bool vmaTouchAllocation(
+VkBool32 vmaTouchAllocation(
     VmaAllocator allocator,
     VmaAllocation allocation)
 {
-bool vmaTouchAllocation (VmaAllocator allocator,
+VkBool32 vmaTouchAllocation (VmaAllocator allocator,
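The change recorded above swaps the return type of vmaTouchAllocation from the C++ bool to Vulkan's VkBool32, keeping the public API usable from plain C; the declaration's doc comment is still a TODO, so the header does not yet state what the return value means. A hedged call-site sketch, under the explicit assumption (not confirmed by this version of the header) that VK_TRUE indicates the allocation is still usable:

// Assumption: VK_TRUE is taken to mean the allocation is still valid. The
// doc comment in this version is literally "TODO finish documentation...",
// so treat this as illustrative only.
if(vmaTouchAllocation(allocator, alloc) == VK_TRUE)
{
    // ... keep using the resource backed by alloc ...
}
else
{
    // ... the allocation should not be used; recreate the resource ...
}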